
(FPCore (a b) :precision binary64 (/ (exp a) (+ (exp a) (exp b))))
double code(double a, double b) {
return exp(a) / (exp(a) + exp(b));
}
! Computes exp(a) / (exp(a) + exp(b)) in double precision (binary64).
! Direct translation of the FPCore expression; no overflow guard, so
! large a or b can produce Inf/Inf = NaN.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = exp(a) / (exp(a) + exp(b))
end function
/** Returns exp(a) / (exp(a) + exp(b)) in double precision. */
public static double code(double a, double b) {
final double expA = Math.exp(a);
final double expB = Math.exp(b);
return expA / (expA + expB);
}
def code(a, b):
    """Return exp(a) / (exp(a) + exp(b)) in double precision."""
    exp_a = math.exp(a)
    exp_b = math.exp(b)
    return exp_a / (exp_a + exp_b)
# Return exp(a) / (exp(a) + exp(b)) in binary64.
function code(a, b)
	return Float64(exp(a) / Float64(exp(a) + exp(b)))
end
% code: exp(a) / (exp(a) + exp(b)) in double precision.
% The original line had the function declaration and body collapsed onto
% one line, which MATLAB does not accept in a function file.
function tmp = code(a, b)
	tmp = exp(a) / (exp(a) + exp(b));
end
(* code[a, b]: machine-precision evaluation of Exp[a] / (Exp[a] + Exp[b]),
   with each subexpression rounded via N[..., $MachinePrecision] to mimic
   binary64 arithmetic. *)
code[a_, b_] := N[(N[Exp[a], $MachinePrecision] / N[(N[Exp[a], $MachinePrecision] + N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{a}}{e^{a} + e^{b}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 20 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b) :precision binary64 (/ (exp a) (+ (exp a) (exp b))))
double code(double a, double b) {
return exp(a) / (exp(a) + exp(b));
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = exp(a) / (exp(a) + exp(b))
end function
public static double code(double a, double b) {
return Math.exp(a) / (Math.exp(a) + Math.exp(b));
}
def code(a, b): return math.exp(a) / (math.exp(a) + math.exp(b))
function code(a, b) return Float64(exp(a) / Float64(exp(a) + exp(b))) end
function tmp = code(a, b) tmp = exp(a) / (exp(a) + exp(b)); end
code[a_, b_] := N[(N[Exp[a], $MachinePrecision] / N[(N[Exp[a], $MachinePrecision] + N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{a}}{e^{a} + e^{b}}
\end{array}
(FPCore (a b) :precision binary64 (pow (* (exp (- a)) (+ (exp b) (exp a))) -1.0))
double code(double a, double b) {
return pow((exp(-a) * (exp(b) + exp(a))), -1.0);
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (exp(-a) * (exp(b) + exp(a))) ** (-1.0d0)
end function
public static double code(double a, double b) {
return Math.pow((Math.exp(-a) * (Math.exp(b) + Math.exp(a))), -1.0);
}
def code(a, b): return math.pow((math.exp(-a) * (math.exp(b) + math.exp(a))), -1.0)
function code(a, b) return Float64(exp(Float64(-a)) * Float64(exp(b) + exp(a))) ^ -1.0 end
function tmp = code(a, b) tmp = (exp(-a) * (exp(b) + exp(a))) ^ -1.0; end
code[a_, b_] := N[Power[N[(N[Exp[(-a)], $MachinePrecision] * N[(N[Exp[b], $MachinePrecision] + N[Exp[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(e^{-a} \cdot \left(e^{b} + e^{a}\right)\right)}^{-1}
\end{array}
Initial program 98.8%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.8
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.8
Applied rewrites98.8%
Final simplification98.8%
(FPCore (a b) :precision binary64 (/ (exp a) (+ (exp a) (pow (exp (- b)) -1.0))))
double code(double a, double b) {
return exp(a) / (exp(a) + pow(exp(-b), -1.0));
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = exp(a) / (exp(a) + (exp(-b) ** (-1.0d0)))
end function
public static double code(double a, double b) {
return Math.exp(a) / (Math.exp(a) + Math.pow(Math.exp(-b), -1.0));
}
def code(a, b): return math.exp(a) / (math.exp(a) + math.pow(math.exp(-b), -1.0))
function code(a, b) return Float64(exp(a) / Float64(exp(a) + (exp(Float64(-b)) ^ -1.0))) end
function tmp = code(a, b) tmp = exp(a) / (exp(a) + (exp(-b) ^ -1.0)); end
code[a_, b_] := N[(N[Exp[a], $MachinePrecision] / N[(N[Exp[a], $MachinePrecision] + N[Power[N[Exp[(-b)], $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{a}}{e^{a} + {\left(e^{-b}\right)}^{-1}}
\end{array}
Initial program 98.8%
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh---cosh-revN/A
lower-/.f64N/A
sinh-coshN/A
lower-exp.f64N/A
lower-neg.f6498.8
Applied rewrites98.8%
Final simplification98.8%
(FPCore (a b) :precision binary64 (/ (exp a) (+ (exp a) (exp b))))
double code(double a, double b) {
return exp(a) / (exp(a) + exp(b));
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = exp(a) / (exp(a) + exp(b))
end function
public static double code(double a, double b) {
return Math.exp(a) / (Math.exp(a) + Math.exp(b));
}
def code(a, b): return math.exp(a) / (math.exp(a) + math.exp(b))
function code(a, b) return Float64(exp(a) / Float64(exp(a) + exp(b))) end
function tmp = code(a, b) tmp = exp(a) / (exp(a) + exp(b)); end
code[a_, b_] := N[(N[Exp[a], $MachinePrecision] / N[(N[Exp[a], $MachinePrecision] + N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{a}}{e^{a} + e^{b}}
\end{array}
Initial program 98.8%
(FPCore (a b) :precision binary64 (if (<= a -7.5e-7) (pow (- (exp (- a)) -1.0) -1.0) (pow (+ (exp b) 1.0) -1.0)))
double code(double a, double b) {
double tmp;
if (a <= -7.5e-7) {
tmp = pow((exp(-a) - -1.0), -1.0);
} else {
tmp = pow((exp(b) + 1.0), -1.0);
}
return tmp;
}
! Piecewise form of exp(a) / (exp(a) + exp(b)):
!   a <= -7.5e-7 : 1 / (exp(-a) + 1)   (avoids overflow of exp(a)+exp(b))
!   otherwise    : 1 / (exp(b) + 1)
! NOTE(review): threshold -7.5e-7 comes from the generating tool's sampling;
! it is not an analytically derived boundary.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: tmp
if (a <= (-7.5d-7)) then
tmp = (exp(-a) - (-1.0d0)) ** (-1.0d0)
else
tmp = (exp(b) + 1.0d0) ** (-1.0d0)
end if
code = tmp
end function
public static double code(double a, double b) {
double tmp;
if (a <= -7.5e-7) {
tmp = Math.pow((Math.exp(-a) - -1.0), -1.0);
} else {
tmp = Math.pow((Math.exp(b) + 1.0), -1.0);
}
return tmp;
}
def code(a, b):
    """Piecewise approximation of exp(a) / (exp(a) + exp(b)).

    For a <= -7.5e-7 returns 1 / (exp(-a) + 1); otherwise 1 / (exp(b) + 1).
    The original line collapsed the if/else suites onto one line, which is
    a Python SyntaxError; this restores the intended layout unchanged.
    """
    tmp = 0
    if a <= -7.5e-7:
        tmp = math.pow((math.exp(-a) - -1.0), -1.0)
    else:
        tmp = math.pow((math.exp(b) + 1.0), -1.0)
    return tmp
# Piecewise approximation of exp(a) / (exp(a) + exp(b)).
# The original had all statements collapsed onto one line with no
# separators; this restores the intended layout unchanged.
function code(a, b)
	tmp = 0.0
	if (a <= -7.5e-7)
		tmp = Float64(exp(Float64(-a)) - -1.0) ^ -1.0;
	else
		tmp = Float64(exp(b) + 1.0) ^ -1.0;
	end
	return tmp
end
% Piecewise approximation of exp(a) / (exp(a) + exp(b)).
% Restored from a collapsed one-line form (invalid MATLAB function syntax).
function tmp_2 = code(a, b)
	tmp = 0.0;
	if (a <= -7.5e-7)
		tmp = (exp(-a) - -1.0) ^ -1.0;
	else
		tmp = (exp(b) + 1.0) ^ -1.0;
	end
	tmp_2 = tmp;
end
code[a_, b_] := If[LessEqual[a, -7.5e-7], N[Power[N[(N[Exp[(-a)], $MachinePrecision] - -1.0), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[Exp[b], $MachinePrecision] + 1.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -7.5 \cdot 10^{-7}:\\
\;\;\;\;{\left(e^{-a} - -1\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(e^{b} + 1\right)}^{-1}\\
\end{array}
\end{array}
if a < -7.5000000000000002e-7Initial program 99.9%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
lift-+.f64N/A
+-commutativeN/A
lower-+.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6498.9
Applied rewrites98.9%
if -7.5000000000000002e-7 < a Initial program 98.3%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6498.3
Applied rewrites98.3%
Final simplification98.5%
(FPCore (a b) :precision binary64 (if (<= a -0.4) (/ (exp a) (+ 2.0 a)) (pow (+ (exp b) 1.0) -1.0)))
double code(double a, double b) {
double tmp;
if (a <= -0.4) {
tmp = exp(a) / (2.0 + a);
} else {
tmp = pow((exp(b) + 1.0), -1.0);
}
return tmp;
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: tmp
if (a <= (-0.4d0)) then
tmp = exp(a) / (2.0d0 + a)
else
tmp = (exp(b) + 1.0d0) ** (-1.0d0)
end if
code = tmp
end function
public static double code(double a, double b) {
double tmp;
if (a <= -0.4) {
tmp = Math.exp(a) / (2.0 + a);
} else {
tmp = Math.pow((Math.exp(b) + 1.0), -1.0);
}
return tmp;
}
def code(a, b):
    """Piecewise approximation of exp(a) / (exp(a) + exp(b)).

    For a <= -0.4 returns exp(a) / (2 + a); otherwise 1 / (exp(b) + 1).
    The original line collapsed the if/else suites onto one line, which is
    a Python SyntaxError; this restores the intended layout unchanged.
    """
    tmp = 0
    if a <= -0.4:
        tmp = math.exp(a) / (2.0 + a)
    else:
        tmp = math.pow((math.exp(b) + 1.0), -1.0)
    return tmp
# Piecewise approximation of exp(a) / (exp(a) + exp(b)).
# Restored from a collapsed one-line form; statements unchanged.
function code(a, b)
	tmp = 0.0
	if (a <= -0.4)
		tmp = Float64(exp(a) / Float64(2.0 + a));
	else
		tmp = Float64(exp(b) + 1.0) ^ -1.0;
	end
	return tmp
end
% Piecewise approximation of exp(a) / (exp(a) + exp(b)).
% Restored from a collapsed one-line form (invalid MATLAB function syntax).
function tmp_2 = code(a, b)
	tmp = 0.0;
	if (a <= -0.4)
		tmp = exp(a) / (2.0 + a);
	else
		tmp = (exp(b) + 1.0) ^ -1.0;
	end
	tmp_2 = tmp;
end
code[a_, b_] := If[LessEqual[a, -0.4], N[(N[Exp[a], $MachinePrecision] / N[(2.0 + a), $MachinePrecision]), $MachinePrecision], N[Power[N[(N[Exp[b], $MachinePrecision] + 1.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -0.4:\\
\;\;\;\;\frac{e^{a}}{2 + a}\\
\mathbf{else}:\\
\;\;\;\;{\left(e^{b} + 1\right)}^{-1}\\
\end{array}
\end{array}
if a < -0.40000000000000002Initial program 100.0%
Taylor expanded in b around 0
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in a around 0
Applied rewrites98.9%
if -0.40000000000000002 < a Initial program 98.3%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6497.7
Applied rewrites97.7%
Final simplification98.0%
(FPCore (a b) :precision binary64 (if (<= a -0.4) (/ (exp a) 2.0) (pow (+ (exp b) 1.0) -1.0)))
double code(double a, double b) {
double tmp;
if (a <= -0.4) {
tmp = exp(a) / 2.0;
} else {
tmp = pow((exp(b) + 1.0), -1.0);
}
return tmp;
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: tmp
if (a <= (-0.4d0)) then
tmp = exp(a) / 2.0d0
else
tmp = (exp(b) + 1.0d0) ** (-1.0d0)
end if
code = tmp
end function
public static double code(double a, double b) {
double tmp;
if (a <= -0.4) {
tmp = Math.exp(a) / 2.0;
} else {
tmp = Math.pow((Math.exp(b) + 1.0), -1.0);
}
return tmp;
}
def code(a, b):
    """Piecewise approximation of exp(a) / (exp(a) + exp(b)).

    For a <= -0.4 returns exp(a) / 2; otherwise 1 / (exp(b) + 1).
    The original line collapsed the if/else suites onto one line, which is
    a Python SyntaxError; this restores the intended layout unchanged.
    """
    tmp = 0
    if a <= -0.4:
        tmp = math.exp(a) / 2.0
    else:
        tmp = math.pow((math.exp(b) + 1.0), -1.0)
    return tmp
function code(a, b) tmp = 0.0 if (a <= -0.4) tmp = Float64(exp(a) / 2.0); else tmp = Float64(exp(b) + 1.0) ^ -1.0; end return tmp end
function tmp_2 = code(a, b) tmp = 0.0; if (a <= -0.4) tmp = exp(a) / 2.0; else tmp = (exp(b) + 1.0) ^ -1.0; end tmp_2 = tmp; end
code[a_, b_] := If[LessEqual[a, -0.4], N[(N[Exp[a], $MachinePrecision] / 2.0), $MachinePrecision], N[Power[N[(N[Exp[b], $MachinePrecision] + 1.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -0.4:\\
\;\;\;\;\frac{e^{a}}{2}\\
\mathbf{else}:\\
\;\;\;\;{\left(e^{b} + 1\right)}^{-1}\\
\end{array}
\end{array}
if a < -0.40000000000000002Initial program 100.0%
Taylor expanded in b around 0
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in a around 0
Applied rewrites98.9%
if -0.40000000000000002 < a Initial program 98.3%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6497.7
Applied rewrites97.7%
Final simplification98.0%
(FPCore (a b)
:precision binary64
(if (<= a -1e+103)
(pow (fma (- (* (* a a) -0.16666666666666666) 1.0) a 2.0) -1.0)
(if (<= a -0.048)
(pow (* (* (+ (/ (+ (/ 2.0 b) 1.0) b) 0.5) b) b) -1.0)
(pow (fma (fma (fma 0.16666666666666666 b 0.5) b 1.0) b 2.0) -1.0))))
/* Three-regime approximation of exp(a) / (exp(a) + exp(b)).
 * Regime boundaries (-1e+103, -0.048) come from the generating tool's
 * sampling, not from an analytic derivation — NOTE(review): confirm they
 * match the intended input distribution before reuse.
 * Middle branch divides by b twice, so b == 0 there yields Inf/NaN. */
double code(double a, double b) {
double tmp;
if (a <= -1e+103) {
/* cubic-in-a polynomial via fma, then reciprocal */
tmp = pow(fma((((a * a) * -0.16666666666666666) - 1.0), a, 2.0), -1.0);
} else if (a <= -0.048) {
/* series in 1/b, folded back through two multiplies by b */
tmp = pow(((((((2.0 / b) + 1.0) / b) + 0.5) * b) * b), -1.0);
} else {
/* cubic-in-b polynomial via nested fma, then reciprocal */
tmp = pow(fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0), -1.0);
}
return tmp;
}
# Three-regime approximation of exp(a) / (exp(a) + exp(b)).
# Restored from a collapsed one-line form; statements unchanged.
function code(a, b)
	tmp = 0.0
	if (a <= -1e+103)
		tmp = fma(Float64(Float64(Float64(a * a) * -0.16666666666666666) - 1.0), a, 2.0) ^ -1.0;
	elseif (a <= -0.048)
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(2.0 / b) + 1.0) / b) + 0.5) * b) * b) ^ -1.0;
	else
		tmp = fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0) ^ -1.0;
	end
	return tmp
end
code[a_, b_] := If[LessEqual[a, -1e+103], N[Power[N[(N[(N[(N[(a * a), $MachinePrecision] * -0.16666666666666666), $MachinePrecision] - 1.0), $MachinePrecision] * a + 2.0), $MachinePrecision], -1.0], $MachinePrecision], If[LessEqual[a, -0.048], N[Power[N[(N[(N[(N[(N[(N[(2.0 / b), $MachinePrecision] + 1.0), $MachinePrecision] / b), $MachinePrecision] + 0.5), $MachinePrecision] * b), $MachinePrecision] * b), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[(N[(0.16666666666666666 * b + 0.5), $MachinePrecision] * b + 1.0), $MachinePrecision] * b + 2.0), $MachinePrecision], -1.0], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -1 \cdot 10^{+103}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\left(a \cdot a\right) \cdot -0.16666666666666666 - 1, a, 2\right)\right)}^{-1}\\
\mathbf{elif}\;a \leq -0.048:\\
\;\;\;\;{\left(\left(\left(\frac{\frac{2}{b} + 1}{b} + 0.5\right) \cdot b\right) \cdot b\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, b, 0.5\right), b, 1\right), b, 2\right)\right)}^{-1}\\
\end{array}
\end{array}
if a < -1e103Initial program 100.0%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
lift-+.f64N/A
+-commutativeN/A
lower-+.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
Applied rewrites100.0%
Taylor expanded in a around 0
Applied rewrites100.0%
Taylor expanded in a around inf
Applied rewrites100.0%
if -1e103 < a < -0.048000000000000001Initial program 99.9%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6429.6
Applied rewrites29.6%
Taylor expanded in b around 0
Applied rewrites13.9%
Taylor expanded in b around inf
Applied rewrites52.6%
if -0.048000000000000001 < a Initial program 98.3%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6497.7
Applied rewrites97.7%
Taylor expanded in b around 0
Applied rewrites69.3%
Final simplification72.3%
(FPCore (a b)
:precision binary64
(if (<= a -1e+103)
(pow (fma (- (* (* a a) -0.16666666666666666) 1.0) a 2.0) -1.0)
(if (<= a -112000.0)
(* (pow b 5.0) -0.0020833333333333333)
(pow (fma (fma (fma 0.16666666666666666 b 0.5) b 1.0) b 2.0) -1.0))))
double code(double a, double b) {
double tmp;
if (a <= -1e+103) {
tmp = pow(fma((((a * a) * -0.16666666666666666) - 1.0), a, 2.0), -1.0);
} else if (a <= -112000.0) {
tmp = pow(b, 5.0) * -0.0020833333333333333;
} else {
tmp = pow(fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0), -1.0);
}
return tmp;
}
function code(a, b) tmp = 0.0 if (a <= -1e+103) tmp = fma(Float64(Float64(Float64(a * a) * -0.16666666666666666) - 1.0), a, 2.0) ^ -1.0; elseif (a <= -112000.0) tmp = Float64((b ^ 5.0) * -0.0020833333333333333); else tmp = fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0) ^ -1.0; end return tmp end
code[a_, b_] := If[LessEqual[a, -1e+103], N[Power[N[(N[(N[(N[(a * a), $MachinePrecision] * -0.16666666666666666), $MachinePrecision] - 1.0), $MachinePrecision] * a + 2.0), $MachinePrecision], -1.0], $MachinePrecision], If[LessEqual[a, -112000.0], N[(N[Power[b, 5.0], $MachinePrecision] * -0.0020833333333333333), $MachinePrecision], N[Power[N[(N[(N[(0.16666666666666666 * b + 0.5), $MachinePrecision] * b + 1.0), $MachinePrecision] * b + 2.0), $MachinePrecision], -1.0], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -1 \cdot 10^{+103}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\left(a \cdot a\right) \cdot -0.16666666666666666 - 1, a, 2\right)\right)}^{-1}\\
\mathbf{elif}\;a \leq -112000:\\
\;\;\;\;{b}^{5} \cdot -0.0020833333333333333\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, b, 0.5\right), b, 1\right), b, 2\right)\right)}^{-1}\\
\end{array}
\end{array}
if a < -1e103Initial program 100.0%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
lift-+.f64N/A
+-commutativeN/A
lower-+.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
Applied rewrites100.0%
Taylor expanded in a around 0
Applied rewrites100.0%
Taylor expanded in a around inf
Applied rewrites100.0%
if -1e103 < a < -112000Initial program 100.0%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6424.7
Applied rewrites24.7%
Taylor expanded in b around 0
Applied rewrites2.8%
Taylor expanded in b around inf
Applied rewrites71.1%
if -112000 < a Initial program 98.3%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6497.3
Applied rewrites97.3%
Taylor expanded in b around 0
Applied rewrites68.9%
Final simplification74.1%
(FPCore (a b) :precision binary64 (if (<= b 9.2e+102) (pow (fma (- (* (fma -0.16666666666666666 a 0.5) a) 1.0) a 2.0) -1.0) (pow (fma (fma (fma 0.16666666666666666 b 0.5) b 1.0) b 2.0) -1.0)))
double code(double a, double b) {
double tmp;
if (b <= 9.2e+102) {
tmp = pow(fma(((fma(-0.16666666666666666, a, 0.5) * a) - 1.0), a, 2.0), -1.0);
} else {
tmp = pow(fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0), -1.0);
}
return tmp;
}
function code(a, b) tmp = 0.0 if (b <= 9.2e+102) tmp = fma(Float64(Float64(fma(-0.16666666666666666, a, 0.5) * a) - 1.0), a, 2.0) ^ -1.0; else tmp = fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0) ^ -1.0; end return tmp end
code[a_, b_] := If[LessEqual[b, 9.2e+102], N[Power[N[(N[(N[(N[(-0.16666666666666666 * a + 0.5), $MachinePrecision] * a), $MachinePrecision] - 1.0), $MachinePrecision] * a + 2.0), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[(N[(0.16666666666666666 * b + 0.5), $MachinePrecision] * b + 1.0), $MachinePrecision] * b + 2.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 9.2 \cdot 10^{+102}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, a, 0.5\right) \cdot a - 1, a, 2\right)\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, b, 0.5\right), b, 1\right), b, 2\right)\right)}^{-1}\\
\end{array}
\end{array}
if b < 9.1999999999999995e102Initial program 98.5%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.5
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.5
Applied rewrites98.5%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6477.3
Applied rewrites77.3%
Taylor expanded in a around 0
Applied rewrites64.8%
if 9.1999999999999995e102 < b Initial program 100.0%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
Applied rewrites100.0%
Final simplification70.7%
(FPCore (a b) :precision binary64 (if (<= b 9.2e+102) (pow (fma (- (* (* a a) -0.16666666666666666) 1.0) a 2.0) -1.0) (pow (fma (fma (fma 0.16666666666666666 b 0.5) b 1.0) b 2.0) -1.0)))
double code(double a, double b) {
double tmp;
if (b <= 9.2e+102) {
tmp = pow(fma((((a * a) * -0.16666666666666666) - 1.0), a, 2.0), -1.0);
} else {
tmp = pow(fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0), -1.0);
}
return tmp;
}
function code(a, b) tmp = 0.0 if (b <= 9.2e+102) tmp = fma(Float64(Float64(Float64(a * a) * -0.16666666666666666) - 1.0), a, 2.0) ^ -1.0; else tmp = fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0) ^ -1.0; end return tmp end
code[a_, b_] := If[LessEqual[b, 9.2e+102], N[Power[N[(N[(N[(N[(a * a), $MachinePrecision] * -0.16666666666666666), $MachinePrecision] - 1.0), $MachinePrecision] * a + 2.0), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[(N[(0.16666666666666666 * b + 0.5), $MachinePrecision] * b + 1.0), $MachinePrecision] * b + 2.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 9.2 \cdot 10^{+102}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\left(a \cdot a\right) \cdot -0.16666666666666666 - 1, a, 2\right)\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, b, 0.5\right), b, 1\right), b, 2\right)\right)}^{-1}\\
\end{array}
\end{array}
if b < 9.1999999999999995e102Initial program 98.5%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.5
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.5
Applied rewrites98.5%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6477.3
Applied rewrites77.3%
Taylor expanded in a around 0
Applied rewrites64.8%
Taylor expanded in a around inf
Applied rewrites64.5%
if 9.1999999999999995e102 < b Initial program 100.0%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
Applied rewrites100.0%
Final simplification70.5%
(FPCore (a b) :precision binary64 (if (<= b 1e+103) (/ (exp a) 2.0) (pow (fma (fma (fma 0.16666666666666666 b 0.5) b 1.0) b 2.0) -1.0)))
double code(double a, double b) {
double tmp;
if (b <= 1e+103) {
tmp = exp(a) / 2.0;
} else {
tmp = pow(fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0), -1.0);
}
return tmp;
}
function code(a, b) tmp = 0.0 if (b <= 1e+103) tmp = Float64(exp(a) / 2.0); else tmp = fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0) ^ -1.0; end return tmp end
code[a_, b_] := If[LessEqual[b, 1e+103], N[(N[Exp[a], $MachinePrecision] / 2.0), $MachinePrecision], N[Power[N[(N[(N[(0.16666666666666666 * b + 0.5), $MachinePrecision] * b + 1.0), $MachinePrecision] * b + 2.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 10^{+103}:\\
\;\;\;\;\frac{e^{a}}{2}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, b, 0.5\right), b, 1\right), b, 2\right)\right)}^{-1}\\
\end{array}
\end{array}
if b < 1e103Initial program 98.5%
Taylor expanded in b around 0
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6475.8
Applied rewrites75.8%
Taylor expanded in a around 0
Applied rewrites74.7%
if 1e103 < b Initial program 100.0%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
Applied rewrites100.0%
Final simplification78.9%
(FPCore (a b) :precision binary64 (if (<= b 2e+92) (pow (- (fma (- (* 0.5 a) 1.0) a 1.0) -1.0) -1.0) (pow (fma (fma (fma 0.16666666666666666 b 0.5) b 1.0) b 2.0) -1.0)))
double code(double a, double b) {
double tmp;
if (b <= 2e+92) {
tmp = pow((fma(((0.5 * a) - 1.0), a, 1.0) - -1.0), -1.0);
} else {
tmp = pow(fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0), -1.0);
}
return tmp;
}
function code(a, b) tmp = 0.0 if (b <= 2e+92) tmp = Float64(fma(Float64(Float64(0.5 * a) - 1.0), a, 1.0) - -1.0) ^ -1.0; else tmp = fma(fma(fma(0.16666666666666666, b, 0.5), b, 1.0), b, 2.0) ^ -1.0; end return tmp end
code[a_, b_] := If[LessEqual[b, 2e+92], N[Power[N[(N[(N[(N[(0.5 * a), $MachinePrecision] - 1.0), $MachinePrecision] * a + 1.0), $MachinePrecision] - -1.0), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[(N[(0.16666666666666666 * b + 0.5), $MachinePrecision] * b + 1.0), $MachinePrecision] * b + 2.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 2 \cdot 10^{+92}:\\
\;\;\;\;{\left(\mathsf{fma}\left(0.5 \cdot a - 1, a, 1\right) - -1\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, b, 0.5\right), b, 1\right), b, 2\right)\right)}^{-1}\\
\end{array}
\end{array}
if b < 2.0000000000000001e92Initial program 98.5%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.5
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.5
Applied rewrites98.5%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6477.2
Applied rewrites77.2%
Taylor expanded in a around 0
Applied rewrites61.9%
if 2.0000000000000001e92 < b Initial program 100.0%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
Applied rewrites97.9%
Final simplification68.1%
(FPCore (a b) :precision binary64 (if (<= b 2.15e+140) (pow (- (fma (- (* 0.5 a) 1.0) a 1.0) -1.0) -1.0) (pow (fma (fma 0.5 b 1.0) b 2.0) -1.0)))
double code(double a, double b) {
double tmp;
if (b <= 2.15e+140) {
tmp = pow((fma(((0.5 * a) - 1.0), a, 1.0) - -1.0), -1.0);
} else {
tmp = pow(fma(fma(0.5, b, 1.0), b, 2.0), -1.0);
}
return tmp;
}
function code(a, b) tmp = 0.0 if (b <= 2.15e+140) tmp = Float64(fma(Float64(Float64(0.5 * a) - 1.0), a, 1.0) - -1.0) ^ -1.0; else tmp = fma(fma(0.5, b, 1.0), b, 2.0) ^ -1.0; end return tmp end
code[a_, b_] := If[LessEqual[b, 2.15e+140], N[Power[N[(N[(N[(N[(0.5 * a), $MachinePrecision] - 1.0), $MachinePrecision] * a + 1.0), $MachinePrecision] - -1.0), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[(0.5 * b + 1.0), $MachinePrecision] * b + 2.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 2.15 \cdot 10^{+140}:\\
\;\;\;\;{\left(\mathsf{fma}\left(0.5 \cdot a - 1, a, 1\right) - -1\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.5, b, 1\right), b, 2\right)\right)}^{-1}\\
\end{array}
\end{array}
if b < 2.15000000000000001e140Initial program 98.6%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.6
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.6
Applied rewrites98.6%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6476.5
Applied rewrites76.5%
Taylor expanded in a around 0
Applied rewrites60.8%
if 2.15000000000000001e140 < b Initial program 100.0%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
Applied rewrites95.2%
Final simplification65.9%
(FPCore (a b)
:precision binary64
(if (<= b 0.85)
(pow (- (- 1.0 a) -1.0) -1.0)
(if (<= b 2.15e+140)
(pow (* (* a a) 0.5) -1.0)
(pow (* (* 0.5 b) b) -1.0))))
/* Three-regime approximation of exp(a) / (exp(a) + exp(b)), branching on b.
 * Thresholds (0.85, 2.15e+140) come from the generating tool's sampling —
 * NOTE(review): not analytic boundaries; confirm for the target domain.
 * Middle branch is 1 / (a*a/2): a == 0 there yields Inf. */
double code(double a, double b) {
double tmp;
if (b <= 0.85) {
/* linear-in-a denominator: 1 / (2 - a) */
tmp = pow(((1.0 - a) - -1.0), -1.0);
} else if (b <= 2.15e+140) {
tmp = pow(((a * a) * 0.5), -1.0);
} else {
tmp = pow(((0.5 * b) * b), -1.0);
}
return tmp;
}
! Three-regime approximation of exp(a) / (exp(a) + exp(b)), branching on b.
! Thresholds (0.85, 2.15e+140) come from the generating tool's sampling —
! NOTE(review): confirm against the intended input distribution.
! Middle branch is 1 / (a*a/2), so a == 0 there yields Infinity.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: tmp
if (b <= 0.85d0) then
tmp = ((1.0d0 - a) - (-1.0d0)) ** (-1.0d0)
else if (b <= 2.15d+140) then
tmp = ((a * a) * 0.5d0) ** (-1.0d0)
else
tmp = ((0.5d0 * b) * b) ** (-1.0d0)
end if
code = tmp
end function
public static double code(double a, double b) {
double tmp;
if (b <= 0.85) {
tmp = Math.pow(((1.0 - a) - -1.0), -1.0);
} else if (b <= 2.15e+140) {
tmp = Math.pow(((a * a) * 0.5), -1.0);
} else {
tmp = Math.pow(((0.5 * b) * b), -1.0);
}
return tmp;
}
def code(a, b): tmp = 0 if b <= 0.85: tmp = math.pow(((1.0 - a) - -1.0), -1.0) elif b <= 2.15e+140: tmp = math.pow(((a * a) * 0.5), -1.0) else: tmp = math.pow(((0.5 * b) * b), -1.0) return tmp
# Three-regime approximation of exp(a) / (exp(a) + exp(b)), branching on b.
# Restored from a collapsed one-line form; statements unchanged.
function code(a, b)
	tmp = 0.0
	if (b <= 0.85)
		tmp = Float64(Float64(1.0 - a) - -1.0) ^ -1.0;
	elseif (b <= 2.15e+140)
		tmp = Float64(Float64(a * a) * 0.5) ^ -1.0;
	else
		tmp = Float64(Float64(0.5 * b) * b) ^ -1.0;
	end
	return tmp
end
% Three-regime approximation of exp(a) / (exp(a) + exp(b)), branching on b.
% Restored from a collapsed one-line form (invalid MATLAB function syntax).
function tmp_2 = code(a, b)
	tmp = 0.0;
	if (b <= 0.85)
		tmp = ((1.0 - a) - -1.0) ^ -1.0;
	elseif (b <= 2.15e+140)
		tmp = ((a * a) * 0.5) ^ -1.0;
	else
		tmp = ((0.5 * b) * b) ^ -1.0;
	end
	tmp_2 = tmp;
end
code[a_, b_] := If[LessEqual[b, 0.85], N[Power[N[(N[(1.0 - a), $MachinePrecision] - -1.0), $MachinePrecision], -1.0], $MachinePrecision], If[LessEqual[b, 2.15e+140], N[Power[N[(N[(a * a), $MachinePrecision] * 0.5), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[(0.5 * b), $MachinePrecision] * b), $MachinePrecision], -1.0], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 0.85:\\
\;\;\;\;{\left(\left(1 - a\right) - -1\right)}^{-1}\\
\mathbf{elif}\;b \leq 2.15 \cdot 10^{+140}:\\
\;\;\;\;{\left(\left(a \cdot a\right) \cdot 0.5\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(\left(0.5 \cdot b\right) \cdot b\right)}^{-1}\\
\end{array}
\end{array}
if b < 0.849999999999999978Initial program 98.4%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.4
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.4
Applied rewrites98.4%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6479.4
Applied rewrites79.4%
Taylor expanded in a around 0
Applied rewrites54.0%
if 0.849999999999999978 < b < 2.15000000000000001e140Initial program 100.0%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
lift-+.f64N/A
+-commutativeN/A
lower-+.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6453.5
Applied rewrites53.5%
Taylor expanded in a around 0
Applied rewrites34.4%
Taylor expanded in a around inf
Applied rewrites33.9%
if 2.15000000000000001e140 < b Initial program 100.0%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
Applied rewrites95.2%
Taylor expanded in b around inf
Applied rewrites95.2%
Final simplification58.2%
(FPCore (a b) :precision binary64 (if (<= b 2.15e+140) (pow (fma (- (* 0.5 a) 1.0) a 2.0) -1.0) (pow (fma (fma 0.5 b 1.0) b 2.0) -1.0)))
double code(double a, double b) {
double tmp;
if (b <= 2.15e+140) {
tmp = pow(fma(((0.5 * a) - 1.0), a, 2.0), -1.0);
} else {
tmp = pow(fma(fma(0.5, b, 1.0), b, 2.0), -1.0);
}
return tmp;
}
# Herbie alternative: reciprocal of a fused quadratic surrogate,
# in `a` when b <= 2.15e140 and in `b` otherwise.
# Reconstructed: the rendered source had all statements collapsed onto
# one line, which is not valid Julia.
function code(a, b)
    tmp = 0.0
    if (b <= 2.15e+140)
        tmp = fma(Float64(Float64(0.5 * a) - 1.0), a, 2.0) ^ -1.0
    else
        tmp = fma(fma(0.5, b, 1.0), b, 2.0) ^ -1.0
    end
    return tmp
end
code[a_, b_] := If[LessEqual[b, 2.15e+140], N[Power[N[(N[(N[(0.5 * a), $MachinePrecision] - 1.0), $MachinePrecision] * a + 2.0), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[(0.5 * b + 1.0), $MachinePrecision] * b + 2.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 2.15 \cdot 10^{+140}:\\
\;\;\;\;{\left(\mathsf{fma}\left(0.5 \cdot a - 1, a, 2\right)\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.5, b, 1\right), b, 2\right)\right)}^{-1}\\
\end{array}
\end{array}
if b < 2.15000000000000001e140: Initial program 98.6%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.6
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.6
Applied rewrites98.6%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6476.5
Applied rewrites76.5%
Taylor expanded in a around 0
Applied rewrites60.8%
if 2.15000000000000001e140 < b: Initial program 100.0%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
Applied rewrites95.2%
Final simplification: 65.9%
(FPCore (a b) :precision binary64 (if (<= a -3.4e+141) (pow (* (* a a) 0.5) -1.0) (pow (fma (fma 0.5 b 1.0) b 2.0) -1.0)))
double code(double a, double b) {
double tmp;
if (a <= -3.4e+141) {
tmp = pow(((a * a) * 0.5), -1.0);
} else {
tmp = pow(fma(fma(0.5, b, 1.0), b, 2.0), -1.0);
}
return tmp;
}
# Herbie alternative: 1/(a^2/2) for very negative a, else the reciprocal
# of the fused quadratic in b.
# Reconstructed: the rendered source had all statements collapsed onto
# one line, which is not valid Julia.
function code(a, b)
    tmp = 0.0
    if (a <= -3.4e+141)
        tmp = Float64(Float64(a * a) * 0.5) ^ -1.0
    else
        tmp = fma(fma(0.5, b, 1.0), b, 2.0) ^ -1.0
    end
    return tmp
end
code[a_, b_] := If[LessEqual[a, -3.4e+141], N[Power[N[(N[(a * a), $MachinePrecision] * 0.5), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[(0.5 * b + 1.0), $MachinePrecision] * b + 2.0), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -3.4 \cdot 10^{+141}:\\
\;\;\;\;{\left(\left(a \cdot a\right) \cdot 0.5\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.5, b, 1\right), b, 2\right)\right)}^{-1}\\
\end{array}
\end{array}
if a < -3.3999999999999998e141: Initial program 100.0%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
lift-+.f64N/A
+-commutativeN/A
lower-+.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
Applied rewrites100.0%
Taylor expanded in a around 0
Applied rewrites92.5%
Taylor expanded in a around inf
Applied rewrites92.5%
if -3.3999999999999998e141 < a: Initial program 98.6%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6487.1
Applied rewrites87.1%
Taylor expanded in b around 0
Applied rewrites58.7%
Final simplification: 63.5%
(FPCore (a b) :precision binary64 (if (<= b 8.5e+89) (pow (- (- 1.0 a) -1.0) -1.0) (pow (* (* 0.5 b) b) -1.0)))
/* Herbie alternative: 1/((1 - a) + 1) for moderate b, 1/(b*b/2) for huge b. */
double code(double a, double b) {
    double q = (b <= 8.5e+89) ? (1.0 - a) - -1.0 : (0.5 * b) * b;
    return pow(q, -1.0);
}
! Herbie alternative: reciprocal of (1 - a) + 1 for moderate b,
! of b**2 / 2 for very large b.
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: base

    if (b <= 8.5d+89) then
        base = (1.0d0 - a) - (-1.0d0)
    else
        base = (0.5d0 * b) * b
    end if
    code = base ** (-1.0d0)
end function
/** Herbie alternative: 1/((1 - a) + 1) for moderate b, 1/(b*b/2) for huge b. */
public static double code(double a, double b) {
    final double q = (b <= 8.5e+89) ? (1.0 - a) - -1.0 : (0.5 * b) * b;
    return Math.pow(q, -1.0);
}
def code(a, b):
    """Herbie alternative: 1/((1 - a) + 1) for b <= 8.5e89, else 1/(b*b/2).

    Reconstructed: the rendered source collapsed the body onto one line,
    which is not valid Python syntax.
    """
    tmp = 0
    if b <= 8.5e+89:
        tmp = math.pow(((1.0 - a) - -1.0), -1.0)
    else:
        tmp = math.pow(((0.5 * b) * b), -1.0)
    return tmp
# Herbie alternative: 1/((1 - a) + 1) for b <= 8.5e89, else 1/(b^2/2).
# Reconstructed: the rendered source had all statements collapsed onto
# one line, which is not valid Julia.
function code(a, b)
    tmp = 0.0
    if (b <= 8.5e+89)
        tmp = Float64(Float64(1.0 - a) - -1.0) ^ -1.0
    else
        tmp = Float64(Float64(0.5 * b) * b) ^ -1.0
    end
    return tmp
end
% Herbie alternative: 1/((1 - a) + 1) for b <= 8.5e89, else 1/(b^2/2).
% Reconstructed: the rendered source had all statements collapsed onto
% one line, which MATLAB cannot parse without separators.
function tmp_2 = code(a, b)
    tmp = 0.0;
    if (b <= 8.5e+89)
        tmp = ((1.0 - a) - -1.0) ^ -1.0;
    else
        tmp = ((0.5 * b) * b) ^ -1.0;
    end
    tmp_2 = tmp;
end
code[a_, b_] := If[LessEqual[b, 8.5e+89], N[Power[N[(N[(1.0 - a), $MachinePrecision] - -1.0), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(N[(0.5 * b), $MachinePrecision] * b), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 8.5 \cdot 10^{+89}:\\
\;\;\;\;{\left(\left(1 - a\right) - -1\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;{\left(\left(0.5 \cdot b\right) \cdot b\right)}^{-1}\\
\end{array}
\end{array}
if b < 8.50000000000000045e89: Initial program 98.5%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.5
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.5
Applied rewrites98.5%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6477.2
Applied rewrites77.2%
Taylor expanded in a around 0
Applied rewrites49.6%
if 8.50000000000000045e89 < b: Initial program 100.0%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in b around 0
Applied rewrites83.2%
Taylor expanded in b around inf
Applied rewrites83.2%
Final simplification: 55.4%
(FPCore (a b) :precision binary64 (pow (- (- 1.0 a) -1.0) -1.0))
/* Herbie alternative: 1 / ((1 - a) + 1); b is unused. */
double code(double a, double b) {
    double denom = (1.0 - a) - -1.0;
    return pow(denom, -1.0);
}
! Herbie alternative: reciprocal of (1 - a) + 1; b is unused.
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: denom

    denom = (1.0d0 - a) - (-1.0d0)
    code = denom ** (-1.0d0)
end function
/** Herbie alternative: 1 / ((1 - a) + 1); b is unused. */
public static double code(double a, double b) {
    final double denom = (1.0 - a) - -1.0;
    return Math.pow(denom, -1.0);
}
def code(a, b):
    """Herbie alternative: 1 / ((1 - a) + 1); b is unused."""
    denom = (1.0 - a) - -1.0
    return math.pow(denom, -1.0)
# Herbie alternative: 1 / ((1 - a) + 1); b is unused.
function code(a, b) return Float64(Float64(1.0 - a) - -1.0) ^ -1.0 end
% Herbie alternative: 1 / ((1 - a) + 1); b is unused.
function tmp = code(a, b) tmp = ((1.0 - a) - -1.0) ^ -1.0; end
code[a_, b_] := N[Power[N[(N[(1.0 - a), $MachinePrecision] - -1.0), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\left(1 - a\right) - -1\right)}^{-1}
\end{array}
Initial program 98.8%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.8
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.8
Applied rewrites98.8%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6469.0
Applied rewrites69.0%
Taylor expanded in a around 0
Applied rewrites41.7%
Final simplification: 41.7%
(FPCore (a b) :precision binary64 (pow (- 2.0 a) -1.0))
/* Herbie alternative: 1 / (2 - a); b is unused. */
double code(double a, double b) {
    double denom = 2.0 - a;
    return pow(denom, -1.0);
}
! Herbie alternative: reciprocal of 2 - a; b is unused.
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: denom

    denom = 2.0d0 - a
    code = denom ** (-1.0d0)
end function
/** Herbie alternative: 1 / (2 - a); b is unused. */
public static double code(double a, double b) {
    final double denom = 2.0 - a;
    return Math.pow(denom, -1.0);
}
def code(a, b):
    """Herbie alternative: 1 / (2 - a); b is unused."""
    denom = 2.0 - a
    return math.pow(denom, -1.0)
# Herbie alternative: 1 / (2 - a); b is unused.
function code(a, b) return Float64(2.0 - a) ^ -1.0 end
% Herbie alternative: 1 / (2 - a); b is unused.
function tmp = code(a, b) tmp = (2.0 - a) ^ -1.0; end
code[a_, b_] := N[Power[N[(2.0 - a), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(2 - a\right)}^{-1}
\end{array}
Initial program 98.8%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
sinh-coshN/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6498.8
lift-+.f64N/A
+-commutativeN/A
lower-+.f6498.8
Applied rewrites98.8%
Taylor expanded in b around 0
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
exp-negN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f64N/A
lower-exp.f64N/A
lower-neg.f6469.0
Applied rewrites69.0%
Taylor expanded in a around 0
Applied rewrites41.7%
Final simplification: 41.7%
(FPCore (a b) :precision binary64 0.5)
/* Herbie alternative: constant approximation 0.5 of exp(a)/(exp(a)+exp(b));
 * exact when a == b, since exp(a)/(2*exp(a)) = 1/2. Both arguments unused. */
double code(double a, double b) {
return 0.5;
}
! Herbie alternative: constant approximation 0.5 (exact when a == b).
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = 0.5d0
end function
/** Herbie alternative: constant approximation 0.5 (exact when a == b). */
public static double code(double a, double b) {
return 0.5;
}
# Herbie alternative: constant approximation 0.5 (exact when a == b).
def code(a, b): return 0.5
# Herbie alternative: constant approximation 0.5 (exact when a == b).
function code(a, b) return 0.5 end
% Herbie alternative: constant approximation 0.5 (exact when a == b).
function tmp = code(a, b) tmp = 0.5; end
code[a_, b_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 98.8%
Taylor expanded in a around 0
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-exp.f6479.9
Applied rewrites79.9%
Taylor expanded in b around 0
Applied rewrites40.9%
(FPCore (a b) :precision binary64 (/ 1.0 (+ 1.0 (exp (- b a)))))
double code(double a, double b) {
return 1.0 / (1.0 + exp((b - a)));
}
! Logistic rewrite of exp(a)/(exp(a)+exp(b)): 1/(1 + exp(b - a)).
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: z

    z = exp(b - a)
    code = 1.0d0 / (1.0d0 + z)
end function
/** Logistic rewrite of exp(a)/(exp(a)+exp(b)): 1/(1 + exp(b - a)). */
public static double code(double a, double b) {
    final double z = Math.exp(b - a);
    return 1.0 / (1.0 + z);
}
def code(a, b):
    """Logistic rewrite of exp(a)/(exp(a)+exp(b)): 1/(1 + exp(b - a))."""
    z = math.exp(b - a)
    return 1.0 / (1.0 + z)
# Logistic rewrite of exp(a)/(exp(a)+exp(b)): 1/(1 + exp(b - a)).
function code(a, b)
    z = exp(Float64(b - a))
    return Float64(1.0 / Float64(1.0 + z))
end
% Logistic rewrite of exp(a)/(exp(a)+exp(b)): 1/(1 + exp(b - a)).
function tmp = code(a, b)
    tmp = 1.0 / (1.0 + exp(b - a));
end
code[a_, b_] := N[(1.0 / N[(1.0 + N[Exp[N[(b - a), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{1 + e^{b - a}}
\end{array}
herbie shell --seed 2024342
; Input specification for `herbie shell`: exp(a)/(exp(a)+exp(b)),
; with the logistic rewrite 1/(1 + exp(b - a)) registered as an :alt.
(FPCore (a b)
:name "Quotient of sum of exps"
:precision binary64
:alt
(! :herbie-platform default (/ 1 (+ 1 (exp (- b a)))))
(/ (exp a) (+ (exp a) (exp b))))