
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
! Computes e**x / (e**x - 1) in double precision.
! NOTE(review): exp(x) - 1.0d0 cancels catastrophically for x near 0;
! an expm1-based form (-1 / expm1(-x)) is the accurate rewrite shown
! elsewhere in this report.
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) / (exp(x) - 1.0d0)
end function
/** e^x / (e^x - 1); evaluates Math.exp(x) once and reuses it. */
public static double code(double x) {
    final double ex = Math.exp(x);
    return ex / (ex - 1.0);
}
def code(x):
    """Evaluate exp(x) / (exp(x) - 1) in double precision."""
    ex = math.exp(x)
    return ex / (ex - 1.0)
# e^x / (e^x - 1); the subtraction exp(x) - 1.0 cancels near x = 0.
function code(x) return Float64(exp(x) / Float64(exp(x) - 1.0)) end
% e^x / (e^x - 1); exp(x) - 1.0 loses precision for x near 0.
function tmp = code(x) tmp = exp(x) / (exp(x) - 1.0); end
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) / (exp(x) - 1.0d0)
end function
// e^x / (e^x - 1); Math.exp(x) - 1.0 cancels badly near x = 0
// (cf. the Math.expm1-based alternative in this report).
public static double code(double x) {
return Math.exp(x) / (Math.exp(x) - 1.0);
}
def code(x):
    # Same quotient as the FPCore original: e**x / (e**x - 1).
    numer = math.exp(x)
    return numer / (numer - 1.0)
function code(x) return Float64(exp(x) / Float64(exp(x) - 1.0)) end
function tmp = code(x) tmp = exp(x) / (exp(x) - 1.0); end
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
(FPCore (x) :precision binary64 (/ -1.0 (expm1 (- x))))
double code(double x) {
return -1.0 / expm1(-x);
}
public static double code(double x) {
return -1.0 / Math.expm1(-x);
}
def code(x):
    # Cancellation-free form of exp(x) / (exp(x) - 1).
    denom = math.expm1(-x)
    return -1.0 / denom
function code(x) return Float64(-1.0 / expm1(Float64(-x))) end
code[x_] := N[(-1.0 / N[(Exp[(-x)] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\mathsf{expm1}\left(-x\right)}
\end{array}
Initial program 36.7%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
(FPCore (x)
:precision binary64
(if (<= (exp x) 5e-5)
(/ -1.0 (* x (* (* x x) (fma x 0.041666666666666664 -0.16666666666666666))))
(fma
x
(fma x (* x -0.001388888888888889) 0.08333333333333333)
(+ 0.5 (/ 1.0 x)))))
/* Piecewise polynomial approximation of e^x / (e^x - 1).
 * NOTE(review): the guard tests exp(x), so the first branch fires for
 * sufficiently negative x (exp(x) <= 5e-5). */
double code(double x) {
double tmp;
if (exp(x) <= 5e-5) {
/* Reciprocal of a quartic in x; constants are 1/24 and -1/6 —
 * presumably from the Taylor derivation traced below; verify. */
tmp = -1.0 / (x * ((x * x) * fma(x, 0.041666666666666664, -0.16666666666666666)));
} else {
/* Series around 0: 1/x + 1/2 + x/12 - x^3/720
 * (0.0833... = 1/12, -0.001388... = -1/720). */
tmp = fma(x, fma(x, (x * -0.001388888888888889), 0.08333333333333333), (0.5 + (1.0 / x)));
}
return tmp;
}
function code(x) tmp = 0.0 if (exp(x) <= 5e-5) tmp = Float64(-1.0 / Float64(x * Float64(Float64(x * x) * fma(x, 0.041666666666666664, -0.16666666666666666)))); else tmp = fma(x, fma(x, Float64(x * -0.001388888888888889), 0.08333333333333333), Float64(0.5 + Float64(1.0 / x))); end return tmp end
code[x_] := If[LessEqual[N[Exp[x], $MachinePrecision], 5e-5], N[(-1.0 / N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * 0.041666666666666664 + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(x * N[(x * -0.001388888888888889), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] + N[(0.5 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{x} \leq 5 \cdot 10^{-5}:\\
\;\;\;\;\frac{-1}{x \cdot \left(\left(x \cdot x\right) \cdot \mathsf{fma}\left(x, 0.041666666666666664, -0.16666666666666666\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, \mathsf{fma}\left(x, x \cdot -0.001388888888888889, 0.08333333333333333\right), 0.5 + \frac{1}{x}\right)\\
\end{array}
\end{array}
if (exp.f64 x) < 5.00000000000000024e-5Initial program 100.0%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6471.3
Simplified71.3%
Taylor expanded in x around inf
sub-negN/A
distribute-rgt-inN/A
cube-multN/A
unpow2N/A
associate-*r*N/A
cube-multN/A
unpow2N/A
associate-*r*N/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-evalN/A
distribute-rgt-inN/A
metadata-evalN/A
sub-negN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
Simplified71.3%
if 5.00000000000000024e-5 < (exp.f64 x) Initial program 8.4%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
distribute-lft-inN/A
associate-*l/N/A
*-lft-identityN/A
+-commutativeN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
lower-fma.f64N/A
Simplified98.8%
Final simplification90.4%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma x (fma x 0.041666666666666664 -0.16666666666666666) 0.5))
(t_1 (* x t_0)))
(if (<= x -2e+103)
(/ 6.0 (* x (* x x)))
(/ -1.0 (/ (* x (fma t_1 t_1 -1.0)) (fma x t_0 1.0))))))
/* Piecewise approximation of e^x / (e^x - 1).
 * t_0 = x*(x/24 - 1/6) + 1/2 (cubic series factor); t_1 = x * t_0. */
double code(double x) {
double t_0 = fma(x, fma(x, 0.041666666666666664, -0.16666666666666666), 0.5);
double t_1 = x * t_0;
double tmp;
if (x <= -2e+103) {
/* Asymptotic term 6/x^3 — from the expansion around inf traced below. */
tmp = 6.0 / (x * (x * x));
} else {
/* -1 / ((x * (t_1^2 - 1)) / (x*t_0 + 1)); exact grouping matters
 * for rounding — NOTE(review): do not re-associate. */
tmp = -1.0 / ((x * fma(t_1, t_1, -1.0)) / fma(x, t_0, 1.0));
}
return tmp;
}
function code(x) t_0 = fma(x, fma(x, 0.041666666666666664, -0.16666666666666666), 0.5) t_1 = Float64(x * t_0) tmp = 0.0 if (x <= -2e+103) tmp = Float64(6.0 / Float64(x * Float64(x * x))); else tmp = Float64(-1.0 / Float64(Float64(x * fma(t_1, t_1, -1.0)) / fma(x, t_0, 1.0))); end return tmp end
code[x_] := Block[{t$95$0 = N[(x * N[(x * 0.041666666666666664 + -0.16666666666666666), $MachinePrecision] + 0.5), $MachinePrecision]}, Block[{t$95$1 = N[(x * t$95$0), $MachinePrecision]}, If[LessEqual[x, -2e+103], N[(6.0 / N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.0 / N[(N[(x * N[(t$95$1 * t$95$1 + -1.0), $MachinePrecision]), $MachinePrecision] / N[(x * t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.041666666666666664, -0.16666666666666666\right), 0.5\right)\\
t_1 := x \cdot t\_0\\
\mathbf{if}\;x \leq -2 \cdot 10^{+103}:\\
\;\;\;\;\frac{6}{x \cdot \left(x \cdot x\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{-1}{\frac{x \cdot \mathsf{fma}\left(t\_1, t\_1, -1\right)}{\mathsf{fma}\left(x, t\_0, 1\right)}}\\
\end{array}
\end{array}
if x < -2e103Initial program 100.0%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64100.0
Simplified100.0%
Taylor expanded in x around inf
sub-negN/A
distribute-rgt-inN/A
cube-multN/A
unpow2N/A
associate-*r*N/A
cube-multN/A
unpow2N/A
associate-*r*N/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-evalN/A
distribute-rgt-inN/A
metadata-evalN/A
sub-negN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
Simplified100.0%
Taylor expanded in x around 0
lower-/.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Simplified100.0%
if -2e103 < x Initial program 23.2%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6488.0
Simplified88.0%
lift-fma.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
*-commutativeN/A
lift-fma.f64N/A
flip-+N/A
associate-*l/N/A
lower-/.f64N/A
Applied egg-rr92.8%
Final simplification94.1%
(FPCore (x)
:precision binary64
(/
-1.0
(*
x
(fma
x
(fma
x
(*
(- 0.027777777777777776 (* (* x x) 0.001736111111111111))
(/ 1.0 -0.16666666666666666))
0.5)
-1.0))))
/* -1 / (x * p(x)) with p a nested cubic; (1.0 / -0.16666666666666666)
 * is a runtime constant (-6.0). Constants: 0.02777... = 1/36,
 * 0.001736... = 1/576 — presumably from the egg rewrite trace below;
 * verify before touching. */
double code(double x) {
return -1.0 / (x * fma(x, fma(x, ((0.027777777777777776 - ((x * x) * 0.001736111111111111)) * (1.0 / -0.16666666666666666)), 0.5), -1.0));
}
function code(x) return Float64(-1.0 / Float64(x * fma(x, fma(x, Float64(Float64(0.027777777777777776 - Float64(Float64(x * x) * 0.001736111111111111)) * Float64(1.0 / -0.16666666666666666)), 0.5), -1.0))) end
code[x_] := N[(-1.0 / N[(x * N[(x * N[(x * N[(N[(0.027777777777777776 - N[(N[(x * x), $MachinePrecision] * 0.001736111111111111), $MachinePrecision]), $MachinePrecision] * N[(1.0 / -0.16666666666666666), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \left(0.027777777777777776 - \left(x \cdot x\right) \cdot 0.001736111111111111\right) \cdot \frac{1}{-0.16666666666666666}, 0.5\right), -1\right)}
\end{array}
Initial program 36.7%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6490.1
Simplified90.1%
+-commutativeN/A
flip-+N/A
div-invN/A
lower-*.f64N/A
lower--.f64N/A
metadata-evalN/A
swap-sqrN/A
lift-*.f64N/A
lower-*.f64N/A
metadata-evalN/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f6490.1
Applied egg-rr90.1%
Taylor expanded in x around 0
Simplified91.0%
(FPCore (x)
:precision binary64
(/
1.0
(/
x
(/
-1.0
(fma
x
(fma x (fma x 0.041666666666666664 -0.16666666666666666) 0.5)
-1.0)))))
/* Nested-reciprocal form 1 / (x / (-1 / p(x))), algebraically
 * -1 / (x * p(x)) where p(x) = x*(x*(x/24 - 1/6) + 1/2) - 1.
 * NOTE(review): the extra divisions are intentional (rounding-sensitive). */
double code(double x) {
return 1.0 / (x / (-1.0 / fma(x, fma(x, fma(x, 0.041666666666666664, -0.16666666666666666), 0.5), -1.0)));
}
function code(x) return Float64(1.0 / Float64(x / Float64(-1.0 / fma(x, fma(x, fma(x, 0.041666666666666664, -0.16666666666666666), 0.5), -1.0)))) end
code[x_] := N[(1.0 / N[(x / N[(-1.0 / N[(x * N[(x * N[(x * 0.041666666666666664 + -0.16666666666666666), $MachinePrecision] + 0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{x}{\frac{-1}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.041666666666666664, -0.16666666666666666\right), 0.5\right), -1\right)}}}
\end{array}
Initial program 36.7%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6490.1
Simplified90.1%
lift-fma.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
*-commutativeN/A
associate-/r*N/A
clear-numN/A
lower-/.f64N/A
lower-/.f64N/A
lower-/.f6490.1
Applied egg-rr90.1%
(FPCore (x)
:precision binary64
(if (<= x -3.4)
(/
-1.0
(* x (* x (fma x (fma x 0.041666666666666664 -0.16666666666666666) 0.5))))
(fma
x
(fma x (* x -0.001388888888888889) 0.08333333333333333)
(+ 0.5 (/ 1.0 x)))))
/* Piecewise approximation of e^x / (e^x - 1), split at x = -3.4. */
double code(double x) {
double tmp;
if (x <= -3.4) {
/* Reciprocal cubic form: -1 / (x^2 * (x*(x/24 - 1/6) + 1/2)). */
tmp = -1.0 / (x * (x * fma(x, fma(x, 0.041666666666666664, -0.16666666666666666), 0.5)));
} else {
/* Series around 0: 1/x + 1/2 + x/12 - x^3/720. */
tmp = fma(x, fma(x, (x * -0.001388888888888889), 0.08333333333333333), (0.5 + (1.0 / x)));
}
return tmp;
}
}
function code(x) tmp = 0.0 if (x <= -3.4) tmp = Float64(-1.0 / Float64(x * Float64(x * fma(x, fma(x, 0.041666666666666664, -0.16666666666666666), 0.5)))); else tmp = fma(x, fma(x, Float64(x * -0.001388888888888889), 0.08333333333333333), Float64(0.5 + Float64(1.0 / x))); end return tmp end
code[x_] := If[LessEqual[x, -3.4], N[(-1.0 / N[(x * N[(x * N[(x * N[(x * 0.041666666666666664 + -0.16666666666666666), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(x * N[(x * -0.001388888888888889), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] + N[(0.5 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.4:\\
\;\;\;\;\frac{-1}{x \cdot \left(x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.041666666666666664, -0.16666666666666666\right), 0.5\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, \mathsf{fma}\left(x, x \cdot -0.001388888888888889, 0.08333333333333333\right), 0.5 + \frac{1}{x}\right)\\
\end{array}
\end{array}
if x < -3.39999999999999991Initial program 100.0%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6471.3
Simplified71.3%
Taylor expanded in x around inf
Simplified71.3%
if -3.39999999999999991 < x Initial program 8.4%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
distribute-lft-inN/A
associate-*l/N/A
*-lft-identityN/A
+-commutativeN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
lower-fma.f64N/A
Simplified98.8%
Final simplification90.4%
(FPCore (x)
:precision binary64
(if (<= x -3.75)
(/ -24.0 (* x (* x (* x x))))
(fma
x
(fma x (* x -0.001388888888888889) 0.08333333333333333)
(+ 0.5 (/ 1.0 x)))))
/* Piecewise approximation of e^x / (e^x - 1), split at x = -3.75. */
double code(double x) {
double tmp;
if (x <= -3.75) {
/* Asymptotic term -24/x^4 — from the expansion around inf traced below. */
tmp = -24.0 / (x * (x * (x * x)));
} else {
/* Series around 0: 1/x + 1/2 + x/12 - x^3/720. */
tmp = fma(x, fma(x, (x * -0.001388888888888889), 0.08333333333333333), (0.5 + (1.0 / x)));
}
return tmp;
}
}
function code(x) tmp = 0.0 if (x <= -3.75) tmp = Float64(-24.0 / Float64(x * Float64(x * Float64(x * x)))); else tmp = fma(x, fma(x, Float64(x * -0.001388888888888889), 0.08333333333333333), Float64(0.5 + Float64(1.0 / x))); end return tmp end
code[x_] := If[LessEqual[x, -3.75], N[(-24.0 / N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(x * N[(x * -0.001388888888888889), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] + N[(0.5 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.75:\\
\;\;\;\;\frac{-24}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, \mathsf{fma}\left(x, x \cdot -0.001388888888888889, 0.08333333333333333\right), 0.5 + \frac{1}{x}\right)\\
\end{array}
\end{array}
if x < -3.75Initial program 100.0%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6471.3
Simplified71.3%
Taylor expanded in x around inf
lower-/.f64N/A
metadata-evalN/A
pow-plusN/A
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6471.3
Simplified71.3%
if -3.75 < x Initial program 8.4%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
distribute-lft-inN/A
associate-*l/N/A
*-lft-identityN/A
+-commutativeN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
lower-fma.f64N/A
Simplified98.8%
Final simplification90.4%
(FPCore (x)
:precision binary64
(/
-1.0
(*
x
(fma
x
(fma x (fma x 0.041666666666666664 -0.16666666666666666) 0.5)
-1.0))))
double code(double x) {
return -1.0 / (x * fma(x, fma(x, fma(x, 0.041666666666666664, -0.16666666666666666), 0.5), -1.0));
}
function code(x) return Float64(-1.0 / Float64(x * fma(x, fma(x, fma(x, 0.041666666666666664, -0.16666666666666666), 0.5), -1.0))) end
code[x_] := N[(-1.0 / N[(x * N[(x * N[(x * N[(x * 0.041666666666666664 + -0.16666666666666666), $MachinePrecision] + 0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.041666666666666664, -0.16666666666666666\right), 0.5\right), -1\right)}
\end{array}
Initial program 36.7%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6490.1
Simplified90.1%
(FPCore (x) :precision binary64 (if (<= x -4.2) (/ -24.0 (* x (* x (* x x)))) (fma x 0.08333333333333333 (+ 0.5 (/ 1.0 x)))))
/* Piecewise approximation of e^x / (e^x - 1), split at x = -4.2. */
double code(double x) {
double tmp;
if (x <= -4.2) {
/* Asymptotic term -24/x^4. */
tmp = -24.0 / (x * (x * (x * x)));
} else {
/* Truncated series: 1/x + 1/2 + x/12. */
tmp = fma(x, 0.08333333333333333, (0.5 + (1.0 / x)));
}
return tmp;
}
}
function code(x) tmp = 0.0 if (x <= -4.2) tmp = Float64(-24.0 / Float64(x * Float64(x * Float64(x * x)))); else tmp = fma(x, 0.08333333333333333, Float64(0.5 + Float64(1.0 / x))); end return tmp end
code[x_] := If[LessEqual[x, -4.2], N[(-24.0 / N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * 0.08333333333333333 + N[(0.5 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -4.2:\\
\;\;\;\;\frac{-24}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, 0.08333333333333333, 0.5 + \frac{1}{x}\right)\\
\end{array}
\end{array}
if x < -4.20000000000000018Initial program 100.0%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6471.3
Simplified71.3%
Taylor expanded in x around inf
lower-/.f64N/A
metadata-evalN/A
pow-plusN/A
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6471.3
Simplified71.3%
if -4.20000000000000018 < x Initial program 8.4%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
+-commutativeN/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
associate-+l+N/A
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
*-commutativeN/A
associate-*l/N/A
*-lft-identityN/A
lower-fma.f64N/A
*-lft-identityN/A
associate-*l/N/A
distribute-rgt-inN/A
Simplified98.7%
Final simplification90.2%
(FPCore (x) :precision binary64 (if (<= x -4.2) (/ 6.0 (* x (* x x))) (fma x 0.08333333333333333 (+ 0.5 (/ 1.0 x)))))
/* Piecewise approximation of e^x / (e^x - 1), split at x = -4.2. */
double code(double x) {
double tmp;
if (x <= -4.2) {
/* Asymptotic term 6/x^3. */
tmp = 6.0 / (x * (x * x));
} else {
/* Truncated series: 1/x + 1/2 + x/12. */
tmp = fma(x, 0.08333333333333333, (0.5 + (1.0 / x)));
}
return tmp;
}
}
function code(x) tmp = 0.0 if (x <= -4.2) tmp = Float64(6.0 / Float64(x * Float64(x * x))); else tmp = fma(x, 0.08333333333333333, Float64(0.5 + Float64(1.0 / x))); end return tmp end
code[x_] := If[LessEqual[x, -4.2], N[(6.0 / N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * 0.08333333333333333 + N[(0.5 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -4.2:\\
\;\;\;\;\frac{6}{x \cdot \left(x \cdot x\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, 0.08333333333333333, 0.5 + \frac{1}{x}\right)\\
\end{array}
\end{array}
if x < -4.20000000000000018Initial program 100.0%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6471.3
Simplified71.3%
Taylor expanded in x around inf
sub-negN/A
distribute-rgt-inN/A
cube-multN/A
unpow2N/A
associate-*r*N/A
cube-multN/A
unpow2N/A
associate-*r*N/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-evalN/A
distribute-rgt-inN/A
metadata-evalN/A
sub-negN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
Simplified71.3%
Taylor expanded in x around 0
lower-/.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6459.4
Simplified59.4%
if -4.20000000000000018 < x Initial program 8.4%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
+-commutativeN/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
associate-+l+N/A
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
*-commutativeN/A
associate-*l/N/A
*-lft-identityN/A
lower-fma.f64N/A
*-lft-identityN/A
associate-*l/N/A
distribute-rgt-inN/A
Simplified98.7%
Final simplification86.5%
(FPCore (x) :precision binary64 (fma x 0.08333333333333333 (+ 0.5 (/ 1.0 x))))
double code(double x) {
return fma(x, 0.08333333333333333, (0.5 + (1.0 / x)));
}
function code(x) return fma(x, 0.08333333333333333, Float64(0.5 + Float64(1.0 / x))) end
code[x_] := N[(x * 0.08333333333333333 + N[(0.5 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, 0.08333333333333333, 0.5 + \frac{1}{x}\right)
\end{array}
Initial program 36.7%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
+-commutativeN/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
associate-+l+N/A
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
*-commutativeN/A
associate-*l/N/A
*-lft-identityN/A
lower-fma.f64N/A
*-lft-identityN/A
associate-*l/N/A
distribute-rgt-inN/A
Simplified69.0%
Final simplification69.0%
(FPCore (x) :precision binary64 (/ (fma x 0.5 1.0) x))
double code(double x) {
return fma(x, 0.5, 1.0) / x;
}
function code(x) return Float64(fma(x, 0.5, 1.0) / x) end
code[x_] := N[(N[(x * 0.5 + 1.0), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(x, 0.5, 1\right)}{x}
\end{array}
Initial program 36.7%
lift-exp.f64N/A
lift-exp.f64N/A
flip--N/A
clear-numN/A
clear-numN/A
flip--N/A
lift--.f64N/A
clear-numN/A
frac-2negN/A
lower-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
lift--.f64N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
lift-exp.f64N/A
rec-expN/A
*-inversesN/A
lower-expm1.f64N/A
Applied egg-rr100.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6490.1
Simplified90.1%
Taylor expanded in x around 0
lower-/.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6468.7
Simplified68.7%
(FPCore (x) :precision binary64 (+ 0.5 (/ 1.0 x)))
/* Two-term expansion of e^x / (e^x - 1) around 0: 1/x + 1/2. */
double code(double x) {
    const double recip = 1.0 / x;
    return 0.5 + recip;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.5d0 + (1.0d0 / x)
end function
public static double code(double x) {
return 0.5 + (1.0 / x);
}
def code(x):
    """Two-term expansion 1/x + 1/2 of exp(x)/(exp(x) - 1) near 0."""
    recip = 1.0 / x
    return 0.5 + recip
function code(x) return Float64(0.5 + Float64(1.0 / x)) end
function tmp = code(x) tmp = 0.5 + (1.0 / x); end
code[x_] := N[(0.5 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 + \frac{1}{x}
\end{array}
Initial program 36.7%
Taylor expanded in x around 0
*-lft-identityN/A
associate-*l/N/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-+.f64N/A
lower-/.f64N/A
associate-*l*N/A
rgt-mult-inverseN/A
metadata-eval68.7
Simplified68.7%
Final simplification68.7%
(FPCore (x) :precision binary64 (/ 1.0 x))
/* Leading-order term 1/x of the expansion of e^x / (e^x - 1). */
double code(double x) {
    const double recip = 1.0 / x;
    return recip;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / x
end function
public static double code(double x) {
return 1.0 / x;
}
def code(x):
    """Leading-order term 1/x of the series for exp(x)/(exp(x) - 1)."""
    result = 1.0 / x
    return result
function code(x) return Float64(1.0 / x) end
function tmp = code(x) tmp = 1.0 / x; end
code[x_] := N[(1.0 / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x}
\end{array}
Initial program 36.7%
Taylor expanded in x around 0
lower-/.f6468.2
Simplified68.2%
(FPCore (x) :precision binary64 (* x 0.08333333333333333))
/* Single linear term x/12 (0.08333... is the double nearest 1/12). */
double code(double x) {
    const double coeff = 0.08333333333333333;
    return x * coeff;
}
real(8) function code(x)
real(8), intent (in) :: x
code = x * 0.08333333333333333d0
end function
public static double code(double x) {
return x * 0.08333333333333333;
}
def code(x): return x * 0.08333333333333333
function code(x) return Float64(x * 0.08333333333333333) end
function tmp = code(x) tmp = x * 0.08333333333333333; end
code[x_] := N[(x * 0.08333333333333333), $MachinePrecision]
\begin{array}{l}
\\
x \cdot 0.08333333333333333
\end{array}
Initial program 36.7%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
+-commutativeN/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
associate-+l+N/A
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
*-commutativeN/A
associate-*l/N/A
*-lft-identityN/A
lower-fma.f64N/A
*-lft-identityN/A
associate-*l/N/A
distribute-rgt-inN/A
Simplified69.0%
Taylor expanded in x around inf
*-commutativeN/A
metadata-evalN/A
lft-mult-inverseN/A
associate-*l*N/A
lower-*.f64N/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-eval3.6
Simplified3.6%
(FPCore (x) :precision binary64 0.5)
/* Constant term 1/2 of the expansion; the argument is ignored by design. */
double code(double x) {
    (void)x;  /* unused */
    return 0.5;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.5d0
end function
public static double code(double x) {
return 0.5;
}
def code(x): return 0.5
function code(x) return 0.5 end
function tmp = code(x) tmp = 0.5; end
code[x_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 36.7%
Taylor expanded in x around 0
*-lft-identityN/A
associate-*l/N/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-+.f64N/A
lower-/.f64N/A
associate-*l*N/A
rgt-mult-inverseN/A
metadata-eval68.7
Simplified68.7%
Taylor expanded in x around inf
Simplified3.4%
(FPCore (x) :precision binary64 (/ (- 1.0) (expm1 (- x))))
/* -1 / expm1(-x): accurate, cancellation-free rewrite of
 * exp(x) / (exp(x) - 1). */
double code(double x) {
return -1.0 / expm1(-x);
}
public static double code(double x) {
return -1.0 / Math.expm1(-x);
}
def code(x): return -1.0 / math.expm1(-x)
function code(x) return Float64(Float64(-1.0) / expm1(Float64(-x))) end
code[x_] := N[((-1.0) / N[(Exp[(-x)] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\mathsf{expm1}\left(-x\right)}
\end{array}
herbie shell --seed 2024210
(FPCore (x)
:name "expq2 (section 3.11)"
:precision binary64
:pre (> 710.0 x)
:alt
(! :herbie-platform default (/ (- 1) (expm1 (- x))))
(/ (exp x) (- (exp x) 1.0)))