
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
! Computes exp(x) / (exp(x) - 1) in double precision.
! NOTE(review): the subtraction exp(x) - 1 cancels catastrophically for
! |x| << 1; an expm1-based rewrite is more accurate (see the report's
! first alternative).
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) / (exp(x) - 1.0d0)
end function
/**
 * Computes e^x / (e^x - 1) in double precision.
 *
 * <p>The naive form {@code Math.exp(x) / (Math.exp(x) - 1.0)} loses
 * nearly all significant digits for |x| near 0 (the subtraction
 * cancels) and overflows spuriously for large x.  The algebraically
 * identical form -1 / expm1(-x) is accurate over the whole range.
 * {@code 0.0 - x} (not {@code -x}) preserves the x == 0 case:
 * expm1(+0.0) is +0.0, so the result is -Infinity as before.
 */
public static double code(double x) {
    return -1.0 / Math.expm1(0.0 - x);
}
def code(x):
    """Compute e**x / (e**x - 1) in binary64.

    The naive form math.exp(x) / (math.exp(x) - 1.0) suffers
    catastrophic cancellation for x near 0 and overflows for large x;
    -1 / expm1(-x) is algebraically identical and numerically stable.
    Like the original, x == 0 raises ZeroDivisionError.
    """
    return -1.0 / math.expm1(0.0 - x)
# e^x / (e^x - 1); NOTE(review): the subtraction exp(x) - 1.0 cancels
# catastrophically for |x| << 1 — the report's expm1 alternative is stable.
function code(x) return Float64(exp(x) / Float64(exp(x) - 1.0)) end
% e^x / (e^x - 1); note: exp(x) - 1.0 cancels catastrophically for |x| << 1.
function tmp = code(x) tmp = exp(x) / (exp(x) - 1.0); end
(* e^x / (e^x - 1) at machine precision; the Exp[x] - 1.0 subtraction cancels badly for x near 0. *)
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) / (exp(x) - 1.0d0)
end function
public static double code(double x) {
return Math.exp(x) / (Math.exp(x) - 1.0);
}
def code(x): return math.exp(x) / (math.exp(x) - 1.0)
function code(x) return Float64(exp(x) / Float64(exp(x) - 1.0)) end
function tmp = code(x) tmp = exp(x) / (exp(x) - 1.0); end
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
(FPCore (x) :precision binary64 (/ -1.0 (expm1 (- 0.0 x))))
double code(double x) {
return -1.0 / expm1((0.0 - x));
}
/** Stable evaluation of e^x / (e^x - 1) via -1 / expm1(0 - x). */
public static double code(double x) {
    final double negated = 0.0 - x;
    final double denominator = Math.expm1(negated);
    return -1.0 / denominator;
}
def code(x):
    """Stable evaluation of e**x / (e**x - 1) as -1 / expm1(0 - x)."""
    negated = 0.0 - x
    return -1.0 / math.expm1(negated)
# Stable rewrite of e^x / (e^x - 1) as -1 / expm1(0 - x).
function code(x) return Float64(-1.0 / expm1(Float64(0.0 - x))) end
(* Stable rewrite of e^x / (e^x - 1) as -1 / (Exp[-x] - 1), i.e. -1/expm1(-x). *)
code[x_] := N[(-1.0 / N[(Exp[N[(0.0 - x), $MachinePrecision]] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\mathsf{expm1}\left(0 - x\right)}
\end{array}
Initial program 44.4%
clear-numN/A
frac-2negN/A
/-lowering-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
rec-expN/A
*-inversesN/A
accelerator-lowering-expm1.f64N/A
neg-lowering-neg.f64100.0%
Applied egg-rr100.0%
Final simplification100.0%
(FPCore (x) :precision binary64 (/ (exp x) x))
double code(double x) {
return exp(x) / x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) / x
end function
public static double code(double x) {
return Math.exp(x) / x;
}
def code(x): return math.exp(x) / x
function code(x) return Float64(exp(x) / x) end
function tmp = code(x) tmp = exp(x) / x; end
code[x_] := N[(N[Exp[x], $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{x}
\end{array}
Initial program 44.4%
Taylor expanded in x around 0
Simplified97.4%
(FPCore (x)
:precision binary64
(let* ((t_0 (+ 0.5 (* x (+ -0.16666666666666666 (* x 0.041666666666666664)))))
(t_1 (* t_0 (* (* x x) t_0))))
(if (<= x -6.8e+51)
(/ -96.0 (* x (* x (* x (* x x)))))
(/
-1.0
(/ (/ (* x (- 1.0 (* t_1 t_1))) (+ 1.0 t_1)) (- -1.0 (* x t_0)))))))
/* Herbie alternative for e^x / (e^x - 1).
 * t_0 is the Taylor polynomial 1/2 - x/6 + x^2/24 (the constants are
 * 1/6 and 1/24 rounded to binary64); t_1 = x^2 * t_0^2.
 * For x <= -6.8e51 an asymptotic term -96/x^5 is used instead (the
 * report derives it from a Taylor expansion around infinity).
 * NOTE(review): the grouping/rounding order of every expression is
 * significant for accuracy — do not re-associate.
 */
double code(double x) {
double t_0 = 0.5 + (x * (-0.16666666666666666 + (x * 0.041666666666666664)));
double t_1 = t_0 * ((x * x) * t_0);
double tmp;
if (x <= -6.8e+51) {
tmp = -96.0 / (x * (x * (x * (x * x))));
} else {
tmp = -1.0 / (((x * (1.0 - (t_1 * t_1))) / (1.0 + t_1)) / (-1.0 - (x * t_0)));
}
return tmp;
}
! Herbie alternative for exp(x) / (exp(x) - 1).
! t_0 is the Taylor polynomial 1/2 - x/6 + x**2/24; t_1 = x**2 * t_0**2.
! For x <= -6.8d51 the asymptotic term -96/x**5 is used instead.
! NOTE(review): the grouping of every expression is significant for
! accuracy — do not re-associate.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = 0.5d0 + (x * ((-0.16666666666666666d0) + (x * 0.041666666666666664d0)))
t_1 = t_0 * ((x * x) * t_0)
if (x <= (-6.8d+51)) then
tmp = (-96.0d0) / (x * (x * (x * (x * x))))
else
tmp = (-1.0d0) / (((x * (1.0d0 - (t_1 * t_1))) / (1.0d0 + t_1)) / ((-1.0d0) - (x * t_0)))
end if
code = tmp
end function
/**
 * Herbie alternative for e^x / (e^x - 1).
 * t_0 is the Taylor polynomial 1/2 - x/6 + x^2/24; t_1 = x^2 * t_0^2.
 * For x <= -6.8e51 the asymptotic term -96/x^5 is used instead.
 * NOTE(review): the grouping of every expression is significant for
 * accuracy — do not re-associate.
 */
public static double code(double x) {
double t_0 = 0.5 + (x * (-0.16666666666666666 + (x * 0.041666666666666664)));
double t_1 = t_0 * ((x * x) * t_0);
double tmp;
if (x <= -6.8e+51) {
tmp = -96.0 / (x * (x * (x * (x * x))));
} else {
tmp = -1.0 / (((x * (1.0 - (t_1 * t_1))) / (1.0 + t_1)) / (-1.0 - (x * t_0)));
}
return tmp;
}
def code(x):
    """Piecewise Herbie approximation of e**x / (e**x - 1).

    t_0 is the Taylor polynomial 1/2 - x/6 + x**2/24; t_1 = x**2 * t_0**2.
    For x <= -6.8e51 the asymptotic term -96/x**5 is used instead.
    (The report extraction fused all statements onto one line, which is
    not valid Python; this is the identical program reformatted.)
    """
    t_0 = 0.5 + (x * (-0.16666666666666666 + (x * 0.041666666666666664)))
    t_1 = t_0 * ((x * x) * t_0)
    if x <= -6.8e+51:
        tmp = -96.0 / (x * (x * (x * (x * x))))
    else:
        tmp = -1.0 / (((x * (1.0 - (t_1 * t_1))) / (1.0 + t_1)) / (-1.0 - (x * t_0)))
    return tmp
function code(x) t_0 = Float64(0.5 + Float64(x * Float64(-0.16666666666666666 + Float64(x * 0.041666666666666664)))) t_1 = Float64(t_0 * Float64(Float64(x * x) * t_0)) tmp = 0.0 if (x <= -6.8e+51) tmp = Float64(-96.0 / Float64(x * Float64(x * Float64(x * Float64(x * x))))); else tmp = Float64(-1.0 / Float64(Float64(Float64(x * Float64(1.0 - Float64(t_1 * t_1))) / Float64(1.0 + t_1)) / Float64(-1.0 - Float64(x * t_0)))); end return tmp end
function tmp_2 = code(x) t_0 = 0.5 + (x * (-0.16666666666666666 + (x * 0.041666666666666664))); t_1 = t_0 * ((x * x) * t_0); tmp = 0.0; if (x <= -6.8e+51) tmp = -96.0 / (x * (x * (x * (x * x)))); else tmp = -1.0 / (((x * (1.0 - (t_1 * t_1))) / (1.0 + t_1)) / (-1.0 - (x * t_0))); end tmp_2 = tmp; end
code[x_] := Block[{t$95$0 = N[(0.5 + N[(x * N[(-0.16666666666666666 + N[(x * 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 * N[(N[(x * x), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -6.8e+51], N[(-96.0 / N[(x * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.0 / N[(N[(N[(x * N[(1.0 - N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + t$95$1), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - N[(x * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.5 + x \cdot \left(-0.16666666666666666 + x \cdot 0.041666666666666664\right)\\
t_1 := t\_0 \cdot \left(\left(x \cdot x\right) \cdot t\_0\right)\\
\mathbf{if}\;x \leq -6.8 \cdot 10^{+51}:\\
\;\;\;\;\frac{-96}{x \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{-1}{\frac{\frac{x \cdot \left(1 - t\_1 \cdot t\_1\right)}{1 + t\_1}}{-1 - x \cdot t\_0}}\\
\end{array}
\end{array}
if x < -6.79999999999999969e51: Initial program 100.0%
clear-numN/A
frac-2negN/A
/-lowering-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
rec-expN/A
*-inversesN/A
accelerator-lowering-expm1.f64N/A
neg-lowering-neg.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6486.1%
Simplified86.1%
Taylor expanded in x around inf
associate-*r/N/A
/-lowering-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
neg-mul-1N/A
associate-*r/N/A
metadata-evalN/A
distribute-neg-fracN/A
/-lowering-/.f64N/A
metadata-evalN/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6486.1%
Simplified86.1%
Taylor expanded in x around 0
/-lowering-/.f64N/A
metadata-evalN/A
pow-plusN/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-plusN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6495.8%
Simplified95.8%
if -6.79999999999999969e51 < x: Initial program 16.3%
clear-numN/A
frac-2negN/A
/-lowering-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
rec-expN/A
*-inversesN/A
accelerator-lowering-expm1.f64N/A
neg-lowering-neg.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6489.1%
Simplified89.1%
*-commutativeN/A
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr90.7%
flip--N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr95.2%
Final simplification95.4%
(FPCore (x)
:precision binary64
(let* ((t_0
(+ 0.5 (* x (+ -0.16666666666666666 (* x 0.041666666666666664))))))
(if (<= x -5e+155)
(/ -2.0 (* x x))
(/
-1.0
(/
(* x (- 1.0 (* t_0 (* (* x x) t_0))))
(- -1.0 (* x (+ 0.5 (* x -0.16666666666666666)))))))))
double code(double x) {
double t_0 = 0.5 + (x * (-0.16666666666666666 + (x * 0.041666666666666664)));
double tmp;
if (x <= -5e+155) {
tmp = -2.0 / (x * x);
} else {
tmp = -1.0 / ((x * (1.0 - (t_0 * ((x * x) * t_0)))) / (-1.0 - (x * (0.5 + (x * -0.16666666666666666)))));
}
return tmp;
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = 0.5d0 + (x * ((-0.16666666666666666d0) + (x * 0.041666666666666664d0)))
if (x <= (-5d+155)) then
tmp = (-2.0d0) / (x * x)
else
tmp = (-1.0d0) / ((x * (1.0d0 - (t_0 * ((x * x) * t_0)))) / ((-1.0d0) - (x * (0.5d0 + (x * (-0.16666666666666666d0))))))
end if
code = tmp
end function
public static double code(double x) {
double t_0 = 0.5 + (x * (-0.16666666666666666 + (x * 0.041666666666666664)));
double tmp;
if (x <= -5e+155) {
tmp = -2.0 / (x * x);
} else {
tmp = -1.0 / ((x * (1.0 - (t_0 * ((x * x) * t_0)))) / (-1.0 - (x * (0.5 + (x * -0.16666666666666666)))));
}
return tmp;
}
def code(x): t_0 = 0.5 + (x * (-0.16666666666666666 + (x * 0.041666666666666664))) tmp = 0 if x <= -5e+155: tmp = -2.0 / (x * x) else: tmp = -1.0 / ((x * (1.0 - (t_0 * ((x * x) * t_0)))) / (-1.0 - (x * (0.5 + (x * -0.16666666666666666))))) return tmp
function code(x) t_0 = Float64(0.5 + Float64(x * Float64(-0.16666666666666666 + Float64(x * 0.041666666666666664)))) tmp = 0.0 if (x <= -5e+155) tmp = Float64(-2.0 / Float64(x * x)); else tmp = Float64(-1.0 / Float64(Float64(x * Float64(1.0 - Float64(t_0 * Float64(Float64(x * x) * t_0)))) / Float64(-1.0 - Float64(x * Float64(0.5 + Float64(x * -0.16666666666666666)))))); end return tmp end
function tmp_2 = code(x) t_0 = 0.5 + (x * (-0.16666666666666666 + (x * 0.041666666666666664))); tmp = 0.0; if (x <= -5e+155) tmp = -2.0 / (x * x); else tmp = -1.0 / ((x * (1.0 - (t_0 * ((x * x) * t_0)))) / (-1.0 - (x * (0.5 + (x * -0.16666666666666666))))); end tmp_2 = tmp; end
code[x_] := Block[{t$95$0 = N[(0.5 + N[(x * N[(-0.16666666666666666 + N[(x * 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -5e+155], N[(-2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision], N[(-1.0 / N[(N[(x * N[(1.0 - N[(t$95$0 * N[(N[(x * x), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - N[(x * N[(0.5 + N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.5 + x \cdot \left(-0.16666666666666666 + x \cdot 0.041666666666666664\right)\\
\mathbf{if}\;x \leq -5 \cdot 10^{+155}:\\
\;\;\;\;\frac{-2}{x \cdot x}\\
\mathbf{else}:\\
\;\;\;\;\frac{-1}{\frac{x \cdot \left(1 - t\_0 \cdot \left(\left(x \cdot x\right) \cdot t\_0\right)\right)}{-1 - x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)}}\\
\end{array}
\end{array}
if x < -4.9999999999999999e155Initial program 100.0%
clear-numN/A
frac-2negN/A
/-lowering-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
rec-expN/A
*-inversesN/A
accelerator-lowering-expm1.f64N/A
neg-lowering-neg.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64100.0%
Simplified100.0%
Taylor expanded in x around inf
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f64100.0%
Simplified100.0%
if -4.9999999999999999e155 < x Initial program 30.3%
clear-numN/A
frac-2negN/A
/-lowering-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
rec-expN/A
*-inversesN/A
accelerator-lowering-expm1.f64N/A
neg-lowering-neg.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6485.0%
Simplified85.0%
*-commutativeN/A
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr84.9%
Taylor expanded in x around 0
*-commutativeN/A
*-lowering-*.f6492.3%
Simplified92.3%
Final simplification93.9%
(FPCore (x) :precision binary64 (if (<= x -680.0) (/ -96.0 (* x (* x (* x (* x x))))) (+ (/ 1.0 x) (+ 0.5 (* x 0.08333333333333333)))))
/* Piecewise approximation of e^x / (e^x - 1):
 * asymptotic -96/x^5 for x <= -680, else 1/x + 1/2 + x/12.
 * Expressions kept in the exact grouping of the report for identical
 * rounding behavior. */
double code(double x) {
    return (x <= -680.0)
        ? -96.0 / (x * (x * (x * (x * x))))
        : (1.0 / x) + (0.5 + (x * 0.08333333333333333));
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-680.0d0)) then
tmp = (-96.0d0) / (x * (x * (x * (x * x))))
else
tmp = (1.0d0 / x) + (0.5d0 + (x * 0.08333333333333333d0))
end if
code = tmp
end function
public static double code(double x) {
double tmp;
if (x <= -680.0) {
tmp = -96.0 / (x * (x * (x * (x * x))));
} else {
tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333));
}
return tmp;
}
def code(x): tmp = 0 if x <= -680.0: tmp = -96.0 / (x * (x * (x * (x * x)))) else: tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333)) return tmp
function code(x) tmp = 0.0 if (x <= -680.0) tmp = Float64(-96.0 / Float64(x * Float64(x * Float64(x * Float64(x * x))))); else tmp = Float64(Float64(1.0 / x) + Float64(0.5 + Float64(x * 0.08333333333333333))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -680.0) tmp = -96.0 / (x * (x * (x * (x * x)))); else tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333)); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -680.0], N[(-96.0 / N[(x * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 / x), $MachinePrecision] + N[(0.5 + N[(x * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -680:\\
\;\;\;\;\frac{-96}{x \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x} + \left(0.5 + x \cdot 0.08333333333333333\right)\\
\end{array}
\end{array}
if x < -680Initial program 100.0%
clear-numN/A
frac-2negN/A
/-lowering-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
rec-expN/A
*-inversesN/A
accelerator-lowering-expm1.f64N/A
neg-lowering-neg.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6474.0%
Simplified74.0%
Taylor expanded in x around inf
associate-*r/N/A
/-lowering-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
neg-mul-1N/A
associate-*r/N/A
metadata-evalN/A
distribute-neg-fracN/A
/-lowering-/.f64N/A
metadata-evalN/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6474.0%
Simplified74.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
metadata-evalN/A
pow-plusN/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-plusN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6482.3%
Simplified82.3%
if -680 < x Initial program 8.2%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
distribute-lft-inN/A
*-rgt-identityN/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
rgt-mult-inverseN/A
metadata-eval97.7%
Simplified97.7%
Final simplification91.6%
(FPCore (x) :precision binary64 (if (<= x -4.1) (/ -24.0 (* x (* x (* x x)))) (+ (/ 1.0 x) (+ 0.5 (* x 0.08333333333333333)))))
double code(double x) {
double tmp;
if (x <= -4.1) {
tmp = -24.0 / (x * (x * (x * x)));
} else {
tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333));
}
return tmp;
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-4.1d0)) then
tmp = (-24.0d0) / (x * (x * (x * x)))
else
tmp = (1.0d0 / x) + (0.5d0 + (x * 0.08333333333333333d0))
end if
code = tmp
end function
public static double code(double x) {
double tmp;
if (x <= -4.1) {
tmp = -24.0 / (x * (x * (x * x)));
} else {
tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333));
}
return tmp;
}
def code(x): tmp = 0 if x <= -4.1: tmp = -24.0 / (x * (x * (x * x))) else: tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333)) return tmp
function code(x) tmp = 0.0 if (x <= -4.1) tmp = Float64(-24.0 / Float64(x * Float64(x * Float64(x * x)))); else tmp = Float64(Float64(1.0 / x) + Float64(0.5 + Float64(x * 0.08333333333333333))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -4.1) tmp = -24.0 / (x * (x * (x * x))); else tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333)); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -4.1], N[(-24.0 / N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 / x), $MachinePrecision] + N[(0.5 + N[(x * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -4.1:\\
\;\;\;\;\frac{-24}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x} + \left(0.5 + x \cdot 0.08333333333333333\right)\\
\end{array}
\end{array}
if x < -4.0999999999999996Initial program 100.0%
clear-numN/A
frac-2negN/A
/-lowering-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
rec-expN/A
*-inversesN/A
accelerator-lowering-expm1.f64N/A
neg-lowering-neg.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6473.4%
Simplified73.4%
Taylor expanded in x around inf
/-lowering-/.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6473.4%
Simplified73.4%
if -4.0999999999999996 < x Initial program 7.6%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
distribute-lft-inN/A
*-rgt-identityN/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
rgt-mult-inverseN/A
metadata-eval98.3%
Simplified98.3%
Final simplification88.3%
(FPCore (x) :precision binary64 (if (<= x -4.5) (/ -2.0 (* x x)) (+ (/ 1.0 x) (+ 0.5 (* x 0.08333333333333333)))))
/* Piecewise approximation of e^x / (e^x - 1):
 * asymptotic -2/x^2 for x <= -4.5, else 1/x + 1/2 + x/12. */
double code(double x) {
    return (x <= -4.5)
        ? -2.0 / (x * x)
        : (1.0 / x) + (0.5 + (x * 0.08333333333333333));
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-4.5d0)) then
tmp = (-2.0d0) / (x * x)
else
tmp = (1.0d0 / x) + (0.5d0 + (x * 0.08333333333333333d0))
end if
code = tmp
end function
public static double code(double x) {
double tmp;
if (x <= -4.5) {
tmp = -2.0 / (x * x);
} else {
tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333));
}
return tmp;
}
def code(x): tmp = 0 if x <= -4.5: tmp = -2.0 / (x * x) else: tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333)) return tmp
function code(x) tmp = 0.0 if (x <= -4.5) tmp = Float64(-2.0 / Float64(x * x)); else tmp = Float64(Float64(1.0 / x) + Float64(0.5 + Float64(x * 0.08333333333333333))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -4.5) tmp = -2.0 / (x * x); else tmp = (1.0 / x) + (0.5 + (x * 0.08333333333333333)); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -4.5], N[(-2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 / x), $MachinePrecision] + N[(0.5 + N[(x * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -4.5:\\
\;\;\;\;\frac{-2}{x \cdot x}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x} + \left(0.5 + x \cdot 0.08333333333333333\right)\\
\end{array}
\end{array}
if x < -4.5Initial program 100.0%
clear-numN/A
frac-2negN/A
/-lowering-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
rec-expN/A
*-inversesN/A
accelerator-lowering-expm1.f64N/A
neg-lowering-neg.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6453.6%
Simplified53.6%
Taylor expanded in x around inf
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6453.6%
Simplified53.6%
if -4.5 < x Initial program 7.6%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
distribute-lft-inN/A
*-rgt-identityN/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
rgt-mult-inverseN/A
metadata-eval98.3%
Simplified98.3%
Final simplification80.5%
(FPCore (x) :precision binary64 (if (<= x -1.76) (/ -2.0 (* x x)) (+ 0.5 (/ 1.0 x))))
/* Piecewise approximation of e^x / (e^x - 1):
 * asymptotic -2/x^2 for x <= -1.76, else 1/2 + 1/x. */
double code(double x) {
    return (x <= -1.76)
        ? -2.0 / (x * x)
        : 0.5 + (1.0 / x);
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-1.76d0)) then
tmp = (-2.0d0) / (x * x)
else
tmp = 0.5d0 + (1.0d0 / x)
end if
code = tmp
end function
public static double code(double x) {
double tmp;
if (x <= -1.76) {
tmp = -2.0 / (x * x);
} else {
tmp = 0.5 + (1.0 / x);
}
return tmp;
}
def code(x): tmp = 0 if x <= -1.76: tmp = -2.0 / (x * x) else: tmp = 0.5 + (1.0 / x) return tmp
function code(x) tmp = 0.0 if (x <= -1.76) tmp = Float64(-2.0 / Float64(x * x)); else tmp = Float64(0.5 + Float64(1.0 / x)); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -1.76) tmp = -2.0 / (x * x); else tmp = 0.5 + (1.0 / x); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -1.76], N[(-2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision], N[(0.5 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.76:\\
\;\;\;\;\frac{-2}{x \cdot x}\\
\mathbf{else}:\\
\;\;\;\;0.5 + \frac{1}{x}\\
\end{array}
\end{array}
if x < -1.76000000000000001Initial program 100.0%
clear-numN/A
frac-2negN/A
/-lowering-/.f64N/A
metadata-evalN/A
distribute-neg-fracN/A
neg-sub0N/A
associate-+l-N/A
neg-sub0N/A
+-commutativeN/A
sub-negN/A
div-subN/A
rec-expN/A
*-inversesN/A
accelerator-lowering-expm1.f64N/A
neg-lowering-neg.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6453.6%
Simplified53.6%
Taylor expanded in x around inf
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6453.6%
Simplified53.6%
if -1.76000000000000001 < x Initial program 7.6%
Taylor expanded in x around 0
*-lft-identityN/A
associate-*l/N/A
distribute-rgt-inN/A
*-lft-identityN/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
associate-*l*N/A
rgt-mult-inverseN/A
metadata-eval97.6%
Simplified97.6%
Final simplification80.0%
(FPCore (x) :precision binary64 (/ 1.0 x))
/* Leading-order (1/x) approximation of e^x / (e^x - 1) near 0. */
double code(double x) {
    const double reciprocal = 1.0 / x;
    return reciprocal;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / x
end function
public static double code(double x) {
return 1.0 / x;
}
def code(x):
    """Leading-order (1/x) approximation of e**x / (e**x - 1) near 0."""
    reciprocal = 1.0 / x
    return reciprocal
function code(x) return Float64(1.0 / x) end
function tmp = code(x) tmp = 1.0 / x; end
code[x_] := N[(1.0 / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x}
\end{array}
Initial program 44.4%
Taylor expanded in x around 0
/-lowering-/.f6460.1%
Simplified60.1%
(FPCore (x) :precision binary64 (* x 0.08333333333333333))
/* x/12 term of the Taylor expansion of e^x / (e^x - 1); the constant
 * is 1/12 rounded to binary64. */
double code(double x) {
    const double one_twelfth = 0.08333333333333333;
    return x * one_twelfth;
}
real(8) function code(x)
real(8), intent (in) :: x
code = x * 0.08333333333333333d0
end function
public static double code(double x) {
return x * 0.08333333333333333;
}
def code(x): return x * 0.08333333333333333
function code(x) return Float64(x * 0.08333333333333333) end
function tmp = code(x) tmp = x * 0.08333333333333333; end
code[x_] := N[(x * 0.08333333333333333), $MachinePrecision]
\begin{array}{l}
\\
x \cdot 0.08333333333333333
\end{array}
Initial program 44.4%
Taylor expanded in x around 0
*-lft-identityN/A
associate-/l*N/A
associate-*l/N/A
distribute-lft-inN/A
*-rgt-identityN/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
lft-mult-inverseN/A
*-lft-identityN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
rgt-mult-inverseN/A
metadata-eval60.0%
Simplified60.0%
Taylor expanded in x around inf
metadata-evalN/A
lft-mult-inverseN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-eval3.5%
Simplified3.5%
(FPCore (x) :precision binary64 0.5)
/* Constant 1/2 term of the Taylor expansion of e^x / (e^x - 1);
 * the argument is intentionally unused. */
double code(double x) {
    (void)x;
    return 0.5;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.5d0
end function
public static double code(double x) {
return 0.5;
}
def code(x): return 0.5
function code(x) return 0.5 end
function tmp = code(x) tmp = 0.5; end
code[x_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 44.4%
Taylor expanded in x around 0
*-lft-identityN/A
associate-*l/N/A
distribute-rgt-inN/A
*-lft-identityN/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
associate-*l*N/A
rgt-mult-inverseN/A
metadata-eval59.9%
Simplified59.9%
Taylor expanded in x around inf
Simplified3.4%
(FPCore (x) :precision binary64 (/ (- 1.0) (expm1 (- x))))
double code(double x) {
return -1.0 / expm1(-x);
}
public static double code(double x) {
return -1.0 / Math.expm1(-x);
}
def code(x): return -1.0 / math.expm1(-x)
function code(x) return Float64(Float64(-1.0) / expm1(Float64(-x))) end
code[x_] := N[((-1.0) / N[(Exp[(-x)] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\mathsf{expm1}\left(-x\right)}
\end{array}
herbie shell --seed 2024192
(FPCore (x)
:name "expq2 (section 3.11)"
:precision binary64
:pre (> 710.0 x)
:alt
(! :herbie-platform default (/ (- 1) (expm1 (- x))))
(/ (exp x) (- (exp x) 1.0)))