
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
! Computes exp(x) / (exp(x) - 1) in double precision.
! Note: suffers catastrophic cancellation in the denominator near x = 0.
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) / (exp(x) - 1.0d0)
end function
public static double code(double x) {
    // exp(x) / (exp(x) - 1), computing Math.exp(x) a single time.
    double e = Math.exp(x);
    return e / (e - 1.0);
}
def code(x):
    # exp(x) / (exp(x) - 1.0), with the exponential evaluated once.
    e = math.exp(x)
    return e / (e - 1.0)
function code(x)
    # exp(x) / (exp(x) - 1.0), each operation rounded to Float64.
    numer = exp(x)
    return Float64(numer / Float64(numer - 1.0))
end
function tmp = code(x)
    % exp(x) / (exp(x) - 1), computing exp(x) once.
    e = exp(x);
    tmp = e / (e - 1.0);
end
(* Machine-precision evaluation of Exp[x] / (Exp[x] - 1); each subexpression is rounded via N[..., $MachinePrecision] to mimic binary64 arithmetic. *)
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) / (exp(x) - 1.0d0)
end function
public static double code(double x) {
return Math.exp(x) / (Math.exp(x) - 1.0);
}
def code(x): return math.exp(x) / (math.exp(x) - 1.0)
function code(x) return Float64(exp(x) / Float64(exp(x) - 1.0)) end
function tmp = code(x) tmp = exp(x) / (exp(x) - 1.0); end
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
(FPCore (x) :precision binary64 (/ (exp x) (expm1 x)))
double code(double x) {
return exp(x) / expm1(x);
}
public static double code(double x) {
    // exp(x) / expm1(x); Math.expm1 keeps the denominator accurate near x = 0.
    double denom = Math.expm1(x);
    return Math.exp(x) / denom;
}
def code(x):
    # exp(x) / expm1(x); math.expm1 keeps the denominator accurate near x = 0.
    denom = math.expm1(x)
    return math.exp(x) / denom
function code(x) return Float64(exp(x) / expm1(x)) end
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(Exp[x] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{\mathsf{expm1}\left(x\right)}
\end{array}
Initial program 34.4%
expm1-define100.0%
Simplified100.0%
(FPCore (x) :precision binary64 (/ -1.0 (expm1 (- x))))
double code(double x) {
return -1.0 / expm1(-x);
}
public static double code(double x) {
return -1.0 / Math.expm1(-x);
}
def code(x):
    # -1 / expm1(-x), an accuracy-preserving rewrite of exp(x) / (exp(x) - 1).
    d = math.expm1(-x)
    return -1.0 / d
function code(x) return Float64(-1.0 / expm1(Float64(-x))) end
code[x_] := N[(-1.0 / N[(Exp[(-x)] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\mathsf{expm1}\left(-x\right)}
\end{array}
Initial program 34.4%
sub-neg34.4%
+-commutative34.4%
rgt-mult-inverse2.8%
exp-neg2.8%
distribute-rgt-neg-out2.8%
*-rgt-identity2.8%
distribute-lft-in2.8%
neg-sub02.8%
associate-+l-2.8%
neg-sub03.0%
associate-/r*3.0%
*-rgt-identity3.0%
associate-*r/3.0%
rgt-mult-inverse34.6%
distribute-frac-neg234.6%
distribute-neg-frac34.6%
metadata-eval34.6%
expm1-define100.0%
Simplified100.0%
(FPCore (x) :precision binary64 (/ (exp x) x))
double code(double x) {
return exp(x) / x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) / x
end function
public static double code(double x) {
return Math.exp(x) / x;
}
def code(x):
    # exp(x) / x -- large-|x| simplification of exp(x) / (exp(x) - 1) from the report.
    numer = math.exp(x)
    return numer / x
function code(x) return Float64(exp(x) / x) end
function tmp = code(x) tmp = exp(x) / x; end
code[x_] := N[(N[Exp[x], $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{x}
\end{array}
Initial program 34.4%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 99.3%
(FPCore (x)
:precision binary64
(/
-1.0
(*
x
(+
(* x (+ 0.5 (* x (- (* x 0.041666666666666664) 0.16666666666666666))))
-1.0))))
double code(double x) {
return -1.0 / (x * ((x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) / (x * ((x * (0.5d0 + (x * ((x * 0.041666666666666664d0) - 0.16666666666666666d0)))) + (-1.0d0)))
end function
public static double code(double x) {
return -1.0 / (x * ((x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0));
}
def code(x): return -1.0 / (x * ((x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0))
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * Float64(0.5 + Float64(x * Float64(Float64(x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0))) end
function tmp = code(x) tmp = -1.0 / (x * ((x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0)); end
code[x_] := N[(-1.0 / N[(x * N[(N[(x * N[(0.5 + N[(x * N[(N[(x * 0.041666666666666664), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right) + -1\right)}
\end{array}
Initial program 34.4%
sub-neg34.4%
+-commutative34.4%
rgt-mult-inverse2.8%
exp-neg2.8%
distribute-rgt-neg-out2.8%
*-rgt-identity2.8%
distribute-lft-in2.8%
neg-sub02.8%
associate-+l-2.8%
neg-sub03.0%
associate-/r*3.0%
*-rgt-identity3.0%
associate-*r/3.0%
rgt-mult-inverse34.6%
distribute-frac-neg234.6%
distribute-neg-frac34.6%
metadata-eval34.6%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 89.6%
Final simplification89.6%
(FPCore (x) :precision binary64 (/ -1.0 (* x (+ (* x (+ 0.5 (* x (* x 0.041666666666666664)))) -1.0))))
double code(double x) {
return -1.0 / (x * ((x * (0.5 + (x * (x * 0.041666666666666664)))) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) / (x * ((x * (0.5d0 + (x * (x * 0.041666666666666664d0)))) + (-1.0d0)))
end function
public static double code(double x) {
return -1.0 / (x * ((x * (0.5 + (x * (x * 0.041666666666666664)))) + -1.0));
}
def code(x): return -1.0 / (x * ((x * (0.5 + (x * (x * 0.041666666666666664)))) + -1.0))
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * Float64(0.5 + Float64(x * Float64(x * 0.041666666666666664)))) + -1.0))) end
function tmp = code(x) tmp = -1.0 / (x * ((x * (0.5 + (x * (x * 0.041666666666666664)))) + -1.0)); end
code[x_] := N[(-1.0 / N[(x * N[(N[(x * N[(0.5 + N[(x * N[(x * 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664\right)\right) + -1\right)}
\end{array}
Initial program 34.4%
sub-neg34.4%
+-commutative34.4%
rgt-mult-inverse2.8%
exp-neg2.8%
distribute-rgt-neg-out2.8%
*-rgt-identity2.8%
distribute-lft-in2.8%
neg-sub02.8%
associate-+l-2.8%
neg-sub03.0%
associate-/r*3.0%
*-rgt-identity3.0%
associate-*r/3.0%
rgt-mult-inverse34.6%
distribute-frac-neg234.6%
distribute-neg-frac34.6%
metadata-eval34.6%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 89.6%
Taylor expanded in x around inf 89.5%
*-commutative89.5%
Simplified89.5%
Final simplification89.5%
(FPCore (x) :precision binary64 (/ -1.0 (* x (+ (* x (+ 0.5 (* x -0.16666666666666666))) -1.0))))
double code(double x) {
return -1.0 / (x * ((x * (0.5 + (x * -0.16666666666666666))) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) / (x * ((x * (0.5d0 + (x * (-0.16666666666666666d0)))) + (-1.0d0)))
end function
public static double code(double x) {
return -1.0 / (x * ((x * (0.5 + (x * -0.16666666666666666))) + -1.0));
}
def code(x): return -1.0 / (x * ((x * (0.5 + (x * -0.16666666666666666))) + -1.0))
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * Float64(0.5 + Float64(x * -0.16666666666666666))) + -1.0))) end
function tmp = code(x) tmp = -1.0 / (x * ((x * (0.5 + (x * -0.16666666666666666))) + -1.0)); end
code[x_] := N[(-1.0 / N[(x * N[(N[(x * N[(0.5 + N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot \left(0.5 + x \cdot -0.16666666666666666\right) + -1\right)}
\end{array}
Initial program 34.4%
sub-neg34.4%
+-commutative34.4%
rgt-mult-inverse2.8%
exp-neg2.8%
distribute-rgt-neg-out2.8%
*-rgt-identity2.8%
distribute-lft-in2.8%
neg-sub02.8%
associate-+l-2.8%
neg-sub03.0%
associate-/r*3.0%
*-rgt-identity3.0%
associate-*r/3.0%
rgt-mult-inverse34.6%
distribute-frac-neg234.6%
distribute-neg-frac34.6%
metadata-eval34.6%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 87.3%
Final simplification87.3%
(FPCore (x) :precision binary64 (if (<= x -1.8) (/ -1.0 (* x (* x 0.5))) (+ 0.5 (/ 1.0 x))))
double code(double x) {
    /* Piecewise form: quadratic-tail approximation for x <= -1.8,
       series form 1/2 + 1/x otherwise. */
    if (x <= -1.8) {
        return -1.0 / (x * (x * 0.5));
    }
    return 0.5 + (1.0 / x);
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-1.8d0)) then
tmp = (-1.0d0) / (x * (x * 0.5d0))
else
tmp = 0.5d0 + (1.0d0 / x)
end if
code = tmp
end function
public static double code(double x) {
    // Piecewise form: quadratic-tail approximation for x <= -1.8,
    // series form 1/2 + 1/x otherwise.
    return (x <= -1.8) ? (-1.0 / (x * (x * 0.5))) : (0.5 + (1.0 / x));
}
def code(x): tmp = 0 if x <= -1.8: tmp = -1.0 / (x * (x * 0.5)) else: tmp = 0.5 + (1.0 / x) return tmp
function code(x) tmp = 0.0 if (x <= -1.8) tmp = Float64(-1.0 / Float64(x * Float64(x * 0.5))); else tmp = Float64(0.5 + Float64(1.0 / x)); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -1.8) tmp = -1.0 / (x * (x * 0.5)); else tmp = 0.5 + (1.0 / x); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -1.8], N[(-1.0 / N[(x * N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.5 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.8:\\
\;\;\;\;\frac{-1}{x \cdot \left(x \cdot 0.5\right)}\\
\mathbf{else}:\\
\;\;\;\;0.5 + \frac{1}{x}\\
\end{array}
\end{array}
if x < -1.80000000000000004: Initial program 100.0%
sub-neg100.0%
+-commutative100.0%
rgt-mult-inverse1.2%
exp-neg1.2%
distribute-rgt-neg-out1.2%
*-rgt-identity1.2%
distribute-lft-in1.2%
neg-sub01.2%
associate-+l-1.2%
neg-sub01.2%
associate-/r*1.2%
*-rgt-identity1.2%
associate-*r/1.2%
rgt-mult-inverse100.0%
distribute-frac-neg2100.0%
distribute-neg-frac100.0%
metadata-eval100.0%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 50.2%
Taylor expanded in x around inf 50.2%
*-commutative50.2%
Simplified50.2%
if -1.80000000000000004 < x Initial program 3.5%
sub-neg3.5%
+-commutative3.5%
rgt-mult-inverse3.5%
exp-neg3.5%
distribute-rgt-neg-out3.5%
*-rgt-identity3.5%
distribute-lft-in3.5%
neg-sub03.5%
associate-+l-3.5%
neg-sub03.8%
associate-/r*3.8%
*-rgt-identity3.8%
associate-*r/3.8%
rgt-mult-inverse3.8%
distribute-frac-neg23.8%
distribute-neg-frac3.8%
metadata-eval3.8%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 99.7%
*-commutative99.7%
Simplified99.7%
Taylor expanded in x around inf 99.7%
+-commutative99.7%
Simplified99.7%
Final simplification83.8%
(FPCore (x) :precision binary64 (/ -1.0 (* x (+ (* x (* x -0.16666666666666666)) -1.0))))
double code(double x) {
return -1.0 / (x * ((x * (x * -0.16666666666666666)) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) / (x * ((x * (x * (-0.16666666666666666d0))) + (-1.0d0)))
end function
public static double code(double x) {
return -1.0 / (x * ((x * (x * -0.16666666666666666)) + -1.0));
}
def code(x): return -1.0 / (x * ((x * (x * -0.16666666666666666)) + -1.0))
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * Float64(x * -0.16666666666666666)) + -1.0))) end
function tmp = code(x) tmp = -1.0 / (x * ((x * (x * -0.16666666666666666)) + -1.0)); end
code[x_] := N[(-1.0 / N[(x * N[(N[(x * N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot \left(x \cdot -0.16666666666666666\right) + -1\right)}
\end{array}
Initial program 34.4%
sub-neg34.4%
+-commutative34.4%
rgt-mult-inverse2.8%
exp-neg2.8%
distribute-rgt-neg-out2.8%
*-rgt-identity2.8%
distribute-lft-in2.8%
neg-sub02.8%
associate-+l-2.8%
neg-sub03.0%
associate-/r*3.0%
*-rgt-identity3.0%
associate-*r/3.0%
rgt-mult-inverse34.6%
distribute-frac-neg234.6%
distribute-neg-frac34.6%
metadata-eval34.6%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 87.3%
Taylor expanded in x around inf 87.1%
*-commutative87.1%
Simplified87.1%
Final simplification87.1%
(FPCore (x) :precision binary64 (/ -1.0 (* x (+ (* x 0.5) -1.0))))
double code(double x) {
return -1.0 / (x * ((x * 0.5) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) / (x * ((x * 0.5d0) + (-1.0d0)))
end function
public static double code(double x) {
return -1.0 / (x * ((x * 0.5) + -1.0));
}
def code(x): return -1.0 / (x * ((x * 0.5) + -1.0))
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * 0.5) + -1.0))) end
function tmp = code(x) tmp = -1.0 / (x * ((x * 0.5) + -1.0)); end
code[x_] := N[(-1.0 / N[(x * N[(N[(x * 0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot 0.5 + -1\right)}
\end{array}
Initial program 34.4%
sub-neg34.4%
+-commutative34.4%
rgt-mult-inverse2.8%
exp-neg2.8%
distribute-rgt-neg-out2.8%
*-rgt-identity2.8%
distribute-lft-in2.8%
neg-sub02.8%
associate-+l-2.8%
neg-sub03.0%
associate-/r*3.0%
*-rgt-identity3.0%
associate-*r/3.0%
rgt-mult-inverse34.6%
distribute-frac-neg234.6%
distribute-neg-frac34.6%
metadata-eval34.6%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 83.8%
Final simplification83.8%
(FPCore (x) :precision binary64 (/ 1.0 x))
double code(double x) {
return 1.0 / x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / x
end function
public static double code(double x) {
return 1.0 / x;
}
def code(x): return 1.0 / x
function code(x) return Float64(1.0 / x) end
function tmp = code(x) tmp = 1.0 / x; end
code[x_] := N[(1.0 / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x}
\end{array}
Initial program 34.4%
sub-neg34.4%
+-commutative34.4%
rgt-mult-inverse2.8%
exp-neg2.8%
distribute-rgt-neg-out2.8%
*-rgt-identity2.8%
distribute-lft-in2.8%
neg-sub02.8%
associate-+l-2.8%
neg-sub03.0%
associate-/r*3.0%
*-rgt-identity3.0%
associate-*r/3.0%
rgt-mult-inverse34.6%
distribute-frac-neg234.6%
distribute-neg-frac34.6%
metadata-eval34.6%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 69.3%
(FPCore (x) :precision binary64 (/ (- 1.0) (expm1 (- x))))
double code(double x) {
return -1.0 / expm1(-x);
}
public static double code(double x) {
return -1.0 / Math.expm1(-x);
}
def code(x): return -1.0 / math.expm1(-x)
function code(x) return Float64(Float64(-1.0) / expm1(Float64(-x))) end
code[x_] := N[((-1.0) / N[(Exp[(-x)] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\mathsf{expm1}\left(-x\right)}
\end{array}
herbie shell --seed 2024143
(FPCore (x)
:name "expq2 (section 3.11)"
:precision binary64
:pre (> 710.0 x)
:alt
(! :herbie-platform default (/ (- 1) (expm1 (- x))))
(/ (exp x) (- (exp x) 1.0)))