
; Original program: exp(x) / (exp(x) - 1) in binary64.
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
real(8) function code(x)
! exp(x) / (exp(x) - 1): evaluate exp(x) once instead of twice.
real(8), intent (in) :: x
real(8) :: e
e = exp(x)
code = e / (e - 1.0d0)
end function
public static double code(double x) {
	// exp(x) / (exp(x) - 1): evaluate Math.exp(x) once instead of twice.
	// NOTE: exp(x) - 1 still cancels near x == 0; see the expm1 alternative.
	final double e = Math.exp(x);
	return e / (e - 1.0);
}
def code(x):
    """exp(x) / (exp(x) - 1), evaluating math.exp(x) once instead of twice.

    NOTE: exp(x) - 1 still cancels near x == 0; see the expm1 alternative.
    """
    e = math.exp(x)
    return e / (e - 1.0)
function code(x)
	# exp(x) / (exp(x) - 1): evaluate exp(x) once instead of twice.
	e = exp(x)
	return Float64(e / Float64(e - 1.0))
end
function tmp = code(x)
	% exp(x) / (exp(x) - 1): evaluate exp(x) once instead of twice.
	e = exp(x);
	tmp = e / (e - 1.0);
end
(* exp(x)/(exp(x)-1) at machine precision; Exp[x] is evaluated twice here. *)
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Original program (repeated): exp(x) / (exp(x) - 1) in binary64.
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
real(8) function code(x)
! exp(x) / (exp(x) - 1): evaluate exp(x) once instead of twice.
real(8), intent (in) :: x
real(8) :: e
e = exp(x)
code = e / (e - 1.0d0)
end function
public static double code(double x) {
	// exp(x) / (exp(x) - 1): evaluate Math.exp(x) once instead of twice.
	final double e = Math.exp(x);
	return e / (e - 1.0);
}
def code(x):
    """exp(x) / (exp(x) - 1), evaluating math.exp(x) once instead of twice."""
    e = math.exp(x)
    return e / (e - 1.0)
function code(x)
	# exp(x) / (exp(x) - 1): evaluate exp(x) once instead of twice.
	e = exp(x)
	return Float64(e / Float64(e - 1.0))
end
function tmp = code(x)
	% exp(x) / (exp(x) - 1): evaluate exp(x) once instead of twice.
	e = exp(x);
	tmp = e / (e - 1.0);
end
(* exp(x)/(exp(x)-1) at machine precision; Exp[x] is evaluated twice here. *)
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
; Rewrite: -1 / expm1(-x) avoids the cancellation of exp(x) - 1 near x == 0.
(FPCore (x) :precision binary64 (/ -1.0 (expm1 (- x))))
double code(double x) {
return -1.0 / expm1(-x);
}
public static double code(double x) {
	// -1 / expm1(-x): well-conditioned form of exp(x)/(exp(x)-1) near x == 0.
	final double denom = Math.expm1(-x);
	return -1.0 / denom;
}
def code(x):
    """-1 / expm1(-x): well-conditioned form of exp(x)/(exp(x)-1) near x == 0."""
    denom = math.expm1(-x)
    return -1.0 / denom
function code(x)
	# -1 / expm1(-x): well-conditioned form of exp(x)/(exp(x)-1) near x == 0.
	denom = expm1(Float64(-x))
	return Float64(-1.0 / denom)
end
(* -1 / (Exp[-x] - 1). NOTE(review): unlike the other translations this is not a fused expm1, so it still cancels near x == 0 — confirm intended. *)
code[x_] := N[(-1.0 / N[(Exp[(-x)] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\mathsf{expm1}\left(-x\right)}
\end{array}
Initial program 39.2%
sub-neg39.2%
+-commutative39.2%
rgt-mult-inverse4.0%
exp-neg4.0%
distribute-rgt-neg-out4.0%
*-rgt-identity4.0%
distribute-lft-in4.0%
neg-sub04.0%
associate-+l-4.0%
neg-sub03.9%
associate-/r*3.9%
*-rgt-identity3.9%
associate-*r/3.9%
rgt-mult-inverse39.1%
distribute-frac-neg239.1%
distribute-neg-frac39.1%
metadata-eval39.1%
expm1-define100.0%
Simplified100.0%
; Small-|x| series form: -1 / (x*(x*(0.5 + x*(x/24 - 1/6)) - 1)).
; 0.041666666666666664 = 1/24, 0.16666666666666666 = 1/6.
(FPCore (x)
:precision binary64
(/
-1.0
(*
x
(+
(* x (+ 0.5 (* x (- (* x 0.041666666666666664) 0.16666666666666666))))
-1.0))))
double code(double x) {
	// Small-|x| series form: -1 / (x*(x*(0.5 + x*(x/24 - 1/6)) - 1)).
	// Keep the generated Horner evaluation order — reassociation changes rounding.
	return -1.0 / (x * ((x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
! Small-|x| series form: -1 / (x*(x*(0.5 + x*(x/24 - 1/6)) - 1)); keep order.
code = (-1.0d0) / (x * ((x * (0.5d0 + (x * ((x * 0.041666666666666664d0) - 0.16666666666666666d0)))) + (-1.0d0)))
end function
public static double code(double x) {
	// Small-|x| series form: -1 / (x*(x*(0.5 + x*(x/24 - 1/6)) - 1)).
	// Keep the generated Horner evaluation order — reassociation changes rounding.
	return -1.0 / (x * ((x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0));
}
# Small-|x| series form: -1 / (x*(x*(0.5 + x*(x/24 - 1/6)) - 1)); keep the Horner order.
def code(x): return -1.0 / (x * ((x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0))
# Small-|x| series form: -1 / (x*(x*(0.5 + x*(x/24 - 1/6)) - 1)); keep the Horner order.
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * Float64(0.5 + Float64(x * Float64(Float64(x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0))) end
% Small-|x| series form: -1 / (x*(x*(0.5 + x*(x/24 - 1/6)) - 1)); keep the Horner order.
function tmp = code(x) tmp = -1.0 / (x * ((x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))) + -1.0)); end
(* Small-|x| series form: -1 / (x*(x*(0.5 + x*(x/24 - 1/6)) - 1)); keep the Horner order. *)
code[x_] := N[(-1.0 / N[(x * N[(N[(x * N[(0.5 + N[(x * N[(N[(x * 0.041666666666666664), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right) + -1\right)}
\end{array}
Initial program 39.2%
sub-neg39.2%
+-commutative39.2%
rgt-mult-inverse4.0%
exp-neg4.0%
distribute-rgt-neg-out4.0%
*-rgt-identity4.0%
distribute-lft-in4.0%
neg-sub04.0%
associate-+l-4.0%
neg-sub03.9%
associate-/r*3.9%
*-rgt-identity3.9%
associate-*r/3.9%
rgt-mult-inverse39.1%
distribute-frac-neg239.1%
distribute-neg-frac39.1%
metadata-eval39.1%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 93.6%
Final simplification93.6%
; Small-|x| series form with the cubic term dropped: -1 / (x*(x*(0.5 + x*x/24) - 1)).
(FPCore (x) :precision binary64 (/ -1.0 (* x (+ (* x (+ 0.5 (* x (* x 0.041666666666666664)))) -1.0))))
double code(double x) {
	// Small-|x| series form (cubic term dropped): -1 / (x*(x*(0.5 + x*x/24) - 1)).
	// Keep the generated Horner evaluation order.
	return -1.0 / (x * ((x * (0.5 + (x * (x * 0.041666666666666664)))) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
! Small-|x| series form (cubic term dropped); keep the evaluation order.
code = (-1.0d0) / (x * ((x * (0.5d0 + (x * (x * 0.041666666666666664d0)))) + (-1.0d0)))
end function
public static double code(double x) {
	// Small-|x| series form (cubic term dropped): -1 / (x*(x*(0.5 + x*x/24) - 1)).
	// Keep the generated Horner evaluation order.
	return -1.0 / (x * ((x * (0.5 + (x * (x * 0.041666666666666664)))) + -1.0));
}
# Small-|x| series form (cubic term dropped); keep the Horner evaluation order.
def code(x): return -1.0 / (x * ((x * (0.5 + (x * (x * 0.041666666666666664)))) + -1.0))
# Small-|x| series form (cubic term dropped); keep the Horner evaluation order.
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * Float64(0.5 + Float64(x * Float64(x * 0.041666666666666664)))) + -1.0))) end
% Small-|x| series form (cubic term dropped); keep the Horner evaluation order.
function tmp = code(x) tmp = -1.0 / (x * ((x * (0.5 + (x * (x * 0.041666666666666664)))) + -1.0)); end
(* Small-|x| series form (cubic term dropped); keep the Horner evaluation order. *)
code[x_] := N[(-1.0 / N[(x * N[(N[(x * N[(0.5 + N[(x * N[(x * 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664\right)\right) + -1\right)}
\end{array}
Initial program 39.2%
sub-neg39.2%
+-commutative39.2%
rgt-mult-inverse4.0%
exp-neg4.0%
distribute-rgt-neg-out4.0%
*-rgt-identity4.0%
distribute-lft-in4.0%
neg-sub04.0%
associate-+l-4.0%
neg-sub03.9%
associate-/r*3.9%
*-rgt-identity3.9%
associate-*r/3.9%
rgt-mult-inverse39.1%
distribute-frac-neg239.1%
distribute-neg-frac39.1%
metadata-eval39.1%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 93.6%
Taylor expanded in x around inf 93.3%
*-commutative93.3%
Simplified93.3%
Final simplification93.3%
; Small-|x| series form: -1 / (x*(x*(0.5 - x/6) - 1)).
(FPCore (x) :precision binary64 (/ -1.0 (* x (+ (* x (+ 0.5 (* x -0.16666666666666666))) -1.0))))
double code(double x) {
	// Small-|x| series form: -1 / (x*(x*(0.5 - x/6) - 1)); keep the order.
	return -1.0 / (x * ((x * (0.5 + (x * -0.16666666666666666))) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
! Small-|x| series form: -1 / (x*(x*(0.5 - x/6) - 1)); keep the order.
code = (-1.0d0) / (x * ((x * (0.5d0 + (x * (-0.16666666666666666d0)))) + (-1.0d0)))
end function
public static double code(double x) {
	// Small-|x| series form: -1 / (x*(x*(0.5 - x/6) - 1)); keep the order.
	return -1.0 / (x * ((x * (0.5 + (x * -0.16666666666666666))) + -1.0));
}
# Small-|x| series form: -1 / (x*(x*(0.5 - x/6) - 1)); keep the evaluation order.
def code(x): return -1.0 / (x * ((x * (0.5 + (x * -0.16666666666666666))) + -1.0))
# Small-|x| series form: -1 / (x*(x*(0.5 - x/6) - 1)); keep the evaluation order.
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * Float64(0.5 + Float64(x * -0.16666666666666666))) + -1.0))) end
% Small-|x| series form: -1 / (x*(x*(0.5 - x/6) - 1)); keep the evaluation order.
function tmp = code(x) tmp = -1.0 / (x * ((x * (0.5 + (x * -0.16666666666666666))) + -1.0)); end
(* Small-|x| series form: -1 / (x*(x*(0.5 - x/6) - 1)); keep the evaluation order. *)
code[x_] := N[(-1.0 / N[(x * N[(N[(x * N[(0.5 + N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot \left(0.5 + x \cdot -0.16666666666666666\right) + -1\right)}
\end{array}
Initial program 39.2%
sub-neg39.2%
+-commutative39.2%
rgt-mult-inverse4.0%
exp-neg4.0%
distribute-rgt-neg-out4.0%
*-rgt-identity4.0%
distribute-lft-in4.0%
neg-sub04.0%
associate-+l-4.0%
neg-sub03.9%
associate-/r*3.9%
*-rgt-identity3.9%
associate-*r/3.9%
rgt-mult-inverse39.1%
distribute-frac-neg239.1%
distribute-neg-frac39.1%
metadata-eval39.1%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 88.5%
Final simplification88.5%
; Small-|x| series form: -1 / (x*(-x*x/6 - 1)).
(FPCore (x) :precision binary64 (/ -1.0 (* x (+ (* x (* x -0.16666666666666666)) -1.0))))
double code(double x) {
	// Small-|x| series form: -1 / (x*(-x*x/6 - 1)); keep the order.
	return -1.0 / (x * ((x * (x * -0.16666666666666666)) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
! Small-|x| series form: -1 / (x*(-x*x/6 - 1)); keep the order.
code = (-1.0d0) / (x * ((x * (x * (-0.16666666666666666d0))) + (-1.0d0)))
end function
public static double code(double x) {
	// Small-|x| series form: -1 / (x*(-x*x/6 - 1)); keep the order.
	return -1.0 / (x * ((x * (x * -0.16666666666666666)) + -1.0));
}
# Small-|x| series form: -1 / (x*(-x*x/6 - 1)); keep the evaluation order.
def code(x): return -1.0 / (x * ((x * (x * -0.16666666666666666)) + -1.0))
# Small-|x| series form: -1 / (x*(-x*x/6 - 1)); keep the evaluation order.
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * Float64(x * -0.16666666666666666)) + -1.0))) end
% Small-|x| series form: -1 / (x*(-x*x/6 - 1)); keep the evaluation order.
function tmp = code(x) tmp = -1.0 / (x * ((x * (x * -0.16666666666666666)) + -1.0)); end
(* Small-|x| series form: -1 / (x*(-x*x/6 - 1)); keep the evaluation order. *)
code[x_] := N[(-1.0 / N[(x * N[(N[(x * N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot \left(x \cdot -0.16666666666666666\right) + -1\right)}
\end{array}
Initial program 39.2%
sub-neg39.2%
+-commutative39.2%
rgt-mult-inverse4.0%
exp-neg4.0%
distribute-rgt-neg-out4.0%
*-rgt-identity4.0%
distribute-lft-in4.0%
neg-sub04.0%
associate-+l-4.0%
neg-sub03.9%
associate-/r*3.9%
*-rgt-identity3.9%
associate-*r/3.9%
rgt-mult-inverse39.1%
distribute-frac-neg239.1%
distribute-neg-frac39.1%
metadata-eval39.1%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 93.6%
Taylor expanded in x around 0 88.5%
+-commutative88.5%
Simplified88.5%
Taylor expanded in x around inf 88.1%
*-commutative88.1%
Simplified88.1%
Final simplification88.1%
; Small-|x| series form: -1 / (x*(x/2 - 1)).
(FPCore (x) :precision binary64 (/ -1.0 (* x (+ (* x 0.5) -1.0))))
double code(double x) {
	// Small-|x| series form: -1 / (x*(x/2 - 1)); keep the order.
	return -1.0 / (x * ((x * 0.5) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
! Small-|x| series form: -1 / (x*(x/2 - 1)); keep the order.
code = (-1.0d0) / (x * ((x * 0.5d0) + (-1.0d0)))
end function
public static double code(double x) {
	// Small-|x| series form: -1 / (x*(x/2 - 1)); keep the order.
	return -1.0 / (x * ((x * 0.5) + -1.0));
}
# Small-|x| series form: -1 / (x*(x/2 - 1)); keep the evaluation order.
def code(x): return -1.0 / (x * ((x * 0.5) + -1.0))
# Small-|x| series form: -1 / (x*(x/2 - 1)); keep the evaluation order.
function code(x) return Float64(-1.0 / Float64(x * Float64(Float64(x * 0.5) + -1.0))) end
% Small-|x| series form: -1 / (x*(x/2 - 1)); keep the evaluation order.
function tmp = code(x) tmp = -1.0 / (x * ((x * 0.5) + -1.0)); end
(* Small-|x| series form: -1 / (x*(x/2 - 1)); keep the evaluation order. *)
code[x_] := N[(-1.0 / N[(x * N[(N[(x * 0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x \cdot 0.5 + -1\right)}
\end{array}
Initial program 39.2%
sub-neg39.2%
+-commutative39.2%
rgt-mult-inverse4.0%
exp-neg4.0%
distribute-rgt-neg-out4.0%
*-rgt-identity4.0%
distribute-lft-in4.0%
neg-sub04.0%
associate-+l-4.0%
neg-sub03.9%
associate-/r*3.9%
*-rgt-identity3.9%
associate-*r/3.9%
rgt-mult-inverse39.1%
distribute-frac-neg239.1%
distribute-neg-frac39.1%
metadata-eval39.1%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 83.8%
Final simplification83.8%
; Reciprocal alternative: 1/x (per the trace, from a Taylor expansion about x = 0).
(FPCore (x) :precision binary64 (/ 1.0 x))
double code(double x) {
	// Reciprocal alternative: 1 / x.
	double reciprocal = 1.0 / x;
	return reciprocal;
}
real(8) function code(x)
! Reciprocal alternative: 1 / x.
real(8), intent (in) :: x
code = 1.0d0 / x
end function
public static double code(double x) {
	// Reciprocal alternative: 1 / x.
	final double reciprocal = 1.0 / x;
	return reciprocal;
}
def code(x):
    """Reciprocal alternative: 1 / x."""
    reciprocal = 1.0 / x
    return reciprocal
function code(x)
	# Reciprocal alternative: 1 / x.
	reciprocal = Float64(1.0 / x)
	return reciprocal
end
function tmp = code(x)
	% Reciprocal alternative: 1 / x.
	tmp = 1.0 / x;
end
(* Reciprocal alternative: 1 / x at machine precision. *)
code[x_] := N[(1.0 / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x}
\end{array}
Initial program 39.2%
sub-neg39.2%
+-commutative39.2%
rgt-mult-inverse4.0%
exp-neg4.0%
distribute-rgt-neg-out4.0%
*-rgt-identity4.0%
distribute-lft-in4.0%
neg-sub04.0%
associate-+l-4.0%
neg-sub03.9%
associate-/r*3.9%
*-rgt-identity3.9%
associate-*r/3.9%
rgt-mult-inverse39.1%
distribute-frac-neg239.1%
distribute-neg-frac39.1%
metadata-eval39.1%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 65.1%
; Linear alternative: x * (1/12); low accuracy per the report's table.
(FPCore (x) :precision binary64 (* x 0.08333333333333333))
double code(double x) {
	// Linear alternative: x * (1/12).
	const double coeff = 0.08333333333333333;
	return x * coeff;
}
real(8) function code(x)
! Linear alternative: x * (1/12).
real(8), intent (in) :: x
code = x * 0.08333333333333333d0
end function
public static double code(double x) {
	// Linear alternative: x * (1/12).
	final double coeff = 0.08333333333333333;
	return x * coeff;
}
def code(x):
    """Linear alternative: x * (1/12)."""
    coeff = 0.08333333333333333
    return x * coeff
function code(x)
	# Linear alternative: x * (1/12).
	coeff = 0.08333333333333333
	return Float64(x * coeff)
end
function tmp = code(x)
	% Linear alternative: x * (1/12).
	coeff = 0.08333333333333333;
	tmp = x * coeff;
end
(* Linear alternative: x * (1/12) at machine precision. *)
code[x_] := N[(x * 0.08333333333333333), $MachinePrecision]
\begin{array}{l}
\\
x \cdot 0.08333333333333333
\end{array}
Initial program 39.2%
sub-neg39.2%
+-commutative39.2%
rgt-mult-inverse4.0%
exp-neg4.0%
distribute-rgt-neg-out4.0%
*-rgt-identity4.0%
distribute-lft-in4.0%
neg-sub04.0%
associate-+l-4.0%
neg-sub03.9%
associate-/r*3.9%
*-rgt-identity3.9%
associate-*r/3.9%
rgt-mult-inverse39.1%
distribute-frac-neg239.1%
distribute-neg-frac39.1%
metadata-eval39.1%
expm1-define100.0%
Simplified100.0%
Taylor expanded in x around 0 64.6%
*-commutative64.6%
Simplified64.6%
Taylor expanded in x around -inf 32.6%
mul-1-neg32.6%
distribute-rgt-neg-in32.6%
sub-neg32.6%
associate-*r/32.6%
+-commutative32.6%
distribute-lft-in32.6%
neg-mul-132.6%
distribute-neg-frac32.6%
metadata-eval32.6%
metadata-eval32.6%
metadata-eval32.6%
Simplified32.6%
Taylor expanded in x around inf 3.3%
; Constant alternative: always -1.0, independent of x.
(FPCore (x) :precision binary64 -1.0)
double code(double x) {
	// Constant alternative: always -1, independent of x.
	(void) x;
	return -1.0;
}
real(8) function code(x)
! Constant alternative: always -1.0, independent of x.
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double x) {
	// Constant alternative: always -1, independent of x.
	final double result = -1.0;
	return result;
}
def code(x):
    """Constant alternative: always -1.0, independent of x."""
    result = -1.0
    return result
function code(x)
	# Constant alternative: always -1.0, independent of x.
	result = -1.0
	return result
end
function tmp = code(x)
	% Constant alternative: always -1.0, independent of x.
	tmp = -1.0;
end
(* Constant alternative: always -1.0, independent of x. *)
code[x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 39.2%
sub-neg39.2%
+-commutative39.2%
rgt-mult-inverse4.0%
exp-neg4.0%
distribute-rgt-neg-out4.0%
*-rgt-identity4.0%
distribute-lft-in4.0%
neg-sub04.0%
associate-+l-4.0%
neg-sub03.9%
associate-/r*3.9%
*-rgt-identity3.9%
associate-*r/3.9%
rgt-mult-inverse39.1%
distribute-frac-neg239.1%
distribute-neg-frac39.1%
metadata-eval39.1%
expm1-define100.0%
Simplified100.0%
Applied egg-rr0.3%
unpow-10.3%
associate-*r/0.3%
*-rgt-identity0.3%
distribute-frac-neg20.3%
*-inverses3.3%
metadata-eval3.3%
Simplified3.3%
; Same as the earlier expm1 rewrite, with -1 spelled as the negation (- 1.0).
(FPCore (x) :precision binary64 (/ (- 1.0) (expm1 (- x))))
double code(double x) {
return -1.0 / expm1(-x);
}
public static double code(double x) {
	// -1 / expm1(-x): well-conditioned form of exp(x)/(exp(x)-1) near x == 0.
	final double denom = Math.expm1(-x);
	return -1.0 / denom;
}
def code(x):
    """-1 / expm1(-x): well-conditioned form of exp(x)/(exp(x)-1) near x == 0."""
    denom = math.expm1(-x)
    return -1.0 / denom
function code(x)
	# -1 / expm1(-x): well-conditioned form of exp(x)/(exp(x)-1) near x == 0.
	denom = expm1(Float64(-x))
	return Float64(Float64(-1.0) / denom)
end
(* -1 / (Exp[-x] - 1). NOTE(review): not a fused expm1, so it still cancels near x == 0 — confirm intended. *)
code[x_] := N[((-1.0) / N[(Exp[(-x)] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\mathsf{expm1}\left(-x\right)}
\end{array}
herbie shell --seed 2024157
; Benchmark "expq2 (section 3.11)": exp(x)/(exp(x)-1) in binary64,
; precondition x < 710 (keeps exp(x) finite), with the recommended
; rewrite -1/expm1(-x) recorded as the :alt annotation.
(FPCore (x)
:name "expq2 (section 3.11)"
:precision binary64
:pre (> 710.0 x)
:alt
(! :herbie-platform default (/ (- 1) (expm1 (- x))))
(/ (exp x) (- (exp x) 1.0)))