
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
! Herbie-generated binary64 evaluation of (exp(x) - 2) + exp(-x).
! NOTE(review): the evaluation order matters for FP accuracy; do not reassociate.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
/** Evaluates (e^x - 2) + e^(-x) in double precision.
 *  Herbie-generated: keep this exact evaluation order. */
public static double code(double x) {
	double expPos = Math.exp(x);
	double expNeg = Math.exp(-x);
	return (expPos - 2.0) + expNeg;
}
def code(x):
    """Evaluate (e**x - 2) + e**(-x) in double precision.

    Herbie-generated: the evaluation order is significant for FP accuracy.
    """
    pos = math.exp(x)
    neg = math.exp(-x)
    return (pos - 2.0) + neg
# Herbie-generated binary64 evaluation of (exp(x) - 2) + exp(-x); order matters.
function code(x) return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) end
% Herbie-generated double-precision evaluation of (exp(x) - 2) + exp(-x).
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
(* Herbie-generated: (Exp[x] - 2) + Exp[-x], each step rounded to machine precision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
/* Herbie alternative listing: the unmodified initial program,
 * (e^x - 2) + e^(-x) in binary64; evaluation order is significant. */
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
! Herbie alternative listing: unmodified (exp(x) - 2) + exp(-x) in real(8).
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
// Herbie alternative listing: unmodified (e^x - 2) + e^(-x) in double precision.
public static double code(double x) {
return (Math.exp(x) - 2.0) + Math.exp(-x);
}
# Herbie alternative listing: unmodified (e**x - 2) + e**(-x) in double precision.
def code(x): return (math.exp(x) - 2.0) + math.exp(-x)
# Herbie alternative listing: unmodified (exp(x) - 2) + exp(-x) in Float64.
function code(x) return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) end
% Herbie alternative listing: unmodified (exp(x) - 2) + exp(-x).
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
(* Herbie alternative listing: unmodified (Exp[x] - 2) + Exp[-x] at machine precision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
(FPCore (x) :precision binary64 (fma x x (* 0.08333333333333333 (pow x 4.0))))
double code(double x) {
return fma(x, x, (0.08333333333333333 * pow(x, 4.0)));
}
# x*x + 0.08333333333333333 * x^4 with a fused multiply-add (rounds once).
function code(x) return fma(x, x, Float64(0.08333333333333333 * (x ^ 4.0))) end
(* x*x + 0.08333333333333333 * x^4; the fma of the other targets is emulated with plain arithmetic here. *)
code[x_] := N[(x * x + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)
\end{array}
Initial program 55.0%
associate-+l- 54.9%
sub-neg 54.9%
sub-neg 54.9%
distribute-neg-in 54.9%
remove-double-neg 54.9%
+-commutative 54.9%
metadata-eval 54.9%
Simplified 54.9%
Taylor expanded in x around 0 98.9%
+-commutative 98.9%
unpow2 98.9%
fma-def 98.9%
Applied egg-rr 98.9%
Final simplification 98.9%
(FPCore (x) :precision binary64 (pow x 2.0))
double code(double x) {
return pow(x, 2.0);
}
! Herbie alternative: x ** 2 in real(8).
real(8) function code(x)
real(8), intent (in) :: x
code = x ** 2.0d0
end function
// Herbie alternative: x^2 via Math.pow.
public static double code(double x) {
return Math.pow(x, 2.0);
}
def code(x):
    """Return x squared via math.pow (result is always a float)."""
    return math.pow(x, 2.0)
# Herbie alternative: x ^ 2.
function code(x) return x ^ 2.0 end
% Herbie alternative: x ^ 2.
function tmp = code(x) tmp = x ^ 2.0; end
(* Herbie alternative: x^2 at machine precision. *)
code[x_] := N[Power[x, 2.0], $MachinePrecision]
\begin{array}{l}
\\
{x}^{2}
\end{array}
Initial program 55.0%
associate-+l- 54.9%
sub-neg 54.9%
sub-neg 54.9%
distribute-neg-in 54.9%
remove-double-neg 54.9%
+-commutative 54.9%
metadata-eval 54.9%
Simplified 54.9%
Taylor expanded in x around 0 98.3%
Final simplification 98.3%
(FPCore (x) :precision binary64 (expm1 x))
double code(double x) {
return expm1(x);
}
// Herbie alternative: e^x - 1 via Math.expm1 (accurate near x = 0).
public static double code(double x) {
return Math.expm1(x);
}
# Herbie alternative: e**x - 1 via math.expm1 (accurate near x = 0).
def code(x): return math.expm1(x)
# Herbie alternative: exp(x) - 1 via expm1 (accurate near x = 0).
function code(x) return expm1(x) end
(* NOTE(review): literal Exp[x] - 1, not a true expm1 — unlike the other targets this cancels for tiny x. *)
code[x_] := N[(Exp[x] - 1), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{expm1}\left(x\right)
\end{array}
Initial program 55.0%
associate-+l- 54.9%
sub-neg 54.9%
sub-neg 54.9%
distribute-neg-in 54.9%
remove-double-neg 54.9%
+-commutative 54.9%
metadata-eval 54.9%
Simplified 54.9%
Taylor expanded in x around 0 52.6%
Taylor expanded in x around inf 52.6%
expm1-def 6.1%
Simplified 6.1%
Final simplification 6.1%
(FPCore (x) :precision binary64 x)
/* Herbie alternative: identity — return x unchanged. */
double code(double x) {
	double value = x;
	return value;
}
! Herbie alternative: identity — return x unchanged.
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
// Herbie alternative: identity — return x unchanged.
public static double code(double x) {
return x;
}
def code(x):
    """Herbie alternative: identity — return x unchanged."""
    return x
# Herbie alternative: identity — return x unchanged.
function code(x) return x end
% Herbie alternative: identity — return x unchanged.
function tmp = code(x) tmp = x; end
(* Herbie alternative: identity — return x unchanged. *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 55.0%
associate-+l- 54.9%
sub-neg 54.9%
sub-neg 54.9%
distribute-neg-in 54.9%
remove-double-neg 54.9%
+-commutative 54.9%
metadata-eval 54.9%
Simplified 54.9%
Taylor expanded in x around 0 52.6%
Taylor expanded in x around 0 6.1%
Final simplification 6.1%
(FPCore (x) :precision binary64 0.0)
/* Herbie alternative: constant approximation — always 0.0 (x is ignored). */
double code(double x) {
	(void)x; /* parameter intentionally unused */
	return 0.0;
}
! Herbie alternative: constant approximation — always 0 (x is ignored).
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
// Herbie alternative: constant approximation — always 0.0 (x is ignored).
public static double code(double x) {
return 0.0;
}
def code(x):
    """Herbie alternative: constant approximation — always 0.0 (x is ignored)."""
    return 0.0
# Herbie alternative: constant approximation — always 0.0 (x is ignored).
function code(x) return 0.0 end
% Herbie alternative: constant approximation — always 0 (x is ignored).
function tmp = code(x) tmp = 0.0; end
(* Herbie alternative: constant approximation — always 0 (x is ignored). *)
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 55.0%
associate-+l- 54.9%
sub-neg 54.9%
sub-neg 54.9%
distribute-neg-in 54.9%
remove-double-neg 54.9%
+-commutative 54.9%
metadata-eval 54.9%
Simplified 54.9%
+-commutative 54.9%
associate-+r+ 55.0%
metadata-eval 55.0%
sub-neg 55.0%
add-log-exp 54.9%
+-commutative 54.9%
sub-neg 54.9%
metadata-eval 54.9%
associate-+r+ 54.8%
+-commutative 54.8%
+-commutative 54.8%
cosh-undef 54.8%
Applied egg-rr 54.8%
Taylor expanded in x around 0 52.4%
Final simplification 52.4%
(FPCore (x) :precision binary64 (* 4.0 (pow (sinh (/ x 2.0)) 2.0)))
double code(double x) {
return 4.0 * pow(sinh((x / 2.0)), 2.0);
}
! Herbie target form: 4 * sinh(x/2)**2, algebraically equal to exp(x) + exp(-x) - 2.
real(8) function code(x)
real(8), intent (in) :: x
code = 4.0d0 * (sinh((x / 2.0d0)) ** 2.0d0)
end function
// Herbie target form: 4 * sinh(x/2)^2, algebraically equal to e^x + e^(-x) - 2.
public static double code(double x) {
return 4.0 * Math.pow(Math.sinh((x / 2.0)), 2.0);
}
def code(x):
    """Evaluate 4 * sinh(x/2)**2, algebraically equal to e**x + e**(-x) - 2."""
    half_sinh = math.sinh(x / 2.0)
    return 4.0 * math.pow(half_sinh, 2.0)
# Herbie target form: 4 * sinh(x/2)^2, algebraically equal to exp(x) + exp(-x) - 2.
function code(x) return Float64(4.0 * (sinh(Float64(x / 2.0)) ^ 2.0)) end
% Herbie target form: 4 * sinh(x/2)^2, algebraically equal to exp(x) + exp(-x) - 2.
function tmp = code(x) tmp = 4.0 * (sinh((x / 2.0)) ^ 2.0); end
(* Herbie target form: 4 * Sinh[x/2]^2, algebraically equal to Exp[x] + Exp[-x] - 2. *)
code[x_] := N[(4.0 * N[Power[N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4 \cdot {\sinh \left(\frac{x}{2}\right)}^{2}
\end{array}
herbie shell --seed 2024020
(FPCore (x)
:name "exp2 (problem 3.3.7)"
:precision binary64
:pre (<= (fabs x) 710.0)
:herbie-target
(* 4.0 (pow (sinh (/ x 2.0)) 2.0))
(+ (- (exp x) 2.0) (exp (- x))))