
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
! Computes (exp(x) - 2) + exp(-x) in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
public static double code(double x) {
    // (e^x - 2) + e^(-x), evaluated directly in double precision.
    final double grow = Math.exp(x);
    final double decay = Math.exp(-x);
    return (grow - 2.0) + decay;
}
def code(x):
    """(e**x - 2) + e**(-x), evaluated directly in double precision."""
    rising = math.exp(x)
    falling = math.exp(-x)
    return (rising - 2.0) + falling
# (e^x - 2) + e^(-x), with explicit Float64 rounding at each step.
function code(x)
    left = Float64(exp(x) - 2.0)
    right = exp(Float64(-x))
    return Float64(left + right)
end
% Computes (exp(x) - 2) + exp(-x) in double precision.
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
(* Computes (Exp[x] - 2) + Exp[-x], rounding each step to machine precision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
! Baseline alternative: (exp(x) - 2) + exp(-x) in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
public static double code(double x) {
    // Baseline alternative: (e^x - 2) + e^(-x).
    final double up = Math.exp(x);
    final double down = Math.exp(-x);
    return (up - 2.0) + down;
}
def code(x):
    """Baseline alternative: (e**x - 2) + e**(-x)."""
    up = math.exp(x)
    down = math.exp(-x)
    return (up - 2.0) + down
# Baseline alternative: (e^x - 2) + e^(-x), Float64-rounded stepwise.
function code(x)
    grow = Float64(exp(x) - 2.0)
    shrink = exp(Float64(-x))
    return Float64(grow + shrink)
end
% Baseline alternative: (exp(x) - 2) + exp(-x) in double precision.
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
(* Baseline alternative: (Exp[x] - 2) + Exp[-x], machine-precision rounded. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
(FPCore (x) :precision binary64 (+ (* 0.002777777777777778 (pow x 6.0)) (fma x x (* 0.08333333333333333 (pow x 4.0)))))
double code(double x) {
return (0.002777777777777778 * pow(x, 6.0)) + fma(x, x, (0.08333333333333333 * pow(x, 4.0)));
}
# Degree-6 Taylor core: x^2 + x^4/12 + x^6/360, fma for the x^2 term.
function code(x)
    sextic = Float64(0.002777777777777778 * (x ^ 6.0))
    quartic = Float64(0.08333333333333333 * (x ^ 4.0))
    return Float64(sextic + fma(x, x, quartic))
end
(* Degree-6 Taylor core: x^2 + x^4/12 + x^6/360, machine-precision rounded. *)
code[x_] := N[(N[(0.002777777777777778 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(x * x + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.002777777777777778 \cdot {x}^{6} + \mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)
\end{array}
Initial program 52.9%
associate-+l- 52.9%
sub-neg 52.9%
sub-neg 52.9%
distribute-neg-in 52.9%
remove-double-neg 52.9%
+-commutative 52.9%
metadata-eval 52.9%
Simplified 52.9%
Taylor expanded in x around 0 99.6%
+-commutative 99.6%
unpow2 99.6%
fma-def 99.6%
Applied egg-rr 99.6%
Final simplification 99.6%
(FPCore (x) :precision binary64 (fma x x (* 0.08333333333333333 (pow x 4.0))))
double code(double x) {
return fma(x, x, (0.08333333333333333 * pow(x, 4.0)));
}
# Degree-4 Taylor core: x^2 + x^4/12, via fma.
function code(x)
    quartic_term = Float64(0.08333333333333333 * (x ^ 4.0))
    return fma(x, x, quartic_term)
end
(* Degree-4 Taylor core: x^2 + x^4/12, machine-precision rounded. *)
code[x_] := N[(x * x + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)
\end{array}
Initial program 52.9%
associate-+l- 52.9%
sub-neg 52.9%
sub-neg 52.9%
distribute-neg-in 52.9%
remove-double-neg 52.9%
+-commutative 52.9%
metadata-eval 52.9%
Simplified 52.9%
Taylor expanded in x around 0 99.4%
+-commutative 99.6%
unpow2 99.6%
fma-def 99.6%
Applied egg-rr 99.4%
Final simplification 99.4%
(FPCore (x) :precision binary64 (pow x 2.0))
double code(double x) {
return pow(x, 2.0);
}
! Leading Taylor term of (exp(x) - 2) + exp(-x): x**2.
real(8) function code(x)
real(8), intent (in) :: x
code = x ** 2.0d0
end function
public static double code(double x) {
    // Leading Taylor term: x^2.
    final double squared = Math.pow(x, 2.0);
    return squared;
}
def code(x):
    """Leading Taylor term: x squared, via math.pow."""
    exponent = 2.0
    return math.pow(x, exponent)
# Leading Taylor term: x squared.
function code(x)
    return x ^ 2.0
end
% Leading Taylor term: x squared.
function tmp = code(x) tmp = x ^ 2.0; end
(* Leading Taylor term: x squared, machine-precision rounded. *)
code[x_] := N[Power[x, 2.0], $MachinePrecision]
\begin{array}{l}
\\
{x}^{2}
\end{array}
Initial program 52.9%
associate-+l- 52.9%
sub-neg 52.9%
sub-neg 52.9%
distribute-neg-in 52.9%
remove-double-neg 52.9%
+-commutative 52.9%
metadata-eval 52.9%
Simplified 52.9%
Taylor expanded in x around 0 98.8%
Final simplification 98.8%
(FPCore (x) :precision binary64 (* 4.0 (pow (sinh (/ x 2.0)) 2.0)))
double code(double x) {
return 4.0 * pow(sinh((x / 2.0)), 2.0);
}
! Exact rewrite of (exp(x) - 2) + exp(-x) as 4 * sinh(x/2)**2.
real(8) function code(x)
real(8), intent (in) :: x
code = 4.0d0 * (sinh((x / 2.0d0)) ** 2.0d0)
end function
public static double code(double x) {
    // Exact rewrite of (e^x - 2) + e^(-x) as 4 * sinh(x/2)^2.
    final double halfSinh = Math.sinh(x / 2.0);
    return 4.0 * Math.pow(halfSinh, 2.0);
}
def code(x):
    """Exact rewrite of (e**x - 2) + e**(-x) as 4*sinh(x/2)**2."""
    half_sinh = math.sinh(x / 2.0)
    return 4.0 * math.pow(half_sinh, 2.0)
# Exact rewrite of (e^x - 2) + e^(-x) as 4 * sinh(x/2)^2.
function code(x)
    half = Float64(x / 2.0)
    s = sinh(half)
    return Float64(4.0 * (s ^ 2.0))
end
% Exact rewrite of (exp(x) - 2) + exp(-x) as 4 * sinh(x/2)^2.
function tmp = code(x) tmp = 4.0 * (sinh((x / 2.0)) ^ 2.0); end
(* Exact rewrite of (Exp[x] - 2) + Exp[-x] as 4 * Sinh[x/2]^2. *)
code[x_] := N[(4.0 * N[Power[N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4 \cdot {\sinh \left(\frac{x}{2}\right)}^{2}
\end{array}
herbie shell --seed 2024017
(FPCore (x)
:name "exp2 (problem 3.3.7)"
:precision binary64
:pre (<= (fabs x) 710.0)
:herbie-target
(* 4.0 (pow (sinh (/ x 2.0)) 2.0))
(+ (- (exp x) 2.0) (exp (- x))))