
; FPCore spec of the initial program: e^x - 2 + e^-x in IEEE binary64
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
// C: direct evaluation; the subtraction cancels catastrophically near x = 0
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
! Fortran: same expression with double-precision (d0) literals
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
// Java: Math.exp rendering of the same expression
public static double code(double x) {
return (Math.exp(x) - 2.0) + Math.exp(-x);
}
# Python: math.exp rendering of the same expression
def code(x): return (math.exp(x) - 2.0) + math.exp(-x)
# Julia: explicit Float64(...) coercions mark each binary64 rounding step
function code(x) return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) end
% MATLAB: same expression
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
(* Mathematica: N[..., $MachinePrecision] wrappers emulate binary64 rounding *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; FPCore spec repeated verbatim for the alternatives listing (identical to the initial program)
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
// C: direct evaluation of e^x - 2 + e^-x
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
! Fortran: double-precision rendering
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
// Java
public static double code(double x) {
return (Math.exp(x) - 2.0) + Math.exp(-x);
}
# Python
def code(x): return (math.exp(x) - 2.0) + math.exp(-x)
# Julia: Float64(...) marks each rounding step
function code(x) return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) end
% MATLAB
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
(* Mathematica: machine-precision N[] wrappers emulate binary64 *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
; Alternative 1: degree-8 Taylor expansion of e^x - 2 + e^-x about x = 0,
; i.e. x^2 + x^4/12 + x^6/360 + x^8/20160 (coefficients 0.0833..., 0.00277..., 4.96e-5);
; the x^2 term is added via fma for a single rounding
(FPCore (x) :precision binary64 (+ (* 4.96031746031746e-5 (pow x 8.0)) (+ (* 0.002777777777777778 (pow x 6.0)) (fma x x (* 0.08333333333333333 (pow x 4.0))))))
// C: fma(x, x, t) computes x*x + t with one rounding (C99 <math.h>)
double code(double x) {
return (4.96031746031746e-5 * pow(x, 8.0)) + ((0.002777777777777778 * pow(x, 6.0)) + fma(x, x, (0.08333333333333333 * pow(x, 4.0))));
}
# Julia: fma is a built-in; Float64(...) marks each rounding step
function code(x) return Float64(Float64(4.96031746031746e-5 * (x ^ 8.0)) + Float64(Float64(0.002777777777777778 * (x ^ 6.0)) + fma(x, x, Float64(0.08333333333333333 * (x ^ 4.0))))) end
(* Mathematica: fma is rendered as plain x*x + ... (no fused operation available here) *)
code[x_] := N[(N[(4.96031746031746e-5 * N[Power[x, 8.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.002777777777777778 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(x * x + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4.96031746031746 \cdot 10^{-5} \cdot {x}^{8} + \left(0.002777777777777778 \cdot {x}^{6} + \mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)\right)
\end{array}
Initial program 50.7%
associate-+l- 50.7%
sub-neg 50.7%
sub-neg 50.7%
distribute-neg-in 50.7%
remove-double-neg 50.7%
+-commutative 50.7%
metadata-eval 50.7%
Simplified 50.7%
Taylor expanded in x around 0 99.5%
+-commutative 99.5%
unpow2 99.5%
fma-def 99.5%
Applied egg-rr 99.5%
Final simplification 99.5%
; Alternative 2: degree-6 Taylor expansion about x = 0,
; i.e. x^2 + x^4/12 + x^6/360 (coefficients 0.0833... and 0.00277...)
(FPCore (x) :precision binary64 (+ (* 0.002777777777777778 (pow x 6.0)) (+ (* 0.08333333333333333 (pow x 4.0)) (pow x 2.0))))
// C
double code(double x) {
return (0.002777777777777778 * pow(x, 6.0)) + ((0.08333333333333333 * pow(x, 4.0)) + pow(x, 2.0));
}
! Fortran: ** is the power operator
real(8) function code(x)
real(8), intent (in) :: x
code = (0.002777777777777778d0 * (x ** 6.0d0)) + ((0.08333333333333333d0 * (x ** 4.0d0)) + (x ** 2.0d0))
end function
// Java
public static double code(double x) {
return (0.002777777777777778 * Math.pow(x, 6.0)) + ((0.08333333333333333 * Math.pow(x, 4.0)) + Math.pow(x, 2.0));
}
# Python
def code(x): return (0.002777777777777778 * math.pow(x, 6.0)) + ((0.08333333333333333 * math.pow(x, 4.0)) + math.pow(x, 2.0))
# Julia
function code(x) return Float64(Float64(0.002777777777777778 * (x ^ 6.0)) + Float64(Float64(0.08333333333333333 * (x ^ 4.0)) + (x ^ 2.0))) end
% MATLAB
function tmp = code(x) tmp = (0.002777777777777778 * (x ^ 6.0)) + ((0.08333333333333333 * (x ^ 4.0)) + (x ^ 2.0)); end
(* Mathematica *)
code[x_] := N[(N[(0.002777777777777778 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision] + N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.002777777777777778 \cdot {x}^{6} + \left(0.08333333333333333 \cdot {x}^{4} + {x}^{2}\right)
\end{array}
Initial program 50.7%
associate-+l- 50.7%
sub-neg 50.7%
sub-neg 50.7%
distribute-neg-in 50.7%
remove-double-neg 50.7%
+-commutative 50.7%
metadata-eval 50.7%
Simplified 50.7%
Taylor expanded in x around 0 99.4%
Final simplification 99.4%
; Alternative 3: same degree-6 Taylor polynomial as alternative 2,
; but with the x^2 term fused into an fma (only C/Julia/Mathematica renderings emitted)
(FPCore (x) :precision binary64 (+ (* 0.002777777777777778 (pow x 6.0)) (fma x x (* 0.08333333333333333 (pow x 4.0)))))
// C: fma(x, x, t) = x*x + t with a single rounding
double code(double x) {
return (0.002777777777777778 * pow(x, 6.0)) + fma(x, x, (0.08333333333333333 * pow(x, 4.0)));
}
# Julia: built-in fma
function code(x) return Float64(Float64(0.002777777777777778 * (x ^ 6.0)) + fma(x, x, Float64(0.08333333333333333 * (x ^ 4.0)))) end
(* Mathematica: fma lowered to plain x*x + ... *)
code[x_] := N[(N[(0.002777777777777778 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(x * x + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.002777777777777778 \cdot {x}^{6} + \mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)
\end{array}
Initial program 50.7%
associate-+l- 50.7%
sub-neg 50.7%
sub-neg 50.7%
distribute-neg-in 50.7%
remove-double-neg 50.7%
+-commutative 50.7%
metadata-eval 50.7%
Simplified 50.7%
Taylor expanded in x around 0 99.4%
+-commutative 99.5%
unpow2 99.5%
fma-def 99.5%
Applied egg-rr 99.4%
Final simplification 99.4%
; Alternative 4: degree-4 Taylor expansion about x = 0, i.e. x^2 + x^4/12
(FPCore (x) :precision binary64 (+ (* 0.08333333333333333 (pow x 4.0)) (pow x 2.0)))
// C
double code(double x) {
return (0.08333333333333333 * pow(x, 4.0)) + pow(x, 2.0);
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = (0.08333333333333333d0 * (x ** 4.0d0)) + (x ** 2.0d0)
end function
// Java
public static double code(double x) {
return (0.08333333333333333 * Math.pow(x, 4.0)) + Math.pow(x, 2.0);
}
# Python
def code(x): return (0.08333333333333333 * math.pow(x, 4.0)) + math.pow(x, 2.0)
# Julia
function code(x) return Float64(Float64(0.08333333333333333 * (x ^ 4.0)) + (x ^ 2.0)) end
% MATLAB
function tmp = code(x) tmp = (0.08333333333333333 * (x ^ 4.0)) + (x ^ 2.0); end
(* Mathematica *)
code[x_] := N[(N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision] + N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.08333333333333333 \cdot {x}^{4} + {x}^{2}
\end{array}
Initial program 50.7%
associate-+l- 50.7%
sub-neg 50.7%
sub-neg 50.7%
distribute-neg-in 50.7%
remove-double-neg 50.7%
+-commutative 50.7%
metadata-eval 50.7%
Simplified 50.7%
Taylor expanded in x around 0 99.2%
Final simplification 99.2%
; Alternative 5: degree-4 Taylor polynomial with the x^2 term fused via fma
; (only C/Julia/Mathematica renderings emitted)
(FPCore (x) :precision binary64 (fma x x (* 0.08333333333333333 (pow x 4.0))))
// C: x*x + x^4/12 with one rounding on the final add
double code(double x) {
return fma(x, x, (0.08333333333333333 * pow(x, 4.0)));
}
# Julia: built-in fma
function code(x) return fma(x, x, Float64(0.08333333333333333 * (x ^ 4.0))) end
(* Mathematica: fma lowered to plain x*x + ... *)
code[x_] := N[(x * x + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)
\end{array}
Initial program 50.7%
associate-+l- 50.7%
sub-neg 50.7%
sub-neg 50.7%
distribute-neg-in 50.7%
remove-double-neg 50.7%
+-commutative 50.7%
metadata-eval 50.7%
Simplified 50.7%
Taylor expanded in x around 0 99.2%
+-commutative 99.5%
unpow2 99.5%
fma-def 99.5%
Applied egg-rr 99.2%
Final simplification 99.2%
; Alternative 6: leading Taylor term only, x^2 (written x*x)
(FPCore (x) :precision binary64 (* x x))
// C
double code(double x) {
return x * x;
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = x * x
end function
// Java
public static double code(double x) {
return x * x;
}
# Python
def code(x): return x * x
# Julia
function code(x) return Float64(x * x) end
% MATLAB
function tmp = code(x) tmp = x * x; end
(* Mathematica *)
code[x_] := N[(x * x), $MachinePrecision]
\begin{array}{l}
\\
x \cdot x
\end{array}
Initial program 50.7%
associate-+l- 50.7%
sub-neg 50.7%
sub-neg 50.7%
distribute-neg-in 50.7%
remove-double-neg 50.7%
+-commutative 50.7%
metadata-eval 50.7%
Simplified 50.7%
+-commutative 50.7%
associate-+r+ 50.7%
metadata-eval 50.7%
sub-neg 50.7%
add-exp-log 50.7%
sub-neg 50.7%
metadata-eval 50.7%
associate-+r+ 50.7%
+-commutative 50.7%
associate-+r+ 50.7%
+-commutative 50.7%
cosh-undef 50.7%
Applied egg-rr 50.7%
Taylor expanded in x around 0 44.7%
*-commutative 44.7%
exp-to-pow 98.9%
pow2 98.9%
Applied egg-rr 98.9%
Final simplification 98.9%
; Alternative 7: the identity function x (per the derivation log below, only 5.9% accurate)
(FPCore (x) :precision binary64 x)
// C
double code(double x) {
return x;
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
// Java
public static double code(double x) {
return x;
}
# Python
def code(x): return x
# Julia
function code(x) return x end
% MATLAB
function tmp = code(x) tmp = x; end
(* Mathematica *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 50.7%
associate-+l- 50.7%
sub-neg 50.7%
sub-neg 50.7%
distribute-neg-in 50.7%
remove-double-neg 50.7%
+-commutative 50.7%
metadata-eval 50.7%
Simplified 50.7%
Taylor expanded in x around 0 49.4%
Taylor expanded in x around 0 5.9%
Final simplification 5.9%
; Target form: 4*sinh(x/2)^2, exactly equal to e^x - 2 + e^-x
; (since e^x + e^-x - 2 = 2 cosh(x) - 2 = 4 sinh^2(x/2)); avoids the cancellation near x = 0
(FPCore (x) :precision binary64 (* 4.0 (pow (sinh (/ x 2.0)) 2.0)))
// C
double code(double x) {
return 4.0 * pow(sinh((x / 2.0)), 2.0);
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = 4.0d0 * (sinh((x / 2.0d0)) ** 2.0d0)
end function
// Java
public static double code(double x) {
return 4.0 * Math.pow(Math.sinh((x / 2.0)), 2.0);
}
# Python
def code(x): return 4.0 * math.pow(math.sinh((x / 2.0)), 2.0)
# Julia
function code(x) return Float64(4.0 * (sinh(Float64(x / 2.0)) ^ 2.0)) end
% MATLAB
function tmp = code(x) tmp = 4.0 * (sinh((x / 2.0)) ^ 2.0); end
(* Mathematica *)
code[x_] := N[(4.0 * N[Power[N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4 \cdot {\sinh \left(\frac{x}{2}\right)}^{2}
\end{array}
herbie shell --seed 2024013
; Reproduction input: the original FPCore with Herbie metadata -
; :pre bounds |x| <= 710 so exp stays finite in binary64, and
; :herbie-target records the known-accurate form 4*sinh(x/2)^2.
(FPCore (x)
:name "exp2 (problem 3.3.7)"
:precision binary64
:pre (<= (fabs x) 710.0)
:herbie-target
(* 4.0 (pow (sinh (/ x 2.0)) 2.0))
(+ (- (exp x) 2.0) (exp (- x))))