
Initial program:
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
public static double code(double x) {
return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x): return (math.exp(x) - 2.0) + math.exp(-x)
function code(x) return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) end
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(e^{x} - 2\right) + e^{-x}
\end{array}
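Near x = 0 the two exponentials are each ≈ 1, so the subtraction in (exp(x) - 2.0) + exp(-x) cancels almost every significant bit even though the true value is ≈ x²; this is what holds the initial program to the 52.6% accuracy reported below. A minimal Python sketch (illustration only, not Herbie output) makes the cancellation visible:

```python
import math

# Near x = 0 the naive form cancels catastrophically, while the
# mathematically equal 4*sinh(x/2)**2 (the target below) does not.
for x in [1e-2, 1e-5, 1e-8]:
    naive = (math.exp(x) - 2.0) + math.exp(-x)
    stable = 4.0 * math.sinh(x / 2.0) ** 2
    print(f"x={x:g}  naive={naive:.17e}  stable={stable:.17e}")
```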
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Alternative 1:
(FPCore (x)
:precision binary64
(*
(pow x 2.0)
(+
1.0
(*
(pow x 2.0)
(+
0.08333333333333333
(*
(pow x 2.0)
(+ 0.002777777777777778 (* (pow x 2.0) 4.96031746031746e-5))))))))
double code(double x) {
return pow(x, 2.0) * (1.0 + (pow(x, 2.0) * (0.08333333333333333 + (pow(x, 2.0) * (0.002777777777777778 + (pow(x, 2.0) * 4.96031746031746e-5))))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x ** 2.0d0) * (1.0d0 + ((x ** 2.0d0) * (0.08333333333333333d0 + ((x ** 2.0d0) * (0.002777777777777778d0 + ((x ** 2.0d0) * 4.96031746031746d-5))))))
end function
public static double code(double x) {
return Math.pow(x, 2.0) * (1.0 + (Math.pow(x, 2.0) * (0.08333333333333333 + (Math.pow(x, 2.0) * (0.002777777777777778 + (Math.pow(x, 2.0) * 4.96031746031746e-5))))));
}
def code(x): return math.pow(x, 2.0) * (1.0 + (math.pow(x, 2.0) * (0.08333333333333333 + (math.pow(x, 2.0) * (0.002777777777777778 + (math.pow(x, 2.0) * 4.96031746031746e-5))))))
function code(x) return Float64((x ^ 2.0) * Float64(1.0 + Float64((x ^ 2.0) * Float64(0.08333333333333333 + Float64((x ^ 2.0) * Float64(0.002777777777777778 + Float64((x ^ 2.0) * 4.96031746031746e-5))))))) end
function tmp = code(x) tmp = (x ^ 2.0) * (1.0 + ((x ^ 2.0) * (0.08333333333333333 + ((x ^ 2.0) * (0.002777777777777778 + ((x ^ 2.0) * 4.96031746031746e-5)))))); end
code[x_] := N[(N[Power[x, 2.0], $MachinePrecision] * N[(1.0 + N[(N[Power[x, 2.0], $MachinePrecision] * N[(0.08333333333333333 + N[(N[Power[x, 2.0], $MachinePrecision] * N[(0.002777777777777778 + N[(N[Power[x, 2.0], $MachinePrecision] * 4.96031746031746e-5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(0.08333333333333333 + {x}^{2} \cdot \left(0.002777777777777778 + {x}^{2} \cdot 4.96031746031746 \cdot 10^{-5}\right)\right)\right)
\end{array}
Derivation:
Initial program 52.6%
associate-+l- 52.7%
sub-neg 52.7%
sub-neg 52.7%
distribute-neg-in 52.7%
remove-double-neg 52.7%
+-commutative 52.7%
metadata-eval 52.7%
Simplified 52.7%
Taylor expanded in x around 0 99.8%
*-commutative 99.8%
Simplified 99.8%
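The constants in this polynomial are the Maclaurin coefficients of e^x - 2 + e^(-x) = 2 cosh(x) - 2: 0.0833… = 2/4! = 1/12, 0.00277… = 2/6! = 1/360, and 4.9603…e-5 = 2/8! = 1/20160. A quick check of the series (assumes SymPy; not part of the report):

```python
import sympy as sp

x = sp.symbols("x")
# Herbie's "Taylor expanded in x around 0" step truncates this series
# and evaluates it in Horner form.
print(sp.series(sp.exp(x) - 2 + sp.exp(-x), x, 0, 10))
# -> x**2 + x**4/12 + x**6/360 + x**8/20160 + O(x**10)
```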
Alternative 2:
(FPCore (x) :precision binary64 (fma x x (* (fma (pow x 2.0) 0.002777777777777778 0.08333333333333333) (pow x 4.0))))
double code(double x) {
return fma(x, x, (fma(pow(x, 2.0), 0.002777777777777778, 0.08333333333333333) * pow(x, 4.0)));
}
function code(x) return fma(x, x, Float64(fma((x ^ 2.0), 0.002777777777777778, 0.08333333333333333) * (x ^ 4.0))) end
code[x_] := N[(x * x + N[(N[(N[Power[x, 2.0], $MachinePrecision] * 0.002777777777777778 + 0.08333333333333333), $MachinePrecision] * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\mathsf{fma}\left(x, x, \mathsf{fma}\left({x}^{2}, 0.002777777777777778, 0.08333333333333333\right) \cdot {x}^{4}\right)
\end{array}
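Fewer language backends appear for this alternative, presumably because it needs a fused multiply-add. In Python, math.fma only exists from 3.13 onward; a transcription with an unfused fallback (my sketch, not report output):

```python
import math

# Assumption: on Python < 3.13 an unfused a*b + c is an acceptable
# stand-in for illustration; it loses fma's single-rounding guarantee.
fma = getattr(math, "fma", lambda a, b, c: a * b + c)

def code(x):
    x2 = x ** 2.0
    return fma(x, x, fma(x2, 0.002777777777777778, 0.08333333333333333) * x ** 4.0)
```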
Derivation:
Initial program 52.6%
associate-+l- 52.7%
sub-neg 52.7%
sub-neg 52.7%
distribute-neg-in 52.7%
remove-double-neg 52.7%
+-commutative 52.7%
metadata-eval 52.7%
Simplified 52.7%
+-commutative 52.7%
associate-+r+ 52.6%
metadata-eval 52.6%
sub-neg 52.6%
+-commutative 52.6%
associate-+r- 52.6%
+-commutative 52.6%
cosh-undef 52.6%
Applied egg-rr 52.6%
Taylor expanded in x around 0 99.5%
distribute-lft-in 99.5%
*-rgt-identity 99.5%
unpow2 99.5%
fma-undefine 99.5%
*-commutative 99.5%
*-commutative 99.5%
associate-*l* 99.5%
+-commutative 99.5%
*-commutative 99.5%
fma-define 99.5%
pow-sqr 99.5%
metadata-eval 99.5%
Simplified 99.5%
Alternative 3:
(FPCore (x)
:precision binary64
(*
(pow x 2.0)
(+
1.0
(*
(pow x 2.0)
(+ 0.08333333333333333 (* (pow x 2.0) 0.002777777777777778))))))
double code(double x) {
return pow(x, 2.0) * (1.0 + (pow(x, 2.0) * (0.08333333333333333 + (pow(x, 2.0) * 0.002777777777777778))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x ** 2.0d0) * (1.0d0 + ((x ** 2.0d0) * (0.08333333333333333d0 + ((x ** 2.0d0) * 0.002777777777777778d0))))
end function
public static double code(double x) {
return Math.pow(x, 2.0) * (1.0 + (Math.pow(x, 2.0) * (0.08333333333333333 + (Math.pow(x, 2.0) * 0.002777777777777778))));
}
def code(x): return math.pow(x, 2.0) * (1.0 + (math.pow(x, 2.0) * (0.08333333333333333 + (math.pow(x, 2.0) * 0.002777777777777778))))
function code(x) return Float64((x ^ 2.0) * Float64(1.0 + Float64((x ^ 2.0) * Float64(0.08333333333333333 + Float64((x ^ 2.0) * 0.002777777777777778))))) end
function tmp = code(x) tmp = (x ^ 2.0) * (1.0 + ((x ^ 2.0) * (0.08333333333333333 + ((x ^ 2.0) * 0.002777777777777778)))); end
code[x_] := N[(N[Power[x, 2.0], $MachinePrecision] * N[(1.0 + N[(N[Power[x, 2.0], $MachinePrecision] * N[(0.08333333333333333 + N[(N[Power[x, 2.0], $MachinePrecision] * 0.002777777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(0.08333333333333333 + {x}^{2} \cdot 0.002777777777777778\right)\right)
\end{array}
Derivation:
Initial program 52.6%
associate-+l- 52.7%
sub-neg 52.7%
sub-neg 52.7%
distribute-neg-in 52.7%
remove-double-neg 52.7%
+-commutative 52.7%
metadata-eval 52.7%
Simplified 52.7%
Taylor expanded in x around 0 99.5%
*-commutative 99.5%
Simplified 99.5%
Alternative 4:
(FPCore (x) :precision binary64 (fma x x (* 0.08333333333333333 (pow x 4.0))))
double code(double x) {
return fma(x, x, (0.08333333333333333 * pow(x, 4.0)));
}
function code(x) return fma(x, x, Float64(0.08333333333333333 * (x ^ 4.0))) end
code[x_] := N[(x * x + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)
\end{array}
Derivation:
Initial program 52.6%
associate-+l- 52.7%
sub-neg 52.7%
sub-neg 52.7%
distribute-neg-in 52.7%
remove-double-neg 52.7%
+-commutative 52.7%
metadata-eval 52.7%
Simplified 52.7%
Taylor expanded in x around 0 99.8%
*-commutative 99.8%
Simplified 99.8%
Taylor expanded in x around 0 99.0%
distribute-lft-in 99.0%
*-rgt-identity 99.0%
unpow2 99.0%
fma-define 99.0%
*-commutative 99.0%
associate-*l* 99.0%
pow-sqr 99.0%
metadata-eval 99.0%
Simplified 99.0%
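Truncating the series at the x⁴ term trades a little accuracy for a much shorter expression: 99.0% here versus 99.5% for the longer fma polynomial of Alternative 2.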
Alternative 5:
(FPCore (x) :precision binary64 (pow x 2.0))
double code(double x) {
return pow(x, 2.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = x ** 2.0d0
end function
public static double code(double x) {
return Math.pow(x, 2.0);
}
def code(x): return math.pow(x, 2.0)
function code(x) return x ^ 2.0 end
function tmp = code(x) tmp = x ^ 2.0; end
code[x_] := N[Power[x, 2.0], $MachinePrecision]
\begin{array}{l}
{x}^{2}
\end{array}
Derivation:
Initial program 52.6%
associate-+l- 52.7%
sub-neg 52.7%
sub-neg 52.7%
distribute-neg-in 52.7%
remove-double-neg 52.7%
+-commutative 52.7%
metadata-eval 52.7%
Simplified 52.7%
Taylor expanded in x around 0 98.0%
Target (the :alt annotation from the input):
(FPCore (x) :precision binary64 (let* ((t_0 (sinh (/ x 2.0)))) (* 4.0 (* t_0 t_0))))
double code(double x) {
double t_0 = sinh((x / 2.0));
return 4.0 * (t_0 * t_0);
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = sinh((x / 2.0d0))
code = 4.0d0 * (t_0 * t_0)
end function
public static double code(double x) {
double t_0 = Math.sinh((x / 2.0));
return 4.0 * (t_0 * t_0);
}
def code(x):
    t_0 = math.sinh((x / 2.0))
    return 4.0 * (t_0 * t_0)
function code(x) t_0 = sinh(Float64(x / 2.0)) return Float64(4.0 * Float64(t_0 * t_0)) end
function tmp = code(x) t_0 = sinh((x / 2.0)); tmp = 4.0 * (t_0 * t_0); end
code[x_] := Block[{t$95$0 = N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision]}, N[(4.0 * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \sinh \left(\frac{x}{2}\right)\\
4 \cdot \left(t_0 \cdot t_0\right)
\end{array}
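For reference, the target above is exactly equal to the initial program, by a standard identity (not spelled out in the report):

\begin{array}{l}
e^{x} - 2 + e^{-x} = \left(e^{x/2} - e^{-x/2}\right)^{2} = 4\,\sinh^{2}\left(\frac{x}{2}\right)
\end{array}

Squaring sinh(x/2) replaces the subtraction of nearly equal quantities with a form that stays accurate for small x.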
Reproduce:
herbie shell --seed 2024089
(FPCore (x)
:name "exp2 (problem 3.3.7)"
:precision binary64
:pre (<= (fabs x) 710.0)
:alt
(* 4.0 (* (sinh (/ x 2.0)) (sinh (/ x 2.0))))
(+ (- (exp x) 2.0) (exp (- x))))
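Running the herbie shell command above and pasting in this FPCore should reproduce the run: the --seed flag pins Herbie's random sampling, so the alternatives and accuracy figures ought to match this report.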