
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
!> Naive exp(x) - 2 + exp(-x); loses accuracy to cancellation near x = 0.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
// Naive exp(x) - 2 + exp(-x); cancels catastrophically near x = 0.
public static double code(double x) {
return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x):
    """Naive exp(x) - 2 + exp(-x); cancels catastrophically near x = 0."""
    grow = math.exp(x)
    decay = math.exp(-x)
    return (grow - 2.0) + decay
# Naive exp(x) - 2 + exp(-x); cancels near x = 0.
function code(x) return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) end
% Naive exp(x) - 2 + exp(-x); cancels near x = 0.
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
!> Naive exp(x) - 2 + exp(-x) (repeat of the initial program in the listing).
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
// Naive exp(x) - 2 + exp(-x) (repeat of the initial program in the listing).
public static double code(double x) {
return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x):
    """Naive exp(x) - 2 + exp(-x) (repeat of the initial program)."""
    shifted = math.exp(x) - 2.0
    return shifted + math.exp(-x)
# Naive exp(x) - 2 + exp(-x) (repeat of the initial program in the listing).
function code(x) return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) end
% Naive exp(x) - 2 + exp(-x) (repeat of the initial program in the listing).
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (exp (- x_m))))
(if (<= (+ (- (exp x_m) 2.0) t_0) 2e-5)
(+ (* 0.08333333333333333 (pow x_m 4.0)) (* x_m x_m))
(+ (exp x_m) (+ t_0 -2.0)))))
x_m = fabs(x);
double code(double x_m) {
double t_0 = exp(-x_m);
double tmp;
if (((exp(x_m) - 2.0) + t_0) <= 2e-5) {
tmp = (0.08333333333333333 * pow(x_m, 4.0)) + (x_m * x_m);
} else {
tmp = exp(x_m) + (t_0 + -2.0);
}
return tmp;
}
x_m = abs(x)
!> Piecewise exp(x_m) - 2 + exp(-x_m) for x_m = |x|: degree-4 Taylor
!> polynomial in the cancellation region, regrouped sum elsewhere.
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: t_0
real(8) :: tmp
t_0 = exp(-x_m)
! Threshold 2d-5 selects the region where the naive form cancels.
if (((exp(x_m) - 2.0d0) + t_0) <= 2d-5) then
tmp = (0.08333333333333333d0 * (x_m ** 4.0d0)) + (x_m * x_m)
else
tmp = exp(x_m) + (t_0 + (-2.0d0))
end if
code = tmp
end function
x_m = Math.abs(x);
// Piecewise exp(x_m) - 2 + exp(-x_m), x_m = |x|: degree-4 Taylor polynomial
// when the naive sum cancels (<= 2e-5), regrouped sum otherwise.
public static double code(double x_m) {
double t_0 = Math.exp(-x_m);
double tmp;
if (((Math.exp(x_m) - 2.0) + t_0) <= 2e-5) {
tmp = (0.08333333333333333 * Math.pow(x_m, 4.0)) + (x_m * x_m);
} else {
tmp = Math.exp(x_m) + (t_0 + -2.0);
}
return tmp;
}
# NOTE(review): `x` is supplied by the surrounding report context, not here.
x_m = math.fabs(x)

def code(x_m):
    """Piecewise exp(x_m) - 2 + exp(-x_m): degree-4 Taylor polynomial in
    the cancellation region, regrouped sum elsewhere."""
    t_0 = math.exp(-x_m)
    tmp = 0
    if ((math.exp(x_m) - 2.0) + t_0) <= 2e-5:
        tmp = (0.08333333333333333 * math.pow(x_m, 4.0)) + (x_m * x_m)
    else:
        tmp = math.exp(x_m) + (t_0 + -2.0)
    return tmp
x_m = abs(x) function code(x_m) t_0 = exp(Float64(-x_m)) tmp = 0.0 if (Float64(Float64(exp(x_m) - 2.0) + t_0) <= 2e-5) tmp = Float64(Float64(0.08333333333333333 * (x_m ^ 4.0)) + Float64(x_m * x_m)); else tmp = Float64(exp(x_m) + Float64(t_0 + -2.0)); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) t_0 = exp(-x_m); tmp = 0.0; if (((exp(x_m) - 2.0) + t_0) <= 2e-5) tmp = (0.08333333333333333 * (x_m ^ 4.0)) + (x_m * x_m); else tmp = exp(x_m) + (t_0 + -2.0); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[Exp[(-x$95$m)], $MachinePrecision]}, If[LessEqual[N[(N[(N[Exp[x$95$m], $MachinePrecision] - 2.0), $MachinePrecision] + t$95$0), $MachinePrecision], 2e-5], N[(N[(0.08333333333333333 * N[Power[x$95$m, 4.0], $MachinePrecision]), $MachinePrecision] + N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(N[Exp[x$95$m], $MachinePrecision] + N[(t$95$0 + -2.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := e^{-x\_m}\\
\mathbf{if}\;\left(e^{x\_m} - 2\right) + t\_0 \leq 2 \cdot 10^{-5}:\\
\;\;\;\;0.08333333333333333 \cdot {x\_m}^{4} + x\_m \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;e^{x\_m} + \left(t\_0 + -2\right)\\
\end{array}
\end{array}
if (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x))) < 2.00000000000000016e-5
Initial program 55.6%
associate-+l-55.6%
sub-neg55.6%
sub-neg55.6%
distribute-neg-in55.6%
remove-double-neg55.6%
+-commutative55.6%
metadata-eval55.6%
Simplified55.6%
Taylor expanded in x around 0 99.9%
unpow299.9%
Applied egg-rr99.9%
if 2.00000000000000016e-5 < (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x))) Initial program 91.7%
associate-+l-92.4%
sub-neg92.4%
sub-neg92.4%
distribute-neg-in92.4%
remove-double-neg92.4%
+-commutative92.4%
metadata-eval92.4%
Simplified92.4%
Final simplification99.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (+ (* 4.96031746031746e-5 (pow x_m 8.0)) (+ (* 0.002777777777777778 (pow x_m 6.0)) (fma x_m x_m (* 0.08333333333333333 (pow x_m 4.0))))))
x_m = fabs(x);
double code(double x_m) {
return (4.96031746031746e-5 * pow(x_m, 8.0)) + ((0.002777777777777778 * pow(x_m, 6.0)) + fma(x_m, x_m, (0.08333333333333333 * pow(x_m, 4.0))));
}
x_m = abs(x) function code(x_m) return Float64(Float64(4.96031746031746e-5 * (x_m ^ 8.0)) + Float64(Float64(0.002777777777777778 * (x_m ^ 6.0)) + fma(x_m, x_m, Float64(0.08333333333333333 * (x_m ^ 4.0))))) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(4.96031746031746e-5 * N[Power[x$95$m, 8.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.002777777777777778 * N[Power[x$95$m, 6.0], $MachinePrecision]), $MachinePrecision] + N[(x$95$m * x$95$m + N[(0.08333333333333333 * N[Power[x$95$m, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
4.96031746031746 \cdot 10^{-5} \cdot {x\_m}^{8} + \left(0.002777777777777778 \cdot {x\_m}^{6} + \mathsf{fma}\left(x\_m, x\_m, 0.08333333333333333 \cdot {x\_m}^{4}\right)\right)
\end{array}
Initial program 57.0%
associate-+l-57.1%
sub-neg57.1%
sub-neg57.1%
distribute-neg-in57.1%
remove-double-neg57.1%
+-commutative57.1%
metadata-eval57.1%
Simplified57.1%
Taylor expanded in x around 0 98.6%
+-commutative98.6%
unpow298.6%
fma-define98.6%
Applied egg-rr98.6%
Final simplification98.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (+ (* 4.96031746031746e-5 (pow x_m 8.0)) (+ (* 0.002777777777777778 (pow x_m 6.0)) (+ (* 0.08333333333333333 (pow x_m 4.0)) (* x_m x_m)))))
x_m = fabs(x);
double code(double x_m) {
return (4.96031746031746e-5 * pow(x_m, 8.0)) + ((0.002777777777777778 * pow(x_m, 6.0)) + ((0.08333333333333333 * pow(x_m, 4.0)) + (x_m * x_m)));
}
x_m = abs(x)
!> Degree-8 Taylor polynomial of exp(t) - 2 + exp(-t) about t = 0, t = x_m = |x|.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = (4.96031746031746d-5 * (x_m ** 8.0d0)) + ((0.002777777777777778d0 * (x_m ** 6.0d0)) + ((0.08333333333333333d0 * (x_m ** 4.0d0)) + (x_m * x_m)))
end function
x_m = Math.abs(x);
// Degree-8 Taylor polynomial of exp(t) - 2 + exp(-t) about t = 0, t = x_m = |x|.
public static double code(double x_m) {
return (4.96031746031746e-5 * Math.pow(x_m, 8.0)) + ((0.002777777777777778 * Math.pow(x_m, 6.0)) + ((0.08333333333333333 * Math.pow(x_m, 4.0)) + (x_m * x_m)));
}
# NOTE(review): `x` is supplied by the surrounding report context, not here.
x_m = math.fabs(x)

def code(x_m):
    """Degree-8 Taylor polynomial of exp(t) - 2 + exp(-t) about t = 0."""
    return (4.96031746031746e-5 * math.pow(x_m, 8.0)) + (
        (0.002777777777777778 * math.pow(x_m, 6.0))
        + ((0.08333333333333333 * math.pow(x_m, 4.0)) + (x_m * x_m))
    )
x_m = abs(x) function code(x_m) return Float64(Float64(4.96031746031746e-5 * (x_m ^ 8.0)) + Float64(Float64(0.002777777777777778 * (x_m ^ 6.0)) + Float64(Float64(0.08333333333333333 * (x_m ^ 4.0)) + Float64(x_m * x_m)))) end
x_m = abs(x); function tmp = code(x_m) tmp = (4.96031746031746e-5 * (x_m ^ 8.0)) + ((0.002777777777777778 * (x_m ^ 6.0)) + ((0.08333333333333333 * (x_m ^ 4.0)) + (x_m * x_m))); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(4.96031746031746e-5 * N[Power[x$95$m, 8.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.002777777777777778 * N[Power[x$95$m, 6.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.08333333333333333 * N[Power[x$95$m, 4.0], $MachinePrecision]), $MachinePrecision] + N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
4.96031746031746 \cdot 10^{-5} \cdot {x\_m}^{8} + \left(0.002777777777777778 \cdot {x\_m}^{6} + \left(0.08333333333333333 \cdot {x\_m}^{4} + x\_m \cdot x\_m\right)\right)
\end{array}
Initial program 57.0%
associate-+l-57.1%
sub-neg57.1%
sub-neg57.1%
distribute-neg-in57.1%
remove-double-neg57.1%
+-commutative57.1%
metadata-eval57.1%
Simplified57.1%
Taylor expanded in x around 0 98.6%
unpow297.7%
Applied egg-rr98.6%
Final simplification98.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (+ (* 0.002777777777777778 (pow x_m 6.0)) (+ (* 0.08333333333333333 (pow x_m 4.0)) (* x_m x_m))))
x_m = fabs(x);
double code(double x_m) {
return (0.002777777777777778 * pow(x_m, 6.0)) + ((0.08333333333333333 * pow(x_m, 4.0)) + (x_m * x_m));
}
x_m = abs(x)
!> Degree-6 Taylor polynomial of exp(t) - 2 + exp(-t) about t = 0, t = x_m = |x|.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = (0.002777777777777778d0 * (x_m ** 6.0d0)) + ((0.08333333333333333d0 * (x_m ** 4.0d0)) + (x_m * x_m))
end function
x_m = Math.abs(x);
// Degree-6 Taylor polynomial of exp(t) - 2 + exp(-t) about t = 0, t = x_m = |x|.
public static double code(double x_m) {
return (0.002777777777777778 * Math.pow(x_m, 6.0)) + ((0.08333333333333333 * Math.pow(x_m, 4.0)) + (x_m * x_m));
}
# NOTE(review): `x` is supplied by the surrounding report context, not here.
x_m = math.fabs(x)

def code(x_m):
    """Degree-6 Taylor polynomial of exp(t) - 2 + exp(-t) about t = 0."""
    return (0.002777777777777778 * math.pow(x_m, 6.0)) + (
        (0.08333333333333333 * math.pow(x_m, 4.0)) + (x_m * x_m)
    )
x_m = abs(x) function code(x_m) return Float64(Float64(0.002777777777777778 * (x_m ^ 6.0)) + Float64(Float64(0.08333333333333333 * (x_m ^ 4.0)) + Float64(x_m * x_m))) end
x_m = abs(x); function tmp = code(x_m) tmp = (0.002777777777777778 * (x_m ^ 6.0)) + ((0.08333333333333333 * (x_m ^ 4.0)) + (x_m * x_m)); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(0.002777777777777778 * N[Power[x$95$m, 6.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.08333333333333333 * N[Power[x$95$m, 4.0], $MachinePrecision]), $MachinePrecision] + N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
0.002777777777777778 \cdot {x\_m}^{6} + \left(0.08333333333333333 \cdot {x\_m}^{4} + x\_m \cdot x\_m\right)
\end{array}
Initial program 57.0%
associate-+l-57.1%
sub-neg57.1%
sub-neg57.1%
distribute-neg-in57.1%
remove-double-neg57.1%
+-commutative57.1%
metadata-eval57.1%
Simplified57.1%
Taylor expanded in x around 0 98.3%
unpow297.7%
Applied egg-rr98.3%
Final simplification98.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.0042) (+ (* 0.08333333333333333 (pow x_m 4.0)) (* x_m x_m)) (- (* 2.0 (cosh x_m)) 2.0)))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 0.0042) {
tmp = (0.08333333333333333 * pow(x_m, 4.0)) + (x_m * x_m);
} else {
tmp = (2.0 * cosh(x_m)) - 2.0;
}
return tmp;
}
x_m = abs(x)
!> Degree-4 Taylor polynomial for small x_m, else 2*cosh(x_m) - 2
!> (cancellation-free identity for exp(t) - 2 + exp(-t)).
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 0.0042d0) then
tmp = (0.08333333333333333d0 * (x_m ** 4.0d0)) + (x_m * x_m)
else
tmp = (2.0d0 * cosh(x_m)) - 2.0d0
end if
code = tmp
end function
x_m = Math.abs(x);
// Degree-4 Taylor polynomial for small x_m, else 2*cosh(x_m) - 2.
public static double code(double x_m) {
double tmp;
if (x_m <= 0.0042) {
tmp = (0.08333333333333333 * Math.pow(x_m, 4.0)) + (x_m * x_m);
} else {
tmp = (2.0 * Math.cosh(x_m)) - 2.0;
}
return tmp;
}
# NOTE(review): `x` is supplied by the surrounding report context, not here.
x_m = math.fabs(x)

def code(x_m):
    """Degree-4 Taylor polynomial for small x_m, else 2*cosh(x_m) - 2."""
    tmp = 0
    if x_m <= 0.0042:
        tmp = (0.08333333333333333 * math.pow(x_m, 4.0)) + (x_m * x_m)
    else:
        tmp = (2.0 * math.cosh(x_m)) - 2.0
    return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.0042) tmp = Float64(Float64(0.08333333333333333 * (x_m ^ 4.0)) + Float64(x_m * x_m)); else tmp = Float64(Float64(2.0 * cosh(x_m)) - 2.0); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 0.0042) tmp = (0.08333333333333333 * (x_m ^ 4.0)) + (x_m * x_m); else tmp = (2.0 * cosh(x_m)) - 2.0; end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.0042], N[(N[(0.08333333333333333 * N[Power[x$95$m, 4.0], $MachinePrecision]), $MachinePrecision] + N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * N[Cosh[x$95$m], $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.0042:\\
\;\;\;\;0.08333333333333333 \cdot {x\_m}^{4} + x\_m \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;2 \cdot \cosh x\_m - 2\\
\end{array}
\end{array}
if x < 0.00419999999999999974
Initial program 56.0%
associate-+l-55.9%
sub-neg55.9%
sub-neg55.9%
distribute-neg-in55.9%
remove-double-neg55.9%
+-commutative55.9%
metadata-eval55.9%
Simplified55.9%
Taylor expanded in x around 0 99.4%
unpow299.4%
Applied egg-rr99.4%
if 0.00419999999999999974 < x Initial program 90.5%
associate-+l-91.7%
sub-neg91.7%
sub-neg91.7%
distribute-neg-in91.7%
remove-double-neg91.7%
+-commutative91.7%
metadata-eval91.7%
Simplified91.7%
+-commutative91.7%
associate-+r+90.5%
metadata-eval90.5%
sub-neg90.5%
+-commutative90.5%
associate-+r-89.1%
+-commutative89.1%
cosh-undef89.4%
Applied egg-rr89.4%
Final simplification99.1%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.000195) (* x_m x_m) (- (* 2.0 (cosh x_m)) 2.0)))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 0.000195) {
tmp = x_m * x_m;
} else {
tmp = (2.0 * cosh(x_m)) - 2.0;
}
return tmp;
}
x_m = abs(x)
!> Leading Taylor term x_m**2 for very small x_m, else 2*cosh(x_m) - 2.
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 0.000195d0) then
tmp = x_m * x_m
else
tmp = (2.0d0 * cosh(x_m)) - 2.0d0
end if
code = tmp
end function
x_m = Math.abs(x);
// Leading Taylor term x_m*x_m for very small x_m, else 2*cosh(x_m) - 2.
public static double code(double x_m) {
double tmp;
if (x_m <= 0.000195) {
tmp = x_m * x_m;
} else {
tmp = (2.0 * Math.cosh(x_m)) - 2.0;
}
return tmp;
}
# NOTE(review): `x` is supplied by the surrounding report context, not here.
x_m = math.fabs(x)

def code(x_m):
    """Leading Taylor term x_m**2 for very small x_m, else 2*cosh(x_m) - 2."""
    tmp = 0
    if x_m <= 0.000195:
        tmp = x_m * x_m
    else:
        tmp = (2.0 * math.cosh(x_m)) - 2.0
    return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.000195) tmp = Float64(x_m * x_m); else tmp = Float64(Float64(2.0 * cosh(x_m)) - 2.0); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 0.000195) tmp = x_m * x_m; else tmp = (2.0 * cosh(x_m)) - 2.0; end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.000195], N[(x$95$m * x$95$m), $MachinePrecision], N[(N[(2.0 * N[Cosh[x$95$m], $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.000195:\\
\;\;\;\;x\_m \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;2 \cdot \cosh x\_m - 2\\
\end{array}
\end{array}
if x < 1.94999999999999996e-4
Initial program 55.8%
associate-+l-55.8%
sub-neg55.8%
sub-neg55.8%
distribute-neg-in55.8%
remove-double-neg55.8%
+-commutative55.8%
metadata-eval55.8%
Simplified55.8%
Taylor expanded in x around 0 99.4%
unpow299.5%
Applied egg-rr99.4%
if 1.94999999999999996e-4 < x Initial program 86.4%
associate-+l-87.1%
sub-neg87.1%
sub-neg87.1%
distribute-neg-in87.1%
remove-double-neg87.1%
+-commutative87.1%
metadata-eval87.1%
Simplified87.1%
+-commutative87.1%
associate-+r+86.4%
metadata-eval86.4%
sub-neg86.4%
+-commutative86.4%
associate-+r-85.1%
+-commutative85.1%
cosh-undef85.4%
Applied egg-rr85.4%
Final simplification98.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (* x_m x_m))
x_m = fabs(x);
/* Leading-order approximation: exp(t) - 2 + exp(-t) ~ t*t near t = 0. */
double code(double x_m) {
    const double square = x_m * x_m;
    return square;
}
x_m = abs(x)
!> Leading-order approximation x_m**2 of exp(t) - 2 + exp(-t) near t = 0.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = x_m * x_m
end function
x_m = Math.abs(x);
// Leading-order approximation x_m*x_m of exp(t) - 2 + exp(-t) near t = 0.
public static double code(double x_m) {
return x_m * x_m;
}
# NOTE(review): `x` is supplied by the surrounding report context, not here.
x_m = math.fabs(x)

def code(x_m):
    """Leading-order approximation x_m**2 of exp(t) - 2 + exp(-t)."""
    return x_m * x_m
# Leading-order approximation x_m^2 for x_m = |x|.
# NOTE(review): the report generator collapsed two statements onto one line.
x_m = abs(x) function code(x_m) return Float64(x_m * x_m) end
% Leading-order approximation x_m^2 for x_m = |x|.
x_m = abs(x); function tmp = code(x_m) tmp = x_m * x_m; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(x$95$m * x$95$m), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
x\_m \cdot x\_m
\end{array}
Initial program 57.0%
associate-+l-57.1%
sub-neg57.1%
sub-neg57.1%
distribute-neg-in57.1%
remove-double-neg57.1%
+-commutative57.1%
metadata-eval57.1%
Simplified57.1%
Taylor expanded in x around 0 96.8%
unpow297.7%
Applied egg-rr96.8%
Final simplification96.8%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 x_m)
x_m = fabs(x);
/* Degenerate approximation: returns x_m = |x| unchanged. */
double code(double x_m) {
    const double passthrough = x_m;
    return passthrough;
}
x_m = abs(x)
!> Degenerate approximation: returns x_m = |x| unchanged.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = x_m
end function
x_m = Math.abs(x);
// Degenerate approximation: returns x_m = |x| unchanged.
public static double code(double x_m) {
return x_m;
}
# NOTE(review): `x` is supplied by the surrounding report context, not here.
x_m = math.fabs(x)

def code(x_m):
    """Degenerate approximation: returns x_m = |x| unchanged."""
    return x_m
# Degenerate approximation returning x_m = |x| unchanged.
# NOTE(review): the report generator collapsed two statements onto one line.
x_m = abs(x) function code(x_m) return x_m end
% Degenerate approximation returning x_m = |x| unchanged.
x_m = abs(x); function tmp = code(x_m) tmp = x_m; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := x$95$m
\begin{array}{l}
x_m = \left|x\right|
\\
x\_m
\end{array}
Initial program 57.0%
associate-+l-57.1%
sub-neg57.1%
sub-neg57.1%
distribute-neg-in57.1%
remove-double-neg57.1%
+-commutative57.1%
metadata-eval57.1%
Simplified57.1%
Taylor expanded in x around 0 54.6%
Taylor expanded in x around 0 6.1%
Final simplification6.1%
(FPCore (x) :precision binary64 (let* ((t_0 (sinh (/ x 2.0)))) (* 4.0 (* t_0 t_0))))
double code(double x) {
double t_0 = sinh((x / 2.0));
return 4.0 * (t_0 * t_0);
}
!> Cancellation-free rewrite: exp(x) - 2 + exp(-x) == 4*sinh(x/2)**2.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = sinh((x / 2.0d0))
code = 4.0d0 * (t_0 * t_0)
end function
// Cancellation-free rewrite: exp(x) - 2 + exp(-x) == 4*sinh(x/2)^2.
public static double code(double x) {
double t_0 = Math.sinh((x / 2.0));
return 4.0 * (t_0 * t_0);
}
def code(x):
    """Cancellation-free rewrite: exp(x) - 2 + exp(-x) == 4*sinh(x/2)**2."""
    t_0 = math.sinh(x / 2.0)
    return 4.0 * (t_0 * t_0)
# Cancellation-free rewrite: exp(x) - 2 + exp(-x) == 4*sinh(x/2)^2.
# NOTE(review): the report generator collapsed the body onto one line.
function code(x) t_0 = sinh(Float64(x / 2.0)) return Float64(4.0 * Float64(t_0 * t_0)) end
% Cancellation-free rewrite: exp(x) - 2 + exp(-x) == 4*sinh(x/2)^2.
function tmp = code(x) t_0 = sinh((x / 2.0)); tmp = 4.0 * (t_0 * t_0); end
code[x_] := Block[{t$95$0 = N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision]}, N[(4.0 * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sinh \left(\frac{x}{2}\right)\\
4 \cdot \left(t\_0 \cdot t\_0\right)
\end{array}
\end{array}
herbie shell --seed 2024039
(FPCore (x)
:name "exp2 (problem 3.3.7)"
:precision binary64
:pre (<= (fabs x) 710.0)
:herbie-target
(* 4.0 (* (sinh (/ x 2.0)) (sinh (/ x 2.0))))
(+ (- (exp x) 2.0) (exp (- x))))