
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant: sech(x) = 2 / (exp(x) + exp(-x)) in real(8).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
/** Hyperbolic secant: 2 / (e^x + e^(-x)), computed in double precision. */
public static double code(double x) {
    double expPos = Math.exp(x);
    double expNeg = Math.exp(-x);
    return 2.0 / (expPos + expNeg);
}
def code(x):
    """Hyperbolic secant: 2 / (e**x + e**(-x))."""
    denominator = math.exp(x) + math.exp(-x)
    return 2.0 / denominator
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
/* Initial program: hyperbolic secant, 2 / (e^x + e^{-x}). */
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant: sech(x) = 2 / (exp(x) + exp(-x)) in real(8).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
/** Hyperbolic secant: 2 / (e^x + e^(-x)), in double precision. */
public static double code(double x) {
return 2.0 / (Math.exp(x) + Math.exp(-x));
}
def code(x): return 2.0 / (math.exp(x) + math.exp(-x))
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (let* ((t_0 (exp (- x_m)))) (/ 2.0 (+ t_0 (/ 1.0 t_0)))))
x_m = fabs(x);
double code(double x_m) {
double t_0 = exp(-x_m);
return 2.0 / (t_0 + (1.0 / t_0));
}
x_m = abs(x)
! sech from a single exponential: t_0 = exp(-x_m), result 2/(t_0 + 1/t_0).
! x_m is expected to be abs(x). implicit none added (idiom).
real(8) function code(x_m)
implicit none
real(8), intent (in) :: x_m
real(8) :: t_0
t_0 = exp(-x_m)
code = 2.0d0 / (t_0 + (1.0d0 / t_0))
end function
x_m = Math.abs(x);
/** sech via one exponential: t = e^(-x_m); result is 2 / (t + 1/t). */
public static double code(double x_m) {
    double t = Math.exp(-x_m);
    double denominator = t + (1.0 / t);
    return 2.0 / denominator;
}
x_m = math.fabs(x) def code(x_m): t_0 = math.exp(-x_m) return 2.0 / (t_0 + (1.0 / t_0))
x_m = abs(x) function code(x_m) t_0 = exp(Float64(-x_m)) return Float64(2.0 / Float64(t_0 + Float64(1.0 / t_0))) end
x_m = abs(x); function tmp = code(x_m) t_0 = exp(-x_m); tmp = 2.0 / (t_0 + (1.0 / t_0)); end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[Exp[(-x$95$m)], $MachinePrecision]}, N[(2.0 / N[(t$95$0 + N[(1.0 / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := e^{-x\_m}\\
\frac{2}{t\_0 + \frac{1}{t\_0}}
\end{array}
\end{array}
Initial program 100.0%
lift-exp.f64100.0
/-rgt-identityN/A
clear-numN/A
lift-exp.f64N/A
exp-negN/A
lift-neg.f64N/A
lift-exp.f64N/A
lower-/.f64100.0
Applied egg-rr100.0%
Final simplification100.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (/ 2.0 (+ (exp (- x_m)) (exp x_m))) 2e-33) (/ 720.0 (* (* x_m x_m) (* x_m (* x_m (* x_m x_m))))) (/ 1.0 (fma x_m (* x_m (fma x_m (* x_m 0.041666666666666664) 0.5)) 1.0))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if ((2.0 / (exp(-x_m) + exp(x_m))) <= 2e-33) {
tmp = 720.0 / ((x_m * x_m) * (x_m * (x_m * (x_m * x_m))));
} else {
tmp = 1.0 / fma(x_m, (x_m * fma(x_m, (x_m * 0.041666666666666664), 0.5)), 1.0);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (Float64(2.0 / Float64(exp(Float64(-x_m)) + exp(x_m))) <= 2e-33) tmp = Float64(720.0 / Float64(Float64(x_m * x_m) * Float64(x_m * Float64(x_m * Float64(x_m * x_m))))); else tmp = Float64(1.0 / fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * 0.041666666666666664), 0.5)), 1.0)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[(2.0 / N[(N[Exp[(-x$95$m)], $MachinePrecision] + N[Exp[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2e-33], N[(720.0 / N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * 0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{e^{-x\_m} + e^{x\_m}} \leq 2 \cdot 10^{-33}:\\
\;\;\;\;\frac{720}{\left(x\_m \cdot x\_m\right) \cdot \left(x\_m \cdot \left(x\_m \cdot \left(x\_m \cdot x\_m\right)\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot 0.041666666666666664, 0.5\right), 1\right)}\\
\end{array}
\end{array}
if (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) < 2.0000000000000001e-33Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6480.1
Simplified80.1%
Taylor expanded in x around inf
lower-/.f64N/A
metadata-evalN/A
pow-sqrN/A
cube-multN/A
unpow2N/A
cube-multN/A
unpow2N/A
swap-sqrN/A
unpow2N/A
pow-sqrN/A
metadata-evalN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
metadata-evalN/A
pow-plusN/A
*-commutativeN/A
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6480.1
Simplified80.1%
if 2.0000000000000001e-33 < (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6499.1
Simplified99.1%
Final simplification89.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (/ 2.0 (+ (exp (- x_m)) (exp x_m))) 0.5) (/ 12.0 (* x_m (* x_m x_m))) (fma x_m (* x_m (fma x_m (* x_m 0.20833333333333334) -0.5)) 1.0)))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if ((2.0 / (exp(-x_m) + exp(x_m))) <= 0.5) {
tmp = 12.0 / (x_m * (x_m * x_m));
} else {
tmp = fma(x_m, (x_m * fma(x_m, (x_m * 0.20833333333333334), -0.5)), 1.0);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (Float64(2.0 / Float64(exp(Float64(-x_m)) + exp(x_m))) <= 0.5) tmp = Float64(12.0 / Float64(x_m * Float64(x_m * x_m))); else tmp = fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * 0.20833333333333334), -0.5)), 1.0); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[(2.0 / N[(N[Exp[(-x$95$m)], $MachinePrecision] + N[Exp[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.5], N[(12.0 / N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * 0.20833333333333334), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{e^{-x\_m} + e^{x\_m}} \leq 0.5:\\
\;\;\;\;\frac{12}{x\_m \cdot \left(x\_m \cdot x\_m\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot 0.20833333333333334, -0.5\right), 1\right)\\
\end{array}
\end{array}
if (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) < 0.5Initial program 100.0%
lift-exp.f64100.0
/-rgt-identityN/A
clear-numN/A
lift-exp.f64N/A
exp-negN/A
lift-neg.f64N/A
lift-exp.f64N/A
lower-/.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
Simplified54.7%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6466.8
Simplified66.8%
Taylor expanded in x around inf
lower-/.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6467.3
Simplified67.3%
if 0.5 < (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6499.6
Simplified99.6%
Final simplification83.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (/ 2.0 (+ (exp (- x_m)) (exp x_m))) 2e-33) (/ 12.0 (* x_m (* x_m x_m))) (/ 1.0 (fma x_m (* x_m 0.5) 1.0))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if ((2.0 / (exp(-x_m) + exp(x_m))) <= 2e-33) {
tmp = 12.0 / (x_m * (x_m * x_m));
} else {
tmp = 1.0 / fma(x_m, (x_m * 0.5), 1.0);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (Float64(2.0 / Float64(exp(Float64(-x_m)) + exp(x_m))) <= 2e-33) tmp = Float64(12.0 / Float64(x_m * Float64(x_m * x_m))); else tmp = Float64(1.0 / fma(x_m, Float64(x_m * 0.5), 1.0)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[(2.0 / N[(N[Exp[(-x$95$m)], $MachinePrecision] + N[Exp[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2e-33], N[(12.0 / N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[(x$95$m * N[(x$95$m * 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{e^{-x\_m} + e^{x\_m}} \leq 2 \cdot 10^{-33}:\\
\;\;\;\;\frac{12}{x\_m \cdot \left(x\_m \cdot x\_m\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(x\_m, x\_m \cdot 0.5, 1\right)}\\
\end{array}
\end{array}
if (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) < 2.0000000000000001e-33Initial program 100.0%
lift-exp.f64100.0
/-rgt-identityN/A
clear-numN/A
lift-exp.f64N/A
exp-negN/A
lift-neg.f64N/A
lift-exp.f64N/A
lower-/.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
Simplified55.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6467.0
Simplified67.0%
Taylor expanded in x around inf
lower-/.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6467.7
Simplified67.7%
if 2.0000000000000001e-33 < (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6498.9
Simplified98.9%
Final simplification83.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (+ (exp (- x_m)) (exp x_m)) 4.0) (fma x_m (* x_m (fma x_m (* x_m 0.20833333333333334) -0.5)) 1.0) (/ 1.0 (* (* x_m x_m) (fma x_m (* x_m 0.041666666666666664) 0.5)))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if ((exp(-x_m) + exp(x_m)) <= 4.0) {
tmp = fma(x_m, (x_m * fma(x_m, (x_m * 0.20833333333333334), -0.5)), 1.0);
} else {
tmp = 1.0 / ((x_m * x_m) * fma(x_m, (x_m * 0.041666666666666664), 0.5));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (Float64(exp(Float64(-x_m)) + exp(x_m)) <= 4.0) tmp = fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * 0.20833333333333334), -0.5)), 1.0); else tmp = Float64(1.0 / Float64(Float64(x_m * x_m) * fma(x_m, Float64(x_m * 0.041666666666666664), 0.5))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[(N[Exp[(-x$95$m)], $MachinePrecision] + N[Exp[x$95$m], $MachinePrecision]), $MachinePrecision], 4.0], N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * 0.20833333333333334), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision], N[(1.0 / N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * 0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;e^{-x\_m} + e^{x\_m} \leq 4:\\
\;\;\;\;\mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot 0.20833333333333334, -0.5\right), 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\left(x\_m \cdot x\_m\right) \cdot \mathsf{fma}\left(x\_m, x\_m \cdot 0.041666666666666664, 0.5\right)}\\
\end{array}
\end{array}
if (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 4Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6499.6
Simplified99.6%
if 4 < (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6474.6
Simplified74.6%
Taylor expanded in x around inf
metadata-evalN/A
pow-sqrN/A
associate-*r*N/A
+-commutativeN/A
distribute-rgt-inN/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-evalN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6474.5
Simplified74.5%
Final simplification86.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (/ 2.0 (+ (exp (- x_m)) (exp x_m))) 0.5) (/ 12.0 (* x_m (* x_m x_m))) (fma -0.5 (* x_m x_m) 1.0)))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if ((2.0 / (exp(-x_m) + exp(x_m))) <= 0.5) {
tmp = 12.0 / (x_m * (x_m * x_m));
} else {
tmp = fma(-0.5, (x_m * x_m), 1.0);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (Float64(2.0 / Float64(exp(Float64(-x_m)) + exp(x_m))) <= 0.5) tmp = Float64(12.0 / Float64(x_m * Float64(x_m * x_m))); else tmp = fma(-0.5, Float64(x_m * x_m), 1.0); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[(2.0 / N[(N[Exp[(-x$95$m)], $MachinePrecision] + N[Exp[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.5], N[(12.0 / N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-0.5 * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{e^{-x\_m} + e^{x\_m}} \leq 0.5:\\
\;\;\;\;\frac{12}{x\_m \cdot \left(x\_m \cdot x\_m\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.5, x\_m \cdot x\_m, 1\right)\\
\end{array}
\end{array}
if (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) < 0.5Initial program 100.0%
lift-exp.f64100.0
/-rgt-identityN/A
clear-numN/A
lift-exp.f64N/A
exp-negN/A
lift-neg.f64N/A
lift-exp.f64N/A
lower-/.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
Simplified54.7%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6466.8
Simplified66.8%
Taylor expanded in x around inf
lower-/.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6467.3
Simplified67.3%
if 0.5 < (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.5
Simplified99.5%
Final simplification83.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (+ (exp (- x_m)) (exp x_m)) 4.0) (fma x_m (* x_m (fma x_m (* x_m 0.20833333333333334) -0.5)) 1.0) (/ 24.0 (* x_m (* x_m (* x_m x_m))))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if ((exp(-x_m) + exp(x_m)) <= 4.0) {
tmp = fma(x_m, (x_m * fma(x_m, (x_m * 0.20833333333333334), -0.5)), 1.0);
} else {
tmp = 24.0 / (x_m * (x_m * (x_m * x_m)));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (Float64(exp(Float64(-x_m)) + exp(x_m)) <= 4.0) tmp = fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * 0.20833333333333334), -0.5)), 1.0); else tmp = Float64(24.0 / Float64(x_m * Float64(x_m * Float64(x_m * x_m)))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[(N[Exp[(-x$95$m)], $MachinePrecision] + N[Exp[x$95$m], $MachinePrecision]), $MachinePrecision], 4.0], N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * 0.20833333333333334), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision], N[(24.0 / N[(x$95$m * N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;e^{-x\_m} + e^{x\_m} \leq 4:\\
\;\;\;\;\mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot 0.20833333333333334, -0.5\right), 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{24}{x\_m \cdot \left(x\_m \cdot \left(x\_m \cdot x\_m\right)\right)}\\
\end{array}
\end{array}
if (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 4Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6499.6
Simplified99.6%
if 4 < (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6474.6
Simplified74.6%
Taylor expanded in x around inf
lower-*.f64N/A
unpow2N/A
lower-*.f6474.5
Simplified74.5%
Taylor expanded in x around inf
lower-/.f64N/A
metadata-evalN/A
pow-plusN/A
*-commutativeN/A
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6474.5
Simplified74.5%
Final simplification86.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (+ (exp (- x_m)) (exp x_m)) 4.0) (fma -0.5 (* x_m x_m) 1.0) (/ 2.0 (* x_m x_m))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if ((exp(-x_m) + exp(x_m)) <= 4.0) {
tmp = fma(-0.5, (x_m * x_m), 1.0);
} else {
tmp = 2.0 / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (Float64(exp(Float64(-x_m)) + exp(x_m)) <= 4.0) tmp = fma(-0.5, Float64(x_m * x_m), 1.0); else tmp = Float64(2.0 / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[(N[Exp[(-x$95$m)], $MachinePrecision] + N[Exp[x$95$m], $MachinePrecision]), $MachinePrecision], 4.0], N[(-0.5 * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision], N[(2.0 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;e^{-x\_m} + e^{x\_m} \leq 4:\\
\;\;\;\;\mathsf{fma}\left(-0.5, x\_m \cdot x\_m, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 4Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.5
Simplified99.5%
if 4 < (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6447.1
Simplified47.1%
Taylor expanded in x around inf
lower-/.f64N/A
unpow2N/A
lower-*.f6447.1
Simplified47.1%
Final simplification72.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 1.0 (cosh x_m)))
x_m = fabs(x);
double code(double x_m) {
return 1.0 / cosh(x_m);
}
x_m = abs(x)
! sech as the reciprocal of the intrinsic cosh. implicit none added (idiom).
real(8) function code(x_m)
implicit none
real(8), intent (in) :: x_m
code = 1.0d0 / cosh(x_m)
end function
x_m = Math.abs(x);
/** sech as the reciprocal of Math.cosh. */
public static double code(double x_m) {
    double coshValue = Math.cosh(x_m);
    return 1.0 / coshValue;
}
x_m = math.fabs(x) def code(x_m): return 1.0 / math.cosh(x_m)
x_m = abs(x) function code(x_m) return Float64(1.0 / cosh(x_m)) end
x_m = abs(x); function tmp = code(x_m) tmp = 1.0 / cosh(x_m); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 / N[Cosh[x$95$m], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{\cosh x\_m}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(/
1.0
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma (* x_m x_m) 0.001388888888888889 0.041666666666666664))
0.5)
1.0)))x_m = fabs(x);
double code(double x_m) {
return 1.0 / fma((x_m * x_m), fma(x_m, (x_m * fma((x_m * x_m), 0.001388888888888889, 0.041666666666666664)), 0.5), 1.0);
}
x_m = abs(x) function code(x_m) return Float64(1.0 / fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(Float64(x_m * x_m), 0.001388888888888889, 0.041666666666666664)), 0.5), 1.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 / N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.001388888888888889 + 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m \cdot x\_m, 0.001388888888888889, 0.041666666666666664\right), 0.5\right), 1\right)}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6489.7
Simplified89.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 1.0 (fma (* x_m x_m) (fma x_m (* 0.001388888888888889 (* x_m (* x_m x_m))) 0.5) 1.0)))
x_m = fabs(x);
double code(double x_m) {
return 1.0 / fma((x_m * x_m), fma(x_m, (0.001388888888888889 * (x_m * (x_m * x_m))), 0.5), 1.0);
}
x_m = abs(x) function code(x_m) return Float64(1.0 / fma(Float64(x_m * x_m), fma(x_m, Float64(0.001388888888888889 * Float64(x_m * Float64(x_m * x_m))), 0.5), 1.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 / N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(0.001388888888888889 * N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, 0.001388888888888889 \cdot \left(x\_m \cdot \left(x\_m \cdot x\_m\right)\right), 0.5\right), 1\right)}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6489.7
Simplified89.7%
Taylor expanded in x around inf
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6489.5
Simplified89.5%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 1.0 (fma (* x_m x_m) (* x_m (* 0.001388888888888889 (* x_m (* x_m x_m)))) 1.0)))
x_m = fabs(x);
double code(double x_m) {
return 1.0 / fma((x_m * x_m), (x_m * (0.001388888888888889 * (x_m * (x_m * x_m)))), 1.0);
}
x_m = abs(x) function code(x_m) return Float64(1.0 / fma(Float64(x_m * x_m), Float64(x_m * Float64(0.001388888888888889 * Float64(x_m * Float64(x_m * x_m)))), 1.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 / N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(0.001388888888888889 * N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{\mathsf{fma}\left(x\_m \cdot x\_m, x\_m \cdot \left(0.001388888888888889 \cdot \left(x\_m \cdot \left(x\_m \cdot x\_m\right)\right)\right), 1\right)}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6489.7
Simplified89.7%
Taylor expanded in x around inf
metadata-evalN/A
pow-plusN/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6489.2
Simplified89.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 1.0 (fma x_m (* x_m (fma x_m (* x_m 0.041666666666666664) 0.5)) 1.0)))
x_m = fabs(x);
double code(double x_m) {
return 1.0 / fma(x_m, (x_m * fma(x_m, (x_m * 0.041666666666666664), 0.5)), 1.0);
}
x_m = abs(x) function code(x_m) return Float64(1.0 / fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * 0.041666666666666664), 0.5)), 1.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 / N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * 0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{\mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot 0.041666666666666664, 0.5\right), 1\right)}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6486.9
Simplified86.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 1.0 (fma x_m (* x_m (* (* x_m x_m) 0.041666666666666664)) 1.0)))
x_m = fabs(x);
double code(double x_m) {
return 1.0 / fma(x_m, (x_m * ((x_m * x_m) * 0.041666666666666664)), 1.0);
}
x_m = abs(x) function code(x_m) return Float64(1.0 / fma(x_m, Float64(x_m * Float64(Float64(x_m * x_m) * 0.041666666666666664)), 1.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 / N[(x$95$m * N[(x$95$m * N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{\mathsf{fma}\left(x\_m, x\_m \cdot \left(\left(x\_m \cdot x\_m\right) \cdot 0.041666666666666664\right), 1\right)}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6486.9
Simplified86.9%
Taylor expanded in x around inf
lower-*.f64N/A
unpow2N/A
lower-*.f6486.6
Simplified86.6%
Final simplification86.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 2.0 (+ 2.0 x_m)))
x_m = fabs(x);
/* Crude rational approximation of sech near 0: 2 / (2 + x_m). */
double code(double x_m) {
    const double denom = 2.0 + x_m;
    return 2.0 / denom;
}
x_m = abs(x)
! Crude rational approximation of sech near 0: 2 / (2 + x_m).
! implicit none added (idiom).
real(8) function code(x_m)
implicit none
real(8), intent (in) :: x_m
code = 2.0d0 / (2.0d0 + x_m)
end function
x_m = Math.abs(x);
/** Crude rational approximation of sech near 0: 2 / (2 + x_m). */
public static double code(double x_m) {
    double denominator = 2.0 + x_m;
    return 2.0 / denominator;
}
x_m = math.fabs(x) def code(x_m): return 2.0 / (2.0 + x_m)
x_m = abs(x) function code(x_m) return Float64(2.0 / Float64(2.0 + x_m)) end
x_m = abs(x); function tmp = code(x_m) tmp = 2.0 / (2.0 + x_m); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(2.0 / N[(2.0 + x$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{2}{2 + x\_m}
\end{array}
Initial program 100.0%
lift-exp.f64100.0
/-rgt-identityN/A
clear-numN/A
lift-exp.f64N/A
exp-negN/A
lift-neg.f64N/A
lift-exp.f64N/A
lower-/.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around 0
Simplified75.8%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f6450.7
Simplified50.7%
Final simplification50.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 1.0)
x_m = fabs(x);
/* Zeroth-order approximation: constant 1 (sech(0) = 1). */
double code(double x_m) {
    (void)x_m; /* input intentionally unused by this constant approximation */
    return 1.0;
}
x_m = abs(x)
! Zeroth-order approximation: constant 1 (sech(0) = 1).
! implicit none added (idiom); x_m intentionally unused.
real(8) function code(x_m)
implicit none
real(8), intent (in) :: x_m
code = 1.0d0
end function
x_m = Math.abs(x);
public static double code(double x_m) {
return 1.0;
}
x_m = math.fabs(x) def code(x_m): return 1.0
x_m = abs(x) function code(x_m) return 1.0 end
x_m = abs(x); function tmp = code(x_m) tmp = 1.0; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 1.0
\begin{array}{l}
x_m = \left|x\right|
\\
1
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Simplified50.4%
herbie shell --seed 2024215
(FPCore (x)
:name "Hyperbolic secant"
:precision binary64
(/ 2.0 (+ (exp x) (exp (- x)))))