
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant: sech(x) = 2 / (e^x + e^-x).
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: denom
    denom = exp(x) + exp(-x)
    code = 2.0d0 / denom
end function
/** Hyperbolic secant: sech(x) = 2 / (e^x + e^-x). */
public static double code(double x) {
    double denom = Math.exp(x) + Math.exp(-x);
    return 2.0 / denom;
}
def code(x):
    """Hyperbolic secant: sech(x) = 2 / (e**x + e**-x)."""
    denom = math.exp(x) + math.exp(-x)
    return 2.0 / denom
# Hyperbolic secant: sech(x) = 2 / (e^x + e^-x), rounded to Float64 at each step.
function code(x)
    return Float64(2.0 / Float64(exp(x) + exp(Float64(-x))))
end
function tmp = code(x)
    % Hyperbolic secant: sech(x) = 2 / (e^x + e^-x).
    tmp = 2.0 / (exp(x) + exp(-x));
end
(* Hyperbolic secant: 2 / (Exp[x] + Exp[-x]), each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 18 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
/* Hyperbolic secant: sech(x) = 2 / (e^x + e^-x). */
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant: sech(x) = 2 / (e^x + e^-x).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
/** Hyperbolic secant: sech(x) = 2 / (e^x + e^-x). */
public static double code(double x) {
return 2.0 / (Math.exp(x) + Math.exp(-x));
}
def code(x): return 2.0 / (math.exp(x) + math.exp(-x))  # sech(x) = 2 / (e**x + e**-x)
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end # sech(x) = 2 / (e^x + e^-x)
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end % sech(x) = 2 / (e^x + e^-x)
(* Hyperbolic secant: 2 / (Exp[x] + Exp[-x]), each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
(FPCore (x) :precision binary64 (/ 1.0 (cosh x)))
double code(double x) {
return 1.0 / cosh(x);
}
! sech(x) computed as the reciprocal of cosh(x).
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: ch
    ch = cosh(x)
    code = 1.0d0 / ch
end function
/** sech(x) computed as the reciprocal of Math.cosh(x). */
public static double code(double x) {
    double ch = Math.cosh(x);
    return 1.0 / ch;
}
def code(x):
    """sech(x) computed as the reciprocal of math.cosh(x)."""
    ch = math.cosh(x)
    return 1.0 / ch
# sech(x) computed as the reciprocal of cosh(x), rounded to Float64.
function code(x)
    return Float64(1.0 / cosh(x))
end
function tmp = code(x)
    % sech(x) computed as the reciprocal of cosh(x).
    tmp = 1.0 / cosh(x);
end
(* sech(x) as 1 / Cosh[x], rounded to $MachinePrecision. *)
code[x_] := N[(1.0 / N[Cosh[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\cosh x}
\end{array}
Initial program 100.0%
clear-num N/A
cosh-def N/A
/-lowering-/.f64 N/A
cosh-lowering-cosh.f64 100.0
Applied egg-rr 100.0%
(FPCore (x) :precision binary64 (if (<= (/ 2.0 (+ (exp x) (exp (- x)))) 0.004) (/ 2.0 (* x (fma x (* x (* x 0.08333333333333333)) x))) (fma x (* x (fma (* x x) 0.20833333333333334 -0.5)) 1.0)))
double code(double x) {
double tmp;
if ((2.0 / (exp(x) + exp(-x))) <= 0.004) {
tmp = 2.0 / (x * fma(x, (x * (x * 0.08333333333333333)), x));
} else {
tmp = fma(x, (x * fma((x * x), 0.20833333333333334, -0.5)), 1.0);
}
return tmp;
}
function code(x) tmp = 0.0 if (Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) <= 0.004) tmp = Float64(2.0 / Float64(x * fma(x, Float64(x * Float64(x * 0.08333333333333333)), x))); else tmp = fma(x, Float64(x * fma(Float64(x * x), 0.20833333333333334, -0.5)), 1.0); end return tmp end
code[x_] := If[LessEqual[N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.004], N[(2.0 / N[(x * N[(x * N[(x * N[(x * 0.08333333333333333), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.20833333333333334 + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{e^{x} + e^{-x}} \leq 0.004:\\
\;\;\;\;\frac{2}{x \cdot \mathsf{fma}\left(x, x \cdot \left(x \cdot 0.08333333333333333\right), x\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.20833333333333334, -0.5\right), 1\right)\\
\end{array}
\end{array}
if (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) < 0.0040000000000000001
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6483.9
Simplified83.9%
Taylor expanded in x around inf
distribute-lft-inN/A
*-commutativeN/A
metadata-evalN/A
pow-sqrN/A
associate-*l*N/A
associate-*r/N/A
*-rgt-identityN/A
metadata-evalN/A
pow-sqrN/A
associate-/l*N/A
*-rgt-identityN/A
associate-*r/N/A
rgt-mult-inverseN/A
*-commutativeN/A
distribute-rgt-inN/A
unpow2N/A
+-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
Simplified83.9%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6483.9
Applied egg-rr83.9%
if 0.0040000000000000001 < (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6499.6
Simplified99.6%
Final simplification92.1%
(FPCore (x) :precision binary64 (if (<= (/ 2.0 (+ (exp x) (exp (- x)))) 0.004) (/ 24.0 (* x (* x (* x x)))) (fma x (* x (fma (* x x) 0.20833333333333334 -0.5)) 1.0)))
/* Herbie alternative: branch on the size of sech(x) = 2/(e^x + e^-x).
   Tiny results (<= 0.004) use 24/x^4 (from the Taylor expansion around
   infinity in the trace below); otherwise a degree-4 Taylor polynomial
   around 0 evaluated with fused multiply-adds. */
double code(double x) {
double tmp;
if ((2.0 / (exp(x) + exp(-x))) <= 0.004) {
tmp = 24.0 / (x * (x * (x * x)));
} else {
tmp = fma(x, (x * fma((x * x), 0.20833333333333334, -0.5)), 1.0);
}
return tmp;
}
function code(x) tmp = 0.0 if (Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) <= 0.004) tmp = Float64(24.0 / Float64(x * Float64(x * Float64(x * x)))); else tmp = fma(x, Float64(x * fma(Float64(x * x), 0.20833333333333334, -0.5)), 1.0); end return tmp end
code[x_] := If[LessEqual[N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.004], N[(24.0 / N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.20833333333333334 + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{e^{x} + e^{-x}} \leq 0.004:\\
\;\;\;\;\frac{24}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.20833333333333334, -0.5\right), 1\right)\\
\end{array}
\end{array}
if (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) < 0.0040000000000000001Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6483.9
Simplified83.9%
Taylor expanded in x around inf
/-lowering-/.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6483.9
Simplified83.9%
if 0.0040000000000000001 < (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6499.6
Simplified99.6%
(FPCore (x)
:precision binary64
(if (<= (+ (exp x) (exp (- x))) 4.0)
(fma
(* x x)
(fma (* x x) (fma x (* x -0.08472222222222223) 0.20833333333333334) -0.5)
1.0)
(/ 720.0 (* (* x x) (* x (* x (* x x)))))))
/* Herbie alternative: branch on the size of e^x + e^-x (= 2*cosh(x)).
   Moderate inputs use a degree-6 Taylor polynomial of sech around 0;
   otherwise 720/x^6 (from the expansion around infinity in the trace). */
double code(double x) {
double tmp;
if ((exp(x) + exp(-x)) <= 4.0) {
tmp = fma((x * x), fma((x * x), fma(x, (x * -0.08472222222222223), 0.20833333333333334), -0.5), 1.0);
} else {
tmp = 720.0 / ((x * x) * (x * (x * (x * x))));
}
return tmp;
}
function code(x) tmp = 0.0 if (Float64(exp(x) + exp(Float64(-x))) <= 4.0) tmp = fma(Float64(x * x), fma(Float64(x * x), fma(x, Float64(x * -0.08472222222222223), 0.20833333333333334), -0.5), 1.0); else tmp = Float64(720.0 / Float64(Float64(x * x) * Float64(x * Float64(x * Float64(x * x))))); end return tmp end
code[x_] := If[LessEqual[N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 4.0], N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.08472222222222223), $MachinePrecision] + 0.20833333333333334), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision], N[(720.0 / N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{x} + e^{-x} \leq 4:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot -0.08472222222222223, 0.20833333333333334\right), -0.5\right), 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{720}{\left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)}\\
\end{array}
\end{array}
if (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 4Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6499.9
Simplified99.9%
if 4 < (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified88.6%
Taylor expanded in x around inf
/-lowering-/.f64N/A
metadata-evalN/A
pow-plusN/A
metadata-evalN/A
pow-plusN/A
associate-*r*N/A
unpow2N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6488.6
Simplified88.6%
(FPCore (x)
:precision binary64
(/
2.0
(fma
x
(fma
(*
(* x (* x x))
(fma (* x x) (* (* x x) 7.71604938271605e-6) -0.006944444444444444))
(fma
(* x x)
(fma
(* x x)
(fma (* x x) -0.00044444444444444447 -0.013333333333333334)
-0.4)
-12.0)
x)
2.0)))
/* Herbie alternative: sech(x) = 2 / D(x), where D(x) is an fma-evaluated
   polynomial approximating 2*cosh(x) (per the Taylor-expansion trace below). */
double code(double x) {
return 2.0 / fma(x, fma(((x * (x * x)) * fma((x * x), ((x * x) * 7.71604938271605e-6), -0.006944444444444444)), fma((x * x), fma((x * x), fma((x * x), -0.00044444444444444447, -0.013333333333333334), -0.4), -12.0), x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(Float64(Float64(x * Float64(x * x)) * fma(Float64(x * x), Float64(Float64(x * x) * 7.71604938271605e-6), -0.006944444444444444)), fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.00044444444444444447, -0.013333333333333334), -0.4), -12.0), x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 7.71604938271605e-6), $MachinePrecision] + -0.006944444444444444), $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.00044444444444444447 + -0.013333333333333334), $MachinePrecision] + -0.4), $MachinePrecision] + -12.0), $MachinePrecision] + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(x \cdot x, \left(x \cdot x\right) \cdot 7.71604938271605 \cdot 10^{-6}, -0.006944444444444444\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.00044444444444444447, -0.013333333333333334\right), -0.4\right), -12\right), x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
associate-*r*N/A
pow3N/A
flip-+N/A
associate-*r/N/A
div-invN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr69.5%
Taylor expanded in x around 0
sub-negN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
metadata-eval97.8
Simplified97.8%
(FPCore (x)
:precision binary64
(/
2.0
(fma
x
(fma
(*
(* x (* x x))
(fma (* x x) (* (* x x) 7.71604938271605e-6) -0.006944444444444444))
(fma (* x x) (fma (* x x) (* (* x x) -0.00044444444444444447) -0.4) -12.0)
x)
2.0)))
/* Herbie alternative: 2 / D(x) with D(x) an fma-evaluated polynomial
   approximating 2*cosh(x); variant with a truncated inner factor. */
double code(double x) {
return 2.0 / fma(x, fma(((x * (x * x)) * fma((x * x), ((x * x) * 7.71604938271605e-6), -0.006944444444444444)), fma((x * x), fma((x * x), ((x * x) * -0.00044444444444444447), -0.4), -12.0), x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(Float64(Float64(x * Float64(x * x)) * fma(Float64(x * x), Float64(Float64(x * x) * 7.71604938271605e-6), -0.006944444444444444)), fma(Float64(x * x), fma(Float64(x * x), Float64(Float64(x * x) * -0.00044444444444444447), -0.4), -12.0), x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 7.71604938271605e-6), $MachinePrecision] + -0.006944444444444444), $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.00044444444444444447), $MachinePrecision] + -0.4), $MachinePrecision] + -12.0), $MachinePrecision] + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(x \cdot x, \left(x \cdot x\right) \cdot 7.71604938271605 \cdot 10^{-6}, -0.006944444444444444\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \left(x \cdot x\right) \cdot -0.00044444444444444447, -0.4\right), -12\right), x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
associate-*r*N/A
pow3N/A
flip-+N/A
associate-*r/N/A
div-invN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr69.5%
Taylor expanded in x around 0
sub-negN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
metadata-eval97.8
Simplified97.8%
Taylor expanded in x around inf
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6497.8
Simplified97.8%
(FPCore (x)
:precision binary64
(/
2.0
(fma
x
(fma
(*
(* x (* x x))
(fma (* x x) (* (* x x) 7.71604938271605e-6) -0.006944444444444444))
(fma (* x x) (fma x (* x -0.013333333333333334) -0.4) -12.0)
x)
2.0)))
/* Herbie alternative: 2 / D(x) with D(x) an fma-evaluated polynomial
   approximating 2*cosh(x); inner factor truncated differently than siblings. */
double code(double x) {
return 2.0 / fma(x, fma(((x * (x * x)) * fma((x * x), ((x * x) * 7.71604938271605e-6), -0.006944444444444444)), fma((x * x), fma(x, (x * -0.013333333333333334), -0.4), -12.0), x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(Float64(Float64(x * Float64(x * x)) * fma(Float64(x * x), Float64(Float64(x * x) * 7.71604938271605e-6), -0.006944444444444444)), fma(Float64(x * x), fma(x, Float64(x * -0.013333333333333334), -0.4), -12.0), x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 7.71604938271605e-6), $MachinePrecision] + -0.006944444444444444), $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.013333333333333334), $MachinePrecision] + -0.4), $MachinePrecision] + -12.0), $MachinePrecision] + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(x \cdot x, \left(x \cdot x\right) \cdot 7.71604938271605 \cdot 10^{-6}, -0.006944444444444444\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot -0.013333333333333334, -0.4\right), -12\right), x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
associate-*r*N/A
pow3N/A
flip-+N/A
associate-*r/N/A
div-invN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr69.5%
Taylor expanded in x around 0
sub-negN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
metadata-eval97.4
Simplified97.4%
(FPCore (x)
:precision binary64
(/
2.0
(fma
x
(fma
(* (* x (* x x)) -0.006944444444444444)
(fma
(* x x)
(fma
(* x x)
(fma (* x x) -0.00044444444444444447 -0.013333333333333334)
-0.4)
-12.0)
x)
2.0)))
/* Herbie alternative: 2 / D(x) with D(x) an fma-evaluated polynomial
   approximating 2*cosh(x); the x^3 factor here uses a single coefficient. */
double code(double x) {
return 2.0 / fma(x, fma(((x * (x * x)) * -0.006944444444444444), fma((x * x), fma((x * x), fma((x * x), -0.00044444444444444447, -0.013333333333333334), -0.4), -12.0), x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(Float64(Float64(x * Float64(x * x)) * -0.006944444444444444), fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.00044444444444444447, -0.013333333333333334), -0.4), -12.0), x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * -0.006944444444444444), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.00044444444444444447 + -0.013333333333333334), $MachinePrecision] + -0.4), $MachinePrecision] + -12.0), $MachinePrecision] + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(\left(x \cdot \left(x \cdot x\right)\right) \cdot -0.006944444444444444, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.00044444444444444447, -0.013333333333333334\right), -0.4\right), -12\right), x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
associate-*r*N/A
pow3N/A
flip-+N/A
associate-*r/N/A
div-invN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr69.5%
Taylor expanded in x around 0
sub-negN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
metadata-eval97.8
Simplified97.8%
Taylor expanded in x around 0
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6497.1
Simplified97.1%
Final simplification97.1%
(FPCore (x)
:precision binary64
(/
2.0
(fma
x
(fma
(*
(* x (* x x))
(fma (* x x) (* (* x x) 7.71604938271605e-6) -0.006944444444444444))
(fma x (* x -0.4) -12.0)
x)
2.0)))
/* Herbie alternative: 2 / D(x) with D(x) an fma-evaluated polynomial
   approximating 2*cosh(x); second factor truncated to fma(x, -0.4x, -12). */
double code(double x) {
return 2.0 / fma(x, fma(((x * (x * x)) * fma((x * x), ((x * x) * 7.71604938271605e-6), -0.006944444444444444)), fma(x, (x * -0.4), -12.0), x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(Float64(Float64(x * Float64(x * x)) * fma(Float64(x * x), Float64(Float64(x * x) * 7.71604938271605e-6), -0.006944444444444444)), fma(x, Float64(x * -0.4), -12.0), x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 7.71604938271605e-6), $MachinePrecision] + -0.006944444444444444), $MachinePrecision]), $MachinePrecision] * N[(x * N[(x * -0.4), $MachinePrecision] + -12.0), $MachinePrecision] + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(x \cdot x, \left(x \cdot x\right) \cdot 7.71604938271605 \cdot 10^{-6}, -0.006944444444444444\right), \mathsf{fma}\left(x, x \cdot -0.4, -12\right), x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
associate-*r*N/A
pow3N/A
flip-+N/A
associate-*r/N/A
div-invN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr69.5%
Taylor expanded in x around 0
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
metadata-eval97.0
Simplified97.0%
(FPCore (x)
:precision binary64
(/
2.0
(fma
x
(fma
(*
(* x (* x x))
(fma (* x x) (* (* x x) 7.71604938271605e-6) -0.006944444444444444))
-12.0
x)
2.0)))
/* Herbie alternative: 2 / D(x) with D(x) an fma-evaluated polynomial
   approximating 2*cosh(x); second factor collapsed to the constant -12. */
double code(double x) {
return 2.0 / fma(x, fma(((x * (x * x)) * fma((x * x), ((x * x) * 7.71604938271605e-6), -0.006944444444444444)), -12.0, x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(Float64(Float64(x * Float64(x * x)) * fma(Float64(x * x), Float64(Float64(x * x) * 7.71604938271605e-6), -0.006944444444444444)), -12.0, x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 7.71604938271605e-6), $MachinePrecision] + -0.006944444444444444), $MachinePrecision]), $MachinePrecision] * -12.0 + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(x \cdot x, \left(x \cdot x\right) \cdot 7.71604938271605 \cdot 10^{-6}, -0.006944444444444444\right), -12, x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
associate-*r*N/A
pow3N/A
flip-+N/A
associate-*r/N/A
div-invN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr69.5%
Taylor expanded in x around 0
Simplified96.5%
(FPCore (x) :precision binary64 (/ 2.0 (fma x (fma (* x x) (* x (fma x (* x 0.002777777777777778) 0.08333333333333333)) x) 2.0)))
/* Herbie alternative: sech(x) = 2 / D(x) with D(x) = x*(x + x^3/12 + x^5/360) + 2,
   a degree-6 Taylor approximation of 2*cosh(x) evaluated with fma. */
double code(double x) {
return 2.0 / fma(x, fma((x * x), (x * fma(x, (x * 0.002777777777777778), 0.08333333333333333)), x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(Float64(x * x), Float64(x * fma(x, Float64(x * 0.002777777777777778), 0.08333333333333333)), x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * 0.002777777777777778), $MachinePrecision] + 0.08333333333333333), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x, x \cdot 0.002777777777777778, 0.08333333333333333\right), x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
(FPCore (x) :precision binary64 (/ 2.0 (fma x (fma (* x x) (* x (* (* x x) 0.002777777777777778)) x) 2.0)))
/* Herbie alternative: 2 / D(x), D(x) a Taylor approximation of 2*cosh(x)
   keeping the x^2, x^6 terms (the x^4 term is dropped in this variant). */
double code(double x) {
return 2.0 / fma(x, fma((x * x), (x * ((x * x) * 0.002777777777777778)), x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(Float64(x * x), Float64(x * Float64(Float64(x * x) * 0.002777777777777778)), x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(N[(x * x), $MachinePrecision] * 0.002777777777777778), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(x \cdot x, x \cdot \left(\left(x \cdot x\right) \cdot 0.002777777777777778\right), x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
Taylor expanded in x around inf
unpow3N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6494.2
Simplified94.2%
(FPCore (x) :precision binary64 (/ 2.0 (fma x (* (* x x) (* x (* (* x x) 0.002777777777777778))) 2.0)))
/* Herbie alternative: 2 / D(x), D(x) = x^6/360 + 2 -- only the constant and
   highest retained term of the 2*cosh(x) expansion survive here. */
double code(double x) {
return 2.0 / fma(x, ((x * x) * (x * ((x * x) * 0.002777777777777778))), 2.0);
}
function code(x) return Float64(2.0 / fma(x, Float64(Float64(x * x) * Float64(x * Float64(Float64(x * x) * 0.002777777777777778))), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(N[(x * x), $MachinePrecision] * 0.002777777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \left(x \cdot x\right) \cdot \left(x \cdot \left(\left(x \cdot x\right) \cdot 0.002777777777777778\right)\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
Taylor expanded in x around inf
metadata-evalN/A
pow-plusN/A
associate-*l*N/A
metadata-evalN/A
pow-sqrN/A
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
associate-*r*N/A
unpow2N/A
unpow3N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow3N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6494.0
Simplified94.0%
(FPCore (x) :precision binary64 (/ 2.0 (fma x (fma x (* (* x x) 0.08333333333333333) x) 2.0)))
/* Herbie alternative: 2 / D(x), D(x) = x*(x + x^3/12) + 2, a degree-4
   Taylor approximation of 2*cosh(x) evaluated with fma. */
double code(double x) {
return 2.0 / fma(x, fma(x, ((x * x) * 0.08333333333333333), x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(x, Float64(Float64(x * x) * 0.08333333333333333), x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.08333333333333333), $MachinePrecision] + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, \left(x \cdot x\right) \cdot 0.08333333333333333, x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6492.2
Simplified92.2%
(FPCore (x) :precision binary64 (/ 2.0 (fma (* x x) (* (* x x) 0.08333333333333333) 2.0)))
/* Herbie alternative: 2 / D(x), D(x) = x^2 * (x^2/12) + 2 -- an even
   polynomial approximation of 2*cosh(x) without the x^2 term. */
double code(double x) {
return 2.0 / fma((x * x), ((x * x) * 0.08333333333333333), 2.0);
}
function code(x) return Float64(2.0 / fma(Float64(x * x), Float64(Float64(x * x) * 0.08333333333333333), 2.0)) end
code[x_] := N[(2.0 / N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.08333333333333333), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x \cdot x, \left(x \cdot x\right) \cdot 0.08333333333333333, 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified94.5%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6494.5
Simplified94.5%
Taylor expanded in x around inf
distribute-rgt-inN/A
metadata-evalN/A
pow-plusN/A
associate-*l*N/A
associate-*r/N/A
metadata-evalN/A
associate-*l/N/A
metadata-evalN/A
pow-sqrN/A
associate-*l*N/A
associate-/l*N/A
*-rgt-identityN/A
associate-*r/N/A
rgt-mult-inverseN/A
*-rgt-identityN/A
unpow2N/A
associate-*r*N/A
distribute-rgt-inN/A
Simplified94.0%
Taylor expanded in x around 0
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6491.8
Simplified91.8%
(FPCore (x) :precision binary64 (if (<= x 1.25) (fma -0.5 (* x x) 1.0) (/ 2.0 (* x x))))
/* Herbie alternative (reported accuracy ~49%): quadratic Taylor polynomial
   of sech(x) for x <= 1.25, else 2/x^2.
   NOTE(review): the guard tests signed x, not |x| -- every negative x takes
   the polynomial branch; confirm this matches the generating report. */
double code(double x) {
double tmp;
if (x <= 1.25) {
tmp = fma(-0.5, (x * x), 1.0);
} else {
tmp = 2.0 / (x * x);
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 1.25) tmp = fma(-0.5, Float64(x * x), 1.0); else tmp = Float64(2.0 / Float64(x * x)); end return tmp end
code[x_] := If[LessEqual[x, 1.25], N[(-0.5 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], N[(2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.25:\\
\;\;\;\;\mathsf{fma}\left(-0.5, x \cdot x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{x \cdot x}\\
\end{array}
\end{array}
if x < 1.25Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6467.7
Simplified67.7%
if 1.25 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
accelerator-lowering-fma.f6448.9
Simplified48.9%
Taylor expanded in x around inf
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6448.9
Simplified48.9%
(FPCore (x) :precision binary64 (/ 2.0 (fma x x 2.0)))
double code(double x) {
return 2.0 / fma(x, x, 2.0);
}
function code(x) return Float64(2.0 / fma(x, x, 2.0)) end
code[x_] := N[(2.0 / N[(x * x + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, x, 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
accelerator-lowering-fma.f6478.4
Simplified78.4%
(FPCore (x) :precision binary64 1.0)
/* Constant alternative from the report: sech(x) approximated by its value
   1 at x = 0; the input is deliberately ignored. */
double code(double x) {
    (void)x;
    return 1.0;
}
! Constant alternative: sech(x) approximated by its value 1 at x = 0.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
/** Constant alternative: sech(x) approximated by its value 1 at x = 0. */
public static double code(double x) {
return 1.0;
}
def code(x):
    """Constant alternative: sech(x) approximated by its value 1 at x = 0."""
    return 1.0
function code(x) return 1.0 end # constant alternative: value of sech at x = 0
function tmp = code(x) tmp = 1.0; end % constant alternative: value of sech at x = 0
(* Constant alternative: value of sech at x = 0. *)
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Simplified53.0%
herbie shell --seed 2024204
(FPCore (x)
:name "Hyperbolic secant"
:precision binary64
(/ 2.0 (+ (exp x) (exp (- x)))))