
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant: sech(x) = 2 / (exp(x) + exp(-x)).
! NOTE(review): exp(x) may overflow for large |x|, driving the quotient to 0.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
/** Hyperbolic secant: sech(x) = 2 / (e^x + e^-x). */
public static double code(double x) {
    final double denom = Math.exp(x) + Math.exp(-x);
    return 2.0 / denom;
}
def code(x):
    """Hyperbolic secant: 2 / (e**x + e**-x)."""
    denom = math.exp(x) + math.exp(-x)
    return 2.0 / denom
# sech(x) = 2/(exp(x) + exp(-x)); the Float64(...) wrappers force binary64 rounding at each step.
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
% Hyperbolic secant: tmp = 2 / (exp(x) + exp(-x)).
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
(* sech(x) = 2/(Exp[x] + Exp[-x]); each N[..., $MachinePrecision] forces machine-precision rounding. *)
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 15 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! sech(x) computed from its definition: 2 / (exp(x) + exp(-x)).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
// sech(x) = 2 / (e^x + e^-x), computed from the two exponentials.
public static double code(double x) {
    double up = Math.exp(x);
    double down = Math.exp(-x);
    return 2.0 / (up + down);
}
def code(x):
    # sech(x) via its defining exponentials: 2 / (e^x + e^-x)
    ex, emx = math.exp(x), math.exp(-x)
    return 2.0 / (ex + emx)
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
(FPCore (x) :precision binary64 (pow (pow (cosh x) -0.5) 2.0))
double code(double x) {
return pow(pow(cosh(x), -0.5), 2.0);
}
! sech(x) expressed as (cosh(x)**(-1/2))**2.
real(8) function code(x)
real(8), intent (in) :: x
code = (cosh(x) ** (-0.5d0)) ** 2.0d0
end function
/** sech(x) expressed as (cosh(x)^(-1/2))^2. */
public static double code(double x) {
    final double invRoot = Math.pow(Math.cosh(x), -0.5);
    return Math.pow(invRoot, 2.0);
}
def code(x):
    """sech(x) expressed as (cosh(x) ** -0.5) squared."""
    inv_root = math.pow(math.cosh(x), -0.5)
    return math.pow(inv_root, 2.0)
function code(x) return (cosh(x) ^ -0.5) ^ 2.0 end
function tmp = code(x) tmp = (cosh(x) ^ -0.5) ^ 2.0; end
code[x_] := N[Power[N[Power[N[Cosh[x], $MachinePrecision], -0.5], $MachinePrecision], 2.0], $MachinePrecision]
\begin{array}{l}
\\
{\left({\cosh x}^{-0.5}\right)}^{2}
\end{array}
Initial program 100.0%
clear-numN/A
cosh-defN/A
inv-powN/A
sqr-powN/A
pow2N/A
pow-lowering-pow.f64N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
pow-lowering-pow.f64N/A
cosh-lowering-cosh.f64N/A
metadata-evalN/A
metadata-eval100.0%
Applied egg-rr100.0%
(FPCore (x) :precision binary64 (/ 1.0 (cosh x)))
double code(double x) {
return 1.0 / cosh(x);
}
! sech(x) as the reciprocal of the hyperbolic cosine.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / cosh(x)
end function
/** sech(x) as the reciprocal of the hyperbolic cosine. */
public static double code(double x) {
    double c = Math.cosh(x);
    return 1.0 / c;
}
def code(x):
    """sech(x) as the reciprocal of the hyperbolic cosine."""
    c = math.cosh(x)
    return 1.0 / c
function code(x) return Float64(1.0 / cosh(x)) end
function tmp = code(x) tmp = 1.0 / cosh(x); end
code[x_] := N[(1.0 / N[Cosh[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\cosh x}
\end{array}
Initial program 100.0%
clear-numN/A
cosh-defN/A
/-lowering-/.f64N/A
cosh-lowering-cosh.f64100.0%
Applied egg-rr100.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma x (fma x x 0.0) 0.0)))
(if (<= x 4e+30)
(/
1.0
(fma
(* x x)
(fma
x
(* x (fma (* x x) 0.001388888888888889 0.041666666666666664))
0.5)
1.0))
(if (<= x 1.15e+77)
(/
2.0
(/
(*
(fma t_0 t_0 0.0)
(fma
x
(* x (fma (fma x x 0.0) 0.002777777777777778 0.08333333333333333))
1.0))
(fma x t_0 0.0)))
(/ 24.0 (* x (* x (* x x))))))))
/*
 * Generated three-regime approximation of sech(x) = 2/(e^x + e^-x).
 * Coefficients are cosh Taylor terms: 0.5 = 1/2!, 0.041666... = 1/4!,
 * 0.001388... = 1/6! (and 0.08333... ~ 1/12, 0.002777... ~ 1/360 in the
 * factored middle form); fma keeps each Horner step to a single rounding.
 * NOTE(review): regime thresholds (4e30, 1.15e77) come from the generator's
 * sampling; do not retune by hand without re-measuring accuracy.
 */
double code(double x) {
/* t_0 = x^3, built from fma(x, x*x, 0) */
double t_0 = fma(x, fma(x, x, 0.0), 0.0);
double tmp;
if (x <= 4e+30) {
/* 1 / (1 + x^2/2 + x^4/24 + x^6/720), Horner-evaluated with fma */
tmp = 1.0 / fma((x * x), fma(x, (x * fma((x * x), 0.001388888888888889, 0.041666666666666664)), 0.5), 1.0);
} else if (x <= 1.15e+77) {
/* same polynomial refactored through t_0 = x^3 — presumably to postpone
   intermediate overflow; TODO confirm against the report's error data */
tmp = 2.0 / ((fma(t_0, t_0, 0.0) * fma(x, (x * fma(fma(x, x, 0.0), 0.002777777777777778, 0.08333333333333333)), 1.0)) / fma(x, t_0, 0.0));
} else {
/* far tail surrogate: 24 / x^4 (the generator's Taylor-at-infinity form) */
tmp = 24.0 / (x * (x * (x * x)));
}
return tmp;
}
function code(x) t_0 = fma(x, fma(x, x, 0.0), 0.0) tmp = 0.0 if (x <= 4e+30) tmp = Float64(1.0 / fma(Float64(x * x), fma(x, Float64(x * fma(Float64(x * x), 0.001388888888888889, 0.041666666666666664)), 0.5), 1.0)); elseif (x <= 1.15e+77) tmp = Float64(2.0 / Float64(Float64(fma(t_0, t_0, 0.0) * fma(x, Float64(x * fma(fma(x, x, 0.0), 0.002777777777777778, 0.08333333333333333)), 1.0)) / fma(x, t_0, 0.0))); else tmp = Float64(24.0 / Float64(x * Float64(x * Float64(x * x)))); end return tmp end
code[x_] := Block[{t$95$0 = N[(x * N[(x * x + 0.0), $MachinePrecision] + 0.0), $MachinePrecision]}, If[LessEqual[x, 4e+30], N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.001388888888888889 + 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 1.15e+77], N[(2.0 / N[(N[(N[(t$95$0 * t$95$0 + 0.0), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x + 0.0), $MachinePrecision] * 0.002777777777777778 + 0.08333333333333333), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] / N[(x * t$95$0 + 0.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(24.0 / N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(x, \mathsf{fma}\left(x, x, 0\right), 0\right)\\
\mathbf{if}\;x \leq 4 \cdot 10^{+30}:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.001388888888888889, 0.041666666666666664\right), 0.5\right), 1\right)}\\
\mathbf{elif}\;x \leq 1.15 \cdot 10^{+77}:\\
\;\;\;\;\frac{2}{\frac{\mathsf{fma}\left(t\_0, t\_0, 0\right) \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x, 0\right), 0.002777777777777778, 0.08333333333333333\right), 1\right)}{\mathsf{fma}\left(x, t\_0, 0\right)}}\\
\mathbf{else}:\\
\;\;\;\;\frac{24}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\\
\end{array}
\end{array}
if x < 4.0000000000000001e30Initial program 100.0%
clear-numN/A
cosh-defN/A
/-lowering-/.f64N/A
cosh-lowering-cosh.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6491.5%
Simplified91.5%
if 4.0000000000000001e30 < x < 1.14999999999999997e77Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
unpow2N/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f6450.1%
Simplified50.1%
Taylor expanded in x around inf
Simplified50.1%
Applied egg-rr100.0%
if 1.14999999999999997e77 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
unpow2N/A
accelerator-lowering-fma.f64100.0%
Simplified100.0%
Taylor expanded in x around inf
/-lowering-/.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64100.0%
Simplified100.0%
(FPCore (x)
:precision binary64
(if (<= x 1e+26)
(/
1.0
(fma
(* x x)
(fma x (* x (fma (* x x) 0.001388888888888889 0.041666666666666664)) 0.5)
1.0))
(if (<= x 1.35e+154)
(/
2.0
(/
(*
(fma
x
(* x (fma (fma x x 0.0) 0.002777777777777778 0.08333333333333333))
1.0)
(fma x (fma x (fma x x 0.0) 0.0) 0.0))
(fma x x 0.0)))
(/ 2.0 (* x x)))))
/*
 * Generated three-regime approximation of sech(x) = 2/(e^x + e^-x).
 * Coefficients are cosh Taylor terms (0.5 = 1/2!, 0.041666... = 1/4!,
 * 0.001388... = 1/6!); fma fuses each Horner step into one rounding.
 * NOTE(review): thresholds 1e26 and 1.35e154 were chosen by the generator's
 * sampling; 1.35e154 is near sqrt(DBL_MAX), where x*x starts to overflow.
 */
double code(double x) {
double tmp;
if (x <= 1e+26) {
/* 1 / (1 + x^2/2 + x^4/24 + x^6/720), Horner-evaluated with fma */
tmp = 1.0 / fma((x * x), fma(x, (x * fma((x * x), 0.001388888888888889, 0.041666666666666664)), 0.5), 1.0);
} else if (x <= 1.35e+154) {
/* refactored form: numerator carries x^4 (nested fma), denominator x^2 */
tmp = 2.0 / ((fma(x, (x * fma(fma(x, x, 0.0), 0.002777777777777778, 0.08333333333333333)), 1.0) * fma(x, fma(x, fma(x, x, 0.0), 0.0), 0.0)) / fma(x, x, 0.0));
} else {
/* far tail surrogate: 2 / x^2 */
tmp = 2.0 / (x * x);
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 1e+26) tmp = Float64(1.0 / fma(Float64(x * x), fma(x, Float64(x * fma(Float64(x * x), 0.001388888888888889, 0.041666666666666664)), 0.5), 1.0)); elseif (x <= 1.35e+154) tmp = Float64(2.0 / Float64(Float64(fma(x, Float64(x * fma(fma(x, x, 0.0), 0.002777777777777778, 0.08333333333333333)), 1.0) * fma(x, fma(x, fma(x, x, 0.0), 0.0), 0.0)) / fma(x, x, 0.0))); else tmp = Float64(2.0 / Float64(x * x)); end return tmp end
code[x_] := If[LessEqual[x, 1e+26], N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.001388888888888889 + 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 1.35e+154], N[(2.0 / N[(N[(N[(x * N[(x * N[(N[(x * x + 0.0), $MachinePrecision] * 0.002777777777777778 + 0.08333333333333333), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * N[(x * N[(x * x + 0.0), $MachinePrecision] + 0.0), $MachinePrecision] + 0.0), $MachinePrecision]), $MachinePrecision] / N[(x * x + 0.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 10^{+26}:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.001388888888888889, 0.041666666666666664\right), 0.5\right), 1\right)}\\
\mathbf{elif}\;x \leq 1.35 \cdot 10^{+154}:\\
\;\;\;\;\frac{2}{\frac{\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x, 0\right), 0.002777777777777778, 0.08333333333333333\right), 1\right) \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, x, 0\right), 0\right), 0\right)}{\mathsf{fma}\left(x, x, 0\right)}}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{x \cdot x}\\
\end{array}
\end{array}
if x < 1.00000000000000005e26Initial program 100.0%
clear-numN/A
cosh-defN/A
/-lowering-/.f64N/A
cosh-lowering-cosh.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6492.4%
Simplified92.4%
if 1.00000000000000005e26 < x < 1.35000000000000003e154Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
unpow2N/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f6476.9%
Simplified76.9%
Taylor expanded in x around inf
Simplified76.9%
+-rgt-identityN/A
flip-+N/A
--rgt-identityN/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr88.2%
if 1.35000000000000003e154 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
accelerator-lowering-fma.f64100.0%
Simplified100.0%
Taylor expanded in x around inf
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f64100.0%
Simplified100.0%
Final simplification92.8%
(FPCore (x)
:precision binary64
(if (<= x 4.5)
(/
2.0
(fma (fma x (fma x x 0.0) 0.0) (* x 0.08333333333333333) (fma x x 2.0)))
(/
1.0
(*
(* x x)
(* (* x x) (fma (* x x) 0.001388888888888889 0.041666666666666664))))))
double code(double x) {
double tmp;
if (x <= 4.5) {
tmp = 2.0 / fma(fma(x, fma(x, x, 0.0), 0.0), (x * 0.08333333333333333), fma(x, x, 2.0));
} else {
tmp = 1.0 / ((x * x) * ((x * x) * fma((x * x), 0.001388888888888889, 0.041666666666666664)));
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 4.5) tmp = Float64(2.0 / fma(fma(x, fma(x, x, 0.0), 0.0), Float64(x * 0.08333333333333333), fma(x, x, 2.0))); else tmp = Float64(1.0 / Float64(Float64(x * x) * Float64(Float64(x * x) * fma(Float64(x * x), 0.001388888888888889, 0.041666666666666664)))); end return tmp end
code[x_] := If[LessEqual[x, 4.5], N[(2.0 / N[(N[(x * N[(x * x + 0.0), $MachinePrecision] + 0.0), $MachinePrecision] * N[(x * 0.08333333333333333), $MachinePrecision] + N[(x * x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.001388888888888889 + 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 4.5:\\
\;\;\;\;\frac{2}{\mathsf{fma}\left(\mathsf{fma}\left(x, \mathsf{fma}\left(x, x, 0\right), 0\right), x \cdot 0.08333333333333333, \mathsf{fma}\left(x, x, 2\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \mathsf{fma}\left(x \cdot x, 0.001388888888888889, 0.041666666666666664\right)\right)}\\
\end{array}
\end{array}
if x < 4.5Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
unpow2N/A
accelerator-lowering-fma.f6493.1%
Simplified93.1%
+-rgt-identityN/A
distribute-rgt-inN/A
*-lft-identityN/A
distribute-lft-inN/A
associate-+l+N/A
*-commutativeN/A
associate-*r*N/A
+-rgt-identityN/A
associate-*l*N/A
associate-*r*N/A
pow3N/A
accelerator-lowering-fma.f64N/A
Applied egg-rr93.1%
if 4.5 < x Initial program 100.0%
clear-numN/A
cosh-defN/A
/-lowering-/.f64N/A
cosh-lowering-cosh.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6482.3%
Simplified82.3%
Taylor expanded in x around inf
+-commutativeN/A
distribute-rgt-inN/A
Simplified82.3%
(FPCore (x)
:precision binary64
(if (<= x 6.2)
(/
2.0
(fma (fma x (fma x x 0.0) 0.0) (* x 0.08333333333333333) (fma x x 2.0)))
(/ 720.0 (* x (* x (* x (* x (* x x))))))))
double code(double x) {
double tmp;
if (x <= 6.2) {
tmp = 2.0 / fma(fma(x, fma(x, x, 0.0), 0.0), (x * 0.08333333333333333), fma(x, x, 2.0));
} else {
tmp = 720.0 / (x * (x * (x * (x * (x * x)))));
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 6.2) tmp = Float64(2.0 / fma(fma(x, fma(x, x, 0.0), 0.0), Float64(x * 0.08333333333333333), fma(x, x, 2.0))); else tmp = Float64(720.0 / Float64(x * Float64(x * Float64(x * Float64(x * Float64(x * x)))))); end return tmp end
code[x_] := If[LessEqual[x, 6.2], N[(2.0 / N[(N[(x * N[(x * x + 0.0), $MachinePrecision] + 0.0), $MachinePrecision] * N[(x * 0.08333333333333333), $MachinePrecision] + N[(x * x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(720.0 / N[(x * N[(x * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 6.2:\\
\;\;\;\;\frac{2}{\mathsf{fma}\left(\mathsf{fma}\left(x, \mathsf{fma}\left(x, x, 0\right), 0\right), x \cdot 0.08333333333333333, \mathsf{fma}\left(x, x, 2\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{720}{x \cdot \left(x \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)}\\
\end{array}
\end{array}
if x < 6.20000000000000018Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
unpow2N/A
accelerator-lowering-fma.f6493.1%
Simplified93.1%
+-rgt-identityN/A
distribute-rgt-inN/A
*-lft-identityN/A
distribute-lft-inN/A
associate-+l+N/A
*-commutativeN/A
associate-*r*N/A
+-rgt-identityN/A
associate-*l*N/A
associate-*r*N/A
pow3N/A
accelerator-lowering-fma.f64N/A
Applied egg-rr93.1%
if 6.20000000000000018 < x Initial program 100.0%
clear-numN/A
cosh-defN/A
/-lowering-/.f64N/A
cosh-lowering-cosh.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6482.3%
Simplified82.3%
Taylor expanded in x around inf
Simplified82.3%
(FPCore (x) :precision binary64 (/ 1.0 (fma (* x x) (fma x (* x (fma (* x x) 0.001388888888888889 0.041666666666666664)) 0.5) 1.0)))
double code(double x) {
return 1.0 / fma((x * x), fma(x, (x * fma((x * x), 0.001388888888888889, 0.041666666666666664)), 0.5), 1.0);
}
function code(x) return Float64(1.0 / fma(Float64(x * x), fma(x, Float64(x * fma(Float64(x * x), 0.001388888888888889, 0.041666666666666664)), 0.5), 1.0)) end
code[x_] := N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.001388888888888889 + 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.001388888888888889, 0.041666666666666664\right), 0.5\right), 1\right)}
\end{array}
Initial program 100.0%
clear-numN/A
cosh-defN/A
/-lowering-/.f64N/A
cosh-lowering-cosh.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6491.4%
Simplified91.4%
(FPCore (x) :precision binary64 (if (<= x 6.2) (/ 1.0 (fma (* x x) (fma (* x x) 0.041666666666666664 0.5) 1.0)) (/ 720.0 (* x (* x (* x (* x (* x x))))))))
double code(double x) {
double tmp;
if (x <= 6.2) {
tmp = 1.0 / fma((x * x), fma((x * x), 0.041666666666666664, 0.5), 1.0);
} else {
tmp = 720.0 / (x * (x * (x * (x * (x * x)))));
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 6.2) tmp = Float64(1.0 / fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, 0.5), 1.0)); else tmp = Float64(720.0 / Float64(x * Float64(x * Float64(x * Float64(x * Float64(x * x)))))); end return tmp end
code[x_] := If[LessEqual[x, 6.2], N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], N[(720.0 / N[(x * N[(x * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 6.2:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, 0.5\right), 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{720}{x \cdot \left(x \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)}\\
\end{array}
\end{array}
if x < 6.20000000000000018Initial program 100.0%
clear-numN/A
cosh-defN/A
/-lowering-/.f64N/A
cosh-lowering-cosh.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6493.1%
Simplified93.1%
if 6.20000000000000018 < x Initial program 100.0%
clear-numN/A
cosh-defN/A
/-lowering-/.f64N/A
cosh-lowering-cosh.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6482.3%
Simplified82.3%
Taylor expanded in x around inf
Simplified82.3%
(FPCore (x) :precision binary64 (/ 2.0 (fma x (fma (* x (* x 0.08333333333333333)) x x) 2.0)))
double code(double x) {
return 2.0 / fma(x, fma((x * (x * 0.08333333333333333)), x, x), 2.0);
}
function code(x) return Float64(2.0 / fma(x, fma(Float64(x * Float64(x * 0.08333333333333333)), x, x), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(N[(x * N[(x * 0.08333333333333333), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, \mathsf{fma}\left(x \cdot \left(x \cdot 0.08333333333333333\right), x, x\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
unpow2N/A
accelerator-lowering-fma.f6488.4%
Simplified88.4%
+-rgt-identityN/A
distribute-lft-inN/A
+-rgt-identityN/A
*-commutativeN/A
associate-*r*N/A
*-rgt-identityN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6488.4%
Applied egg-rr88.4%
Final simplification88.4%
(FPCore (x) :precision binary64 (if (<= x 1.85) (fma x (* x (fma x (* x 0.20833333333333334) -0.5)) 1.0) (/ 24.0 (* x (* x (* x x))))))
double code(double x) {
double tmp;
if (x <= 1.85) {
tmp = fma(x, (x * fma(x, (x * 0.20833333333333334), -0.5)), 1.0);
} else {
tmp = 24.0 / (x * (x * (x * x)));
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 1.85) tmp = fma(x, Float64(x * fma(x, Float64(x * 0.20833333333333334), -0.5)), 1.0); else tmp = Float64(24.0 / Float64(x * Float64(x * Float64(x * x)))); end return tmp end
code[x_] := If[LessEqual[x, 1.85], N[(x * N[(x * N[(x * N[(x * 0.20833333333333334), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision], N[(24.0 / N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.85:\\
\;\;\;\;\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.20833333333333334, -0.5\right), 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{24}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\\
\end{array}
\end{array}
if x < 1.8500000000000001Initial program 100.0%
clear-numN/A
cosh-defN/A
inv-powN/A
sqr-powN/A
pow2N/A
pow-lowering-pow.f64N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
pow-lowering-pow.f64N/A
cosh-lowering-cosh.f64N/A
metadata-evalN/A
metadata-eval100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6466.0%
Simplified66.0%
if 1.8500000000000001 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
unpow2N/A
accelerator-lowering-fma.f6475.4%
Simplified75.4%
Taylor expanded in x around inf
/-lowering-/.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.4%
Simplified75.4%
(FPCore (x) :precision binary64 (/ 2.0 (fma x (* x (* (* x x) 0.08333333333333333)) 2.0)))
double code(double x) {
return 2.0 / fma(x, (x * ((x * x) * 0.08333333333333333)), 2.0);
}
function code(x) return Float64(2.0 / fma(x, Float64(x * Float64(Float64(x * x) * 0.08333333333333333)), 2.0)) end
code[x_] := N[(2.0 / N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.08333333333333333), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, x \cdot \left(\left(x \cdot x\right) \cdot 0.08333333333333333\right), 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-rgt-identityN/A
unpow2N/A
accelerator-lowering-fma.f6488.4%
Simplified88.4%
Taylor expanded in x around inf
unpow3N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6488.3%
Simplified88.3%
(FPCore (x) :precision binary64 (/ 1.0 (fma (* x x) (* (* x x) 0.041666666666666664) 1.0)))
double code(double x) {
return 1.0 / fma((x * x), ((x * x) * 0.041666666666666664), 1.0);
}
function code(x) return Float64(1.0 / fma(Float64(x * x), Float64(Float64(x * x) * 0.041666666666666664), 1.0)) end
code[x_] := N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(x \cdot x, \left(x \cdot x\right) \cdot 0.041666666666666664, 1\right)}
\end{array}
Initial program 100.0%
clear-numN/A
cosh-defN/A
/-lowering-/.f64N/A
cosh-lowering-cosh.f64100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6488.0%
Simplified88.0%
Taylor expanded in x around inf
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6488.0%
Simplified88.0%
(FPCore (x) :precision binary64 (if (<= x 1.25) (fma -0.5 (* x x) 1.0) (/ 2.0 (* x x))))
double code(double x) {
double tmp;
if (x <= 1.25) {
tmp = fma(-0.5, (x * x), 1.0);
} else {
tmp = 2.0 / (x * x);
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 1.25) tmp = fma(-0.5, Float64(x * x), 1.0); else tmp = Float64(2.0 / Float64(x * x)); end return tmp end
code[x_] := If[LessEqual[x, 1.25], N[(-0.5 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], N[(2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.25:\\
\;\;\;\;\mathsf{fma}\left(-0.5, x \cdot x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{x \cdot x}\\
\end{array}
\end{array}
if x < 1.25Initial program 100.0%
clear-numN/A
cosh-defN/A
inv-powN/A
sqr-powN/A
pow2N/A
pow-lowering-pow.f64N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
pow-lowering-pow.f64N/A
cosh-lowering-cosh.f64N/A
metadata-evalN/A
metadata-eval100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6466.1%
Simplified66.1%
if 1.25 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
accelerator-lowering-fma.f6449.3%
Simplified49.3%
Taylor expanded in x around inf
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6449.3%
Simplified49.3%
(FPCore (x) :precision binary64 (/ 2.0 (fma x x 2.0)))
double code(double x) {
return 2.0 / fma(x, x, 2.0);
}
function code(x) return Float64(2.0 / fma(x, x, 2.0)) end
code[x_] := N[(2.0 / N[(x * x + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, x, 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
accelerator-lowering-fma.f6473.9%
Simplified73.9%
(FPCore (x) :precision binary64 1.0)
/* Constant alternative: sech(x) approximated by its value at 0. */
double code(double x) {
    (void)x;  /* input deliberately unused */
    return 1.0;
}
! Constant alternative: sech(x) approximated by its value at 0.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
/** Constant alternative: sech(x) approximated by its value at 0. */
public static double code(double x) {
    // input deliberately unused
    return 1.0;
}
def code(x):
    """Constant alternative: sech(x) approximated by its value at 0."""
    return 1.0
# Constant alternative: sech(x) approximated by its value at 0.
function code(x) return 1.0 end
% Constant alternative: sech(x) approximated by its value at 0.
function tmp = code(x) tmp = 1.0; end
(* Constant alternative: sech(x) approximated by its value at 0. *)
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Simplified49.4%
herbie shell --seed 2024193
(FPCore (x)
:name "Hyperbolic secant"
:precision binary64
(/ 2.0 (+ (exp x) (exp (- x)))))