
; Initial program: sech(x) = 2 / (e^x + e^-x), rendered below in several languages.
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
/* C: direct evaluation; exp() is from <math.h>. */
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Fortran 90: same direct evaluation in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
// Java: Math.exp-based rendering of the same expression.
public static double code(double x) {
return 2.0 / (Math.exp(x) + Math.exp(-x));
}
# Python: relies on a module-level `import math`.
def code(x): return 2.0 / (math.exp(x) + math.exp(-x))
# Julia: each intermediate explicitly rounded to Float64.
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
% MATLAB/Octave rendering.
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
(* Wolfram: every intermediate rounded to $MachinePrecision. *)
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the initial program (baseline entry in the table).
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
/* C rendering. */
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Fortran 90 rendering.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
// Java rendering.
public static double code(double x) {
return 2.0 / (Math.exp(x) + Math.exp(-x));
}
# Python rendering (needs `import math`).
def code(x): return 2.0 / (math.exp(x) + math.exp(-x))
# Julia rendering.
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
% MATLAB/Octave rendering.
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
(* Wolfram rendering. *)
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
; Alternative: 1 / cosh(x) — same value as 2/(e^x+e^-x) via the cosh identity
; (see the cosh-undef rewrite in the log below), one library call instead of two.
(FPCore (x) :precision binary64 (/ 1.0 (cosh x)))
/* C rendering; cosh() is from <math.h>. */
double code(double x) {
return 1.0 / cosh(x);
}
! Fortran 90 rendering.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / cosh(x)
end function
// Java rendering.
public static double code(double x) {
return 1.0 / Math.cosh(x);
}
# Python rendering (needs `import math`).
def code(x): return 1.0 / math.cosh(x)
# Julia rendering.
function code(x) return Float64(1.0 / cosh(x)) end
% MATLAB/Octave rendering.
function tmp = code(x) tmp = 1.0 / cosh(x); end
(* Wolfram rendering. *)
code[x_] := N[(1.0 / N[Cosh[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\cosh x}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
; Alternative: branch on the sign test 2/(e^x+e^-x) <= 0; else-branch is an
; fma-chained even polynomial in x (Taylor expansion of sech around 0,
; per the rewrite log below).
(FPCore (x)
:precision binary64
(if (<= (/ 2.0 (+ (exp x) (exp (- x)))) 0.0)
(/ 720.0 (* (* x x) (* x (* x (* x x)))))
(fma
(* x x)
(fma (* x x) (fma x (* x -0.08472222222222223) 0.20833333333333334) -0.5)
1.0)))
/* C rendering; fma() is C99 <math.h>. */
double code(double x) {
double tmp;
if ((2.0 / (exp(x) + exp(-x))) <= 0.0) {
tmp = 720.0 / ((x * x) * (x * (x * (x * x))));
} else {
tmp = fma((x * x), fma((x * x), fma(x, (x * -0.08472222222222223), 0.20833333333333334), -0.5), 1.0);
}
return tmp;
}
# Julia rendering — line breaks restored: the report dump had fused all
# statements onto one line, which is not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) <= 0.0)
		tmp = Float64(720.0 / Float64(Float64(x * x) * Float64(x * Float64(x * Float64(x * x)))));
	else
		tmp = fma(Float64(x * x), fma(Float64(x * x), fma(x, Float64(x * -0.08472222222222223), 0.20833333333333334), -0.5), 1.0);
	end
	return tmp
end
(* Wolfram rendering. *)
code[x_] := If[LessEqual[N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0], N[(720.0 / N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.08472222222222223), $MachinePrecision] + 0.20833333333333334), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{e^{x} + e^{-x}} \leq 0:\\
\;\;\;\;\frac{720}{\left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot -0.08472222222222223, 0.20833333333333334\right), -0.5\right), 1\right)\\
\end{array}
\end{array}
if (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) < 0.0
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites83.1%
Taylor expanded in x around inf
lower-/.f64N/A
metadata-evalN/A
pow-plusN/A
metadata-evalN/A
pow-plusN/A
associate-*r*N/A
unpow2N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6483.8
Applied rewrites83.8%
if 0.0 < (/.f64 #s(literal 2 binary64) (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x)))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6499.7
Applied rewrites99.7%
; Alternative: branch on e^x + e^-x <= 4 — near zero use a short polynomial;
; otherwise the reciprocal of a truncated cosh series (1/24 = 0.0416..., see log).
(FPCore (x) :precision binary64 (if (<= (+ (exp x) (exp (- x))) 4.0) (fma (* x x) (fma x (* x 0.20833333333333334) -0.5) 1.0) (/ 1.0 (* (* x x) (fma 0.041666666666666664 (* x x) 0.5)))))
/* C rendering; fma() is C99 <math.h>. */
double code(double x) {
double tmp;
if ((exp(x) + exp(-x)) <= 4.0) {
tmp = fma((x * x), fma(x, (x * 0.20833333333333334), -0.5), 1.0);
} else {
tmp = 1.0 / ((x * x) * fma(0.041666666666666664, (x * x), 0.5));
}
return tmp;
}
# Julia rendering — line breaks restored: the report dump had fused all
# statements onto one line, which is not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (Float64(exp(x) + exp(Float64(-x))) <= 4.0)
		tmp = fma(Float64(x * x), fma(x, Float64(x * 0.20833333333333334), -0.5), 1.0);
	else
		tmp = Float64(1.0 / Float64(Float64(x * x) * fma(0.041666666666666664, Float64(x * x), 0.5)));
	end
	return tmp
end
(* Wolfram rendering. *)
code[x_] := If[LessEqual[N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 4.0], N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 0.20833333333333334), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision], N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(0.041666666666666664 * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{x} + e^{-x} \leq 4:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 0.20833333333333334, -0.5\right), 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\left(x \cdot x\right) \cdot \mathsf{fma}\left(0.041666666666666664, x \cdot x, 0.5\right)}\\
\end{array}
\end{array}
if (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 4Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
lower-fma.f64N/A
lower-*.f6499.6
Applied rewrites99.6%
if 4 < (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6483.1
Applied rewrites83.1%
Taylor expanded in x around 0
Applied rewrites77.0%
Taylor expanded in x around inf
+-commutativeN/A
distribute-lft-inN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
metadata-evalN/A
pow-sqrN/A
associate-/l*N/A
*-rgt-identityN/A
associate-*r/N/A
rgt-mult-inverseN/A
*-commutativeN/A
*-lft-identityN/A
*-commutativeN/A
metadata-evalN/A
pow-sqrN/A
associate-*l*N/A
Applied rewrites77.0%
; Alternative: same small-|x| polynomial as above; for e^x + e^-x > 4 use
; 24 / x^4 (Taylor expansion around inf, per the rewrite log below).
(FPCore (x) :precision binary64 (if (<= (+ (exp x) (exp (- x))) 4.0) (fma (* x x) (fma x (* x 0.20833333333333334) -0.5) 1.0) (/ 24.0 (* x (* x (* x x))))))
/* C rendering; fma() is C99 <math.h>. */
double code(double x) {
double tmp;
if ((exp(x) + exp(-x)) <= 4.0) {
tmp = fma((x * x), fma(x, (x * 0.20833333333333334), -0.5), 1.0);
} else {
tmp = 24.0 / (x * (x * (x * x)));
}
return tmp;
}
# Julia rendering — line breaks restored: the report dump had fused all
# statements onto one line, which is not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (Float64(exp(x) + exp(Float64(-x))) <= 4.0)
		tmp = fma(Float64(x * x), fma(x, Float64(x * 0.20833333333333334), -0.5), 1.0);
	else
		tmp = Float64(24.0 / Float64(x * Float64(x * Float64(x * x))));
	end
	return tmp
end
(* Wolfram rendering. *)
code[x_] := If[LessEqual[N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 4.0], N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 0.20833333333333334), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision], N[(24.0 / N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{x} + e^{-x} \leq 4:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 0.20833333333333334, -0.5\right), 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{24}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\\
\end{array}
\end{array}
if (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 4Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
lower-fma.f64N/A
lower-*.f6499.6
Applied rewrites99.6%
if 4 < (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6477.0
Applied rewrites77.0%
Taylor expanded in x around inf
lower-/.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6477.0
Applied rewrites77.0%
; Alternative: cruder branches — 1 - x^2/2 near zero, 2/x^2 otherwise
; (Taylor expansions per the rewrite log below; 56.8% on the far branch).
(FPCore (x) :precision binary64 (if (<= (+ (exp x) (exp (- x))) 4.0) (fma -0.5 (* x x) 1.0) (/ 2.0 (* x x))))
/* C rendering; fma() is C99 <math.h>. */
double code(double x) {
double tmp;
if ((exp(x) + exp(-x)) <= 4.0) {
tmp = fma(-0.5, (x * x), 1.0);
} else {
tmp = 2.0 / (x * x);
}
return tmp;
}
# Julia rendering — line breaks restored: the report dump had fused all
# statements onto one line, which is not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (Float64(exp(x) + exp(Float64(-x))) <= 4.0)
		tmp = fma(-0.5, Float64(x * x), 1.0);
	else
		tmp = Float64(2.0 / Float64(x * x));
	end
	return tmp
end
(* Wolfram rendering. *)
code[x_] := If[LessEqual[N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 4.0], N[(-0.5 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], N[(2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{x} + e^{-x} \leq 4:\\
\;\;\;\;\mathsf{fma}\left(-0.5, x \cdot x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{x \cdot x}\\
\end{array}
\end{array}
if (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 4Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.3
Applied rewrites99.3%
if 4 < (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
lower-fma.f6456.8
Applied rewrites56.8%
Taylor expanded in x around inf
lower-/.f64N/A
unpow2N/A
lower-*.f6456.8
Applied rewrites56.8%
; Alternative: 1 / p(x) where p is a degree-6 even polynomial (truncated cosh
; series: 1 + x^2/2 + x^4/24 + x^6/720, per the rewrite log below), with the
; top term factored as x*(x*(x*(x*x))*c) before the outer fma.
(FPCore (x) :precision binary64 (/ 1.0 (fma (* x (* x (* (* x (* x x)) 0.001388888888888889))) x (fma x (* x (fma x (* x 0.041666666666666664) 0.5)) 1.0))))
/* C rendering; fma() is C99 <math.h>. */
double code(double x) {
return 1.0 / fma((x * (x * ((x * (x * x)) * 0.001388888888888889))), x, fma(x, (x * fma(x, (x * 0.041666666666666664), 0.5)), 1.0));
}
# Julia rendering.
function code(x) return Float64(1.0 / fma(Float64(x * Float64(x * Float64(Float64(x * Float64(x * x)) * 0.001388888888888889))), x, fma(x, Float64(x * fma(x, Float64(x * 0.041666666666666664), 0.5)), 1.0))) end
(* Wolfram rendering. *)
code[x_] := N[(1.0 / N[(N[(x * N[(x * N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * 0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + N[(x * N[(x * N[(x * N[(x * 0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(x \cdot \left(x \cdot \left(\left(x \cdot \left(x \cdot x\right)\right) \cdot 0.001388888888888889\right)\right), x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.041666666666666664, 0.5\right), 1\right)\right)}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6492.5
Applied rewrites92.5%
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
distribute-lft-inN/A
associate-+l+N/A
lift-*.f64N/A
associate-*l*N/A
associate-*r*N/A
*-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f6492.5
Applied rewrites92.5%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
distribute-lft-inN/A
associate-+l+N/A
Applied rewrites92.5%
Final simplification92.5%
; Alternative: same degree-6 reciprocal polynomial in clean Horner form over
; x^2 via nested fma calls.
(FPCore (x) :precision binary64 (/ 1.0 (fma (* x x) (fma (* x x) (fma (* x x) 0.001388888888888889 0.041666666666666664) 0.5) 1.0)))
/* C rendering; fma() is C99 <math.h>. */
double code(double x) {
return 1.0 / fma((x * x), fma((x * x), fma((x * x), 0.001388888888888889, 0.041666666666666664), 0.5), 1.0);
}
# Julia rendering.
function code(x) return Float64(1.0 / fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), 0.001388888888888889, 0.041666666666666664), 0.5), 1.0)) end
(* Wolfram rendering. *)
code[x_] := N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.001388888888888889, 0.041666666666666664\right), 0.5\right), 1\right)}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6492.5
Applied rewrites92.5%
; Alternative: degree-6 reciprocal polynomial again, with the x^6 coefficient
; split as (x*(x*x)) * (x * 0.001388...) inside the inner fma.
(FPCore (x) :precision binary64 (/ 1.0 (fma (* x x) (fma (* x (* x x)) (* x 0.001388888888888889) 0.5) 1.0)))
/* C rendering; fma() is C99 <math.h>. */
double code(double x) {
return 1.0 / fma((x * x), fma((x * (x * x)), (x * 0.001388888888888889), 0.5), 1.0);
}
# Julia rendering.
function code(x) return Float64(1.0 / fma(Float64(x * x), fma(Float64(x * Float64(x * x)), Float64(x * 0.001388888888888889), 0.5), 1.0)) end
(* Wolfram rendering. *)
code[x_] := N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * 0.001388888888888889), $MachinePrecision] + 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot \left(x \cdot x\right), x \cdot 0.001388888888888889, 0.5\right), 1\right)}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6492.5
Applied rewrites92.5%
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
distribute-lft-inN/A
associate-+l+N/A
lift-*.f64N/A
associate-*l*N/A
associate-*r*N/A
*-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f6492.5
Applied rewrites92.5%
Taylor expanded in x around 0
Applied rewrites92.2%
; Alternative: degree-4 truncation 1 / (1 + x^2/2 + x^4/24) in Horner/fma form
; (89.8% per the log below).
(FPCore (x) :precision binary64 (/ 1.0 (fma (* x x) (fma (* x x) 0.041666666666666664 0.5) 1.0)))
/* C rendering; fma() is C99 <math.h>. */
double code(double x) {
return 1.0 / fma((x * x), fma((x * x), 0.041666666666666664, 0.5), 1.0);
}
# Julia rendering.
function code(x) return Float64(1.0 / fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, 0.5), 1.0)) end
(* Wolfram rendering. *)
code[x_] := N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + 0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, 0.5\right), 1\right)}
\end{array}
Initial program 100.0%
cosh-undefN/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6492.5
Applied rewrites92.5%
Taylor expanded in x around 0
Applied rewrites89.8%
; Alternative: 2 / (x^2 + 2), from the order-2 Taylor expansion around 0
; (80.7% per the log below).
(FPCore (x) :precision binary64 (/ 2.0 (fma x x 2.0)))
/* C rendering; fma() is C99 <math.h>. */
double code(double x) {
return 2.0 / fma(x, x, 2.0);
}
# Julia rendering.
function code(x) return Float64(2.0 / fma(x, x, 2.0)) end
(* Wolfram rendering. *)
code[x_] := N[(2.0 / N[(x * x + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, x, 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
lower-fma.f6480.7
Applied rewrites80.7%
; Alternative: constant 1.0 — the value at x = 0; 56.9% overall per the log below.
(FPCore (x) :precision binary64 1.0)
/* C rendering: parameter intentionally unused. */
double code(double x) {
return 1.0;
}
! Fortran 90 rendering.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
// Java rendering.
public static double code(double x) {
return 1.0;
}
# Python rendering.
def code(x): return 1.0
# Julia rendering.
function code(x) return 1.0 end
% MATLAB/Octave rendering.
function tmp = code(x) tmp = 1.0; end
(* Wolfram rendering. *)
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites56.9%
herbie shell --seed 2024219
; Reproduction: the original input program as submitted to Herbie.
(FPCore (x)
:name "Hyperbolic secant"
:precision binary64
(/ 2.0 (+ (exp x) (exp (- x)))))