
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
real(8) function code(x)
    ! Hyperbolic secant: 2 / (exp(x) + exp(-x)).
    real(8), intent (in) :: x
    real(8) :: denom
    denom = exp(x) + exp(-x)
    code = 2.0d0 / denom
end function
public static double code(double x) {
    // Hyperbolic secant: 2 / (e^x + e^-x).
    final double denom = Math.exp(x) + Math.exp(-x);
    return 2.0 / denom;
}
def code(x):
    """Hyperbolic secant: 2 / (e**x + e**-x)."""
    denom = math.exp(x) + math.exp(-x)
    return 2.0 / denom
function code(x)
    # Hyperbolic secant: 2 / (e^x + e^-x).
    denom = Float64(exp(x) + exp(Float64(-x)))
    return Float64(2.0 / denom)
end
function tmp = code(x)
    % Hyperbolic secant: 2 / (exp(x) + exp(-x)).
    denom = exp(x) + exp(-x);
    tmp = 2.0 / denom;
end
(* Machine-precision evaluation of 2/(e^x + e^-x); each intermediate is rounded via N[..., $MachinePrecision]. *)
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
real(8) function code(x)
    ! Hyperbolic secant: 2 / (exp(x) + exp(-x)).
    real(8), intent (in) :: x
    real(8) :: denom
    denom = exp(x) + exp(-x)
    code = 2.0d0 / denom
end function
public static double code(double x) {
    // Hyperbolic secant: 2 / (e^x + e^-x).
    final double denom = Math.exp(x) + Math.exp(-x);
    return 2.0 / denom;
}
def code(x):
    """Hyperbolic secant: 2 / (e**x + e**-x)."""
    denom = math.exp(x) + math.exp(-x)
    return 2.0 / denom
function code(x)
    # Hyperbolic secant: 2 / (e^x + e^-x).
    denom = Float64(exp(x) + exp(Float64(-x)))
    return Float64(2.0 / denom)
end
function tmp = code(x)
    % Hyperbolic secant: 2 / (exp(x) + exp(-x)).
    denom = exp(x) + exp(-x);
    tmp = 2.0 / denom;
end
(* Machine-precision evaluation of 2/(e^x + e^-x); each intermediate is rounded via N[..., $MachinePrecision]. *)
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
(FPCore (x) :precision binary64 (/ 1.0 (cosh x)))
double code(double x) {
return 1.0 / cosh(x);
}
real(8) function code(x)
    ! Hyperbolic secant via the intrinsic cosh: 1 / cosh(x).
    real(8), intent (in) :: x
    real(8) :: c
    c = cosh(x)
    code = 1.0d0 / c
end function
public static double code(double x) {
    // Hyperbolic secant via the library cosh: 1 / cosh(x).
    final double c = Math.cosh(x);
    return 1.0 / c;
}
def code(x):
    """Hyperbolic secant via the library cosh: 1 / cosh(x)."""
    c = math.cosh(x)
    return 1.0 / c
function code(x)
    # Hyperbolic secant via the library cosh: 1 / cosh(x).
    c = cosh(x)
    return Float64(1.0 / c)
end
function tmp = code(x)
    % Hyperbolic secant via cosh: 1 / cosh(x).
    c = cosh(x);
    tmp = 1.0 / c;
end
(* Machine-precision evaluation of 1/cosh(x), rounded via N[..., $MachinePrecision]. *)
code[x_] := N[(1.0 / N[Cosh[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\cosh x}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-def-revN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
(FPCore (x)
:precision binary64
(/
1.0
(fma
(*
(fma (* (fma (* x x) 0.001388888888888889 0.041666666666666664) x) x 0.5)
x)
x
1.0)))
double code(double x) {
return 1.0 / fma((fma((fma((x * x), 0.001388888888888889, 0.041666666666666664) * x), x, 0.5) * x), x, 1.0);
}
function code(x)
    # Reciprocal of the degree-6 Taylor polynomial of cosh about 0, via fma.
    p = fma(Float64(x * x), 0.001388888888888889, 0.041666666666666664)
    q = fma(Float64(p * x), x, 0.5)
    denom = fma(Float64(q * x), x, 1.0)
    return Float64(1.0 / denom)
end
(* Reciprocal of the degree-6 polynomial expansion of cosh about 0 (coefficients 1/720, 1/24, 1/2), each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(1.0 / N[(N[(N[(N[(N[(N[(x * x), $MachinePrecision] * 0.001388888888888889 + 0.041666666666666664), $MachinePrecision] * x), $MachinePrecision] * x + 0.5), $MachinePrecision] * x), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.001388888888888889, 0.041666666666666664\right) \cdot x, x, 0.5\right) \cdot x, x, 1\right)}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-def-revN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6493.6
Applied rewrites93.6%
Applied rewrites93.6%
Applied rewrites93.6%
(FPCore (x) :precision binary64 (/ 1.0 (fma (fma (* 0.001388888888888889 (* x x)) (* x x) 0.5) (* x x) 1.0)))
double code(double x) {
return 1.0 / fma(fma((0.001388888888888889 * (x * x)), (x * x), 0.5), (x * x), 1.0);
}
function code(x)
    # Reciprocal of an even polynomial in x^2 (Taylor form of cosh about 0), via fma.
    x2 = Float64(x * x)
    inner = fma(Float64(0.001388888888888889 * x2), x2, 0.5)
    return Float64(1.0 / fma(inner, x2, 1.0))
end
(* Reciprocal of an even polynomial in x^2 (expansion of cosh about 0), each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(1.0 / N[(N[(N[(0.001388888888888889 * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889 \cdot \left(x \cdot x\right), x \cdot x, 0.5\right), x \cdot x, 1\right)}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-def-revN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6493.6
Applied rewrites93.6%
Taylor expanded in x around inf
Applied rewrites93.1%
(FPCore (x) :precision binary64 (/ 1.0 (fma (* (fma 0.041666666666666664 (* x x) 0.5) x) x 1.0)))
double code(double x) {
return 1.0 / fma((fma(0.041666666666666664, (x * x), 0.5) * x), x, 1.0);
}
function code(x)
    # Reciprocal of the degree-4 Taylor polynomial of cosh about 0, via fma.
    inner = fma(0.041666666666666664, Float64(x * x), 0.5)
    return Float64(1.0 / fma(Float64(inner * x), x, 1.0))
end
(* Reciprocal of the degree-4 polynomial expansion of cosh about 0 (coefficients 1/24, 1/2), each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(1.0 / N[(N[(N[(0.041666666666666664 * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision] * x), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(\mathsf{fma}\left(0.041666666666666664, x \cdot x, 0.5\right) \cdot x, x, 1\right)}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-def-revN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6487.8
Applied rewrites87.8%
Applied rewrites87.8%
(FPCore (x) :precision binary64 (/ 1.0 (fma (* 0.041666666666666664 (* x x)) (* x x) 1.0)))
double code(double x) {
return 1.0 / fma((0.041666666666666664 * (x * x)), (x * x), 1.0);
}
function code(x)
    # Reciprocal of 1 + x^4/24 (even-polynomial approximation), via fma.
    x2 = Float64(x * x)
    return Float64(1.0 / fma(Float64(0.041666666666666664 * x2), x2, 1.0))
end
(* Reciprocal of 1 + x^4/24 (even-polynomial approximation), each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(1.0 / N[(N[(0.041666666666666664 * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(0.041666666666666664 \cdot \left(x \cdot x\right), x \cdot x, 1\right)}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-def-revN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6487.8
Applied rewrites87.8%
Taylor expanded in x around inf
Applied rewrites87.1%
(FPCore (x) :precision binary64 (/ 2.0 (fma x x 2.0)))
double code(double x) {
return 2.0 / fma(x, x, 2.0);
}
function code(x)
    # Low-order approximation 2 / (x^2 + 2), with the denominator fused.
    denom = fma(x, x, 2.0)
    return Float64(2.0 / denom)
end
(* Low-order approximation 2 / (x^2 + 2), each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(2.0 / N[(x * x + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, x, 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
lower-fma.f6476.9
Applied rewrites76.9%
(FPCore (x) :precision binary64 1.0)
double code(double x) {
	/* Constant approximation: always returns 1, independent of x. */
	(void)x;
	return 1.0;
}
real(8) function code(x)
    ! Constant approximation: always returns 1, independent of x.
    real(8), intent (in) :: x
    code = 1.0d0
end function
public static double code(double x) {
    // Constant approximation: always returns 1, independent of x.
    return 1.0;
}
def code(x):
    """Constant approximation: always returns 1.0, independent of x."""
    return 1.0
function code(x)
    # Constant approximation: always returns 1.0, independent of x.
    return 1.0
end
function tmp = code(x)
    % Constant approximation: always returns 1.0, independent of x.
    tmp = 1.0;
end
(* Constant approximation: always returns 1.0, independent of x. *)
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites50.8%
herbie shell --seed 2024312
(FPCore (x)
:name "Hyperbolic secant"
:precision binary64
(/ 2.0 (+ (exp x) (exp (- x)))))