
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant: code(x) = 2 / (exp(x) + exp(-x)) in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
/** Hyperbolic secant: sech(x) = 2 / (e^x + e^(-x)). */
public static double code(double x) {
    final double up = Math.exp(x);
    final double down = Math.exp(-x);
    return 2.0 / (up + down);
}
def code(x):
    """Hyperbolic secant: 2 / (exp(x) + exp(-x))."""
    forward = math.exp(x)
    backward = math.exp(-x)
    return 2.0 / (forward + backward)
# Hyperbolic secant: 2 / (exp(x) + exp(-x)), with explicit Float64 rounding of intermediates.
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
% Hyperbolic secant: 2 / (exp(x) + exp(-x)).
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
(* Hyperbolic secant: 2/(E^x + E^-x), rounded to machine precision after each operation. *)
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
/* Hyperbolic secant: sech(x) = 2 / (e^x + e^-x). */
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant: code(x) = 2 / (exp(x) + exp(-x)) in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
// Hyperbolic secant: sech(x) = 2 / (e^x + e^(-x)).
public static double code(double x) {
return 2.0 / (Math.exp(x) + Math.exp(-x));
}
# Hyperbolic secant: 2 / (exp(x) + exp(-x)).
def code(x): return 2.0 / (math.exp(x) + math.exp(-x))
# Hyperbolic secant: 2 / (exp(x) + exp(-x)), with explicit Float64 rounding of intermediates.
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
% Hyperbolic secant: 2 / (exp(x) + exp(-x)).
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
(* Hyperbolic secant: 2/(E^x + E^-x), rounded to machine precision after each operation. *)
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
(FPCore (x) :precision binary64 (pow (cosh x) -1.0))
double code(double x) {
return pow(cosh(x), -1.0);
}
! Hyperbolic secant expressed as cosh(x) ** (-1).
real(8) function code(x)
real(8), intent (in) :: x
code = cosh(x) ** (-1.0d0)
end function
// Hyperbolic secant expressed as cosh(x)^-1.
public static double code(double x) {
return Math.pow(Math.cosh(x), -1.0);
}
# Hyperbolic secant expressed as cosh(x)**-1.
def code(x): return math.pow(math.cosh(x), -1.0)
# Hyperbolic secant expressed as cosh(x)^-1.
function code(x) return cosh(x) ^ -1.0 end
% Hyperbolic secant expressed as cosh(x)^-1.
function tmp = code(x) tmp = cosh(x) ^ -1.0; end
(* Hyperbolic secant expressed as Cosh[x]^-1 at machine precision. *)
code[x_] := N[Power[N[Cosh[x], $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\cosh x}^{-1}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-defN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Final simplification100.0%
(FPCore (x) :precision binary64 (if (<= (+ (exp x) (exp (- x))) 4.0) (fma (* x x) -0.5 1.0) (pow (* (* (fma 0.041666666666666664 (* x x) 0.5) x) x) -1.0)))
double code(double x) {
double tmp;
if ((exp(x) + exp(-x)) <= 4.0) {
tmp = fma((x * x), -0.5, 1.0);
} else {
tmp = pow(((fma(0.041666666666666664, (x * x), 0.5) * x) * x), -1.0);
}
return tmp;
}
# Piecewise sech(x): fma-based quadratic form when exp(x)+exp(-x) <= 4, else reciprocal of a truncated series in x.
function code(x) tmp = 0.0 if (Float64(exp(x) + exp(Float64(-x))) <= 4.0) tmp = fma(Float64(x * x), -0.5, 1.0); else tmp = Float64(Float64(fma(0.041666666666666664, Float64(x * x), 0.5) * x) * x) ^ -1.0; end return tmp end
(* Piecewise sech(x): quadratic form when E^x + E^-x <= 4, else reciprocal of a truncated series, at machine precision. *)
code[x_] := If[LessEqual[N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 4.0], N[(N[(x * x), $MachinePrecision] * -0.5 + 1.0), $MachinePrecision], N[Power[N[(N[(N[(0.041666666666666664 * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{x} + e^{-x} \leq 4:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, -0.5, 1\right)\\
\mathbf{else}:\\
\;\;\;\;{\left(\left(\mathsf{fma}\left(0.041666666666666664, x \cdot x, 0.5\right) \cdot x\right) \cdot x\right)}^{-1}\\
\end{array}
\end{array}
if (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 4
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 4 < (+.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-defN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6472.5
Applied rewrites72.5%
Taylor expanded in x around inf
Applied rewrites72.5%
Final simplification85.6%
(FPCore (x)
:precision binary64
(pow
(fma
(*
(fma (fma 0.001388888888888889 (* x x) 0.041666666666666664) (* x x) 0.5)
x)
x
1.0)
-1.0))
double code(double x) {
return pow(fma((fma(fma(0.001388888888888889, (x * x), 0.041666666666666664), (x * x), 0.5) * x), x, 1.0), -1.0);
}
# Reciprocal of a degree-6 even fma-evaluated polynomial approximating cosh(x).
function code(x) return fma(Float64(fma(fma(0.001388888888888889, Float64(x * x), 0.041666666666666664), Float64(x * x), 0.5) * x), x, 1.0) ^ -1.0 end
(* Reciprocal of a degree-6 even polynomial approximating cosh(x), at machine precision. *)
code[x_] := N[Power[N[(N[(N[(N[(0.001388888888888889 * N[(x * x), $MachinePrecision] + 0.041666666666666664), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision] * x), $MachinePrecision] * x + 1.0), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x \cdot x, 0.041666666666666664\right), x \cdot x, 0.5\right) \cdot x, x, 1\right)\right)}^{-1}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-defN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6490.7
Applied rewrites90.7%
Applied rewrites90.7%
Final simplification90.7%
(FPCore (x) :precision binary64 (pow (fma (* (fma (* (* x x) 0.001388888888888889) (* x x) 0.5) x) x 1.0) -1.0))
double code(double x) {
return pow(fma((fma(((x * x) * 0.001388888888888889), (x * x), 0.5) * x), x, 1.0), -1.0);
}
# Reciprocal of a degree-6 even polynomial approximating cosh(x); leading term formed as (x*x)*c before the fma.
function code(x) return fma(Float64(fma(Float64(Float64(x * x) * 0.001388888888888889), Float64(x * x), 0.5) * x), x, 1.0) ^ -1.0 end
(* Reciprocal of a degree-6 even polynomial approximating cosh(x), at machine precision. *)
code[x_] := N[Power[N[(N[(N[(N[(N[(x * x), $MachinePrecision] * 0.001388888888888889), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision] * x), $MachinePrecision] * x + 1.0), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(\mathsf{fma}\left(\left(x \cdot x\right) \cdot 0.001388888888888889, x \cdot x, 0.5\right) \cdot x, x, 1\right)\right)}^{-1}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-defN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6490.7
Applied rewrites90.7%
Taylor expanded in x around inf
Applied rewrites90.7%
Applied rewrites90.7%
Final simplification90.7%
(FPCore (x) :precision binary64 (pow (fma (* (* (fma 0.001388888888888889 (* x x) 0.041666666666666664) x) x) (* x x) 1.0) -1.0))
double code(double x) {
return pow(fma(((fma(0.001388888888888889, (x * x), 0.041666666666666664) * x) * x), (x * x), 1.0), -1.0);
}
# Reciprocal of a degree-6 even polynomial approximating cosh(x); inner series times x*x, fma'd with x^2.
function code(x) return fma(Float64(Float64(fma(0.001388888888888889, Float64(x * x), 0.041666666666666664) * x) * x), Float64(x * x), 1.0) ^ -1.0 end
(* Reciprocal of a degree-6 even polynomial approximating cosh(x), at machine precision. *)
code[x_] := N[Power[N[(N[(N[(N[(0.001388888888888889 * N[(x * x), $MachinePrecision] + 0.041666666666666664), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(\left(\mathsf{fma}\left(0.001388888888888889, x \cdot x, 0.041666666666666664\right) \cdot x\right) \cdot x, x \cdot x, 1\right)\right)}^{-1}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-defN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6490.7
Applied rewrites90.7%
Taylor expanded in x around inf
Applied rewrites90.6%
Final simplification90.6%
(FPCore (x) :precision binary64 (pow (fma (fma 0.041666666666666664 (* x x) 0.5) (* x x) 1.0) -1.0))
double code(double x) {
return pow(fma(fma(0.041666666666666664, (x * x), 0.5), (x * x), 1.0), -1.0);
}
# Reciprocal of a degree-4 even polynomial approximating cosh(x), nested fmas in x^2.
function code(x) return fma(fma(0.041666666666666664, Float64(x * x), 0.5), Float64(x * x), 1.0) ^ -1.0 end
(* Reciprocal of a degree-4 even polynomial approximating cosh(x), at machine precision. *)
code[x_] := N[Power[N[(N[(0.041666666666666664 * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.041666666666666664, x \cdot x, 0.5\right), x \cdot x, 1\right)\right)}^{-1}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-defN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6485.6
Applied rewrites85.6%
Final simplification85.6%
(FPCore (x) :precision binary64 (pow (fma (* 0.041666666666666664 (* x x)) (* x x) 1.0) -1.0))
double code(double x) {
return pow(fma((0.041666666666666664 * (x * x)), (x * x), 1.0), -1.0);
}
# Reciprocal of 1 + c*x^4 (c ~ 1/24), plain product feeding one fma.
function code(x) return fma(Float64(0.041666666666666664 * Float64(x * x)), Float64(x * x), 1.0) ^ -1.0 end
(* Reciprocal of 1 + c*x^4 (c ~ 1/24), at machine precision. *)
code[x_] := N[Power[N[(N[(0.041666666666666664 * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(0.041666666666666664 \cdot \left(x \cdot x\right), x \cdot x, 1\right)\right)}^{-1}
\end{array}
Initial program 100.0%
lift-/.f64N/A
clear-numN/A
lift-+.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
cosh-defN/A
lower-/.f64N/A
lower-cosh.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6485.6
Applied rewrites85.6%
Taylor expanded in x around inf
Applied rewrites85.5%
Final simplification85.5%
(FPCore (x) :precision binary64 (/ 2.0 (fma x x 2.0)))
double code(double x) {
return 2.0 / fma(x, x, 2.0);
}
# sech(x) approximated as 2 / (x^2 + 2), denominator via one fma.
function code(x) return Float64(2.0 / fma(x, x, 2.0)) end
(* sech(x) approximated as 2 / (x^2 + 2), at machine precision. *)
code[x_] := N[(2.0 / N[(x * x + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\mathsf{fma}\left(x, x, 2\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
lower-fma.f6474.4
Applied rewrites74.4%
(FPCore (x) :precision binary64 1.0)
/* Zeroth-order approximation: always returns the constant 1. */
double code(double x) {
    (void)x; /* input intentionally unused */
    return 1.0;
}
! Zeroth-order approximation: always returns the constant 1.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
// Zeroth-order approximation: always returns the constant 1.0.
public static double code(double x) {
return 1.0;
}
def code(x):
    """Zeroth-order approximation: always returns 1.0 regardless of x."""
    return 1.0
# Zeroth-order approximation: always returns the constant 1.0.
function code(x) return 1.0 end
% Zeroth-order approximation: always returns the constant 1.0.
function tmp = code(x) tmp = 1.0; end
(* Zeroth-order approximation: always returns the constant 1.0. *)
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites49.2%
herbie shell --seed 2024319
(FPCore (x)
:name "Hyperbolic secant"
:precision binary64
(/ 2.0 (+ (exp x) (exp (- x)))))