
Initial program:
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
double code(double x) {
    return (exp(x) - exp(-x)) / 2.0;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (exp(x) - exp(-x)) / 2.0d0
end function
public static double code(double x) {
    return (Math.exp(x) - Math.exp(-x)) / 2.0;
}
def code(x): return (math.exp(x) - math.exp(-x)) / 2.0
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
function tmp = code(x) tmp = (exp(x) - exp(-x)) / 2.0; end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
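Note (an illustration, not part of Herbie's output): near x = 0 the two exponentials agree in almost every bit, so the subtraction cancels most of the significand; this cancellation is the usual culprit when a formula like this scores only about 51.8% accuracy. A minimal C sketch that makes the effect visible by comparing against the library sinh:

#include <math.h>
#include <stdio.h>

int main(void) {
    double x = 1e-9;
    /* exp(x) and exp(-x) agree to roughly 16 significant digits here,
       so their difference keeps only a few correct bits. */
    double naive = (exp(x) - exp(-x)) / 2.0;
    double reference = sinh(x); /* the library routine avoids the cancellation */
    printf("naive     = %.17g\n", naive);
    printf("reference = %.17g\n", reference);
    return 0;
}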
Sampling outcomes were computed in binary64 precision.

Herbie found 10 alternatives:
Alternative 1:
(FPCore (x) :precision binary64 (sinh x))
double code(double x) {
    return sinh(x);
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = sinh(x)
end function
public static double code(double x) {
    return Math.sinh(x);
}
def code(x): return math.sinh(x)
function code(x) return sinh(x) end
function tmp = code(x) tmp = sinh(x); end
code[x_] := N[Sinh[x], $MachinePrecision]
\begin{array}{l}
\\
\sinh x
\end{array}
Derivation:
Initial program: 51.8%
  sinh-def: N/A
  sinh-lowering-sinh.f64: 100.0%
Applied egg-rr: 100.0%
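The sinh-def step above is the defining identity of the hyperbolic sine, read right to left:

\sinh x = \frac{e^{x} - e^{-x}}{2}

Rewriting the input as a single call to the library sinh removes the subtraction entirely, which is why this alternative reaches 100.0% accuracy.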
Alternative 2:
(FPCore (x)
  :precision binary64
  (let* ((t_0 (fma (* x x) 0.008333333333333333 0.16666666666666666)))
    (if (<= x 4e+61)
        (/
         (* x (fma (* (* x x) (* x x)) (* t_0 t_0) -1.0))
         (fma (* x x) t_0 -1.0))
        (* 0.008333333333333333 (* x (* x (* x (* x x))))))))
double code(double x) {
    double t_0 = fma((x * x), 0.008333333333333333, 0.16666666666666666);
    double tmp;
    if (x <= 4e+61) {
        tmp = (x * fma(((x * x) * (x * x)), (t_0 * t_0), -1.0)) / fma((x * x), t_0, -1.0);
    } else {
        tmp = 0.008333333333333333 * (x * (x * (x * (x * x))));
    }
    return tmp;
}
function code(x)
    t_0 = fma(Float64(x * x), 0.008333333333333333, 0.16666666666666666)
    tmp = 0.0
    if (x <= 4e+61)
        tmp = Float64(Float64(x * fma(Float64(Float64(x * x) * Float64(x * x)), Float64(t_0 * t_0), -1.0)) / fma(Float64(x * x), t_0, -1.0));
    else
        tmp = Float64(0.008333333333333333 * Float64(x * Float64(x * Float64(x * Float64(x * x)))));
    end
    return tmp
end
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * 0.008333333333333333 + 0.16666666666666666), $MachinePrecision]}, If[LessEqual[x, 4e+61], N[(N[(x * N[(N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(t$95$0 * t$95$0), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] * t$95$0 + -1.0), $MachinePrecision]), $MachinePrecision], N[(0.008333333333333333 * N[(x * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(x \cdot x, 0.008333333333333333, 0.16666666666666666\right)\\
\mathbf{if}\;x \leq 4 \cdot 10^{+61}:\\
\;\;\;\;\frac{x \cdot \mathsf{fma}\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right), t_0 \cdot t_0, -1\right)}{\mathsf{fma}\left(x \cdot x, t_0, -1\right)}\\
\mathbf{else}:\\
\;\;\;\;0.008333333333333333 \cdot \left(x \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)\\
\end{array}
\end{array}
Derivation:
if x < 3.9999999999999998e61
Initial program: 42.1%
Taylor expanded in x around 0
  +-rgt-identity: N/A
  accelerator-lowering-fma.f64: N/A
  +-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  *-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  *-commutative: N/A
  *-lowering-*.f64: N/A
  +-commutative: N/A
  *-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: 87.5%
Simplified: 87.5%
  +-rgt-identity: N/A
  *-commutative: N/A
  flip-+: N/A
  associate-*l/: N/A
  /-lowering-/.f64: N/A
Applied egg-rr: 71.5%
if 3.9999999999999998e61 < x
Initial program: 100.0%
Taylor expanded in x around 0
  *-lowering-*.f64: N/A
  +-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: N/A
  +-commutative: N/A
  *-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: 100.0%
Simplified: 100.0%
Taylor expanded in x around inf
  *-lowering-*.f64: N/A
  metadata-eval: N/A
  pow-plus: N/A
  *-lowering-*.f64: N/A
  metadata-eval: N/A
  pow-sqr: N/A
  unpow2: N/A
  associate-*l*: N/A
  unpow2: N/A
  cube-mult: N/A
  *-lowering-*.f64: N/A
  cube-mult: N/A
  unpow2: N/A
  *-lowering-*.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: 100.0%
Simplified: 100.0%
Final simplification: 76.3%
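Note (my algebra, not part of Herbie's output): with t_0 = \frac{1}{6} + \frac{x^{2}}{120}, the quotient in the first branch is an exact difference-of-squares factorization,

\frac{x \left( \left( x^{2} t_0 \right)^{2} - 1 \right)}{x^{2} t_0 - 1} = x \left( x^{2} t_0 + 1 \right) = x + \frac{x^{3}}{6} + \frac{x^{5}}{120},

so this branch is mathematically the degree-5 Taylor polynomial of \sinh x in a rearranged form.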
Alternative 3:
(FPCore (x)
  :precision binary64
  (fma
   (*
    x
    (*
     x
     (fma
      x
      (* x (fma x (* x 0.0003968253968253968) 0.016666666666666666))
      0.3333333333333333)))
   (* x 0.5)
   x))
double code(double x) {
    return fma((x * (x * fma(x, (x * fma(x, (x * 0.0003968253968253968), 0.016666666666666666)), 0.3333333333333333))), (x * 0.5), x);
}
function code(x) return fma(Float64(x * Float64(x * fma(x, Float64(x * fma(x, Float64(x * 0.0003968253968253968), 0.016666666666666666)), 0.3333333333333333))), Float64(x * 0.5), x) end
code[x_] := N[(N[(x * N[(x * N[(x * N[(x * N[(x * N[(x * 0.0003968253968253968), $MachinePrecision] + 0.016666666666666666), $MachinePrecision]), $MachinePrecision] + 0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(x * 0.5), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.0003968253968253968, 0.016666666666666666\right), 0.3333333333333333\right)\right), x \cdot 0.5, x\right)
\end{array}
Derivation:
Initial program: 51.8%
Taylor expanded in x around 0
  *-lowering-*.f64: N/A
  +-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: N/A
  +-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: N/A
  +-commutative: N/A
  *-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: 92.3%
Simplified: 92.3%
  distribute-lft-in: N/A
  *-commutative: N/A
  associate-*r*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: N/A
  *-lowering-*.f64: N/A
  *-lowering-*.f64: 92.3%
Applied egg-rr: 92.3%
Applied egg-rr: 92.3%
Taylor expanded in x around 0
Simplified: 92.3%
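A note on the fma forms (a sketch under my own naming, not Herbie's output): fma(a, b, c) computes a*b + c with a single rounding, so nested fma calls evaluate these Taylor polynomials Horner-style with fewer rounding errors than separate multiplies and adds. In C, the same degree-7 polynomial could be written as:

#include <math.h>
#include <stdio.h>

/* Degree-7 odd Maclaurin polynomial for sinh(x), evaluated Horner-style in
   x*x with fma: x * (1 + x^2*(1/6 + x^2*(1/120 + x^2/5040))). Illustrative
   only; the alternatives in this report nest their fmas slightly differently. */
static double sinh_poly7(double x) {
    double x2 = x * x;
    double p = fma(x2, 0.0001984126984126984, 0.008333333333333333);
    p = fma(x2, p, 0.16666666666666666);
    return x * fma(x2, p, 1.0);
}

int main(void) {
    printf("poly = %.17g\nsinh = %.17g\n", sinh_poly7(0.5), sinh(0.5));
    return 0;
}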
Alternative 4:
(FPCore (x)
  :precision binary64
  (*
   x
   (fma
    (* x x)
    (fma
     x
     (* x (fma x (* x 0.0001984126984126984) 0.008333333333333333))
     0.16666666666666666)
    1.0)))
double code(double x) {
    return x * fma((x * x), fma(x, (x * fma(x, (x * 0.0001984126984126984), 0.008333333333333333)), 0.16666666666666666), 1.0);
}
function code(x) return Float64(x * fma(Float64(x * x), fma(x, Float64(x * fma(x, Float64(x * 0.0001984126984126984), 0.008333333333333333)), 0.16666666666666666), 1.0)) end
code[x_] := N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 0.0001984126984126984), $MachinePrecision] + 0.008333333333333333), $MachinePrecision]), $MachinePrecision] + 0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.0001984126984126984, 0.008333333333333333\right), 0.16666666666666666\right), 1\right)
\end{array}
Derivation:
Initial program: 51.8%
  sinh-def: N/A
  sinh-lowering-sinh.f64: 100.0%
Applied egg-rr: 100.0%
Taylor expanded in x around 0
  *-lowering-*.f64: N/A
  +-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: N/A
  +-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: N/A
  +-commutative: N/A
  *-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: 92.3%
Simplified: 92.3%
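For reference (an annotation, not part of Herbie's output): the decimal constants in these polynomial alternatives are truncated Maclaurin coefficients of the hyperbolic sine,

\sinh x = x + \frac{x^{3}}{3!} + \frac{x^{5}}{5!} + \frac{x^{7}}{7!} + \cdots = x + \frac{x^{3}}{6} + \frac{x^{5}}{120} + \frac{x^{7}}{5040} + \cdots

Here 1/6 = 0.16666666666666666, 1/120 = 0.008333333333333333, and 1/5040 = 0.0001984126984126984. Where an alternative factors out x * 0.5, the doubled coefficients 1/3, 1/60, and 1/2520 = 0.0003968253968253968 appear instead.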
Alternative 5:
(FPCore (x) :precision binary64 (if (<= x 4.9) (* x (fma x (* x 0.16666666666666666) 1.0)) (* 0.008333333333333333 (* x (* x (* x (* x x)))))))
double code(double x) {
    double tmp;
    if (x <= 4.9) {
        tmp = x * fma(x, (x * 0.16666666666666666), 1.0);
    } else {
        tmp = 0.008333333333333333 * (x * (x * (x * (x * x))));
    }
    return tmp;
}
function code(x)
    tmp = 0.0
    if (x <= 4.9)
        tmp = Float64(x * fma(x, Float64(x * 0.16666666666666666), 1.0));
    else
        tmp = Float64(0.008333333333333333 * Float64(x * Float64(x * Float64(x * Float64(x * x)))));
    end
    return tmp
end
code[x_] := If[LessEqual[x, 4.9], N[(x * N[(x * N[(x * 0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], N[(0.008333333333333333 * N[(x * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 4.9:\\
\;\;\;\;x \cdot \mathsf{fma}\left(x, x \cdot 0.16666666666666666, 1\right)\\
\mathbf{else}:\\
\;\;\;\;0.008333333333333333 \cdot \left(x \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)\\
\end{array}
\end{array}
Derivation:
if x < 4.9000000000000004
Initial program: 38.0%
  sinh-def: N/A
  sinh-lowering-sinh.f64: 100.0%
Applied egg-rr: 100.0%
Taylor expanded in x around 0
  *-lowering-*.f64: N/A
  +-commutative: N/A
  *-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: 90.3%
Simplified: 90.3%
if 4.9000000000000004 < x
Initial program: 100.0%
Taylor expanded in x around 0
  *-lowering-*.f64: N/A
  +-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: N/A
  +-commutative: N/A
  *-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: 76.7%
Simplified: 76.7%
Taylor expanded in x around inf
  *-lowering-*.f64: N/A
  metadata-eval: N/A
  pow-plus: N/A
  *-lowering-*.f64: N/A
  metadata-eval: N/A
  pow-sqr: N/A
  unpow2: N/A
  associate-*l*: N/A
  unpow2: N/A
  cube-mult: N/A
  *-lowering-*.f64: N/A
  cube-mult: N/A
  unpow2: N/A
  *-lowering-*.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: 76.7%
Simplified: 76.7%
Final simplification: 87.3%
Alternative 6:
(FPCore (x) :precision binary64 (* x (fma (* x x) (fma x (* x 0.008333333333333333) 0.16666666666666666) 1.0)))
double code(double x) {
    return x * fma((x * x), fma(x, (x * 0.008333333333333333), 0.16666666666666666), 1.0);
}
function code(x) return Float64(x * fma(Float64(x * x), fma(x, Float64(x * 0.008333333333333333), 0.16666666666666666), 1.0)) end
code[x_] := N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 0.008333333333333333), $MachinePrecision] + 0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 0.008333333333333333, 0.16666666666666666\right), 1\right)
\end{array}
Derivation:
Initial program: 51.8%
  sinh-def: N/A
  sinh-lowering-sinh.f64: 100.0%
Applied egg-rr: 100.0%
Taylor expanded in x around 0
  *-lowering-*.f64: N/A
  +-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: N/A
  +-commutative: N/A
  unpow2: N/A
  associate-*r*: N/A
  *-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  *-commutative: N/A
  *-lowering-*.f64: 89.6%
Simplified: 89.6%
Alternative 7:
(FPCore (x) :precision binary64 (fma (* x (* x x)) (* x (* x 0.008333333333333333)) x))
double code(double x) {
    return fma((x * (x * x)), (x * (x * 0.008333333333333333)), x);
}
function code(x) return fma(Float64(x * Float64(x * x)), Float64(x * Float64(x * 0.008333333333333333)), x) end
code[x_] := N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * N[(x * 0.008333333333333333), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot \left(x \cdot x\right), x \cdot \left(x \cdot 0.008333333333333333\right), x\right)
\end{array}
Derivation:
Initial program: 51.8%
Taylor expanded in x around 0
  +-rgt-identity: N/A
  accelerator-lowering-fma.f64: N/A
  +-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  *-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  *-commutative: N/A
  *-lowering-*.f64: N/A
  +-commutative: N/A
  *-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: 89.6%
Simplified: 89.6%
  +-rgt-identity: N/A
  distribute-lft-in: N/A
  *-rgt-identity: N/A
  associate-*r*: N/A
  associate-*r*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: N/A
  *-lowering-*.f64: N/A
  associate-*r*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: 89.6%
Applied egg-rr: 89.6%
Taylor expanded in x around inf
  *-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  *-lowering-*.f64: N/A
  *-lowering-*.f64: 89.4%
Simplified: 89.4%
Alternative 8:
(FPCore (x) :precision binary64 (if (<= x 2.45) x (* 0.16666666666666666 (* x (* x x)))))
double code(double x) {
    double tmp;
    if (x <= 2.45) {
        tmp = x;
    } else {
        tmp = 0.16666666666666666 * (x * (x * x));
    }
    return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 2.45d0) then
        tmp = x
    else
        tmp = 0.16666666666666666d0 * (x * (x * x))
    end if
    code = tmp
end function
public static double code(double x) {
    double tmp;
    if (x <= 2.45) {
        tmp = x;
    } else {
        tmp = 0.16666666666666666 * (x * (x * x));
    }
    return tmp;
}
def code(x):
    tmp = 0
    if x <= 2.45:
        tmp = x
    else:
        tmp = 0.16666666666666666 * (x * (x * x))
    return tmp
function code(x)
    tmp = 0.0
    if (x <= 2.45)
        tmp = x;
    else
        tmp = Float64(0.16666666666666666 * Float64(x * Float64(x * x)));
    end
    return tmp
end
function tmp_2 = code(x)
    tmp = 0.0;
    if (x <= 2.45)
        tmp = x;
    else
        tmp = 0.16666666666666666 * (x * (x * x));
    end
    tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 2.45], x, N[(0.16666666666666666 * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.45:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;0.16666666666666666 \cdot \left(x \cdot \left(x \cdot x\right)\right)\\
\end{array}
\end{array}
Derivation:
if x < 2.4500000000000002
Initial program: 38.0%
Taylor expanded in x around 0
Simplified: 69.2%
if 2.4500000000000002 < x
Initial program: 100.0%
Taylor expanded in x around 0
  *-lowering-*.f64: N/A
  +-commutative: N/A
  *-commutative: N/A
  accelerator-lowering-fma.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: 61.8%
Simplified: 61.8%
Taylor expanded in x around inf
  *-lowering-*.f64: N/A
  cube-mult: N/A
  unpow2: N/A
  *-lowering-*.f64: N/A
  unpow2: N/A
  *-lowering-*.f64: 61.8%
Simplified: 61.8%
Alternative 9:
(FPCore (x) :precision binary64 (* x (fma x (* x 0.16666666666666666) 1.0)))
double code(double x) {
    return x * fma(x, (x * 0.16666666666666666), 1.0);
}
function code(x) return Float64(x * fma(x, Float64(x * 0.16666666666666666), 1.0)) end
code[x_] := N[(x * N[(x * N[(x * 0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \mathsf{fma}\left(x, x \cdot 0.16666666666666666, 1\right)
\end{array}
Derivation:
Initial program: 51.8%
  sinh-def: N/A
  sinh-lowering-sinh.f64: 100.0%
Applied egg-rr: 100.0%
Taylor expanded in x around 0
  *-lowering-*.f64: N/A
  +-commutative: N/A
  *-commutative: N/A
  unpow2: N/A
  associate-*l*: N/A
  accelerator-lowering-fma.f64: N/A
  *-lowering-*.f64: 84.0%
Simplified: 84.0%
Alternative 10:
(FPCore (x) :precision binary64 x)
double code(double x) {
    return x;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = x
end function
public static double code(double x) {
    return x;
}
def code(x): return x
function code(x) return x end
function tmp = code(x) tmp = x; end
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Derivation:
Initial program: 51.8%
Taylor expanded in x around 0
Simplified: 54.8%
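This final alternative keeps only the leading Taylor term: for small |x|,

\sinh x = x + O\left(x^{3}\right),

so returning x alone is nearly exact near zero but degrades as |x| grows, which matches its modest 54.8% overall accuracy.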
To reproduce this result, run herbie shell --seed 2024196 and enter:
(FPCore (x)
:name "Hyperbolic sine"
:precision binary64
(/ (- (exp x) (exp (- x))) 2.0))