
;; Initial program: sinh(x) = (e^x - e^(-x)) / 2 in binary64.
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
// sinh(x) computed directly as (e^x - e^(-x)) / 2.
// NOTE(review): the subtraction cancels for |x| near 0 — see the Taylor alternatives in this report.
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
! sinh(x) computed directly as (exp(x) - exp(-x)) / 2.
! NOTE(review): generated snippet; adding `implicit none` would be idiomatic — confirm the tool template allows it.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - exp(-x)) / 2.0d0
end function
// sinh(x) computed directly as (e^x - e^(-x)) / 2; loses accuracy near x = 0 (cancellation).
public static double code(double x) {
return (Math.exp(x) - Math.exp(-x)) / 2.0;
}
# sinh(x) computed directly as (e**x - e**(-x)) / 2; loses accuracy near x = 0 (cancellation).
def code(x): return (math.exp(x) - math.exp(-x)) / 2.0
# sinh(x) computed directly as (e^x - e^(-x)) / 2; loses accuracy near x = 0 (cancellation).
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
% sinh(x) computed directly as (e^x - e^(-x)) / 2.
% Reformatted: MATLAB does not allow statements on the function-declaration line,
% so the collapsed one-line form was a syntax error.
function tmp = code(x)
    tmp = (exp(x) - exp(-x)) / 2.0;
end
(* sinh(x) computed directly as (e^x - e^(-x)) / 2, rounding each step to $MachinePrecision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Initial program (repeated for the results table): sinh(x) = (e^x - e^(-x)) / 2.
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
// sinh(x) computed directly as (e^x - e^(-x)) / 2 (initial program, repeated).
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
! sinh(x) computed directly as (exp(x) - exp(-x)) / 2 (initial program, repeated).
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - exp(-x)) / 2.0d0
end function
// sinh(x) computed directly as (e^x - e^(-x)) / 2 (initial program, repeated).
public static double code(double x) {
return (Math.exp(x) - Math.exp(-x)) / 2.0;
}
# sinh(x) computed directly as (e**x - e**(-x)) / 2 (initial program, repeated).
def code(x): return (math.exp(x) - math.exp(-x)) / 2.0
# sinh(x) computed directly as (e^x - e^(-x)) / 2 (initial program, repeated).
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
% sinh(x) computed directly as (e^x - e^(-x)) / 2 (initial program, repeated).
% Reformatted: statements on the function-declaration line are invalid MATLAB syntax.
function tmp = code(x)
    tmp = (exp(x) - exp(-x)) / 2.0;
end
(* sinh(x) computed directly as (e^x - e^(-x)) / 2 (initial program, repeated). *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
;; Alternative 1: when t_0 = e^x - e^(-x) is large in magnitude, divide directly;
;; otherwise use the Taylor expansion 2x + x^3/3 of e^x - e^(-x) about 0,
;; which avoids the cancellation in the direct subtraction.
(FPCore (x)
:precision binary64
(let* ((t_0 (- (exp x) (exp (- x)))))
(if (or (<= t_0 -4e+170) (not (<= t_0 2e-9)))
(/ t_0 2.0)
(/ (+ (* x 2.0) (* 0.3333333333333333 (pow x 3.0))) 2.0))))
// sinh(x), branching between the direct formula and a Taylor expansion.
double code(double x) {
double t_0 = exp(x) - exp(-x);
double tmp;
// Large-magnitude t_0: the direct quotient is accurate.
if ((t_0 <= -4e+170) || !(t_0 <= 2e-9)) {
tmp = t_0 / 2.0;
} else {
// Small/negative t_0: use 2x + x^3/3 (Taylor series of e^x - e^(-x) about 0) to avoid cancellation.
tmp = ((x * 2.0) + (0.3333333333333333 * pow(x, 3.0))) / 2.0;
}
return tmp;
}
! sinh(x), branching between the direct formula and a Taylor expansion.
! NOTE(review): generated snippet; `implicit none` would be idiomatic — confirm the tool template allows it.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = exp(x) - exp(-x)
! Large-magnitude t_0: direct quotient; otherwise Taylor series 2x + x**3/3 to avoid cancellation.
if ((t_0 <= (-4d+170)) .or. (.not. (t_0 <= 2d-9))) then
tmp = t_0 / 2.0d0
else
tmp = ((x * 2.0d0) + (0.3333333333333333d0 * (x ** 3.0d0))) / 2.0d0
end if
code = tmp
end function
// sinh(x), branching between the direct formula and a Taylor expansion.
public static double code(double x) {
double t_0 = Math.exp(x) - Math.exp(-x);
double tmp;
// Large-magnitude t_0: the direct quotient is accurate.
if ((t_0 <= -4e+170) || !(t_0 <= 2e-9)) {
tmp = t_0 / 2.0;
} else {
// Small/negative t_0: 2x + x^3/3 (Taylor series of e^x - e^(-x) about 0) avoids cancellation.
tmp = ((x * 2.0) + (0.3333333333333333 * Math.pow(x, 3.0))) / 2.0;
}
return tmp;
}
def code(x):
    """sinh(x): divide t_0 = e**x - e**(-x) directly when it is large in
    magnitude; otherwise use the Taylor expansion 2x + x**3/3 of
    e**x - e**(-x) about 0, which avoids the cancellation in the subtraction.

    Rewritten onto separate lines: the collapsed one-line form with embedded
    if/else statements was not valid Python syntax. Behavior is unchanged.
    """
    t_0 = math.exp(x) - math.exp(-x)
    if (t_0 <= -4e+170) or not (t_0 <= 2e-9):
        tmp = t_0 / 2.0
    else:
        tmp = ((x * 2.0) + (0.3333333333333333 * math.pow(x, 3.0))) / 2.0
    return tmp
# sinh(x): direct quotient for large-magnitude t_0, Taylor expansion 2x + x^3/3 otherwise.
function code(x) t_0 = Float64(exp(x) - exp(Float64(-x))) tmp = 0.0 if ((t_0 <= -4e+170) || !(t_0 <= 2e-9)) tmp = Float64(t_0 / 2.0); else tmp = Float64(Float64(Float64(x * 2.0) + Float64(0.3333333333333333 * (x ^ 3.0))) / 2.0); end return tmp end
% sinh(x): divide t_0 = e^x - e^(-x) directly when it is large in magnitude;
% otherwise use the Taylor expansion 2x + x^3/3 of e^x - e^(-x) about 0.
% Reformatted: MATLAB does not allow statements on the function-declaration
% line, so the collapsed one-line form was a syntax error.
function tmp_2 = code(x)
    t_0 = exp(x) - exp(-x);
    tmp = 0.0;
    if ((t_0 <= -4e+170) || ~((t_0 <= 2e-9)))
        tmp = t_0 / 2.0;
    else
        tmp = ((x * 2.0) + (0.3333333333333333 * (x ^ 3.0))) / 2.0;
    end
    tmp_2 = tmp;
end
(* sinh(x): direct quotient for large-magnitude t_0, Taylor expansion 2x + x^3/3 otherwise.
   NOTE(review): literals like -4e+170 and 2e-9 are not Wolfram scientific notation
   (that would be -4*^170, 2*^-9) — verify this generated snippet parses as intended.
   NOTE(review): N[Not[...]] wraps a Boolean in N, which looks like a generator artifact. *)
code[x_] := Block[{t$95$0 = N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$0, -4e+170], N[Not[LessEqual[t$95$0, 2e-9]], $MachinePrecision]], N[(t$95$0 / 2.0), $MachinePrecision], N[(N[(N[(x * 2.0), $MachinePrecision] + N[(0.3333333333333333 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{x} - e^{-x}\\
\mathbf{if}\;t_0 \leq -4 \cdot 10^{+170} \lor \neg \left(t_0 \leq 2 \cdot 10^{-9}\right):\\
\;\;\;\;\frac{t_0}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{x \cdot 2 + 0.3333333333333333 \cdot {x}^{3}}{2}\\
\end{array}
\end{array}
if (-.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < -4.00000000000000014e170 or 2.00000000000000012e-9 < (-.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
if -4.00000000000000014e170 < (-.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 2.00000000000000012e-9 Initial program 6.7%
Taylor expanded in x around 0 100.0%
Final simplification 100.0%
;; Alternative 2: (2x + x^3/3)/2 = x + x^3/6, the degree-3 Taylor polynomial of sinh about 0.
(FPCore (x) :precision binary64 (/ (+ (* x 2.0) (* 0.3333333333333333 (pow x 3.0))) 2.0))
// (2x + x^3/3)/2 = x + x^3/6: degree-3 Taylor polynomial of sinh(x) about 0.
double code(double x) {
return ((x * 2.0) + (0.3333333333333333 * pow(x, 3.0))) / 2.0;
}
! (2x + x**3/3)/2 = x + x**3/6: degree-3 Taylor polynomial of sinh(x) about 0.
real(8) function code(x)
real(8), intent (in) :: x
code = ((x * 2.0d0) + (0.3333333333333333d0 * (x ** 3.0d0))) / 2.0d0
end function
// (2x + x^3/3)/2 = x + x^3/6: degree-3 Taylor polynomial of sinh(x) about 0.
public static double code(double x) {
return ((x * 2.0) + (0.3333333333333333 * Math.pow(x, 3.0))) / 2.0;
}
# (2x + x**3/3)/2 = x + x**3/6: degree-3 Taylor polynomial of sinh(x) about 0.
def code(x): return ((x * 2.0) + (0.3333333333333333 * math.pow(x, 3.0))) / 2.0
# (2x + x^3/3)/2 = x + x^3/6: degree-3 Taylor polynomial of sinh(x) about 0.
function code(x) return Float64(Float64(Float64(x * 2.0) + Float64(0.3333333333333333 * (x ^ 3.0))) / 2.0) end
% (2x + x^3/3)/2 = x + x^3/6: degree-3 Taylor polynomial of sinh(x) about 0.
% Reformatted: statements on the function-declaration line are invalid MATLAB syntax.
function tmp = code(x)
    tmp = ((x * 2.0) + (0.3333333333333333 * (x ^ 3.0))) / 2.0;
end
(* (2x + x^3/3)/2 = x + x^3/6: degree-3 Taylor polynomial of sinh(x) about 0. *)
code[x_] := N[(N[(N[(x * 2.0), $MachinePrecision] + N[(0.3333333333333333 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot 2 + 0.3333333333333333 \cdot {x}^{3}}{2}
\end{array}
Initial program 54.4%
Taylor expanded in x around 0 83.8%
Final simplification 83.8%
;; Alternative 3: the same degree-3 Taylor polynomial, factored as x*(2 + x^2/3)/2.
(FPCore (x) :precision binary64 (/ (* x (+ 2.0 (* 0.3333333333333333 (* x x)))) 2.0))
// x*(2 + x^2/3)/2: factored degree-3 Taylor polynomial of sinh(x) about 0.
double code(double x) {
return (x * (2.0 + (0.3333333333333333 * (x * x)))) / 2.0;
}
! x*(2 + x**2/3)/2: factored degree-3 Taylor polynomial of sinh(x) about 0.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * (2.0d0 + (0.3333333333333333d0 * (x * x)))) / 2.0d0
end function
// x*(2 + x^2/3)/2: factored degree-3 Taylor polynomial of sinh(x) about 0.
public static double code(double x) {
return (x * (2.0 + (0.3333333333333333 * (x * x)))) / 2.0;
}
# x*(2 + x**2/3)/2: factored degree-3 Taylor polynomial of sinh(x) about 0.
def code(x): return (x * (2.0 + (0.3333333333333333 * (x * x)))) / 2.0
# x*(2 + x^2/3)/2: factored degree-3 Taylor polynomial of sinh(x) about 0.
function code(x) return Float64(Float64(x * Float64(2.0 + Float64(0.3333333333333333 * Float64(x * x)))) / 2.0) end
% x*(2 + x^2/3)/2: factored degree-3 Taylor polynomial of sinh(x) about 0.
% Reformatted: statements on the function-declaration line are invalid MATLAB syntax.
function tmp = code(x)
    tmp = (x * (2.0 + (0.3333333333333333 * (x * x)))) / 2.0;
end
(* x*(2 + x^2/3)/2: factored degree-3 Taylor polynomial of sinh(x) about 0. *)
code[x_] := N[(N[(x * N[(2.0 + N[(0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot \left(2 + 0.3333333333333333 \cdot \left(x \cdot x\right)\right)}{2}
\end{array}
Initial program 54.4%
Taylor expanded in x around 0 83.8%
unpow3 83.8%
associate-*r* 83.8%
distribute-rgt-out 83.8%
*-commutative 83.8%
+-commutative 83.8%
associate-*l* 83.8%
fma-def 83.8%
Simplified 83.8%
fma-udef 83.8%
associate-*r* 83.8%
*-commutative 83.8%
Applied egg-rr 83.8%
Final simplification 83.8%
;; Alternative 4: (2x)/2 = x, the degree-1 Taylor approximation of sinh about 0.
(FPCore (x) :precision binary64 (/ (* x 2.0) 2.0))
// (2x)/2 = x: degree-1 Taylor approximation of sinh(x) about 0.
double code(double x) {
return (x * 2.0) / 2.0;
}
! (2x)/2 = x: degree-1 Taylor approximation of sinh(x) about 0.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * 2.0d0) / 2.0d0
end function
// (2x)/2 = x: degree-1 Taylor approximation of sinh(x) about 0.
public static double code(double x) {
return (x * 2.0) / 2.0;
}
# (2x)/2 = x: degree-1 Taylor approximation of sinh(x) about 0.
def code(x): return (x * 2.0) / 2.0
# (2x)/2 = x: degree-1 Taylor approximation of sinh(x) about 0.
function code(x) return Float64(Float64(x * 2.0) / 2.0) end
% (2x)/2 = x: degree-1 Taylor approximation of sinh(x) about 0.
% Reformatted: statements on the function-declaration line are invalid MATLAB syntax.
function tmp = code(x)
    tmp = (x * 2.0) / 2.0;
end
(* (2x)/2 = x: degree-1 Taylor approximation of sinh(x) about 0. *)
code[x_] := N[(N[(x * 2.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot 2}{2}
\end{array}
Initial program 54.4%
Taylor expanded in x around 0 51.4%
Final simplification 51.4%
;; Alternative 5: the constant -1.0 (low-accuracy constant alternative produced by the search).
(FPCore (x) :precision binary64 -1.0)
// Constant alternative: always returns -1.0 (reported accuracy 2.8%).
double code(double x) {
return -1.0;
}
! Constant alternative: always returns -1.0 (reported accuracy 2.8%).
real(8) function code(x)
real(8), intent (in) :: x
code = -1.0d0
end function
// Constant alternative: always returns -1.0 (reported accuracy 2.8%).
public static double code(double x) {
return -1.0;
}
# Constant alternative: always returns -1.0 (reported accuracy 2.8%).
def code(x): return -1.0
# Constant alternative: always returns -1.0 (reported accuracy 2.8%).
function code(x) return -1.0 end
% Constant alternative: always returns -1.0 (reported accuracy 2.8%).
% Reformatted: statements on the function-declaration line are invalid MATLAB syntax.
function tmp = code(x)
    tmp = -1.0;
end
(* Constant alternative: always returns -1.0 (reported accuracy 2.8%). *)
code[x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 54.4%
Applied egg-rr 2.8%
Final simplification 2.8%
;; Alternative 6: the constant 0.0 (low-accuracy constant alternative produced by the search).
(FPCore (x) :precision binary64 0.0)
// Constant alternative: always returns 0.0 (reported accuracy 3.4%).
double code(double x) {
return 0.0;
}
! Constant alternative: always returns 0.0 (reported accuracy 3.4%).
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
// Constant alternative: always returns 0.0 (reported accuracy 3.4%).
public static double code(double x) {
return 0.0;
}
# Constant alternative: always returns 0.0 (reported accuracy 3.4%).
def code(x): return 0.0
# Constant alternative: always returns 0.0 (reported accuracy 3.4%).
function code(x) return 0.0 end
% Constant alternative: always returns 0.0 (reported accuracy 3.4%).
% Reformatted: statements on the function-declaration line are invalid MATLAB syntax.
function tmp = code(x)
    tmp = 0.0;
end
(* Constant alternative: always returns 0.0 (reported accuracy 3.4%). *)
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 54.4%
Applied egg-rr 3.4%
Final simplification 3.4%
herbie shell --seed 2023178
;; Named copy of the initial program: "Hyperbolic sine", sinh(x) = (e^x - e^(-x)) / 2.
(FPCore (x)
:name "Hyperbolic sine"
:precision binary64
(/ (- (exp x) (exp (- x))) 2.0))