
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
! Hyperbolic sine via its definition: (e^x - e^-x) / 2.
! The subtraction cancels when |x| is small.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
real(8) :: e_pos, e_neg
e_pos = exp(x)
e_neg = exp(-x)
code = (e_pos - e_neg) / 2.0d0
end function
// Hyperbolic sine via its definition: (e^x - e^-x) / 2.
public static double code(double x) {
    double ePos = Math.exp(x);
    double eNeg = Math.exp(-x);
    return (ePos - eNeg) / 2.0;
}
def code(x):
    """Hyperbolic sine via its definition: (e^x - e^-x) / 2."""
    e_pos = math.exp(x)
    e_neg = math.exp(-x)
    return (e_pos - e_neg) / 2.0
# Hyperbolic sine via (e^x - e^-x)/2; Float64(...) pins binary64 rounding at each step.
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
% Hyperbolic sine via its definition: (e^x - e^-x) / 2.
function tmp = code(x) tmp = (exp(x) - exp(-x)) / 2.0; end
(* Hyperbolic sine via (e^x - e^-x)/2; every step rounded to $MachinePrecision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
/* Alternative: identical to the initial (e^x - e^-x)/2 program. */
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
! Alternative: identical to the initial (e^x - e^-x)/2 program.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - exp(-x)) / 2.0d0
end function
// Alternative: identical to the initial (e^x - e^-x)/2 program.
public static double code(double x) {
return (Math.exp(x) - Math.exp(-x)) / 2.0;
}
# Alternative: identical to the initial (e^x - e^-x)/2 program.
def code(x): return (math.exp(x) - math.exp(-x)) / 2.0
# Alternative: identical to the initial (e^x - e^-x)/2 program.
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
% Alternative: identical to the initial (e^x - e^-x)/2 program.
function tmp = code(x) tmp = (exp(x) - exp(-x)) / 2.0; end
(* Alternative: identical to the initial (e^x - e^-x)/2 program. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
(FPCore (x) :precision binary64 (log1p (expm1 x)))
double code(double x) {
return log1p(expm1(x));
}
// log1p(expm1(x)) = log(1 + (e^x - 1)) = x algebraically;
// the report lists this as its 98.9%-accurate sinh substitute.
public static double code(double x) {
    double em = Math.expm1(x);
    return Math.log1p(em);
}
def code(x):
    """log1p(expm1(x)) = log(1 + (e^x - 1)) = x algebraically."""
    em = math.expm1(x)
    return math.log1p(em)
# log1p(expm1(x)) = log(1 + (e^x - 1)) = x algebraically; the 98.9%-accurate alternative.
function code(x) return log1p(expm1(x)) end
(* log(1 + (e^x - 1)) = x algebraically; the 98.9%-accurate alternative. *)
code[x_] := N[Log[1 + N[(Exp[x] - 1), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\mathsf{expm1}\left(x\right)\right)
\end{array}
Initial program 56.2%
Taylor expanded in x around 0 50.6%
*-commutative 50.6%
associate-/l* 50.3%
metadata-eval 50.3%
*-commutative 50.3%
log1p-expm1-u 98.9%
*-un-lft-identity 98.9%
Applied egg-rr 98.9%
Final simplification 98.9%
(FPCore (x) :precision binary64 (/ (* x (+ 2.0 (* 0.0003968253968253968 (pow x 6.0)))) 2.0))
double code(double x) {
return (x * (2.0 + (0.0003968253968253968 * pow(x, 6.0)))) / 2.0;
}
! Taylor-derived alternative: x*(2 + x^6/2520)/2 = x + x^7/5040 (0.0003968253968253968 ~ 1/2520).
real(8) function code(x)
real(8), intent (in) :: x
code = (x * (2.0d0 + (0.0003968253968253968d0 * (x ** 6.0d0)))) / 2.0d0
end function
// Taylor-derived alternative: x*(2 + x^6/2520)/2 = x + x^7/5040 (0.0003968253968253968 ~ 1/2520).
public static double code(double x) {
return (x * (2.0 + (0.0003968253968253968 * Math.pow(x, 6.0)))) / 2.0;
}
# Taylor-derived alternative: x*(2 + x^6/2520)/2 = x + x^7/5040 (0.0003968253968253968 ~ 1/2520).
def code(x): return (x * (2.0 + (0.0003968253968253968 * math.pow(x, 6.0)))) / 2.0
# Taylor-derived alternative: x*(2 + x^6/2520)/2 = x + x^7/5040; Float64(...) pins binary64 rounding.
function code(x) return Float64(Float64(x * Float64(2.0 + Float64(0.0003968253968253968 * (x ^ 6.0)))) / 2.0) end
% Taylor-derived alternative: x*(2 + x^6/2520)/2 = x + x^7/5040 (0.0003968253968253968 ~ 1/2520).
function tmp = code(x) tmp = (x * (2.0 + (0.0003968253968253968 * (x ^ 6.0)))) / 2.0; end
(* Taylor-derived alternative: x*(2 + x^6/2520)/2 = x + x^7/5040 (0.0003968253968253968 ~ 1/2520). *)
code[x_] := N[(N[(x * N[(2.0 + N[(0.0003968253968253968 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot \left(2 + 0.0003968253968253968 \cdot {x}^{6}\right)}{2}
\end{array}
Initial program 56.2%
Taylor expanded in x around 0 93.6%
*-commutative 93.6%
Simplified 93.6%
Taylor expanded in x around inf 93.0%
Final simplification 93.0%
(FPCore (x) :precision binary64 (if (<= x 4.2) x (* (pow x 7.0) 0.0001984126984126984)))
double code(double x) {
double tmp;
if (x <= 4.2) {
tmp = x;
} else {
tmp = pow(x, 7.0) * 0.0001984126984126984;
}
return tmp;
}
! Piecewise alternative: x when x <= 4.2, else x^7/5040 (0.0001984126984126984 ~ 1/5040).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= 4.2d0) then
tmp = x
else
tmp = (x ** 7.0d0) * 0.0001984126984126984d0
end if
code = tmp
end function
// Piecewise alternative: identity up to the 4.2 split point,
// otherwise x^7/5040 (0.0001984126984126984 ~ 1/5040).
public static double code(double x) {
    return (x <= 4.2) ? x : Math.pow(x, 7.0) * 0.0001984126984126984;
}
def code(x):
    """Piecewise sinh approximation: x when x <= 4.2, else x**7/5040.

    0.0001984126984126984 ~ 1/5040, the x^7 Taylor coefficient of sinh.
    Reconstructed from the report's collapsed one-line rendering, which
    is not valid Python syntax as printed.
    """
    tmp = 0
    if x <= 4.2:
        tmp = x
    else:
        tmp = math.pow(x, 7.0) * 0.0001984126984126984
    return tmp
# Piecewise sinh approximation: x when x <= 4.2, else x^7/5040
# (0.0001984126984126984 ~ 1/5040). Reconstructed with statement
# separators restored: the report's one-line rendering does not parse.
function code(x)
    tmp = 0.0
    if (x <= 4.2)
        tmp = x
    else
        tmp = Float64((x ^ 7.0) * 0.0001984126984126984)
    end
    return tmp
end
% Piecewise: x when x <= 4.2, else x^7/5040. NOTE(review): statements appear collapsed onto one line by the report rendering — confirm before reuse.
function tmp_2 = code(x) tmp = 0.0; if (x <= 4.2) tmp = x; else tmp = (x ^ 7.0) * 0.0001984126984126984; end tmp_2 = tmp; end
(* Piecewise: x when x <= 4.2, else x^7 * 0.0001984126984126984 (~1/5040). *)
code[x_] := If[LessEqual[x, 4.2], x, N[(N[Power[x, 7.0], $MachinePrecision] * 0.0001984126984126984), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 4.2:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;{x}^{7} \cdot 0.0001984126984126984\\
\end{array}
\end{array}
if x < 4.20000000000000018 Initial program 40.7%
Taylor expanded in x around 0 66.1%
Taylor expanded in x around 0 66.1%
if 4.20000000000000018 < x Initial program 100.0%
Taylor expanded in x around 0 88.9%
*-commutative 88.9%
Simplified 88.9%
Taylor expanded in x around inf 88.9%
*-commutative 88.9%
associate-/l* 88.9%
metadata-eval 88.9%
Applied egg-rr 88.9%
Final simplification 72.1%
(FPCore (x) :precision binary64 (/ (* x 2.0) 2.0))
/* Alternative: (x * 2) / 2 — algebraically x, kept in the report's doubled form. */
double code(double x) {
    const double doubled = x * 2.0;
    return doubled / 2.0;
}
! Alternative: (x * 2) / 2 — algebraically x.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * 2.0d0) / 2.0d0
end function
// Alternative: (x * 2) / 2 — algebraically x.
public static double code(double x) {
return (x * 2.0) / 2.0;
}
def code(x):
    """(x * 2) / 2 — algebraically x, kept in the report's doubled form."""
    doubled = x * 2.0
    return doubled / 2.0
# Alternative: (x * 2) / 2 — algebraically x.
function code(x) return Float64(Float64(x * 2.0) / 2.0) end
% Alternative: (x * 2) / 2 — algebraically x.
function tmp = code(x) tmp = (x * 2.0) / 2.0; end
(* Alternative: (x * 2) / 2 — algebraically x. *)
code[x_] := N[(N[(x * 2.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot 2}{2}
\end{array}
Initial program 56.2%
Taylor expanded in x around 0 50.6%
Final simplification 50.6%
(FPCore (x) :precision binary64 x)
/* Alternative: the identity x (the report's final Taylor simplification). */
double code(double x) {
return x;
}
! Alternative: the identity x (the report's final Taylor simplification).
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
// Alternative: the identity x (the report's final Taylor simplification).
public static double code(double x) {
return x;
}
# Alternative: the identity x (the report's final Taylor simplification).
def code(x): return x
# Alternative: the identity x (the report's final Taylor simplification).
function code(x) return x end
% Alternative: the identity x (the report's final Taylor simplification).
function tmp = code(x) tmp = x; end
(* Alternative: the identity x (the report's final Taylor simplification). *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 56.2%
Taylor expanded in x around 0 50.6%
Taylor expanded in x around 0 50.3%
Final simplification 50.3%
herbie shell --seed 2024079
(FPCore (x)
:name "Hyperbolic sine"
:precision binary64
(/ (- (exp x) (exp (- x))) 2.0))