
; Input program: hyperbolic sine by definition, sinh(x) = (e^x - e^-x) / 2.
; The subtraction cancels for small |x|, which is why Herbie samples it as inaccurate.
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
! Hyperbolic sine via the definition sinh(x) = (exp(x) - exp(-x)) / 2.
! NOTE(review): the subtraction cancels for small |x|, costing accuracy there.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - exp(-x)) / 2.0d0
end function
// Hyperbolic sine from its defining formula: (e^x - e^-x) / 2.
public static double code(double x) {
    final double grow = Math.exp(x);
    final double decay = Math.exp(-x);
    return (grow - decay) / 2.0;
}
def code(x):
    """Hyperbolic sine of x via its definition (e**x - e**-x) / 2."""
    grow = math.exp(x)
    decay = math.exp(-x)
    return (grow - decay) / 2.0
# Hyperbolic sine via the definition (e^x - e^-x) / 2, forced to Float64 at each step.
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
% Hyperbolic sine via the definition (e^x - e^-x) / 2.
function tmp = code(x) tmp = (exp(x) - exp(-x)) / 2.0; end
(* Hyperbolic sine via (Exp[x] - Exp[-x])/2, each step rounded to $MachinePrecision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; First alternative (unchanged input): sinh(x) = (e^x - e^-x) / 2.
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
! Hyperbolic sine via the definition sinh(x) = (exp(x) - exp(-x)) / 2.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - exp(-x)) / 2.0d0
end function
// Hyperbolic sine via the definition (Math.exp(x) - Math.exp(-x)) / 2.
public static double code(double x) {
return (Math.exp(x) - Math.exp(-x)) / 2.0;
}
def code(x):
    """Hyperbolic sine computed directly from exponentials: (e**x - e**-x) / 2."""
    half_diff = (math.exp(x) - math.exp(-x)) / 2.0
    return half_diff
# Hyperbolic sine via the definition (e^x - e^-x) / 2, forced to Float64 at each step.
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
% Hyperbolic sine via the definition (e^x - e^-x) / 2.
function tmp = code(x) tmp = (exp(x) - exp(-x)) / 2.0; end
(* Hyperbolic sine via (Exp[x] - Exp[-x])/2, rounded to $MachinePrecision at each step. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
; Herbie alternative: log1p(expm1(x)).
; NOTE(review): algebraically log(1 + (e^x - 1)) == x — the identity, not sinh(x).
; The 100% score below reflects Herbie's sampled-error metric; verify intent before use.
(FPCore (x) :precision binary64 (log1p (expm1 x)))
/* Herbie alternative: log1p(expm1(x)).
   NOTE(review): algebraically this is just x (log(1 + (e^x - 1)) == x),
   not sinh(x) — confirm intent before relying on it. */
double code(double x) {
return log1p(expm1(x));
}
// Herbie alternative: log1p(expm1(x)).
// NOTE(review): algebraically this simplifies to x, not sinh(x) — confirm intent.
public static double code(double x) {
return Math.log1p(Math.expm1(x));
}
def code(x):
    # Herbie alternative: log1p(expm1(x)); algebraically the identity on x.
    shifted = math.expm1(x)
    return math.log1p(shifted)
# Herbie alternative: log1p(expm1(x)); algebraically this simplifies to x.
function code(x) return log1p(expm1(x)) end
(* Herbie alternative: Log[1 + (Exp[x] - 1)], which simplifies to x algebraically. *)
code[x_] := N[Log[1 + N[(Exp[x] - 1), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\mathsf{expm1}\left(x\right)\right)
\end{array}
Initial program 55.2%
Taylor expanded in x around 0 50.7%
associate-/l* 50.3%
associate-/r/ 50.4%
metadata-eval 50.4%
*-un-lft-identity 50.4%
log1p-expm1-u 100.0%
Applied egg-rr 100.0%
Final simplification 100.0%
; Herbie alternative: cubic Taylor polynomial of sinh about 0, x + x^3/6,
; written here as (x^3/3 + 2x) / 2.
(FPCore (x) :precision binary64 (/ (+ (* 0.3333333333333333 (pow x 3.0)) (* x 2.0)) 2.0))
double code(double x) {
return ((0.3333333333333333 * pow(x, 3.0)) + (x * 2.0)) / 2.0;
}
! Cubic Taylor approximation of sinh about 0: (x**3/3 + 2x) / 2 == x + x**3/6.
real(8) function code(x)
real(8), intent (in) :: x
code = ((0.3333333333333333d0 * (x ** 3.0d0)) + (x * 2.0d0)) / 2.0d0
end function
// Cubic Taylor approximation of sinh about 0: (x^3/3 + 2x) / 2 == x + x^3/6.
public static double code(double x) {
return ((0.3333333333333333 * Math.pow(x, 3.0)) + (x * 2.0)) / 2.0;
}
def code(x):
    """Cubic Taylor approximation of sinh(x) about 0: (x**3/3 + 2x) / 2."""
    cubic_term = 0.3333333333333333 * math.pow(x, 3.0)
    linear_term = x * 2.0
    return (cubic_term + linear_term) / 2.0
# Cubic Taylor approximation of sinh about 0: (x^3/3 + 2x) / 2, in Float64.
function code(x) return Float64(Float64(Float64(0.3333333333333333 * (x ^ 3.0)) + Float64(x * 2.0)) / 2.0) end
% Cubic Taylor approximation of sinh about 0: (x^3/3 + 2x) / 2.
function tmp = code(x) tmp = ((0.3333333333333333 * (x ^ 3.0)) + (x * 2.0)) / 2.0; end
(* Cubic Taylor approximation of sinh about 0: (x^3/3 + 2x)/2, rounded per step. *)
code[x_] := N[(N[(N[(0.3333333333333333 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision] + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.3333333333333333 \cdot {x}^{3} + x \cdot 2}{2}
\end{array}
Initial program 55.2%
Taylor expanded in x around 0 80.7%
Final simplification 80.7%
; Herbie alternative with a branch at x = 2.5: x below, x^3/6 above.
; NOTE(review): the x <= 2.5 branch also covers all negative x, where plain x is a
; poor sinh approximation — likely an artifact of the sampled domain; verify before use.
(FPCore (x) :precision binary64 (if (<= x 2.5) x (/ (* 0.3333333333333333 (pow x 3.0)) 2.0)))
double code(double x) {
double tmp;
if (x <= 2.5) {
tmp = x;
} else {
tmp = (0.3333333333333333 * pow(x, 3.0)) / 2.0;
}
return tmp;
}
! Piecewise sinh approximation chosen by Herbie:
!   x                     for x <= 2.5
!   (x**3 / 3) / 2        otherwise (i.e. x**3 / 6)
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= 2.5d0) then
tmp = x
else
tmp = (0.3333333333333333d0 * (x ** 3.0d0)) / 2.0d0
end if
code = tmp
end function
// Piecewise sinh approximation: x when x <= 2.5, otherwise x^3/6.
public static double code(double x) {
    return (x <= 2.5) ? x : (0.3333333333333333 * Math.pow(x, 3.0)) / 2.0;
}
def code(x):
    """Piecewise sinh approximation: x for x <= 2.5, else x**3 / 6.

    The original single-line rendering collapsed the if/else into invalid
    Python syntax; this is the same logic restored as valid code.
    """
    tmp = 0
    if x <= 2.5:
        tmp = x
    else:
        tmp = (0.3333333333333333 * math.pow(x, 3.0)) / 2.0
    return tmp
# Piecewise sinh approximation: x when x <= 2.5, otherwise x^3/6 (in Float64).
# The original one-line rendering (`tmp = 0.0 if (x <= 2.5) ...`) does not parse
# as Julia; this restores the same logic as valid code.
function code(x)
    tmp = 0.0
    if x <= 2.5
        tmp = x
    else
        tmp = Float64(Float64(0.3333333333333333 * (x ^ 3.0)) / 2.0)
    end
    return tmp
end
% Piecewise sinh approximation: x when x <= 2.5, otherwise x^3/6.
% NOTE(review): collapsed onto one line; MATLAB needs commas/newlines after the
% if/else conditions for this to parse — likely newline loss in this report.
function tmp_2 = code(x) tmp = 0.0; if (x <= 2.5) tmp = x; else tmp = (0.3333333333333333 * (x ^ 3.0)) / 2.0; end tmp_2 = tmp; end
(* Piecewise sinh approximation: x when x <= 2.5, otherwise x^3/6. *)
code[x_] := If[LessEqual[x, 2.5], x, N[(N[(0.3333333333333333 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.5:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;\frac{0.3333333333333333 \cdot {x}^{3}}{2}\\
\end{array}
\end{array}
if x < 2.5: Initial program 39.1%
Taylor expanded in x around 0 67.2%
Taylor expanded in x around 0 66.8%
if 2.5 < x: Initial program 100.0%
Taylor expanded in x around 0 70.6%
Taylor expanded in x around inf 70.6%
Final simplification 67.8%
; Herbie alternative: (x * 2) / 2 — numerically equal to x (scaling by a power of two
; is exact) except where x * 2 overflows.
(FPCore (x) :precision binary64 (/ (* x 2.0) 2.0))
/* (x * 2) / 2: scale up then back down by an exact power of two (yields x
   except where x * 2.0 overflows). */
double code(double x) {
    const double doubled = x * 2.0;
    return doubled / 2.0;
}
! (x * 2) / 2: both scalings by 2 are exact in binary64, so this returns x
! except where x * 2 overflows.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * 2.0d0) / 2.0d0
end function
// (x * 2) / 2: exact power-of-two scalings, returns x except where x * 2 overflows.
public static double code(double x) {
return (x * 2.0) / 2.0;
}
def code(x):
    # (x * 2) / 2 — scale up then back down by an exact power of two.
    doubled = x * 2.0
    return doubled / 2.0
# (x * 2) / 2 in Float64 — exact power-of-two scalings, effectively x.
function code(x) return Float64(Float64(x * 2.0) / 2.0) end
% (x * 2) / 2 — exact power-of-two scalings, effectively x.
function tmp = code(x) tmp = (x * 2.0) / 2.0; end
(* (x * 2) / 2 at machine precision — effectively x. *)
code[x_] := N[(N[(x * 2.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot 2}{2}
\end{array}
Initial program 55.2%
Taylor expanded in x around 0 50.7%
Final simplification 50.7%
; Herbie alternative: the identity function (sinh(x) ~ x near 0).
(FPCore (x) :precision binary64 x)
/* Identity alternative: returns x unchanged (sinh(x) ~ x near 0). */
double code(double x) {
return x;
}
! Identity alternative: returns x unchanged (sinh(x) ~ x near 0).
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
// Identity alternative: returns x unchanged (sinh(x) ~ x near 0).
public static double code(double x) {
return x;
}
def code(x):
    """Identity alternative: returns x unchanged (sinh(x) ~ x near 0)."""
    return x
# Identity alternative: returns x unchanged (sinh(x) ~ x near 0).
function code(x) return x end
% Identity alternative: returns x unchanged (sinh(x) ~ x near 0).
function tmp = code(x) tmp = x; end
(* Identity alternative: returns x unchanged (sinh(x) ~ x near 0). *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 55.2%
Taylor expanded in x around 0 50.7%
Taylor expanded in x around 0 50.4%
Final simplification 50.4%
herbie shell --seed 2023334
; Original input program as fed to the Herbie shell above:
; hyperbolic sine by definition, (e^x - e^-x) / 2 in binary64.
(FPCore (x)
:name "Hyperbolic sine"
:precision binary64
(/ (- (exp x) (exp (- x))) 2.0))