
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
real(8) function code(x)
! Hyperbolic sine from its definition: sinh(x) = (exp(x) - exp(-x)) / 2.
real(8), intent (in) :: x
real(8) :: e_pos, e_neg
e_pos = exp(x)
e_neg = exp(-x)
code = (e_pos - e_neg) / 2.0d0
end function
public static double code(double x) {
    // sinh(x) evaluated directly as (e^x - e^{-x}) / 2.
    final double ePos = Math.exp(x);
    final double eNeg = Math.exp(-x);
    return (ePos - eNeg) / 2.0;
}
def code(x):
    """Hyperbolic sine computed from its definition: (e**x - e**-x) / 2."""
    e_pos = math.exp(x)
    e_neg = math.exp(-x)
    return (e_pos - e_neg) / 2.0
function code(x)
    # sinh via its definition; Float64 coercions mirror the generated original.
    diff = Float64(exp(x) - exp(Float64(-x)))
    return Float64(diff / 2.0)
end
function tmp = code(x)
% Hyperbolic sine from its definition: (exp(x) - exp(-x)) / 2.
% NOTE(review): reformatted from a single collapsed line; MATLAB does not
% accept statements on the same line as the function declaration.
tmp = (exp(x) - exp(-x)) / 2.0;
end
(* Hyperbolic sine via its definition, with N[..., $MachinePrecision] forcing machine-precision evaluation at every step. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
/* Alternative 1 — identical to the initial program:
   sinh(x) = (exp(x) - exp(-x)) / 2. */
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
! Alternative 1 — direct evaluation of sinh(x) = (exp(x) - exp(-x)) / 2.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - exp(-x)) / 2.0d0
end function
// Alternative 1 — direct evaluation of sinh(x) = (e^x - e^{-x}) / 2.
public static double code(double x) {
return (Math.exp(x) - Math.exp(-x)) / 2.0;
}
# Alternative 1 — direct evaluation of sinh(x) = (e**x - e**-x) / 2.
def code(x): return (math.exp(x) - math.exp(-x)) / 2.0
# Alternative 1 — direct evaluation of sinh(x) = (e^x - e^-x) / 2.
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
function tmp = code(x)
% Alternative 1 — direct evaluation of sinh(x) = (exp(x) - exp(-x)) / 2.
% NOTE(review): reformatted from a single collapsed line; MATLAB does not
% accept statements on the same line as the function declaration.
tmp = (exp(x) - exp(-x)) / 2.0;
end
(* Alternative 1 — direct evaluation of sinh at machine precision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
(FPCore (x) :precision binary64 (log1p (expm1 x)))
double code(double x) {
return log1p(expm1(x));
}
// Most accurate alternative in the report (99.7%): log1p(expm1(x)),
// which is algebraically just x.
public static double code(double x) {
return Math.log1p(Math.expm1(x));
}
def code(x):
    """Evaluate log1p(expm1(x)); algebraically equal to x itself."""
    em1 = math.expm1(x)
    return math.log1p(em1)
# log1p(expm1(x)) — algebraically the identity on x.
function code(x) return log1p(expm1(x)) end
(* Log[1 + (Exp[x] - 1)] — the log1p(expm1(x)) rewrite at machine precision. *)
code[x_] := N[Log[1 + N[(Exp[x] - 1), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\mathsf{expm1}\left(x\right)\right)
\end{array}
Initial program 55.1%
Taylor expanded in x around 0 52.0%
associate-/l* 51.5%
associate-/r/ 51.7%
metadata-eval 51.7%
*-un-lft-identity 51.7%
log1p-expm1-u 99.7%
Applied egg-rr 99.7%
Final simplification 99.7%
(FPCore (x) :precision binary64 (if (or (<= x -2.4) (not (<= x 2.4))) (/ (* 0.3333333333333333 (pow x 3.0)) 2.0) x))
double code(double x) {
double tmp;
if ((x <= -2.4) || !(x <= 2.4)) {
tmp = (0.3333333333333333 * pow(x, 3.0)) / 2.0;
} else {
tmp = x;
}
return tmp;
}
real(8) function code(x)
! Piecewise alternative: (x**3 / 3) / 2 outside [-2.4, 2.4], x inside.
! The .not. form also routes NaN to the cubic branch, as in the original.
real(8), intent (in) :: x
if ((x <= (-2.4d0)) .or. (.not. (x <= 2.4d0))) then
code = (0.3333333333333333d0 * (x ** 3.0d0)) / 2.0d0
else
code = x
end if
end function
// Piecewise alternative: (x^3 / 3) / 2 when x <= -2.4 or x > 2.4
// (the negated comparison also routes NaN here), otherwise x.
public static double code(double x) {
double tmp;
if ((x <= -2.4) || !(x <= 2.4)) {
tmp = (0.3333333333333333 * Math.pow(x, 3.0)) / 2.0;
} else {
tmp = x;
}
return tmp;
}
def code(x):
    """Piecewise alternative: (x**3 / 3) / 2 when x <= -2.4 or x > 2.4, else x.

    NOTE(review): restored from a single collapsed line; the original
    one-line ``def`` with an embedded if/else is not valid Python syntax.
    """
    tmp = 0
    if (x <= -2.4) or not (x <= 2.4):
        # Cubic branch; `not (x <= 2.4)` also catches NaN inputs.
        tmp = (0.3333333333333333 * math.pow(x, 3.0)) / 2.0
    else:
        tmp = x
    return tmp
function code(x)
    # Piecewise alternative: (x^3 / 3) / 2 outside [-2.4, 2.4], x inside.
    # NOTE(review): restored from a collapsed line; `tmp = 0.0 if ...` with
    # no statement separator does not parse in Julia.
    tmp = 0.0
    if ((x <= -2.4) || !(x <= 2.4))
        tmp = Float64(Float64(0.3333333333333333 * (x ^ 3.0)) / 2.0)
    else
        tmp = x
    end
    return tmp
end
function tmp_2 = code(x)
% Piecewise alternative: (x^3 / 3) / 2 outside [-2.4, 2.4], x inside.
% NOTE(review): reformatted from a single collapsed line; MATLAB does not
% accept statements on the same line as the function declaration.
tmp = 0.0;
if ((x <= -2.4) || ~((x <= 2.4)))
tmp = (0.3333333333333333 * (x ^ 3.0)) / 2.0;
else
tmp = x;
end
tmp_2 = tmp;
end
(* Piecewise alternative: (x^3 / 3) / 2 when x <= -2.4 or x > 2.4, else x. *)
code[x_] := If[Or[LessEqual[x, -2.4], N[Not[LessEqual[x, 2.4]], $MachinePrecision]], N[(N[(0.3333333333333333 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], x]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.4 \lor \neg \left(x \leq 2.4\right):\\
\;\;\;\;\frac{0.3333333333333333 \cdot {x}^{3}}{2}\\
\mathbf{else}:\\
\;\;\;\;x\\
\end{array}
\end{array}
if x < -2.39999999999999991 or 2.39999999999999991 < x: Initial program 100.0%
Taylor expanded in x around 0 67.8%
Taylor expanded in x around inf 67.8%
if -2.39999999999999991 < x < 2.39999999999999991: Initial program 8.8%
Taylor expanded in x around 0 99.3%
Taylor expanded in x around 0 99.3%
Final simplification 83.3%
(FPCore (x) :precision binary64 (/ (+ (* 0.3333333333333333 (pow x 3.0)) (* x 2.0)) 2.0))
double code(double x) {
return ((0.3333333333333333 * pow(x, 3.0)) + (x * 2.0)) / 2.0;
}
! Degree-3 Taylor-style alternative: (x**3 / 3 + 2*x) / 2.
real(8) function code(x)
real(8), intent (in) :: x
code = ((0.3333333333333333d0 * (x ** 3.0d0)) + (x * 2.0d0)) / 2.0d0
end function
// Degree-3 Taylor-style alternative: (x^3 / 3 + 2x) / 2.
public static double code(double x) {
return ((0.3333333333333333 * Math.pow(x, 3.0)) + (x * 2.0)) / 2.0;
}
def code(x):
    """Degree-3 Taylor-style alternative: (x**3 / 3 + 2*x) / 2."""
    cubic = 0.3333333333333333 * math.pow(x, 3.0)
    return (cubic + (x * 2.0)) / 2.0
# Degree-3 Taylor-style alternative: (x^3 / 3 + 2x) / 2.
function code(x) return Float64(Float64(Float64(0.3333333333333333 * (x ^ 3.0)) + Float64(x * 2.0)) / 2.0) end
function tmp = code(x)
% Degree-3 Taylor-style alternative: (x^3 / 3 + 2*x) / 2.
% NOTE(review): reformatted from a single collapsed line; MATLAB does not
% accept statements on the same line as the function declaration.
tmp = ((0.3333333333333333 * (x ^ 3.0)) + (x * 2.0)) / 2.0;
end
(* Degree-3 Taylor-style alternative: (x^3 / 3 + 2x) / 2 at machine precision. *)
code[x_] := N[(N[(N[(0.3333333333333333 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision] + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.3333333333333333 \cdot {x}^{3} + x \cdot 2}{2}
\end{array}
Initial program 55.1%
Taylor expanded in x around 0 83.7%
Final simplification 83.7%
(FPCore (x) :precision binary64 (/ (* x 2.0) 2.0))
/* (x * 2) / 2 — the identity in exact arithmetic; note the intermediate
   x * 2.0 can overflow to infinity for |x| near DBL_MAX. */
double code(double x) {
return (x * 2.0) / 2.0;
}
! (x * 2) / 2 — identity in exact arithmetic; x * 2 may overflow for huge |x|.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * 2.0d0) / 2.0d0
end function
// (x * 2) / 2 — identity in exact arithmetic; x * 2.0 may overflow for huge |x|.
public static double code(double x) {
return (x * 2.0) / 2.0;
}
def code(x):
    """Return (x * 2.0) / 2.0 — the identity except where x * 2.0 overflows."""
    doubled = x * 2.0
    return doubled / 2.0
# (x * 2) / 2 — identity except where x * 2.0 overflows.
function code(x) return Float64(Float64(x * 2.0) / 2.0) end
function tmp = code(x)
% (x * 2) / 2 — identity except where x * 2.0 overflows.
% NOTE(review): reformatted from a single collapsed line; MATLAB does not
% accept statements on the same line as the function declaration.
tmp = (x * 2.0) / 2.0;
end
(* (x * 2) / 2 at machine precision — identity in exact arithmetic. *)
code[x_] := N[(N[(x * 2.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot 2}{2}
\end{array}
Initial program 55.1%
Taylor expanded in x around 0 52.0%
Final simplification 52.0%
(FPCore (x) :precision binary64 x)
/* Identity alternative: returns x unchanged (degree-1 Taylor term of sinh). */
double code(double x) {
return x;
}
! Identity alternative: returns x unchanged.
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
// Identity alternative: returns x unchanged.
public static double code(double x) {
return x;
}
def code(x):
    """Identity alternative: return x unchanged."""
    return x
# Identity alternative: returns x unchanged.
function code(x) return x end
function tmp = code(x)
% Identity alternative: returns x unchanged.
% NOTE(review): reformatted from a single collapsed line; MATLAB does not
% accept statements on the same line as the function declaration.
tmp = x;
end
(* Identity alternative: returns x unchanged. *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 55.1%
Taylor expanded in x around 0 52.0%
Taylor expanded in x around 0 51.7%
Final simplification 51.7%
herbie shell --seed 2023306
(FPCore (x)
:name "Hyperbolic sine"
:precision binary64
(/ (- (exp x) (exp (- x))) 2.0))