
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
real(8) function code(x)
    ! sinh(x) from its definition; the subtraction cancels badly near x = 0.
    implicit none
    real(8), intent (in) :: x
    code = (exp(x) - exp(-x)) / 2.0d0
end function
public static double code(double x) {
    // sinh(x) by definition: (e^x - e^{-x}) / 2; cancels near x = 0.
    final double ePos = Math.exp(x);
    final double eNeg = Math.exp(-x);
    return (ePos - eNeg) / 2.0;
}
def code(x):
    # sinh(x) straight from the definition (e^x - e^{-x}) / 2
    e_pos = math.exp(x)
    e_neg = math.exp(-x)
    return (e_pos - e_neg) / 2.0
# sinh(x) from its definition, keeping the report's explicit Float64 roundings
function code(x)
    e_pos = exp(x)
    e_neg = exp(Float64(-x))
    return Float64(Float64(e_pos - e_neg) / 2.0)
end
% sinh(x) computed directly from (exp(x) - exp(-x)) / 2
function tmp = code(x)
  e_pos = exp(x);
  e_neg = exp(-x);
  tmp = (e_pos - e_neg) / 2.0;
end
(* sinh: machine-precision evaluation of (e^x - e^(-x))/2; the subtraction loses accuracy near x = 0 *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
real(8) function code(x)
    ! sinh(x) from its definition; the subtraction cancels badly near x = 0.
    implicit none
    real(8), intent (in) :: x
    code = (exp(x) - exp(-x)) / 2.0d0
end function
public static double code(double x) {
    // sinh(x) by definition: (e^x - e^{-x}) / 2; cancels near x = 0.
    final double ePos = Math.exp(x);
    final double eNeg = Math.exp(-x);
    return (ePos - eNeg) / 2.0;
}
def code(x):
    # sinh(x) straight from the definition (e^x - e^{-x}) / 2
    e_pos = math.exp(x)
    e_neg = math.exp(-x)
    return (e_pos - e_neg) / 2.0
# sinh(x) from its definition, keeping the report's explicit Float64 roundings
function code(x)
    e_pos = exp(x)
    e_neg = exp(Float64(-x))
    return Float64(Float64(e_pos - e_neg) / 2.0)
end
% sinh(x) computed directly from (exp(x) - exp(-x)) / 2
function tmp = code(x)
  e_pos = exp(x);
  e_neg = exp(-x);
  tmp = (e_pos - e_neg) / 2.0;
end
(* sinh: machine-precision evaluation of (e^x - e^(-x))/2; the subtraction loses accuracy near x = 0 *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
(FPCore (x) :precision binary64 (log1p (expm1 x)))
double code(double x) {
return log1p(expm1(x));
}
public static double code(double x) {
    // Herbie rewrite: log1p(expm1(x)) -- exactly x in real arithmetic.
    final double t = Math.expm1(x);
    return Math.log1p(t);
}
def code(x):
    # Herbie rewrite: log1p(expm1(x)) — exactly x in real arithmetic
    t = math.expm1(x)
    return math.log1p(t)
# Herbie rewrite: log1p of expm1 — the identity in exact arithmetic
function code(x)
    t = expm1(x)
    return log1p(t)
end
(* Herbie rewrite: Log[1 + (Exp[x] - 1)] -- exactly x in real arithmetic *)
code[x_] := N[Log[1 + N[(Exp[x] - 1), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\mathsf{expm1}\left(x\right)\right)
\end{array}
Initial program 49.6%
Taylor expanded in x around 0 57.3%
associate-/l* 56.5%
associate-/r/ 56.7%
metadata-eval 56.7%
*-un-lft-identity 56.7%
log1p-expm1-u 99.1%
Applied egg-rr 99.1%
Final simplification 99.1%
(FPCore (x) :precision binary64 (/ (+ (* x 2.0) (* 0.016666666666666666 (pow x 5.0))) 2.0))
double code(double x) {
return ((x * 2.0) + (0.016666666666666666 * pow(x, 5.0))) / 2.0;
}
real(8) function code(x)
    ! Degree-5 Taylor form of sinh: (2x + x**5/60) / 2.
    ! Bug fix: the original used x ** 5.0d0 -- raising a NEGATIVE base to a
    ! REAL power is invalid in Fortran (evaluated via exp(y*log(x))), so the
    ! original returned NaN for all x < 0.  An integer exponent is defined
    ! for every x and gives identical results for x >= 0.
    implicit none
    real(8), intent (in) :: x
    code = ((x * 2.0d0) + (0.016666666666666666d0 * (x ** 5))) / 2.0d0
end function
public static double code(double x) {
    // Degree-5 Taylor form of sinh: (2x + x^5/60) / 2  (0.0166... == 1/60).
    final double quintic = 0.016666666666666666 * Math.pow(x, 5.0);
    return ((x * 2.0) + quintic) / 2.0;
}
def code(x):
    # Degree-5 Taylor form of sinh: (2x + x**5/60) / 2  (0.0166... == 1/60)
    quintic = 0.016666666666666666 * math.pow(x, 5.0)
    return ((x * 2.0) + quintic) / 2.0
# Degree-5 Taylor form of sinh with the report's explicit Float64 roundings
function code(x)
    linear = Float64(x * 2.0)
    quintic = Float64(0.016666666666666666 * (x ^ 5.0))
    return Float64(Float64(linear + quintic) / 2.0)
end
% Degree-5 Taylor form of sinh: (2x + x^5/60) / 2
function tmp = code(x)
  quintic = 0.016666666666666666 * (x ^ 5.0);
  tmp = ((x * 2.0) + quintic) / 2.0;
end
(* Degree-5 Taylor form of sinh: (2x + x^5/60) / 2, rounded to machine precision at each step *)
code[x_] := N[(N[(N[(x * 2.0), $MachinePrecision] + N[(0.016666666666666666 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot 2 + 0.016666666666666666 \cdot {x}^{5}}{2}
\end{array}
Initial program 49.6%
Taylor expanded in x around 0 92.0%
Taylor expanded in x around inf 91.3%
Final simplification 91.3%
(FPCore (x) :precision binary64 (/ (+ (* x 2.0) (* x (* 0.3333333333333333 (* x x)))) 2.0))
double code(double x) {
    /* Degree-3 Taylor form of sinh: (2x + x*(x*x)/3) / 2
       (0.3333333333333333 == 1/3). */
    const double cubic = x * (0.3333333333333333 * (x * x));
    return ((x * 2.0) + cubic) / 2.0;
}
real(8) function code(x)
    ! Degree-3 Taylor form of sinh: (2x + x*(x*x)/3) / 2.
    implicit none
    real(8), intent (in) :: x
    code = ((x * 2.0d0) + (x * (0.3333333333333333d0 * (x * x)))) / 2.0d0
end function
public static double code(double x) {
    // Degree-3 Taylor form of sinh: (2x + x*(x*x)/3) / 2  (0.333... == 1/3).
    final double cubic = x * (0.3333333333333333 * (x * x));
    return ((x * 2.0) + cubic) / 2.0;
}
def code(x):
    # Degree-3 Taylor form of sinh: (2x + x*(x*x)/3) / 2  (0.333... == 1/3)
    cubic = x * (0.3333333333333333 * (x * x))
    return ((x * 2.0) + cubic) / 2.0
# Degree-3 Taylor form of sinh with the report's explicit Float64 roundings
function code(x)
    linear = Float64(x * 2.0)
    cubic = Float64(x * Float64(0.3333333333333333 * Float64(x * x)))
    return Float64(Float64(linear + cubic) / 2.0)
end
% Degree-3 Taylor form of sinh: (2x + x*(x*x)/3) / 2
function tmp = code(x)
  cubic = x * (0.3333333333333333 * (x * x));
  tmp = ((x * 2.0) + cubic) / 2.0;
end
(* Degree-3 Taylor form of sinh: (2x + x*(x*x)/3) / 2, rounded to machine precision at each step *)
code[x_] := N[(N[(N[(x * 2.0), $MachinePrecision] + N[(x * N[(0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot 2 + x \cdot \left(0.3333333333333333 \cdot \left(x \cdot x\right)\right)}{2}
\end{array}
Initial program 49.6%
Taylor expanded in x around 0 85.1%
unpow3 85.1%
associate-*r* 85.1%
distribute-rgt-out 85.1%
*-commutative 85.1%
+-commutative 85.1%
associate-*l* 85.1%
fma-def 85.1%
Simplified 85.1%
fma-udef 85.1%
distribute-rgt-in 85.1%
Applied egg-rr 85.1%
Taylor expanded in x around 0 85.1%
unpow2 85.1%
Simplified 85.1%
Final simplification 85.1%
(FPCore (x) :precision binary64 (/ (* x 2.0) 2.0))
double code(double x) {
    /* Unsimplified leading Taylor term: (x * 2) / 2.  Scaling by a power
       of two is exact in binary floating point (barring overflow). */
    const double doubled = x * 2.0;
    return doubled / 2.0;
}
real(8) function code(x)
    ! Unsimplified leading Taylor term: (x * 2) / 2.  Scaling by a power
    ! of two is exact in binary floating point (barring overflow).
    implicit none
    real(8), intent (in) :: x
    code = (x * 2.0d0) / 2.0d0
end function
public static double code(double x) {
    // Unsimplified leading Taylor term: (x * 2) / 2 -- exact scaling by 2.
    final double doubled = x * 2.0;
    return doubled / 2.0;
}
def code(x):
    # Unsimplified leading Taylor term: (x * 2) / 2 — exact scaling by 2
    doubled = x * 2.0
    return doubled / 2.0
# Unsimplified leading Taylor term: (x * 2) / 2 with explicit Float64 roundings
function code(x)
    doubled = Float64(x * 2.0)
    return Float64(doubled / 2.0)
end
% Unsimplified leading Taylor term: (x * 2) / 2 -- exact scaling by 2
function tmp = code(x)
  doubled = x * 2.0;
  tmp = doubled / 2.0;
end
(* Unsimplified leading Taylor term: (x * 2) / 2 *)
code[x_] := N[(N[(x * 2.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot 2}{2}
\end{array}
Initial program 49.6%
Taylor expanded in x around 0 57.3%
Final simplification 57.3%
(FPCore (x) :precision binary64 x)
/* Herbie alternative: sinh(x) ~ x (leading Taylor term only). */
double code(double x) {
return x;
}
real(8) function code(x)
    ! Herbie alternative: sinh(x) ~ x (leading Taylor term only).
    implicit none
    real(8), intent (in) :: x
    code = x
end function
// Herbie alternative: sinh(x) ~ x (leading Taylor term only).
public static double code(double x) {
return x;
}
# Herbie alternative: sinh(x) ~ x (leading Taylor term only)
def code(x): return x
# Herbie alternative: sinh(x) ~ x (leading Taylor term only)
function code(x) return x end
% Herbie alternative: sinh(x) ~ x (leading Taylor term only)
function tmp = code(x) tmp = x; end
(* Herbie alternative: sinh(x) ~ x (leading Taylor term only) *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 49.6%
Taylor expanded in x around 0 57.3%
Taylor expanded in x around 0 56.7%
Final simplification 56.7%
herbie shell --seed 2023189
(FPCore (x)
:name "Hyperbolic sine"
:precision binary64
(/ (- (exp x) (exp (- x))) 2.0))