
(FPCore (x) :precision binary64 (- (exp x) 1.0))
double code(double x) {
return exp(x) - 1.0;
}
! Naive expm1: computes exp(x) - 1 in real(8) (binary64).
! For |x| << 1, exp(x) rounds to a value near 1.0 and the subtraction
! cancels most significant bits (the report's initial 8.0% accuracy).
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) - 1.0d0
end function
public static double code(double x) {
    // Naive expm1: exp(x) followed by a subtraction that cancels
    // badly when |x| is small.
    double expOfX = Math.exp(x);
    return expOfX - 1.0;
}
def code(x):
    """Naive expm1: exp(x) - 1; cancels catastrophically for |x| << 1."""
    return math.exp(x) - 1.0
function code(x)
    # Naive expm1 in Float64; the subtraction cancels for tiny |x|.
    diff = exp(x) - 1.0
    return Float64(diff)
end
function tmp = code(x)
    % Naive expm1: exp(x) - 1 cancels for small |x|.
    tmp = exp(x) - 1.0;
end
(* Naive expm1: Exp[x] - 1 at machine precision; cancels for small |x|. *)
code[x_] := N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
e^{x} - 1
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (exp x) 1.0))
// Naive expm1 (initial program): exp(x) - 1.0 cancels for |x| << 1.
double code(double x) {
return exp(x) - 1.0;
}
! Naive expm1 (initial program): the 1.0d0 subtraction cancels for |x| << 1.
real(8) function code(x)
real(8), intent (in) :: x
code = exp(x) - 1.0d0
end function
// Naive expm1 (initial program): Math.exp(x) - 1.0 cancels for |x| << 1.
public static double code(double x) {
return Math.exp(x) - 1.0;
}
def code(x): return math.exp(x) - 1.0  # naive expm1: cancellation for |x| << 1
function code(x) return Float64(exp(x) - 1.0) end # naive expm1: cancels for |x| << 1
function tmp = code(x) tmp = exp(x) - 1.0; end % naive expm1: cancels for |x| << 1
(* Naive expm1 (initial program); cancels for small |x|. *)
code[x_] := N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
e^{x} - 1
\end{array}
(FPCore (x) :precision binary64 (expm1 x))
// Accurate alternative: the expm1 intrinsic avoids the exp(x) - 1 cancellation.
double code(double x) {
return expm1(x);
}
// Accurate alternative: Math.expm1 avoids the exp(x) - 1 cancellation.
public static double code(double x) {
return Math.expm1(x);
}
def code(x): return math.expm1(x)  # accurate: library expm1 avoids cancellation
function code(x) return expm1(x) end # accurate: built-in expm1 avoids cancellation
(* expm1 rendered as Exp[x] - 1 -- NOTE(review): presumably Herbie's Mathematica fallback, since no expm1 builtin is emitted here. *)
code[x_] := N[(Exp[x] - 1), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{expm1}\left(x\right)
\end{array}
Initial program 8.0%
lift--.f64 N/A
lift-exp.f64 N/A
lower-expm1.f64 100.0
Applied rewrites 100.0%
(FPCore (x) :precision binary64 (/ x (fma (fma 0.08333333333333333 x -0.5) x 1.0)))
double code(double x) {
return x / fma(fma(0.08333333333333333, x, -0.5), x, 1.0);
}
function code(x) return Float64(x / fma(fma(0.08333333333333333, x, -0.5), x, 1.0)) end # x / (1 - x/2 + x^2/12) via fma
(* Rational approximation of expm1 near 0: x / (1 - x/2 + x^2/12). *)
code[x_] := N[(x / N[(N[(0.08333333333333333 * x + -0.5), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{\mathsf{fma}\left(\mathsf{fma}\left(0.08333333333333333, x, -0.5\right), x, 1\right)}
\end{array}
Initial program 8.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
lower-fma.f64 99.6
Applied rewrites 99.6%
Applied rewrites 99.6%
Taylor expanded in x around 0
Applied rewrites 99.6%
(FPCore (x) :precision binary64 (* (fma (fma (fma 0.041666666666666664 x 0.16666666666666666) x 0.5) x 1.0) x))
double code(double x) {
return fma(fma(fma(0.041666666666666664, x, 0.16666666666666666), x, 0.5), x, 1.0) * x;
}
function code(x) return Float64(fma(fma(fma(0.041666666666666664, x, 0.16666666666666666), x, 0.5), x, 1.0) * x) end # degree-4 Taylor: x*(1 + x/2 + x^2/6 + x^3/24)
(* Degree-4 Taylor polynomial of expm1: x*(1 + x/2 + x^2/6 + x^3/24). *)
code[x_] := N[(N[(N[(N[(0.041666666666666664 * x + 0.16666666666666666), $MachinePrecision] * x + 0.5), $MachinePrecision] * x + 1.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.041666666666666664, x, 0.16666666666666666\right), x, 0.5\right), x, 1\right) \cdot x
\end{array}
Initial program 8.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
lower-fma.f64 99.6
Applied rewrites 99.6%
(FPCore (x) :precision binary64 (fma (* (fma 0.16666666666666666 x 0.5) x) x x))
// Degree-3 Taylor polynomial of expm1: x + x^2/2 + x^3/6, in fma form
// (0.16666666666666666 == 1/6).
double code(double x) {
return fma((fma(0.16666666666666666, x, 0.5) * x), x, x);
}
function code(x) return fma(Float64(fma(0.16666666666666666, x, 0.5) * x), x, x) end # degree-3 Taylor: x + x^2/2 + x^3/6
(* Degree-3 Taylor polynomial of expm1: x + x^2/2 + x^3/6. *)
code[x_] := N[(N[(N[(0.16666666666666666 * x + 0.5), $MachinePrecision] * x), $MachinePrecision] * x + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, x, 0.5\right) \cdot x, x, x\right)
\end{array}
Initial program 8.0%
lift--.f64 N/A
lift-exp.f64 N/A
lower-expm1.f64 100.0
Applied rewrites 100.0%
Taylor expanded in x around 0
*-commutative N/A
+-commutative N/A
distribute-lft1-in N/A
lower-fma.f64 N/A
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 99.5
Applied rewrites 99.5%
(FPCore (x) :precision binary64 (* (fma (fma 0.16666666666666666 x 0.5) x 1.0) x))
// Degree-3 Taylor polynomial of expm1, factored as x * (1 + x/2 + x^2/6).
double code(double x) {
return fma(fma(0.16666666666666666, x, 0.5), x, 1.0) * x;
}
function code(x) return Float64(fma(fma(0.16666666666666666, x, 0.5), x, 1.0) * x) end # degree-3 Taylor: x*(1 + x/2 + x^2/6)
(* Degree-3 Taylor polynomial of expm1: x*(1 + x/2 + x^2/6). *)
code[x_] := N[(N[(N[(0.16666666666666666 * x + 0.5), $MachinePrecision] * x + 1.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, x, 0.5\right), x, 1\right) \cdot x
\end{array}
Initial program 8.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
lower-fma.f64 99.4
Applied rewrites 99.4%
(FPCore (x) :precision binary64 (fma (* 0.5 x) x x))
double code(double x) {
return fma((0.5 * x), x, x);
}
function code(x) return fma(Float64(0.5 * x), x, x) end # degree-2 Taylor: x + x^2/2
(* Degree-2 Taylor polynomial of expm1: x + x^2/2. *)
code[x_] := N[(N[(0.5 * x), $MachinePrecision] * x + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(0.5 \cdot x, x, x\right)
\end{array}
Initial program 8.0%
lift--.f64 N/A
lift-exp.f64 N/A
lower-expm1.f64 100.0
Applied rewrites 100.0%
Taylor expanded in x around 0
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
lower-*.f64 99.1
Applied rewrites 99.1%
(FPCore (x) :precision binary64 (* (fma 0.5 x 1.0) x))
// Degree-2 Taylor polynomial of expm1, factored as x * (1 + x/2).
double code(double x) {
return fma(0.5, x, 1.0) * x;
}
function code(x) return Float64(fma(0.5, x, 1.0) * x) end # degree-2 Taylor: x*(1 + x/2)
(* Degree-2 Taylor polynomial of expm1: x*(1 + x/2). *)
code[x_] := N[(N[(0.5 * x + 1.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(0.5, x, 1\right) \cdot x
\end{array}
Initial program 8.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 99.1
Applied rewrites 99.1%
(FPCore (x) :precision binary64 (* 1.0 x))
// Degree-1 Taylor of expm1: ~x; the 1.0 multiply mirrors the FPCore (* 1.0 x).
double code(double x) {
return 1.0 * x;
}
! Degree-1 Taylor of expm1: ~x (identity multiply kept from the FPCore form).
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 * x
end function
// Degree-1 Taylor of expm1: ~x (identity multiply kept from the FPCore form).
public static double code(double x) {
return 1.0 * x;
}
def code(x): return 1.0 * x  # degree-1 Taylor of expm1: ~x
function code(x) return Float64(1.0 * x) end # degree-1 Taylor of expm1: ~x
function tmp = code(x) tmp = 1.0 * x; end % degree-1 Taylor of expm1: ~x
(* Degree-1 Taylor of expm1: ~x. *)
code[x_] := N[(1.0 * x), $MachinePrecision]
\begin{array}{l}
\\
1 \cdot x
\end{array}
Initial program 8.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
lower-fma.f64 99.6
Applied rewrites 99.6%
Taylor expanded in x around 0
Applied rewrites 98.2%
(FPCore (x) :precision binary64 (- 1.0 1.0))
// Degenerate constant alternative: always returns 0.0; x is unused.
double code(double x) {
return 1.0 - 1.0;
}
! Degenerate constant alternative: always returns 0.0; x is unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - 1.0d0
end function
// Degenerate constant alternative: always returns 0.0; x is unused.
public static double code(double x) {
return 1.0 - 1.0;
}
def code(x): return 1.0 - 1.0  # constant 0 alternative; ignores x
function code(x) return Float64(1.0 - 1.0) end # constant 0 alternative; ignores x
function tmp = code(x) tmp = 1.0 - 1.0; end % constant 0 alternative; ignores x
(* Constant 0 alternative; ignores x. *)
code[x_] := N[(1.0 - 1.0), $MachinePrecision]
\begin{array}{l}
\\
1 - 1
\end{array}
Initial program 8.0%
Taylor expanded in x around 0
Applied rewrites 5.4%
(FPCore (x) :precision binary64 (expm1 x))
// Recommended alternative: expm1 intrinsic, accurate across the sampled range.
double code(double x) {
return expm1(x);
}
// Recommended alternative: Math.expm1, accurate across the sampled range.
public static double code(double x) {
return Math.expm1(x);
}
def code(x): return math.expm1(x)  # recommended: accurate library expm1
function code(x) return expm1(x) end # recommended: accurate built-in expm1
(* expm1 rendered as Exp[x] - 1 -- NOTE(review): presumably the Mathematica fallback form. *)
code[x_] := N[(Exp[x] - 1), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{expm1}\left(x\right)
\end{array}
herbie shell --seed 2024242
; Benchmark input: expm1 (example 3.7) in binary64.
; Precondition restricts sampling to |x| <= 1; the :alt annotation
; records the known-good target rewrite expm1(x).
(FPCore (x)
:name "expm1 (example 3.7)"
:precision binary64
:pre (<= (fabs x) 1.0)
:alt
(! :herbie-platform default (expm1 x))
(- (exp x) 1.0))