
(FPCore (a x) :precision binary64 (- (exp (* a x)) 1.0))
double code(double a, double x) {
return exp((a * x)) - 1.0;
}
real(8) function code(a, x)
real(8), intent (in) :: a
real(8), intent (in) :: x
! e**(a*x) - 1, evaluated directly (cancels badly when a*x is near 0)
code = exp(a * x) - 1.0d0
end function code
public static double code(double a, double x) {
    // e^(a*x) - 1, evaluated directly (cancels badly when a*x is near 0).
    double t = a * x;
    return Math.exp(t) - 1.0;
}
def code(a, x):
    """Return exp(a*x) - 1, evaluated directly (cancels badly near a*x == 0)."""
    t = a * x
    return math.exp(t) - 1.0
function code(a, x)
    # e^(a*x) - 1, evaluated directly (cancels badly when a*x is near 0)
    t = Float64(a * x)
    return Float64(exp(t) - 1.0)
end
function tmp = code(a, x)
    % e^(a*x) - 1, evaluated directly (cancels badly when a*x is near 0)
    t = a * x;
    tmp = exp(t) - 1.0;
end
(* exp(a*x) - 1, with each intermediate rounded to $MachinePrecision *)
code[a_, x_] := N[(N[Exp[N[(a * x), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
e^{a \cdot x} - 1
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a x) :precision binary64 (- (exp (* a x)) 1.0))
double code(double a, double x) {
return exp((a * x)) - 1.0;
}
real(8) function code(a, x)
real(8), intent (in) :: a
real(8), intent (in) :: x
! Alternative 1: same direct e**(a*x) - 1 as the initial program
code = exp(a * x) - 1.0d0
end function code
public static double code(double a, double x) {
    // Alternative 1: same direct e^(a*x) - 1 as the initial program.
    double t = a * x;
    return Math.exp(t) - 1.0;
}
def code(a, x):
    """Alternative 1: same direct exp(a*x) - 1 as the initial program."""
    t = a * x
    return math.exp(t) - 1.0
function code(a, x)
    # Alternative 1: same direct e^(a*x) - 1 as the initial program
    t = Float64(a * x)
    return Float64(exp(t) - 1.0)
end
function tmp = code(a, x)
    % Alternative 1: same direct e^(a*x) - 1 as the initial program
    t = a * x;
    tmp = exp(t) - 1.0;
end
(* Alternative 1: exp(a*x) - 1 with per-step $MachinePrecision rounding *)
code[a_, x_] := N[(N[Exp[N[(a * x), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
e^{a \cdot x} - 1
\end{array}
(FPCore (a x) :precision binary64 (expm1 (* x a)))
double code(double a, double x) {
return expm1((x * a));
}
public static double code(double a, double x) {
    // Accurate e^(x*a) - 1 via expm1, which avoids cancellation near 0.
    double p = x * a;
    return Math.expm1(p);
}
def code(a, x):
    """Accurate exp(x*a) - 1 via expm1, avoiding cancellation near 0."""
    p = x * a
    return math.expm1(p)
function code(a, x)
    # Accurate e^(x*a) - 1 via expm1, avoiding cancellation near 0
    p = Float64(x * a)
    return expm1(p)
end
(* expm1(x*a); Mathematica lacks a machine expm1 so it falls back to Exp[...] - 1 *)
code[a_, x_] := N[(Exp[N[(x * a), $MachinePrecision]] - 1), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{expm1}\left(x \cdot a\right)
\end{array}
Initial program 56.9%
lift--.f64N/A
lift-exp.f64N/A
lower-expm1.f64100.0
lift-*.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
(FPCore (a x) :precision binary64 (if (<= a -4.5e+213) (- (* (* (fma (* 0.5 x) x (/ x a)) a) a) 1.0) (* (fma x (* (* (fma (* 0.16666666666666666 a) x 0.5) x) a) x) a)))
double code(double a, double x) {
// Branch-selected approximation of expm1(a*x) (Herbie-generated).
// Constants and fma nesting are chosen for accuracy; do not "simplify".
double tmp;
if (a <= -4.5e+213) {
// Huge-negative-a regime: series expanded around a = inf.
tmp = ((fma((0.5 * x), x, (x / a)) * a) * a) - 1.0;
} else {
// General regime: degree-3 Taylor polynomial of expm1 in fma form.
tmp = fma(x, ((fma((0.16666666666666666 * a), x, 0.5) * x) * a), x) * a;
}
return tmp;
}
function code(a, x)
    # Branch-selected approximation of expm1(a*x) (Herbie-generated).
    # Fix: the flattened one-line form dropped statement separators
    # ("tmp = 0.0 if (...) ..." is a syntax error); restored valid layout
    # with the identical sequence of operations.
    tmp = 0.0
    if (a <= -4.5e+213)
        # Huge-negative-a regime: series expanded around a = inf
        tmp = Float64(Float64(Float64(fma(Float64(0.5 * x), x, Float64(x / a)) * a) * a) - 1.0)
    else
        # General regime: degree-3 Taylor polynomial of expm1 in fma form
        tmp = Float64(fma(x, Float64(Float64(fma(Float64(0.16666666666666666 * a), x, 0.5) * x) * a), x) * a)
    end
    return tmp
end
(* Branch-selected approximation of expm1(a*x); fma rendered as a*b + c under N[..., $MachinePrecision] *)
code[a_, x_] := If[LessEqual[a, -4.5e+213], N[(N[(N[(N[(N[(0.5 * x), $MachinePrecision] * x + N[(x / a), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision] * a), $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(x * N[(N[(N[(N[(0.16666666666666666 * a), $MachinePrecision] * x + 0.5), $MachinePrecision] * x), $MachinePrecision] * a), $MachinePrecision] + x), $MachinePrecision] * a), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -4.5 \cdot 10^{+213}:\\
\;\;\;\;\left(\mathsf{fma}\left(0.5 \cdot x, x, \frac{x}{a}\right) \cdot a\right) \cdot a - 1\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, \left(\mathsf{fma}\left(0.16666666666666666 \cdot a, x, 0.5\right) \cdot x\right) \cdot a, x\right) \cdot a\\
\end{array}
\end{array}
if a < -4.5000000000000002e213Initial program 100.0%
Taylor expanded in a around 0
+-commutativeN/A
Applied rewrites0.8%
Taylor expanded in a around inf
Applied rewrites20.5%
if -4.5000000000000002e213 < a Initial program 54.3%
lift--.f64N/A
lift-exp.f64N/A
lower-expm1.f64100.0
lift-*.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Taylor expanded in a around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
cube-multN/A
unpow2N/A
associate-*r*N/A
distribute-rgt-outN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
lower-fma.f64N/A
lower-*.f6467.6
Applied rewrites67.6%
Applied rewrites71.4%
Final simplification68.4%
(FPCore (a x) :precision binary64 (* (fma x (* (* (fma (* 0.16666666666666666 a) x 0.5) x) a) x) a))
double code(double a, double x) {
return fma(x, ((fma((0.16666666666666666 * a), x, 0.5) * x) * a), x) * a;
}
function code(a, x)
    # Degree-3 Taylor polynomial of expm1(a*x), evaluated with fused ops
    inner = fma(Float64(0.16666666666666666 * a), x, 0.5)
    poly = Float64(Float64(inner * x) * a)
    return Float64(fma(x, poly, x) * a)
end
(* Degree-3 Taylor polynomial of expm1(a*x); fma rendered as a*b + c under N[..., $MachinePrecision] *)
code[a_, x_] := N[(N[(x * N[(N[(N[(N[(0.16666666666666666 * a), $MachinePrecision] * x + 0.5), $MachinePrecision] * x), $MachinePrecision] * a), $MachinePrecision] + x), $MachinePrecision] * a), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \left(\mathsf{fma}\left(0.16666666666666666 \cdot a, x, 0.5\right) \cdot x\right) \cdot a, x\right) \cdot a
\end{array}
Initial program 56.9%
lift--.f64N/A
lift-exp.f64N/A
lower-expm1.f64100.0
lift-*.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Taylor expanded in a around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
cube-multN/A
unpow2N/A
associate-*r*N/A
distribute-rgt-outN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
lower-fma.f64N/A
lower-*.f6463.9
Applied rewrites63.9%
Applied rewrites67.4%
Final simplification67.4%
(FPCore (a x) :precision binary64 (* x a))
double code(double a, double x) {
    // Leading Taylor term of expm1(a*x): just the product x*a.
    double product = x * a;
    return product;
}
real(8) function code(a, x)
real(8), intent (in) :: a
real(8), intent (in) :: x
! Leading Taylor term of expm1(a*x): just the product x*a
code = x * a
end function code
public static double code(double a, double x) {
    // Leading Taylor term of expm1(a*x): just the product x*a.
    double product = x * a;
    return product;
}
def code(a, x):
    """Leading Taylor term of expm1(a*x): just the product x*a."""
    product = x * a
    return product
function code(a, x)
    # Leading Taylor term of expm1(a*x): just the product x*a
    product = Float64(x * a)
    return product
end
function tmp = code(a, x)
    % Leading Taylor term of expm1(a*x): just the product x*a
    tmp = x * a;
end
(* Leading Taylor term of expm1(a*x): x*a at machine precision *)
code[a_, x_] := N[(x * a), $MachinePrecision]
\begin{array}{l}
\\
x \cdot a
\end{array}
Initial program 56.9%
Taylor expanded in a around 0
*-commutativeN/A
lower-*.f6466.6
Applied rewrites66.6%
(FPCore (a x) :precision binary64 (- 1.0 1.0))
double code(double a, double x) {
    // Degenerate constant fold (a -> 0 expansion): always evaluates to 0.
    (void) a;
    (void) x;
    return 1.0 - 1.0;
}
real(8) function code(a, x)
real(8), intent (in) :: a
real(8), intent (in) :: x
! Degenerate constant fold (a -> 0 expansion): always evaluates to 0
code = 1.0d0 - 1.0d0
end function code
public static double code(double a, double x) {
    // Degenerate constant fold (a -> 0 expansion): always evaluates to 0.
    return 1.0 - 1.0;
}
def code(a, x):
    """Degenerate constant fold (a -> 0 expansion): always returns 0.0."""
    return 1.0 - 1.0
function code(a, x)
    # Degenerate constant fold (a -> 0 expansion): always returns 0.0
    return Float64(1.0 - 1.0)
end
function tmp = code(a, x)
    % Degenerate constant fold (a -> 0 expansion): always returns 0.0
    tmp = 1.0 - 1.0;
end
(* Degenerate constant fold (a -> 0 expansion): always 0 *)
code[a_, x_] := N[(1.0 - 1.0), $MachinePrecision]
\begin{array}{l}
\\
1 - 1
\end{array}
Initial program 56.9%
Taylor expanded in a around 0
Applied rewrites22.1%
(FPCore (a x) :precision binary64 (expm1 (* a x)))
double code(double a, double x) {
return expm1((a * x));
}
public static double code(double a, double x) {
    // Accurate e^(a*x) - 1 via expm1, which avoids cancellation near 0.
    double p = a * x;
    return Math.expm1(p);
}
def code(a, x):
    """Accurate exp(a*x) - 1 via expm1, avoiding cancellation near 0."""
    p = a * x
    return math.expm1(p)
function code(a, x)
    # Accurate e^(a*x) - 1 via expm1, avoiding cancellation near 0
    p = Float64(a * x)
    return expm1(p)
end
(* expm1(a*x); Mathematica lacks a machine expm1 so it falls back to Exp[...] - 1 *)
code[a_, x_] := N[(Exp[N[(a * x), $MachinePrecision]] - 1), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{expm1}\left(a \cdot x\right)
\end{array}
herbie shell --seed 2024342
(FPCore (a x)
:name "expax (section 3.5)"
:precision binary64
:pre (> 710.0 (* a x))
:alt
(! :herbie-platform default (expm1 (* a x)))
(- (exp (* a x)) 1.0))