
(FPCore (a x) :precision binary64 (- (exp (* a x)) 1.0))
double code(double a, double x) {
return exp((a * x)) - 1.0;
}
real(8) function code(a, x)
! Computes exp(a*x) - 1 in double precision (naive form;
! suffers catastrophic cancellation when a*x is near zero).
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: x
code = exp((a * x)) - 1.0d0
end function
public static double code(double a, double x) {
    // exp(a*x) - 1, computed directly; cancels badly when a*x is near zero.
    final double t = a * x;
    return Math.exp(t) - 1.0;
}
def code(a, x):
    """Return exp(a*x) - 1 (direct form; cancels when a*x is near 0)."""
    t = a * x
    return math.exp(t) - 1.0
function code(a, x)
    # exp(a*x) - 1 in Float64 (direct form).
    t = Float64(a * x)
    return Float64(exp(t) - 1.0)
end
function tmp = code(a, x)
    % exp(a*x) - 1, direct form.
    t = a * x;
    tmp = exp(t) - 1.0;
end
(* exp(a*x) - 1, evaluated at machine precision (direct form). *)
code[a_, x_] := N[(N[Exp[N[(a * x), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
e^{a \cdot x} - 1
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a x) :precision binary64 (- (exp (* a x)) 1.0))
double code(double a, double x) {
return exp((a * x)) - 1.0;
}
real(8) function code(a, x)
! Computes exp(a*x) - 1 in double precision (naive form;
! suffers catastrophic cancellation when a*x is near zero).
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: x
code = exp((a * x)) - 1.0d0
end function
public static double code(double a, double x) {
    // exp(a*x) - 1, computed directly; cancels badly when a*x is near zero.
    final double t = a * x;
    return Math.exp(t) - 1.0;
}
def code(a, x):
    """Return exp(a*x) - 1 (direct form; cancels when a*x is near 0)."""
    t = a * x
    return math.exp(t) - 1.0
function code(a, x)
    # exp(a*x) - 1 in Float64 (direct form).
    t = Float64(a * x)
    return Float64(exp(t) - 1.0)
end
function tmp = code(a, x)
    % exp(a*x) - 1, direct form.
    t = a * x;
    tmp = exp(t) - 1.0;
end
(* exp(a*x) - 1, evaluated at machine precision (direct form). *)
code[a_, x_] := N[(N[Exp[N[(a * x), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
e^{a \cdot x} - 1
\end{array}
(FPCore (a x) :precision binary64 (expm1 (* a x)))
double code(double a, double x) {
return expm1((a * x));
}
public static double code(double a, double x) {
    // Math.expm1 avoids the cancellation of exp(a*x) - 1 near a*x = 0.
    final double t = a * x;
    return Math.expm1(t);
}
def code(a, x):
    """Accurate exp(a*x) - 1 via math.expm1."""
    t = a * x
    return math.expm1(t)
function code(a, x)
    # Accurate exp(a*x) - 1 via expm1.
    t = Float64(a * x)
    return expm1(t)
end
(* expm1(a*x): exact Exp[t] - 1 under arbitrary-precision N[] stands in for expm1. *)
code[a_, x_] := N[(Exp[N[(a * x), $MachinePrecision]] - 1), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{expm1}\left(a \cdot x\right)
\end{array}
Initial program 54.3%
expm1-define N/A
expm1-lowering-expm1.f64 N/A
*-lowering-*.f64 100.0%
Simplified 100.0%
(FPCore (a x) :precision binary64 (+ (* a x) (* (* a x) (* (* a x) (+ 0.5 (* (* a x) 0.16666666666666666))))))
double code(double a, double x) {
    /* Degree-3 Taylor polynomial of expm1 about 0: t + t^2/2 + t^3/6, t = a*x. */
    const double t = a * x;
    return t + t * (t * (0.5 + t * 0.16666666666666666));
}
real(8) function code(a, x)
! Degree-3 Taylor polynomial of expm1(a*x) about 0: t + t^2/2 + t^3/6.
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: x
code = (a * x) + ((a * x) * ((a * x) * (0.5d0 + ((a * x) * 0.16666666666666666d0))))
end function
public static double code(double a, double x) {
    // Degree-3 Taylor polynomial of expm1(a*x) about 0: t + t^2/2 + t^3/6.
    final double t = a * x;
    return t + t * (t * (0.5 + t * 0.16666666666666666));
}
def code(a, x):
    """Degree-3 Taylor polynomial of expm1(a*x) about 0."""
    t = a * x
    return t + t * (t * (0.5 + t * 0.16666666666666666))
function code(a, x)
    # Degree-3 Taylor polynomial of expm1(a*x) about 0.
    t = Float64(a * x)
    return Float64(t + Float64(t * Float64(t * Float64(0.5 + Float64(t * 0.16666666666666666)))))
end
function tmp = code(a, x)
    % Degree-3 Taylor polynomial of expm1(a*x) about 0.
    t = a * x;
    tmp = t + t * (t * (0.5 + t * 0.16666666666666666));
end
(* Degree-3 Taylor polynomial of expm1(a*x) about 0, at machine precision. *)
code[a_, x_] := N[(N[(a * x), $MachinePrecision] + N[(N[(a * x), $MachinePrecision] * N[(N[(a * x), $MachinePrecision] * N[(0.5 + N[(N[(a * x), $MachinePrecision] * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot x + \left(a \cdot x\right) \cdot \left(\left(a \cdot x\right) \cdot \left(0.5 + \left(a \cdot x\right) \cdot 0.16666666666666666\right)\right)
\end{array}
Initial program 54.3%
expm1-define N/A
expm1-lowering-expm1.f64 N/A
*-lowering-*.f64 100.0%
Simplified 100.0%
Taylor expanded in a around 0
Simplified 65.1%
+-commutative N/A
distribute-lft-in N/A
*-rgt-identity N/A
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
associate-*r* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 65.1%
Applied egg-rr 65.1%
Final simplification 65.1%
(FPCore (a x) :precision binary64 (* (* a x) (+ 1.0 (* (* a x) (+ 0.5 (* a (* x 0.16666666666666666)))))))
double code(double a, double x) {
    /* Horner-style degree-3 Taylor polynomial of expm1(a*x) about 0. */
    const double t = a * x;
    const double c = x * 0.16666666666666666;
    return t * (1.0 + t * (0.5 + a * c));
}
real(8) function code(a, x)
! Horner-style degree-3 Taylor polynomial of expm1(a*x) about 0.
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: x
code = (a * x) * (1.0d0 + ((a * x) * (0.5d0 + (a * (x * 0.16666666666666666d0)))))
end function
public static double code(double a, double x) {
    // Horner-style degree-3 Taylor polynomial of expm1(a*x) about 0.
    final double t = a * x;
    final double c = x * 0.16666666666666666;
    return t * (1.0 + t * (0.5 + a * c));
}
def code(a, x):
    """Horner-style degree-3 Taylor polynomial of expm1(a*x) about 0."""
    t = a * x
    c = x * 0.16666666666666666
    return t * (1.0 + t * (0.5 + a * c))
function code(a, x)
    # Horner-style degree-3 Taylor polynomial of expm1(a*x) about 0.
    t = Float64(a * x)
    c = Float64(x * 0.16666666666666666)
    return Float64(t * Float64(1.0 + Float64(t * Float64(0.5 + Float64(a * c)))))
end
function tmp = code(a, x)
    % Horner-style degree-3 Taylor polynomial of expm1(a*x) about 0.
    t = a * x;
    c = x * 0.16666666666666666;
    tmp = t * (1.0 + t * (0.5 + a * c));
end
(* Horner-style degree-3 Taylor polynomial of expm1(a*x) about 0, at machine precision. *)
code[a_, x_] := N[(N[(a * x), $MachinePrecision] * N[(1.0 + N[(N[(a * x), $MachinePrecision] * N[(0.5 + N[(a * N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(a \cdot x\right) \cdot \left(1 + \left(a \cdot x\right) \cdot \left(0.5 + a \cdot \left(x \cdot 0.16666666666666666\right)\right)\right)
\end{array}
Initial program 54.3%
expm1-define N/A
expm1-lowering-expm1.f64 N/A
*-lowering-*.f64 100.0%
Simplified 100.0%
Taylor expanded in a around 0
Simplified 65.1%
(FPCore (a x) :precision binary64 (* a (+ x (* (* a x) (* x (+ 0.5 (* (* a x) 0.16666666666666666)))))))
double code(double a, double x) {
    /* Factored degree-3 Taylor polynomial: a * (x + t*(x*(1/2 + t/6))), t = a*x. */
    const double t = a * x;
    return a * (x + t * (x * (0.5 + t * 0.16666666666666666)));
}
real(8) function code(a, x)
! Factored degree-3 Taylor polynomial of expm1(a*x) about 0.
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: x
code = a * (x + ((a * x) * (x * (0.5d0 + ((a * x) * 0.16666666666666666d0)))))
end function
public static double code(double a, double x) {
    // Factored degree-3 Taylor polynomial: a * (x + t*(x*(1/2 + t/6))), t = a*x.
    final double t = a * x;
    return a * (x + t * (x * (0.5 + t * 0.16666666666666666)));
}
def code(a, x):
    """Factored degree-3 Taylor polynomial of expm1(a*x) about 0."""
    t = a * x
    return a * (x + t * (x * (0.5 + t * 0.16666666666666666)))
function code(a, x)
    # Factored degree-3 Taylor polynomial of expm1(a*x) about 0.
    t = Float64(a * x)
    return Float64(a * Float64(x + Float64(t * Float64(x * Float64(0.5 + Float64(t * 0.16666666666666666))))))
end
function tmp = code(a, x)
    % Factored degree-3 Taylor polynomial of expm1(a*x) about 0.
    t = a * x;
    tmp = a * (x + t * (x * (0.5 + t * 0.16666666666666666)));
end
(* Factored degree-3 Taylor polynomial of expm1(a*x) about 0, at machine precision. *)
code[a_, x_] := N[(a * N[(x + N[(N[(a * x), $MachinePrecision] * N[(x * N[(0.5 + N[(N[(a * x), $MachinePrecision] * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left(x + \left(a \cdot x\right) \cdot \left(x \cdot \left(0.5 + \left(a \cdot x\right) \cdot 0.16666666666666666\right)\right)\right)
\end{array}
Initial program 54.3%
expm1-define N/A
expm1-lowering-expm1.f64 N/A
*-lowering-*.f64 100.0%
Simplified 100.0%
Taylor expanded in a around 0
Simplified 65.1%
+-commutative N/A
distribute-lft-in N/A
*-rgt-identity N/A
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
associate-*r* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 65.1%
Applied egg-rr 65.1%
associate-*l* N/A
distribute-lft-out N/A
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
associate-*l* N/A
associate-*r* N/A
*-commutative N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 65.0%
Applied egg-rr 65.0%
Final simplification 65.0%
(FPCore (a x) :precision binary64 (* a x))
double code(double a, double x) {
    /* First-order approximation: expm1(a*x) ~ a*x when a*x is near zero. */
    const double p = a * x;
    return p;
}
real(8) function code(a, x)
! First-order approximation: expm1(a*x) ~ a*x when a*x is near zero.
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: x
code = a * x
end function
public static double code(double a, double x) {
    // First-order approximation: expm1(a*x) ~ a*x when a*x is near zero.
    final double p = a * x;
    return p;
}
def code(a, x):
    """First-order approximation: expm1(a*x) ~ a*x when a*x is near zero."""
    return a * x
function code(a, x)
    # First-order approximation: expm1(a*x) ~ a*x when a*x is near zero.
    return Float64(a * x)
end
function tmp = code(a, x)
    % First-order approximation: expm1(a*x) ~ a*x when a*x is near zero.
    tmp = a * x;
end
(* First-order approximation: expm1(a*x) ~ a*x, at machine precision. *)
code[a_, x_] := N[(a * x), $MachinePrecision]
\begin{array}{l}
\\
a \cdot x
\end{array}
Initial program 54.3%
expm1-define N/A
expm1-lowering-expm1.f64 N/A
*-lowering-*.f64 100.0%
Simplified 100.0%
Taylor expanded in a around 0
*-lowering-*.f64 63.9%
Simplified 63.9%
(FPCore (a x) :precision binary64 (expm1 (* a x)))
double code(double a, double x) {
return expm1((a * x));
}
public static double code(double a, double x) {
    // Math.expm1 avoids the cancellation of exp(a*x) - 1 near a*x = 0.
    final double t = a * x;
    return Math.expm1(t);
}
def code(a, x):
    """Accurate exp(a*x) - 1 via math.expm1."""
    t = a * x
    return math.expm1(t)
function code(a, x)
    # Accurate exp(a*x) - 1 via expm1.
    t = Float64(a * x)
    return expm1(t)
end
(* expm1(a*x): exact Exp[t] - 1 under arbitrary-precision N[] stands in for expm1. *)
code[a_, x_] := N[(Exp[N[(a * x), $MachinePrecision]] - 1), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{expm1}\left(a \cdot x\right)
\end{array}
herbie shell --seed 2024139
(FPCore (a x)
:name "expax (section 3.5)"
:precision binary64
:pre (> 710.0 (* a x))
:alt
(! :herbie-platform default (expm1 (* a x)))
(- (exp (* a x)) 1.0))