
(FPCore (x) :precision binary64 (exp (- (- 1.0 (* x x)))))
double code(double x) {
return exp(-(1.0 - (x * x)));
}
! exp(-(1 - x*x)) evaluated in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = exp(-(1.0d0 - (x * x)))
end function
// Computes exp(-(1 - x*x)) in binary64.
public static double code(double x) {
return Math.exp(-(1.0 - (x * x)));
}
def code(x):
    # exp(-(1 - x^2)), evaluated with the same operation order as the FPCore.
    square = x * x
    return math.exp(-(1.0 - square))
# exp(-(1 - x*x)); Float64(...) pins each intermediate to binary64.
function code(x) return exp(Float64(-Float64(1.0 - Float64(x * x)))) end
% exp(-(1 - x*x)) in double precision.
function tmp = code(x) tmp = exp(-(1.0 - (x * x))); end
(* exp(-(1 - x*x)) with N[..., $MachinePrecision] rounding after each operation. *)
code[x_] := N[Exp[(-N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision])], $MachinePrecision]
\begin{array}{l}
\\
e^{-\left(1 - x \cdot x\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (exp (- (- 1.0 (* x x)))))
// exp(-(1 - x*x)); direct translation of the FPCore above.
double code(double x) {
return exp(-(1.0 - (x * x)));
}
! exp(-(1 - x*x)) evaluated in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = exp(-(1.0d0 - (x * x)))
end function
// Computes exp(-(1 - x*x)) in binary64.
public static double code(double x) {
return Math.exp(-(1.0 - (x * x)));
}
def code(x):
    # exp(-(1 - x^2)), evaluated with the same operation order as the FPCore.
    square = x * x
    return math.exp(-(1.0 - square))
# exp(-(1 - x*x)); Float64(...) pins each intermediate to binary64.
function code(x) return exp(Float64(-Float64(1.0 - Float64(x * x)))) end
% exp(-(1 - x*x)) in double precision.
function tmp = code(x) tmp = exp(-(1.0 - (x * x))); end
(* exp(-(1 - x*x)) with N[..., $MachinePrecision] rounding after each operation. *)
code[x_] := N[Exp[(-N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision])], $MachinePrecision]
\begin{array}{l}
\\
e^{-\left(1 - x \cdot x\right)}
\end{array}
(FPCore (x) :precision binary64 (+ 1.0 (expm1 (fma x x -1.0))))
// 1 + expm1(x*x - 1): fma(x, x, -1.0) forms x*x - 1 with a single rounding,
// and expm1 keeps accuracy when its argument is near 0 (i.e. |x| near 1).
double code(double x) {
return 1.0 + expm1(fma(x, x, -1.0));
}
# 1 + expm1(fma(x, x, -1.0)); fma forms x*x - 1 with a single rounding.
function code(x) return Float64(1.0 + expm1(fma(x, x, -1.0))) end
code[x_] := N[(1.0 + N[(Exp[N[(x * x + -1.0), $MachinePrecision]] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + \mathsf{expm1}\left(\mathsf{fma}\left(x, x, -1\right)\right)
\end{array}
Initial program 100.0%
neg-sub0 100.0%
associate--r- 100.0%
metadata-eval 100.0%
+-commutative 100.0%
Simplified 100.0%
log1p-expm1-u 100.0%
log1p-udef 100.0%
add-exp-log 100.0%
fma-def 100.0%
Applied egg-rr 100.0%
Final simplification 100.0%
(FPCore (x) :precision binary64 (exp (+ -1.0 (* x x))))
// exp(-1 + x*x): the outer negation folded into the constant term.
double code(double x) {
return exp((-1.0 + (x * x)));
}
! exp(-1 + x*x) in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = exp(((-1.0d0) + (x * x)))
end function
// Computes exp(-1 + x*x) in binary64.
public static double code(double x) {
return Math.exp(-(1.0 - (x * x)));
}
def code(x):
    # exp(-1 + x^2), with the operands ordered as in the FPCore.
    square = x * x
    return math.exp(-1.0 + square)
# exp(-1 + x*x) with the intermediate sum rounded to Float64.
function code(x) return exp(Float64(-1.0 + Float64(x * x))) end
% exp(-1 + x*x) in double precision.
function tmp = code(x) tmp = exp((-1.0 + (x * x))); end
(* exp(-1 + x*x) with machine-precision rounding at each step. *)
code[x_] := N[Exp[N[(-1.0 + N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{-1 + x \cdot x}
\end{array}
Initial program 100.0%
neg-sub0 100.0%
associate--r- 100.0%
metadata-eval 100.0%
+-commutative 100.0%
Simplified 100.0%
Final simplification 100.0%
(FPCore (x) :precision binary64 (exp -1.0))
// Constant approximation exp(-1): the Taylor expansion around x = 0
// truncated to its constant term (55.5% accuracy per the log above).
double code(double x) {
return exp(-1.0);
}
! Constant approximation exp(-1); the argument x is unused.
real(8) function code(x)
real(8), intent (in) :: x
code = exp((-1.0d0))
end function
// Constant approximation exp(-1); the argument x is unused.
public static double code(double x) {
return Math.exp(-1.0);
}
def code(x):
    # Constant approximation: exp(-1); x is ignored.
    return math.exp(-1.0)
# Constant approximation exp(-1); x is unused.
function code(x) return exp(-1.0) end
% Constant approximation exp(-1); x is unused.
function tmp = code(x) tmp = exp(-1.0); end
(* Constant approximation exp(-1); x is unused. *)
code[x_] := N[Exp[-1.0], $MachinePrecision]
\begin{array}{l}
\\
e^{-1}
\end{array}
Initial program 100.0%
neg-sub0 100.0%
associate--r- 100.0%
metadata-eval 100.0%
+-commutative 100.0%
Simplified 100.0%
Taylor expanded in x around 0 55.5%
Final simplification 55.5%
herbie shell --seed 2023189
(FPCore (x)
:name "exp neg sub"
:precision binary64
(exp (- (- 1.0 (* x x)))))