
(FPCore (x y z) :precision binary64 (+ x (/ y (- (* 1.1283791670955126 (exp z)) (* x y)))))
double code(double x, double y, double z) {
return x + (y / ((1.1283791670955126 * exp(z)) - (x * y)));
}
! Computes x + y / (1.1283791670955126d0 * exp(z) - x*y);
! the constant is approximately 2/sqrt(pi).
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + (y / ((1.1283791670955126d0 * exp(z)) - (x * y)))
end function
/** Computes x + y / (1.1283791670955126 * e^z - x*y);
 *  the constant is approximately 2/sqrt(pi). */
public static double code(double x, double y, double z) {
    final double denom = 1.1283791670955126 * Math.exp(z) - x * y;
    return x + y / denom;
}
def code(x, y, z):
    """Return x + y / (1.1283791670955126 * e**z - x*y).

    The constant is approximately 2/sqrt(pi).
    """
    denom = 1.1283791670955126 * math.exp(z) - x * y
    return x + y / denom
# x + y / (1.1283791670955126 * exp(z) - x*y); each intermediate is rounded to Float64.
function code(x, y, z) return Float64(x + Float64(y / Float64(Float64(1.1283791670955126 * exp(z)) - Float64(x * y)))) end
% x + y / (1.1283791670955126 * exp(z) - x*y); constant is approx. 2/sqrt(pi).
function tmp = code(x, y, z) tmp = x + (y / ((1.1283791670955126 * exp(z)) - (x * y))); end
(* x + y/(c*E^z - x*y) with c = 1.1283791670955126; each step rounded to $MachinePrecision. *)
code[x_, y_, z_] := N[(x + N[(y / N[(N[(1.1283791670955126 * N[Exp[z], $MachinePrecision]), $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{y}{1.1283791670955126 \cdot e^{z} - x \cdot y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z) :precision binary64 (+ x (/ y (- (* 1.1283791670955126 (exp z)) (* x y)))))
/* Computes x + y / (1.1283791670955126 * exp(z) - x*y);
 * the constant is approximately 2/sqrt(pi). */
double code(double x, double y, double z) {
return x + (y / ((1.1283791670955126 * exp(z)) - (x * y)));
}
! Computes x + y / (1.1283791670955126d0 * exp(z) - x*y);
! the constant is approximately 2/sqrt(pi).
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + (y / ((1.1283791670955126d0 * exp(z)) - (x * y)))
end function
/** Computes x + y / (1.1283791670955126 * Math.exp(z) - x*y);
 *  the constant is approximately 2/sqrt(pi). */
public static double code(double x, double y, double z) {
return x + (y / ((1.1283791670955126 * Math.exp(z)) - (x * y)));
}
# x + y / (1.1283791670955126 * exp(z) - x*y); constant is approx. 2/sqrt(pi).
def code(x, y, z): return x + (y / ((1.1283791670955126 * math.exp(z)) - (x * y)))
# x + y / (1.1283791670955126 * exp(z) - x*y); each intermediate is rounded to Float64.
function code(x, y, z) return Float64(x + Float64(y / Float64(Float64(1.1283791670955126 * exp(z)) - Float64(x * y)))) end
% x + y / (1.1283791670955126 * exp(z) - x*y); constant is approx. 2/sqrt(pi).
function tmp = code(x, y, z) tmp = x + (y / ((1.1283791670955126 * exp(z)) - (x * y))); end
(* x + y/(c*E^z - x*y) with c = 1.1283791670955126; each step rounded to $MachinePrecision. *)
code[x_, y_, z_] := N[(x + N[(y / N[(N[(1.1283791670955126 * N[Exp[z], $MachinePrecision]), $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{y}{1.1283791670955126 \cdot e^{z} - x \cdot y}
\end{array}
(FPCore (x y z) :precision binary64 (if (<= (exp z) 0.0) (+ x (/ -1.0 x)) (- x (/ y (fma x y (* (exp z) -1.1283791670955126))))))
double code(double x, double y, double z) {
double tmp;
if (exp(z) <= 0.0) {
tmp = x + (-1.0 / x);
} else {
tmp = x - (y / fma(x, y, (exp(z) * -1.1283791670955126)));
}
return tmp;
}
# Branch on exp(z): the underflow-to-zero case uses the asymptotic form x - 1/x;
# otherwise the denominator is fused as fma(x, y, exp(z) * -1.1283791670955126).
function code(x, y, z) tmp = 0.0 if (exp(z) <= 0.0) tmp = Float64(x + Float64(-1.0 / x)); else tmp = Float64(x - Float64(y / fma(x, y, Float64(exp(z) * -1.1283791670955126)))); end return tmp end
(* Branch on Exp[z]: non-positive case uses x - 1/x; otherwise x - y/(x*y - c*E^z). *)
code[x_, y_, z_] := If[LessEqual[N[Exp[z], $MachinePrecision], 0.0], N[(x + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision], N[(x - N[(y / N[(x * y + N[(N[Exp[z], $MachinePrecision] * -1.1283791670955126), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{z} \leq 0:\\
\;\;\;\;x + \frac{-1}{x}\\
\mathbf{else}:\\
\;\;\;\;x - \frac{y}{\mathsf{fma}\left(x, y, e^{z} \cdot -1.1283791670955126\right)}\\
\end{array}
\end{array}
if (exp.f64 z) < 0.0 — Initial program 88.4%
Taylor expanded in y around inf
sub-negN/A
+-lowering-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
/-lowering-/.f64100.0
Simplified100.0%
if 0.0 < (exp.f64 z) Initial program 98.4%
frac-2negN/A
distribute-frac-neg2N/A
unsub-negN/A
distribute-frac-negN/A
--lowering--.f64N/A
distribute-frac-neg2N/A
/-lowering-/.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
exp-lowering-exp.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
(FPCore (x y z)
:precision binary64
(let* ((t_0 (+ x (/ -1.0 x)))
(t_1 (+ x (/ y (- (* (exp z) 1.1283791670955126) (* x y))))))
(if (<= t_1 -2000.0) t_0 (if (<= t_1 1.0) x t_0))))
/* Piecewise form: t_1 re-evaluates the original expression; when it falls
 * in (-2000, 1] the result is just x, otherwise the asymptotic form
 * t_0 = x - 1/x is used. */
double code(double x, double y, double z) {
double t_0 = x + (-1.0 / x);
double t_1 = x + (y / ((exp(z) * 1.1283791670955126) - (x * y)));
double tmp;
if (t_1 <= -2000.0) {
tmp = t_0;
} else if (t_1 <= 1.0) {
tmp = x;
} else {
tmp = t_0;
}
return tmp;
}
! Piecewise form: t_1 re-evaluates the original expression; when it falls
! in (-2000, 1] the result is just x, otherwise the asymptotic form
! t_0 = x - 1/x is used.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = x + ((-1.0d0) / x)
t_1 = x + (y / ((exp(z) * 1.1283791670955126d0) - (x * y)))
if (t_1 <= (-2000.0d0)) then
tmp = t_0
else if (t_1 <= 1.0d0) then
tmp = x
else
tmp = t_0
end if
code = tmp
end function
/** Piecewise form: t_1 re-evaluates the original expression; returns x when
 *  t_1 lands in (-2000, 1], otherwise the asymptotic form t_0 = x - 1/x. */
public static double code(double x, double y, double z) {
double t_0 = x + (-1.0 / x);
double t_1 = x + (y / ((Math.exp(z) * 1.1283791670955126) - (x * y)));
double tmp;
if (t_1 <= -2000.0) {
tmp = t_0;
} else if (t_1 <= 1.0) {
tmp = x;
} else {
tmp = t_0;
}
return tmp;
}
def code(x, y, z):
    """Piecewise rewrite: t_1 re-evaluates the original expression; return
    x when t_1 lands in (-2000, 1], otherwise the asymptotic x - 1/x."""
    t_0 = x + (-1.0 / x)
    t_1 = x + (y / ((math.exp(z) * 1.1283791670955126) - (x * y)))
    if t_1 <= -2000.0:
        return t_0
    if t_1 <= 1.0:
        return x
    return t_0
# Piecewise form: t_1 re-evaluates the original expression; returns x when
# t_1 lands in (-2000, 1], otherwise the asymptotic form t_0 = x - 1/x.
function code(x, y, z) t_0 = Float64(x + Float64(-1.0 / x)) t_1 = Float64(x + Float64(y / Float64(Float64(exp(z) * 1.1283791670955126) - Float64(x * y)))) tmp = 0.0 if (t_1 <= -2000.0) tmp = t_0; elseif (t_1 <= 1.0) tmp = x; else tmp = t_0; end return tmp end
% Piecewise form: t_1 re-evaluates the original expression; returns x when
% t_1 lands in (-2000, 1], otherwise the asymptotic form t_0 = x - 1/x.
function tmp_2 = code(x, y, z) t_0 = x + (-1.0 / x); t_1 = x + (y / ((exp(z) * 1.1283791670955126) - (x * y))); tmp = 0.0; if (t_1 <= -2000.0) tmp = t_0; elseif (t_1 <= 1.0) tmp = x; else tmp = t_0; end tmp_2 = tmp; end
(* Piecewise form: t$95$1 re-evaluates the original expression; returns x when it lands in (-2000, 1], otherwise t$95$0 = x - 1/x. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(x + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x + N[(y / N[(N[(N[Exp[z], $MachinePrecision] * 1.1283791670955126), $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -2000.0], t$95$0, If[LessEqual[t$95$1, 1.0], x, t$95$0]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x + \frac{-1}{x}\\
t_1 := x + \frac{y}{e^{z} \cdot 1.1283791670955126 - x \cdot y}\\
\mathbf{if}\;t\_1 \leq -2000:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;t\_1 \leq 1:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if (+.f64 x (/.f64 y (-.f64 (*.f64 #s(literal 5641895835477563/5000000000000000 binary64) (exp.f64 z)) (*.f64 x y)))) < -2e3 or 1 < (+.f64 x (/.f64 y (-.f64 (*.f64 #s(literal 5641895835477563/5000000000000000 binary64) (exp.f64 z)) (*.f64 x y)))) Initial program 94.6%
Taylor expanded in y around inf
sub-negN/A
+-lowering-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
/-lowering-/.f6492.2
Simplified92.2%
if -2e3 < (+.f64 x (/.f64 y (-.f64 (*.f64 #s(literal 5641895835477563/5000000000000000 binary64) (exp.f64 z)) (*.f64 x y)))) < 1 — Initial program 100.0%
Taylor expanded in x around inf
Simplified81.9%
Final simplification89.3%
(FPCore (x y z)
:precision binary64
(if (<= (exp z) 0.005)
(+ x (/ -1.0 x))
(if (<= (exp z) 2.0)
(+ x (/ y (- (fma z 1.1283791670955126 1.1283791670955126) (* x y))))
x)))
double code(double x, double y, double z) {
double tmp;
if (exp(z) <= 0.005) {
tmp = x + (-1.0 / x);
} else if (exp(z) <= 2.0) {
tmp = x + (y / (fma(z, 1.1283791670955126, 1.1283791670955126) - (x * y)));
} else {
tmp = x;
}
return tmp;
}
# Branch on exp(z): very small -> asymptotic x - 1/x; moderate (<= 2) -> e^z
# linearized as fma(z, c, c) with c = 1.1283791670955126; large -> return x.
function code(x, y, z) tmp = 0.0 if (exp(z) <= 0.005) tmp = Float64(x + Float64(-1.0 / x)); elseif (exp(z) <= 2.0) tmp = Float64(x + Float64(y / Float64(fma(z, 1.1283791670955126, 1.1283791670955126) - Float64(x * y)))); else tmp = x; end return tmp end
(* Branch on Exp[z]: very small -> x - 1/x; moderate (<= 2) -> E^z linearized as z*c + c; large -> x. *)
code[x_, y_, z_] := If[LessEqual[N[Exp[z], $MachinePrecision], 0.005], N[(x + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision], If[LessEqual[N[Exp[z], $MachinePrecision], 2.0], N[(x + N[(y / N[(N[(z * 1.1283791670955126 + 1.1283791670955126), $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], x]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{z} \leq 0.005:\\
\;\;\;\;x + \frac{-1}{x}\\
\mathbf{elif}\;e^{z} \leq 2:\\
\;\;\;\;x + \frac{y}{\mathsf{fma}\left(z, 1.1283791670955126, 1.1283791670955126\right) - x \cdot y}\\
\mathbf{else}:\\
\;\;\;\;x\\
\end{array}
\end{array}
if (exp.f64 z) < 0.0050000000000000001 — Initial program 88.6%
Taylor expanded in y around inf
sub-negN/A
+-lowering-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
/-lowering-/.f6499.6
Simplified99.6%
if 0.0050000000000000001 < (exp.f64 z) < 2Initial program 99.9%
Taylor expanded in z around 0
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f6499.9
Simplified99.9%
if 2 < (exp.f64 z) Initial program 95.6%
Taylor expanded in x around inf
Simplified100.0%
(FPCore (x y z) :precision binary64 (if (<= (exp z) 0.005) (+ x (/ -1.0 x)) (if (<= (exp z) 2.0) (- x (/ y (fma y x -1.1283791670955126))) x)))
/* Branch on exp(z): very small -> asymptotic x - 1/x; moderate (<= 2)
 * -> x - y / fma(y, x, -1.1283791670955126); large -> the correction
 * term is negligible and x is returned. */
double code(double x, double y, double z) {
double tmp;
if (exp(z) <= 0.005) {
tmp = x + (-1.0 / x);
} else if (exp(z) <= 2.0) {
tmp = x - (y / fma(y, x, -1.1283791670955126));
} else {
tmp = x;
}
return tmp;
}
# Branch on exp(z): very small -> x - 1/x; moderate (<= 2) -> x - y/fma(y, x, -1.1283791670955126); large -> x.
function code(x, y, z) tmp = 0.0 if (exp(z) <= 0.005) tmp = Float64(x + Float64(-1.0 / x)); elseif (exp(z) <= 2.0) tmp = Float64(x - Float64(y / fma(y, x, -1.1283791670955126))); else tmp = x; end return tmp end
(* Branch on Exp[z]: very small -> x - 1/x; moderate (<= 2) -> x - y/(y*x - 1.1283791670955126); large -> x. *)
code[x_, y_, z_] := If[LessEqual[N[Exp[z], $MachinePrecision], 0.005], N[(x + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision], If[LessEqual[N[Exp[z], $MachinePrecision], 2.0], N[(x - N[(y / N[(y * x + -1.1283791670955126), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], x]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{z} \leq 0.005:\\
\;\;\;\;x + \frac{-1}{x}\\
\mathbf{elif}\;e^{z} \leq 2:\\
\;\;\;\;x - \frac{y}{\mathsf{fma}\left(y, x, -1.1283791670955126\right)}\\
\mathbf{else}:\\
\;\;\;\;x\\
\end{array}
\end{array}
if (exp.f64 z) < 0.0050000000000000001 — Initial program 88.6%
Taylor expanded in y around inf
sub-negN/A
+-lowering-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
/-lowering-/.f6499.6
Simplified99.6%
if 0.0050000000000000001 < (exp.f64 z) < 2Initial program 99.9%
frac-2negN/A
distribute-frac-neg2N/A
unsub-negN/A
distribute-frac-negN/A
--lowering--.f64N/A
distribute-frac-neg2N/A
/-lowering-/.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
exp-lowering-exp.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in z around 0
--lowering--.f64N/A
/-lowering-/.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f6499.8
Simplified99.8%
if 2 < (exp.f64 z) Initial program 95.6%
Taylor expanded in x around inf
Simplified100.0%
(FPCore (x y z) :precision binary64 (if (<= x -2.1e-134) x (if (<= x 3.7e-147) (fma 0.8862269254527579 y x) x)))
double code(double x, double y, double z) {
double tmp;
if (x <= -2.1e-134) {
tmp = x;
} else if (x <= 3.7e-147) {
tmp = fma(0.8862269254527579, y, x);
} else {
tmp = x;
}
return tmp;
}
# z-independent approximation: for x in (-2.1e-134, 3.7e-147], apply a first-order
# correction fma(0.8862269254527579, y, x); elsewhere return x unchanged.
function code(x, y, z) tmp = 0.0 if (x <= -2.1e-134) tmp = x; elseif (x <= 3.7e-147) tmp = fma(0.8862269254527579, y, x); else tmp = x; end return tmp end
(* z-independent approximation: for x in (-2.1e-134, 3.7e-147], return 0.8862269254527579*y + x; elsewhere x. *)
code[x_, y_, z_] := If[LessEqual[x, -2.1e-134], x, If[LessEqual[x, 3.7e-147], N[(0.8862269254527579 * y + x), $MachinePrecision], x]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.1 \cdot 10^{-134}:\\
\;\;\;\;x\\
\mathbf{elif}\;x \leq 3.7 \cdot 10^{-147}:\\
\;\;\;\;\mathsf{fma}\left(0.8862269254527579, y, x\right)\\
\mathbf{else}:\\
\;\;\;\;x\\
\end{array}
\end{array}
if x < -2.0999999999999999e-134 or 3.7000000000000002e-147 < x Initial program 97.8%
Taylor expanded in x around inf
Simplified86.0%
if -2.0999999999999999e-134 < x < 3.7000000000000002e-147 — Initial program 91.3%
Taylor expanded in z around 0
+-lowering-+.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
*-commutativeN/A
*-lowering-*.f6461.6
Simplified61.6%
Taylor expanded in y around 0
+-commutativeN/A
accelerator-lowering-fma.f6456.4
Simplified56.4%
(FPCore (x y z) :precision binary64 x)
/* Most aggressive simplification: the correction term is dropped entirely
 * and x is returned unchanged (y and z are ignored). */
double code(double x, double y, double z) {
return x;
}
! Most aggressive simplification: the correction term is dropped entirely
! and x is returned unchanged (y and z are ignored).
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x
end function
/** Most aggressive simplification: returns x unchanged (y and z ignored). */
public static double code(double x, double y, double z) {
return x;
}
# Most aggressive simplification: returns x unchanged (y and z are ignored).
def code(x, y, z): return x
# Most aggressive simplification: returns x unchanged (y and z are ignored).
function code(x, y, z) return x end
% Most aggressive simplification: returns x unchanged (y and z are ignored).
function tmp = code(x, y, z) tmp = x; end
(* Most aggressive simplification: returns x unchanged (y and z are ignored). *)
code[x_, y_, z_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 96.1%
Taylor expanded in x around inf
Simplified73.6%
(FPCore (x y z) :precision binary64 (+ x (/ 1.0 (- (* (/ 1.1283791670955126 y) (exp z)) x))))
double code(double x, double y, double z) {
return x + (1.0 / (((1.1283791670955126 / y) * exp(z)) - x));
}
! Algebraically equivalent form with y divided out of the fraction:
! x + 1 / ((1.1283791670955126d0 / y) * exp(z) - x).
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + (1.0d0 / (((1.1283791670955126d0 / y) * exp(z)) - x))
end function
/** Algebraically equivalent form with y divided out of the fraction:
 *  x + 1 / ((1.1283791670955126 / y) * e^z - x). */
public static double code(double x, double y, double z) {
return x + (1.0 / (((1.1283791670955126 / y) * Math.exp(z)) - x));
}
def code(x, y, z):
    """Equivalent form with y divided out of the fraction:
    x + 1 / ((1.1283791670955126 / y) * e**z - x)."""
    slope = 1.1283791670955126 / y
    return x + 1.0 / (slope * math.exp(z) - x)
# Equivalent form with y divided out: x + 1 / ((1.1283791670955126 / y) * exp(z) - x).
function code(x, y, z) return Float64(x + Float64(1.0 / Float64(Float64(Float64(1.1283791670955126 / y) * exp(z)) - x))) end
% Equivalent form with y divided out: x + 1 / ((1.1283791670955126 / y) * exp(z) - x).
function tmp = code(x, y, z) tmp = x + (1.0 / (((1.1283791670955126 / y) * exp(z)) - x)); end
(* Equivalent form with y divided out: x + 1/((1.1283791670955126 / y) * E^z - x). *)
code[x_, y_, z_] := N[(x + N[(1.0 / N[(N[(N[(1.1283791670955126 / y), $MachinePrecision] * N[Exp[z], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{1}{\frac{1.1283791670955126}{y} \cdot e^{z} - x}
\end{array}
herbie shell --seed 2024196
(FPCore (x y z)
:name "Numeric.SpecFunctions:invErfc from math-functions-0.1.5.2, A"
:precision binary64
:alt
(! :herbie-platform default (+ x (/ 1 (- (* (/ 5641895835477563/5000000000000000 y) (exp z)) x))))
(+ x (/ y (- (* 1.1283791670955126 (exp z)) (* x y)))))