
(FPCore (x y) :precision binary64 (- x (/ y (+ 1.0 (/ (* x y) 2.0)))))
/* Herbie initial program: x - y / (1 + x*y/2), evaluated in binary64. */
double code(double x, double y) {
    double denom = 1.0 + ((x * y) / 2.0);
    return x - y / denom;
}
! Herbie initial program: x - y / (1 + x*y/2) in double precision.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  real(8) :: denom
  denom = 1.0d0 + (x * y) / 2.0d0
  code = x - y / denom
end function
/** Herbie initial program: x - y / (1 + x*y/2) in double precision. */
public static double code(double x, double y) {
    final double denom = 1.0 + (x * y) / 2.0;
    return x - y / denom;
}
def code(x, y):
    """Herbie initial program: x - y / (1 + x*y/2)."""
    denom = 1.0 + (x * y) / 2.0
    return x - y / denom
# Herbie initial program: x - y / (1 + x*y/2), rounded to Float64 at each step.
function code(x, y)
    denom = Float64(1.0 + Float64(Float64(x * y) / 2.0))
    return Float64(x - Float64(y / denom))
end
% Herbie initial program: x - y / (1 + x*y/2).
function tmp = code(x, y)
    denom = 1.0 + (x * y) / 2.0;
    tmp = x - y / denom;
end
(* Herbie initial program: x - y / (1 + x*y/2); each N[..., $MachinePrecision] models a machine-precision rounding step. *)
code[x_, y_] := N[(x - N[(y / N[(1.0 + N[(N[(x * y), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{1 + \frac{x \cdot y}{2}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (- x (/ y (+ 1.0 (/ (* x y) 2.0)))))
// Herbie initial program (repeated in the alternatives table): x - y / (1 + x*y/2), binary64.
double code(double x, double y) {
return x - (y / (1.0 + ((x * y) / 2.0)));
}
! Herbie initial program (repeated in the alternatives table): x - y / (1 + x*y/2).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x - (y / (1.0d0 + ((x * y) / 2.0d0)))
end function
// Herbie initial program (repeated in the alternatives table): x - y / (1 + x*y/2).
public static double code(double x, double y) {
return x - (y / (1.0 + ((x * y) / 2.0)));
}
# Herbie initial program (repeated in the alternatives table): x - y / (1 + x*y/2).
def code(x, y): return x - (y / (1.0 + ((x * y) / 2.0)))
# Herbie initial program (repeated in the alternatives table); Float64 wrappers model per-step rounding.
function code(x, y) return Float64(x - Float64(y / Float64(1.0 + Float64(Float64(x * y) / 2.0)))) end
% Herbie initial program (repeated in the alternatives table): x - y / (1 + x*y/2).
function tmp = code(x, y) tmp = x - (y / (1.0 + ((x * y) / 2.0))); end
(* Herbie initial program (repeated in the alternatives table); N[..., $MachinePrecision] models rounding. *)
code[x_, y_] := N[(x - N[(y / N[(1.0 + N[(N[(x * y), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{1 + \frac{x \cdot y}{2}}
\end{array}
(FPCore (x y) :precision binary64 (- x (/ y (fma (* y 0.5) x 1.0))))
double code(double x, double y) {
return x - (y / fma((y * 0.5), x, 1.0));
}
# Herbie alternative using fma: x - y / fma(y*0.5, x, 1.0) (denominator with one rounding).
function code(x, y) return Float64(x - Float64(y / fma(Float64(y * 0.5), x, 1.0))) end
(* Herbie fma alternative; the fused multiply-add is spelled out as y*0.5*x + 1.0 under one N[...]. *)
code[x_, y_] := N[(x - N[(y / N[(N[(y * 0.5), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{\mathsf{fma}\left(y \cdot 0.5, x, 1\right)}
\end{array}
Initial program 99.9%
lift-+.f64 N/A
+-commutative N/A
lift-/.f64 N/A
lift-*.f64 N/A
associate-/l* N/A
*-commutative N/A
lower-fma.f64 N/A
div-inv N/A
lower-*.f64 N/A
metadata-eval 99.9
Applied rewrites 99.9%
(FPCore (x y) :precision binary64 (let* ((t_0 (- x (/ 2.0 x)))) (if (<= y -2e+77) t_0 (if (<= y 3.5e+192) (- x y) t_0))))
/* Herbie alternative with a regime split on y:
 * extreme y (y <= -2e77 or y > 3.5e192) -> x - 2/x,
 * moderate y                            -> x - y.
 * The <= chain is kept so a NaN y falls through exactly as before. */
double code(double x, double y) {
    const double far_branch = x - 2.0 / x;
    if (y <= -2e+77) {
        return far_branch;
    }
    if (y <= 3.5e+192) {
        return x - y;
    }
    return far_branch;
}
! Herbie regime-split alternative: extreme y uses x - 2/x, moderate y uses x - y.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  real(8) :: far_branch
  far_branch = x - 2.0d0 / x
  if (y <= (-2d+77)) then
    code = far_branch
  else if (y <= 3.5d+192) then
    code = x - y
  else
    code = far_branch
  end if
end function
/**
 * Herbie regime-split alternative: extreme y returns x - 2/x,
 * moderate y returns x - y. Guard-clause form; the {@code <=} chain
 * preserves the original NaN behavior.
 */
public static double code(double x, double y) {
    final double farBranch = x - 2.0 / x;
    if (y <= -2e+77) {
        return farBranch;
    }
    if (y <= 3.5e+192) {
        return x - y;
    }
    return farBranch;
}
def code(x, y):
    """Herbie regime-split alternative.

    Extreme y (y <= -2e77 or y > 3.5e192) returns x - 2/x;
    moderate y returns x - y.

    The report generator flattened this function onto one line, which is
    not valid Python (multiple statements after the colon); restored to
    proper multi-line form with identical logic.
    """
    t_0 = x - (2.0 / x)
    if y <= -2e+77:
        tmp = t_0
    elif y <= 3.5e+192:
        tmp = x - y
    else:
        tmp = t_0
    return tmp
# Regime split on y; NOTE(review): flattened to one line by the report generator — needs line breaks or separators after `t_0 = ...` and `tmp = 0.0` to parse as Julia.
function code(x, y) t_0 = Float64(x - Float64(2.0 / x)) tmp = 0.0 if (y <= -2e+77) tmp = t_0; elseif (y <= 3.5e+192) tmp = Float64(x - y); else tmp = t_0; end return tmp end
% Regime split on y; NOTE(review): flattened to one line by the report generator — MATLAB needs statement separators/newlines here.
function tmp_2 = code(x, y) t_0 = x - (2.0 / x); tmp = 0.0; if (y <= -2e+77) tmp = t_0; elseif (y <= 3.5e+192) tmp = x - y; else tmp = t_0; end tmp_2 = tmp; end
(* Regime split on y via nested If; t$95$0 caches x - 2/x for both extreme-y branches. *)
code[x_, y_] := Block[{t$95$0 = N[(x - N[(2.0 / x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[y, -2e+77], t$95$0, If[LessEqual[y, 3.5e+192], N[(x - y), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x - \frac{2}{x}\\
\mathbf{if}\;y \leq -2 \cdot 10^{+77}:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;y \leq 3.5 \cdot 10^{+192}:\\
\;\;\;\;x - y\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if y < -1.99999999999999997e77 or 3.49999999999999983e192 < y: Initial program 99.8%
Taylor expanded in y around inf
lower-/.f64 85.6
Applied rewrites 85.6%
if -1.99999999999999997e77 < y < 3.49999999999999983e192: Initial program 100.0%
Taylor expanded in y around 0
mul-1-neg N/A
unsub-neg N/A
lower--.f64 94.0
Applied rewrites 94.0%
(FPCore (x y) :precision binary64 (if (<= y -6.6e+181) (/ -2.0 x) (- x y)))
/* Herbie alternative: very negative y (<= -6.6e181) -> -2/x, otherwise x - y.
 * The <= test keeps the original NaN behavior (NaN takes the else arm). */
double code(double x, double y) {
    return (y <= -6.6e+181) ? (-2.0 / x) : (x - y);
}
! Herbie alternative: very negative y (<= -6.6e181) yields -2/x, otherwise x - y.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  if (y <= (-6.6d+181)) then
    code = (-2.0d0) / x
  else
    code = x - y
  end if
end function
/** Herbie alternative: very negative y (<= -6.6e181) yields -2/x, otherwise x - y. */
public static double code(double x, double y) {
    return (y <= -6.6e+181) ? (-2.0 / x) : (x - y);
}
def code(x, y):
    """Herbie alternative: very negative y (<= -6.6e181) yields -2/x, else x - y.

    The report generator flattened this function onto one line, which is
    not valid Python; restored to proper multi-line form with identical logic.
    """
    if y <= -6.6e+181:
        tmp = -2.0 / x
    else:
        tmp = x - y
    return tmp
# Two-regime alternative; NOTE(review): flattened to one line by the report generator — `tmp = 0.0 if ...` needs a separator to parse as Julia.
function code(x, y) tmp = 0.0 if (y <= -6.6e+181) tmp = Float64(-2.0 / x); else tmp = Float64(x - y); end return tmp end
% Two-regime alternative; NOTE(review): flattened to one line by the report generator.
function tmp_2 = code(x, y) tmp = 0.0; if (y <= -6.6e+181) tmp = -2.0 / x; else tmp = x - y; end tmp_2 = tmp; end
(* Two-regime alternative: y <= -6.6e181 -> -2/x, otherwise x - y. *)
code[x_, y_] := If[LessEqual[y, -6.6e+181], N[(-2.0 / x), $MachinePrecision], N[(x - y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -6.6 \cdot 10^{+181}:\\
\;\;\;\;\frac{-2}{x}\\
\mathbf{else}:\\
\;\;\;\;x - y\\
\end{array}
\end{array}
if y < -6.60000000000000034e181: Initial program 99.7%
lift-+.f64 N/A
+-commutative N/A
lift-/.f64 N/A
lift-*.f64 N/A
associate-/l* N/A
*-commutative N/A
lower-fma.f64 N/A
div-inv N/A
lower-*.f64 N/A
metadata-eval 99.7
Applied rewrites 99.7%
Taylor expanded in x around inf
sub-neg N/A
+-commutative N/A
distribute-lft-in N/A
*-rgt-identity N/A
lower-fma.f64 N/A
associate-*r/ N/A
metadata-eval N/A
distribute-neg-frac N/A
lower-/.f64 N/A
metadata-eval N/A
unpow2 N/A
lower-*.f64 68.5
Applied rewrites 68.5%
Taylor expanded in x around 0
Applied rewrites 49.8%
if -6.60000000000000034e181 < y: Initial program 100.0%
Taylor expanded in y around 0
mul-1-neg N/A
unsub-neg N/A
lower--.f64 83.0
Applied rewrites 83.0%
(FPCore (x y) :precision binary64 (- x y))
/* Herbie alternative: Taylor expansion in y around 0 reduces the program to x - y. */
double code(double x, double y) {
    const double diff = x - y;
    return diff;
}
! Herbie alternative: reduces to the plain difference x - y.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x - y
end function
// Herbie alternative: reduces to the plain difference x - y.
public static double code(double x, double y) {
return x - y;
}
def code(x, y):
    """Herbie alternative: reduces to the plain difference x - y."""
    return x - y
# Herbie alternative: reduces to the plain difference x - y.
function code(x, y) return Float64(x - y) end
% Herbie alternative: reduces to the plain difference x - y.
function tmp = code(x, y) tmp = x - y; end
(* Herbie alternative: reduces to the plain difference x - y. *)
code[x_, y_] := N[(x - y), $MachinePrecision]
\begin{array}{l}
\\
x - y
\end{array}
Initial program 99.9%
Taylor expanded in y around 0
mul-1-neg N/A
unsub-neg N/A
lower--.f64 74.7
Applied rewrites 74.7%
(FPCore (x y) :precision binary64 (- y))
// Herbie alternative: Taylor expansion in x around 0 reduces the program to -y (x unused).
double code(double x, double y) {
return -y;
}
! Herbie alternative: reduces to -y (x unused).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = -y
end function
// Herbie alternative: reduces to -y (x unused).
public static double code(double x, double y) {
return -y;
}
# Herbie alternative: reduces to -y (x unused).
def code(x, y): return -y
# Herbie alternative: reduces to -y (x unused).
function code(x, y) return Float64(-y) end
% Herbie alternative: reduces to -y (x unused).
function tmp = code(x, y) tmp = -y; end
(* Herbie alternative: reduces to -y (x unused). *)
code[x_, y_] := (-y)
\begin{array}{l}
\\
-y
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
mul-1-neg N/A
lower-neg.f64 28.0
Applied rewrites 28.0%
herbie shell --seed 2024232
(FPCore (x y)
:name "Data.Number.Erf:$cinvnormcdf from erf-2.0.0.0, B"
:precision binary64
(- x (/ y (+ 1.0 (/ (* x y) 2.0)))))