
(FPCore (x y) :precision binary64 (- x (/ y (+ 1.0 (/ (* x y) 2.0)))))
/* Original program: x - y / (1 + x*y/2), evaluated in binary64. */
double code(double x, double y) {
    double denom = 1.0 + (x * y) / 2.0;
    return x - y / denom;
}
real(8) function code(x, y)
    ! Original program: x - y / (1 + x*y/2), evaluated in binary64.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x - (y / (1.0d0 + ((x * y) / 2.0d0)))
end function code
public static double code(double x, double y) {
    // x - y / (1 + x*y/2)
    double denom = 1.0 + (x * y) / 2.0;
    return x - y / denom;
}
def code(x, y):
    """Return x - y / (1 + x*y/2)."""
    denom = 1.0 + (x * y) / 2.0
    return x - y / denom
function code(x, y)
    # x - y / (1 + x*y/2), with every intermediate rounded to Float64
    denom = Float64(1.0 + Float64(Float64(x * y) / 2.0))
    return Float64(x - Float64(y / denom))
end
function tmp = code(x, y)
    % x - y / (1 + x*y/2)
    tmp = x - (y / (1.0 + ((x * y) / 2.0)));
end
(* x - y/(1 + x*y/2); each N[..., $MachinePrecision] mirrors one binary64 rounding step *)
code[x_, y_] := N[(x - N[(y / N[(1.0 + N[(N[(x * y), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{1 + \frac{x \cdot y}{2}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (- x (/ y (+ 1.0 (/ (* x y) 2.0)))))
double code(double x, double y) {
    /* x - y / (1 + x*y/2) */
    double half_prod = (x * y) / 2.0;
    return x - y / (1.0 + half_prod);
}
real(8) function code(x, y)
    ! x - y / (1 + x*y/2) in binary64 (report restatement of the input).
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x - (y / (1.0d0 + ((x * y) / 2.0d0)))
end function code
public static double code(double x, double y) {
    // x - y / (1 + x*y/2)
    double halfProduct = (x * y) / 2.0;
    return x - y / (1.0 + halfProduct);
}
def code(x, y):
    """Compute x - y / (1 + x*y/2)."""
    half_prod = (x * y) / 2.0
    return x - y / (1.0 + half_prod)
function code(x, y)
    # x - y / (1 + x*y/2); explicit Float64 roundings match binary64
    d = Float64(1.0 + Float64(Float64(x * y) / 2.0))
    q = Float64(y / d)
    return Float64(x - q)
end
function tmp = code(x, y)
    % x - y / (1 + x*y/2)
    denom = 1.0 + ((x * y) / 2.0);
    tmp = x - (y / denom);
end
(* restatement of the input x - y/(1 + x*y/2); N[...] models step-by-step machine rounding *)
code[x_, y_] := N[(x - N[(y / N[(1.0 + N[(N[(x * y), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{1 + \frac{x \cdot y}{2}}
\end{array}
(FPCore (x y) :precision binary64 (- x (pow (fma 0.5 x (pow y -1.0)) -1.0)))
double code(double x, double y) {
return x - pow(fma(0.5, x, pow(y, -1.0)), -1.0);
}
function code(x, y)
    # x - 1 / (x/2 + 1/y), via fma and ^(-1)
    inv_y = y ^ -1.0
    denom = fma(0.5, x, inv_y)
    return Float64(x - (denom ^ -1.0))
end
(* x - 1/(x/2 + 1/y), written with Power[..., -1]; the fma of the FPCore source is expanded to 0.5*x + 1/y here *)
code[x_, y_] := N[(x - N[Power[N[(0.5 * x + N[Power[y, -1.0], $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - {\left(\mathsf{fma}\left(0.5, x, {y}^{-1}\right)\right)}^{-1}
\end{array}
Initial program 99.9%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f64 99.9
lift-+.f64N/A
+-commutativeN/A
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lower-fma.f64N/A
metadata-eval 99.9
lift-*.f64N/A
*-commutativeN/A
lower-*.f64 99.9
Applied rewrites99.9%
Taylor expanded in x around 0
lower-fma.f64N/A
lower-/.f64 100.0
Applied rewrites100.0%
Final simplification100.0%
(FPCore (x y) :precision binary64 (if (or (<= y -2.2e+122) (not (<= y 6.5e+154))) (- x (/ 2.0 x)) (- x y)))
/* Piecewise alternative: for huge |y| use the Taylor form x - 2/x,
 * otherwise the plain difference x - y. */
double code(double x, double y) {
    if ((y <= -2.2e+122) || !(y <= 6.5e+154)) {
        return x - (2.0 / x);
    }
    return x - y;
}
real(8) function code(x, y)
    ! Piecewise alternative: for huge |y| use the Taylor form x - 2/x,
    ! otherwise the plain difference x - y.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: tmp
    if ((y <= (-2.2d+122)) .or. (.not. (y <= 6.5d+154))) then
        tmp = x - (2.0d0 / x)
    else
        tmp = x - y
    end if
    code = tmp
end function code
public static double code(double x, double y) {
    // Huge |y|: Taylor form x - 2/x; otherwise plain x - y.
    if ((y <= -2.2e+122) || !(y <= 6.5e+154)) {
        return x - (2.0 / x);
    }
    return x - y;
}
def code(x, y):
    """Piecewise approximation of x - y/(1 + x*y/2) in binary64.

    For huge |y| Herbie substitutes the Taylor form x - 2/x; otherwise
    the plain difference x - y is used.
    """
    # The flattened one-line form in the report is a SyntaxError; this is
    # the same logic restored to valid multiline Python.
    tmp = 0
    if (y <= -2.2e+122) or not (y <= 6.5e+154):
        tmp = x - (2.0 / x)
    else:
        tmp = x - y
    return tmp
# Piecewise: huge |y| -> Taylor form x - 2/x; otherwise x - y.
# (The flattened one-line form in the report does not parse; same logic restored.)
function code(x, y)
    tmp = 0.0
    if (y <= -2.2e+122) || !(y <= 6.5e+154)
        tmp = Float64(x - Float64(2.0 / x))
    else
        tmp = Float64(x - y)
    end
    return tmp
end
function tmp_2 = code(x, y)
    % Huge |y|: Taylor form x - 2/x; otherwise plain x - y.
    tmp = 0.0;
    if (y <= -2.2e+122) || ~(y <= 6.5e+154)
        tmp = x - (2.0 / x);
    else
        tmp = x - y;
    end
    tmp_2 = tmp;
end
(* piecewise: huge |y| -> x - 2/x, else x - y; NOTE(review): C-style exponent literals such as -2.2e+122 are not valid Mathematica input (would be -2.2*^122) -- confirm this exported form before use *)
code[x_, y_] := If[Or[LessEqual[y, -2.2e+122], N[Not[LessEqual[y, 6.5e+154]], $MachinePrecision]], N[(x - N[(2.0 / x), $MachinePrecision]), $MachinePrecision], N[(x - y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -2.2 \cdot 10^{+122} \lor \neg \left(y \leq 6.5 \cdot 10^{+154}\right):\\
\;\;\;\;x - \frac{2}{x}\\
\mathbf{else}:\\
\;\;\;\;x - y\\
\end{array}
\end{array}
if y < -2.1999999999999999e122 or 6.5000000000000005e154 < y Initial program 99.9%
Taylor expanded in x around inf
lower-/.f64 88.1
Applied rewrites88.1%
if -2.1999999999999999e122 < y < 6.5000000000000005e154 Initial program 100.0%
Taylor expanded in y around 0
mul-1-negN/A
unsub-negN/A
lower--.f64 93.3
Applied rewrites93.3%
Final simplification91.9%
(FPCore (x y) :precision binary64 (- x (/ y (fma (* y x) 0.5 1.0))))
double code(double x, double y) {
return x - (y / fma((y * x), 0.5, 1.0));
}
function code(x, y)
    # denominator 1 + x*y/2 fused into a single fma
    denom = fma(Float64(y * x), 0.5, 1.0)
    return Float64(x - Float64(y / denom))
end
(* x - y/(y*x*0.5 + 1); the fma of the FPCore source is expanded to multiply-add here *)
code[x_, y_] := N[(x - N[(y / N[(N[(y * x), $MachinePrecision] * 0.5 + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{\mathsf{fma}\left(y \cdot x, 0.5, 1\right)}
\end{array}
Initial program 99.9%
lift-+.f64N/A
+-commutativeN/A
lift-/.f64N/A
div-invN/A
lower-fma.f64N/A
lift-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval 99.9
Applied rewrites99.9%
(FPCore (x y) :precision binary64 (if (or (<= y -7e+212) (not (<= y 6.5e+239))) (/ -2.0 x) (- x y)))
/* Piecewise alternative: huge |y| -> -2/x (Taylor about x = inf), else x - y. */
double code(double x, double y) {
    if ((y <= -7e+212) || !(y <= 6.5e+239)) {
        return -2.0 / x;
    }
    return x - y;
}
real(8) function code(x, y)
    ! Piecewise alternative: for huge |y| return -2/x (Taylor about
    ! x = infinity), otherwise the plain difference x - y.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: tmp
    if ((y <= (-7d+212)) .or. (.not. (y <= 6.5d+239))) then
        tmp = (-2.0d0) / x
    else
        tmp = x - y
    end if
    code = tmp
end function code
public static double code(double x, double y) {
    // Huge |y|: -2/x; otherwise plain x - y.
    if ((y <= -7e+212) || !(y <= 6.5e+239)) {
        return -2.0 / x;
    }
    return x - y;
}
def code(x, y):
    """Piecewise approximation: huge |y| -> -2/x, otherwise x - y.

    The flattened one-line form in the report is a SyntaxError; this is
    the same logic restored to valid multiline Python.
    """
    tmp = 0
    if (y <= -7e+212) or not (y <= 6.5e+239):
        tmp = -2.0 / x
    else:
        tmp = x - y
    return tmp
# Piecewise: huge |y| -> -2/x; otherwise x - y.
# (The flattened one-line form in the report does not parse; same logic restored.)
function code(x, y)
    tmp = 0.0
    if (y <= -7e+212) || !(y <= 6.5e+239)
        tmp = Float64(-2.0 / x)
    else
        tmp = Float64(x - y)
    end
    return tmp
end
function tmp_2 = code(x, y)
    % Huge |y|: -2/x; otherwise plain x - y.
    tmp = 0.0;
    if (y <= -7e+212) || ~(y <= 6.5e+239)
        tmp = -2.0 / x;
    else
        tmp = x - y;
    end
    tmp_2 = tmp;
end
(* piecewise: huge |y| -> -2/x, else x - y; NOTE(review): C-style exponent literals such as -7e+212 are not valid Mathematica input (would be -7*^212) -- confirm this exported form before use *)
code[x_, y_] := If[Or[LessEqual[y, -7e+212], N[Not[LessEqual[y, 6.5e+239]], $MachinePrecision]], N[(-2.0 / x), $MachinePrecision], N[(x - y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -7 \cdot 10^{+212} \lor \neg \left(y \leq 6.5 \cdot 10^{+239}\right):\\
\;\;\;\;\frac{-2}{x}\\
\mathbf{else}:\\
\;\;\;\;x - y\\
\end{array}
\end{array}
if y < -6.99999999999999974e212 or 6.5e239 < y Initial program 99.9%
Taylor expanded in x around inf
sub-negN/A
distribute-rgt-inN/A
*-lft-identityN/A
+-commutativeN/A
lower-fma.f64N/A
associate-*r/N/A
metadata-evalN/A
distribute-neg-fracN/A
lower-/.f64N/A
metadata-evalN/A
unpow2N/A
lower-*.f64 84.2
Applied rewrites84.2%
Taylor expanded in x around 0
Applied rewrites60.6%
if -6.99999999999999974e212 < y < 6.5e239 Initial program 100.0%
Taylor expanded in y around 0
mul-1-negN/A
unsub-negN/A
lower--.f64 85.2
Applied rewrites85.2%
Final simplification81.8%
(FPCore (x y) :precision binary64 (- x y))
/* Simplified alternative: the plain difference x - y. */
double code(double x, double y) {
    double diff = x - y;
    return diff;
}
real(8) function code(x, y)
    ! Simplified alternative: the plain difference x - y.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x - y
end function code
public static double code(double x, double y) {
    // Simplified alternative: the plain difference.
    double difference = x - y;
    return difference;
}
def code(x, y):
    """Return the plain difference x - y."""
    diff = x - y
    return diff
function code(x, y)
    # plain difference
    diff = Float64(x - y)
    return diff
end
function tmp = code(x, y)
    % plain difference
    tmp = x - y;
end
(* simplified alternative: the plain difference x - y *)
code[x_, y_] := N[(x - y), $MachinePrecision]
\begin{array}{l}
\\
x - y
\end{array}
Initial program 99.9%
Taylor expanded in y around 0
mul-1-negN/A
unsub-negN/A
lower--.f64 74.7
Applied rewrites74.7%
(FPCore (x y) :precision binary64 (- y))
/* Most-simplified alternative: -y; x is unused here. */
double code(double x, double y) {
    (void) x;
    return -y;
}
real(8) function code(x, y)
    ! Most-simplified alternative: -y. The x argument is unused but kept
    ! so the interface matches the other alternatives.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = -y
end function code
public static double code(double x, double y) {
    // x is unused in this alternative.
    return -y;
}
def code(x, y):
    """Return -y (x is unused in this alternative)."""
    return -y
function code(x, y)
    # x is unused in this alternative
    return Float64(-y)
end
function tmp = code(x, y)
    % x is unused in this alternative
    tmp = -y;
end
(* most-simplified alternative: -y; x is unused *)
code[x_, y_] := (-y)
\begin{array}{l}
\\
-y
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
mul-1-negN/A
lower-neg.f64 27.3
Applied rewrites27.3%
herbie shell --seed 2024324
(FPCore (x y)
:name "Data.Number.Erf:$cinvnormcdf from erf-2.0.0.0, B"
:precision binary64
(- x (/ y (+ 1.0 (/ (* x y) 2.0)))))