
(FPCore (x y) :precision binary64 (- x (/ y (+ 1.0 (/ (* x y) 2.0)))))
/* Evaluate x - y / (1 + x*y/2) in double precision. */
double code(double x, double y) {
    double denominator = 1.0 + (x * y) / 2.0;
    return x - y / denominator;
}
!> Evaluate x - y / (1 + x*y/2) in double precision.
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x - (y / (1.0d0 + ((x * y) / 2.0d0)))
end function code
/** Computes x - y / (1 + x*y/2) in double precision. */
public static double code(double x, double y) {
    final double denominator = 1.0 + (x * y) / 2.0;
    return x - y / denominator;
}
def code(x, y):
    """Return x - y / (1 + x*y/2), evaluated in float arithmetic."""
    denominator = 1.0 + (x * y) / 2.0
    return x - y / denominator
# Compute x - y / (1 + x*y/2), rounding each intermediate to Float64
# exactly as the original expression does.
function code(x, y)
    denom = Float64(1.0 + Float64(Float64(x * y) / 2.0))
    return Float64(x - Float64(y / denom))
end
% Compute x - y / (1 + x*y/2).
function tmp = code(x, y)
  denom = 1.0 + ((x * y) / 2.0);
  tmp = x - (y / denom);
end
code[x_, y_] := N[(x - N[(y / N[(1.0 + N[(N[(x * y), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{1 + \frac{x \cdot y}{2}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (- x (/ y (+ 1.0 (/ (* x y) 2.0)))))
double code(double x, double y) {
return x - (y / (1.0 + ((x * y) / 2.0)));
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x - (y / (1.0d0 + ((x * y) / 2.0d0)))
end function
public static double code(double x, double y) {
return x - (y / (1.0 + ((x * y) / 2.0)));
}
def code(x, y): return x - (y / (1.0 + ((x * y) / 2.0)))
function code(x, y) return Float64(x - Float64(y / Float64(1.0 + Float64(Float64(x * y) / 2.0)))) end
function tmp = code(x, y) tmp = x - (y / (1.0 + ((x * y) / 2.0))); end
code[x_, y_] := N[(x - N[(y / N[(1.0 + N[(N[(x * y), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{1 + \frac{x \cdot y}{2}}
\end{array}
(FPCore (x y) :precision binary64 (- x (/ y (fma (* y x) 0.5 1.0))))
double code(double x, double y) {
return x - (y / fma((y * x), 0.5, 1.0));
}
function code(x, y) return Float64(x - Float64(y / fma(Float64(y * x), 0.5, 1.0))) end
code[x_, y_] := N[(x - N[(y / N[(N[(y * x), $MachinePrecision] * 0.5 + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{\mathsf{fma}\left(y \cdot x, 0.5, 1\right)}
\end{array}
Initial program 99.9%
lift-+.f64N/A
+-commutativeN/A
lift-/.f64N/A
div-invN/A
lower-fma.f64N/A
lift-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval99.9
Applied rewrites99.9%
(FPCore (x y) :precision binary64 (if (or (<= y -1.5e+111) (not (<= y 3.6e+20))) (- x (/ 2.0 x)) (- x y)))
/* Piecewise approximation of x - y/(1 + x*y/2): when |y| is huge (the
 * !(y <= ...) form also catches NaN y) the quotient tends to 2/x;
 * otherwise the quotient term is ~y and the result is x - y. */
double code(double x, double y) {
    double result;
    const int y_extreme = (y <= -1.5e+111) || !(y <= 3.6e+20);
    if (y_extreme) {
        result = x - (2.0 / x);
    } else {
        result = x - y;
    }
    return result;
}
!> Piecewise approximation of x - y/(1 + x*y/2): for very large |y|
!> (the .not. form also catches NaN y) return x - 2/x, else x - y.
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((y <= (-1.5d+111)) .or. (.not. (y <= 3.6d+20))) then
    tmp = x - (2.0d0 / x)
else
    tmp = x - y
end if
code = tmp
end function code
/**
 * Piecewise approximation of x - y/(1 + x*y/2): for extreme y
 * (the !(y <= ...) form also catches NaN) returns x - 2/x, else x - y.
 */
public static double code(double x, double y) {
    final boolean yExtreme = (y <= -1.5e+111) || !(y <= 3.6e+20);
    return yExtreme ? x - (2.0 / x) : x - y;
}
def code(x, y):
    """Piecewise approximation of x - y/(1 + x*y/2).

    For very large |y| (the ``not (y <= ...)`` form also catches NaN)
    return x - 2/x; otherwise return x - y.

    Note: the source line had the if/else collapsed onto one line and was
    not valid Python; this restores the intended statement structure.
    """
    if (y <= -1.5e+111) or not (y <= 3.6e+20):
        tmp = x - (2.0 / x)
    else:
        tmp = x - y
    return tmp
# Piecewise approximation of x - y/(1 + x*y/2): for very large |y|
# (the !(y <= ...) form also catches NaN) return x - 2/x, else x - y.
# Restores the newlines lost in the dump; the collapsed form did not parse.
function code(x, y)
    tmp = 0.0
    if (y <= -1.5e+111) || !(y <= 3.6e+20)
        tmp = Float64(x - Float64(2.0 / x))
    else
        tmp = Float64(x - y)
    end
    return tmp
end
% Piecewise approximation of x - y/(1 + x*y/2): for very large |y|
% (the ~(y <= ...) form also catches NaN) return x - 2/x, else x - y.
% Restores the statement separators lost in the dump.
function tmp_2 = code(x, y)
  if ((y <= -1.5e+111) || ~(y <= 3.6e+20))
    tmp = x - (2.0 / x);
  else
    tmp = x - y;
  end
  tmp_2 = tmp;
end
code[x_, y_] := If[Or[LessEqual[y, -1.5e+111], N[Not[LessEqual[y, 3.6e+20]], $MachinePrecision]], N[(x - N[(2.0 / x), $MachinePrecision]), $MachinePrecision], N[(x - y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -1.5 \cdot 10^{+111} \lor \neg \left(y \leq 3.6 \cdot 10^{+20}\right):\\
\;\;\;\;x - \frac{2}{x}\\
\mathbf{else}:\\
\;\;\;\;x - y\\
\end{array}
\end{array}
if y < -1.5e111 or 3.6e20 < y
Initial program 99.9%
Taylor expanded in x around inf
lower-/.f6485.5
Applied rewrites85.5%
if -1.5e111 < y < 3.6e20
Initial program 100.0%
Taylor expanded in y around 0
mul-1-negN/A
unsub-negN/A
lower--.f6496.4
Applied rewrites96.4%
Final simplification92.2%
(FPCore (x y) :precision binary64 (if (<= y 2.4e+156) (- x y) (/ -2.0 x)))
/* Regime-split approximation of x - y/(1 + x*y/2): for moderate y the
 * result is x - y; past the threshold the expression tends to -2/x.
 * (NaN y fails the comparison and falls into the else branch.) */
double code(double x, double y) {
    double result;
    if (y <= 2.4e+156) {
        result = x - y;
    } else {
        result = -2.0 / x;
    }
    return result;
}
!> Regime-split approximation of x - y/(1 + x*y/2): for moderate y the
!> result is x - y; past the threshold the expression tends to -2/x.
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (y <= 2.4d+156) then
    tmp = x - y
else
    tmp = (-2.0d0) / x
end if
code = tmp
end function code
/**
 * Regime-split approximation of x - y/(1 + x*y/2): for moderate y
 * returns x - y; past the threshold the expression tends to -2/x.
 */
public static double code(double x, double y) {
    return (y <= 2.4e+156) ? x - y : -2.0 / x;
}
def code(x, y):
    """Regime-split approximation of x - y/(1 + x*y/2).

    For moderate y return x - y; past the threshold return -2/x
    (NaN y fails the comparison and takes the else branch).

    Note: the source line had the if/else collapsed onto one line and was
    not valid Python; this restores the intended statement structure.
    """
    if y <= 2.4e+156:
        tmp = x - y
    else:
        tmp = -2.0 / x
    return tmp
# Regime-split approximation of x - y/(1 + x*y/2): for moderate y return
# x - y; past the threshold return -2/x.
# Restores the newlines lost in the dump; the collapsed form did not parse.
function code(x, y)
    tmp = 0.0
    if y <= 2.4e+156
        tmp = Float64(x - y)
    else
        tmp = Float64(-2.0 / x)
    end
    return tmp
end
% Regime-split approximation of x - y/(1 + x*y/2): for moderate y return
% x - y; past the threshold return -2/x.
% Restores the statement separators lost in the dump.
function tmp_2 = code(x, y)
  if (y <= 2.4e+156)
    tmp = x - y;
  else
    tmp = -2.0 / x;
  end
  tmp_2 = tmp;
end
code[x_, y_] := If[LessEqual[y, 2.4e+156], N[(x - y), $MachinePrecision], N[(-2.0 / x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 2.4 \cdot 10^{+156}:\\
\;\;\;\;x - y\\
\mathbf{else}:\\
\;\;\;\;\frac{-2}{x}\\
\end{array}
\end{array}
if y < 2.4000000000000001e156
Initial program 100.0%
Taylor expanded in y around 0
mul-1-negN/A
unsub-negN/A
lower--.f6481.0
Applied rewrites81.0%
if 2.4000000000000001e156 < y
Initial program 99.9%
Taylor expanded in x around inf
sub-negN/A
distribute-rgt-inN/A
*-lft-identityN/A
+-commutativeN/A
lower-fma.f64N/A
associate-*r/N/A
metadata-evalN/A
distribute-neg-fracN/A
lower-/.f64N/A
metadata-evalN/A
unpow2N/A
lower-*.f6493.9
Applied rewrites93.9%
Taylor expanded in x around 0
Applied rewrites36.9%
(FPCore (x y) :precision binary64 (- x y))
double code(double x, double y) {
return x - y;
}
!> First-order approximation: the quotient term is dropped entirely,
!> leaving x - y.
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x - y
end function code
public static double code(double x, double y) {
return x - y;
}
def code(x, y):
    """Return the difference x - y (quotient term dropped)."""
    difference = x - y
    return difference
function code(x, y) return Float64(x - y) end
function tmp = code(x, y) tmp = x - y; end
code[x_, y_] := N[(x - y), $MachinePrecision]
\begin{array}{l}
\\
x - y
\end{array}
Initial program 99.9%
Taylor expanded in y around 0
mul-1-negN/A
unsub-negN/A
lower--.f6473.1
Applied rewrites73.1%
(FPCore (x y) :precision binary64 (- y))
double code(double x, double y) {
return -y;
}
!> Degenerate approximation valid near x = 0: returns -y; x is unused.
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = -y
end function code
public static double code(double x, double y) {
return -y;
}
def code(x, y):
    """Return -y; x is unused in this degenerate approximation."""
    negated = -y
    return negated
function code(x, y) return Float64(-y) end
function tmp = code(x, y) tmp = -y; end
code[x_, y_] := (-y)
\begin{array}{l}
\\
-y
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
mul-1-negN/A
lower-neg.f6426.2
Applied rewrites26.2%
herbie shell --seed 2024313
(FPCore (x y)
:name "Data.Number.Erf:$cinvnormcdf from erf-2.0.0.0, B"
:precision binary64
(- x (/ y (+ 1.0 (/ (* x y) 2.0)))))