
(FPCore (x y) :precision binary64 (- x (/ y (+ 1.0 (/ (* x y) 2.0)))))
/* Initial program: x - y / (1 + x*y/2), evaluated in binary64. */
double code(double x, double y) {
    double half_prod = (x * y) / 2.0;   /* x*y/2, rounded once */
    double denom = 1.0 + half_prod;
    return x - y / denom;
}
! Initial program: x - y / (1 + x*y/2), evaluated in double precision.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  real(8) :: denom
  denom = 1.0d0 + ((x * y) / 2.0d0)
  code = x - (y / denom)
end function
/** Initial program: x - y / (1 + x*y/2), evaluated in double precision. */
public static double code(double x, double y) {
    final double denominator = 1.0 + (x * y) / 2.0;
    return x - y / denominator;
}
def code(x, y): return x - (y / (1.0 + ((x * y) / 2.0)))
function code(x, y) return Float64(x - Float64(y / Float64(1.0 + Float64(Float64(x * y) / 2.0)))) end
function tmp = code(x, y) tmp = x - (y / (1.0 + ((x * y) / 2.0))); end
code[x_, y_] := N[(x - N[(y / N[(1.0 + N[(N[(x * y), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{1 + \frac{x \cdot y}{2}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (- x (/ y (+ 1.0 (/ (* x y) 2.0)))))
/* Duplicate listing of the initial program: x - y/(1 + x*y/2). */
double code(double x, double y) {
return x - (y / (1.0 + ((x * y) / 2.0)));
}
! Duplicate listing of the initial program: x - y/(1 + x*y/2).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x - (y / (1.0d0 + ((x * y) / 2.0d0)))
end function
/** Duplicate listing of the initial program: x - y/(1 + x*y/2). */
public static double code(double x, double y) {
return x - (y / (1.0 + ((x * y) / 2.0)));
}
def code(x, y): return x - (y / (1.0 + ((x * y) / 2.0)))
function code(x, y) return Float64(x - Float64(y / Float64(1.0 + Float64(Float64(x * y) / 2.0)))) end
function tmp = code(x, y) tmp = x - (y / (1.0 + ((x * y) / 2.0))); end
code[x_, y_] := N[(x - N[(y / N[(1.0 + N[(N[(x * y), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{1 + \frac{x \cdot y}{2}}
\end{array}
(FPCore (x y) :precision binary64 (- x (/ y (fma (* y 0.5) x 1.0))))
double code(double x, double y) {
return x - (y / fma((y * 0.5), x, 1.0));
}
function code(x, y) return Float64(x - Float64(y / fma(Float64(y * 0.5), x, 1.0))) end
code[x_, y_] := N[(x - N[(y / N[(N[(y * 0.5), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{y}{\mathsf{fma}\left(y \cdot 0.5, x, 1\right)}
\end{array}
Initial program 99.9%
lift-*.f64 N/A
lift-/.f64 N/A
+-commutative N/A
lift-/.f64 N/A
lift-*.f64 N/A
associate-/l* N/A
*-commutative N/A
lower-fma.f64 N/A
div-inv N/A
lower-*.f64 N/A
metadata-eval 99.9
Applied rewrites99.9%
(FPCore (x y) :precision binary64 (let* ((t_0 (- x (/ 2.0 x)))) (if (<= y -1e+79) t_0 (if (<= y 1.44e+159) (- x y) t_0))))
/* Piecewise alternative: x - y for -1e79 < y <= 1.44e159, else x - 2/x.
 * (A NaN y falls through both comparisons and also yields x - 2/x.) */
double code(double x, double y) {
    double fallback = x - (2.0 / x);
    if (y <= -1e+79) {
        return fallback;
    }
    if (y <= 1.44e+159) {
        return x - y;
    }
    return fallback;
}
! Piecewise alternative: x - y in the central range of y, x - 2/x otherwise.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  real(8) :: fallback
  fallback = x - (2.0d0 / x)
  if (y <= (-1d+79)) then
    code = fallback
  else if (y <= 1.44d+159) then
    code = x - y
  else
    code = fallback
  end if
end function
/**
 * Piecewise alternative: x - y for -1e79 &lt; y &lt;= 1.44e159, else x - 2/x.
 * A NaN y fails both comparisons and therefore also yields x - 2/x.
 */
public static double code(double x, double y) {
    final double fallback = x - 2.0 / x;
    if (y > -1e+79 && y <= 1.44e+159) {
        return x - y;
    }
    return fallback;
}
def code(x, y): t_0 = x - (2.0 / x) tmp = 0 if y <= -1e+79: tmp = t_0 elif y <= 1.44e+159: tmp = x - y else: tmp = t_0 return tmp
function code(x, y) t_0 = Float64(x - Float64(2.0 / x)) tmp = 0.0 if (y <= -1e+79) tmp = t_0; elseif (y <= 1.44e+159) tmp = Float64(x - y); else tmp = t_0; end return tmp end
function tmp_2 = code(x, y) t_0 = x - (2.0 / x); tmp = 0.0; if (y <= -1e+79) tmp = t_0; elseif (y <= 1.44e+159) tmp = x - y; else tmp = t_0; end tmp_2 = tmp; end
code[x_, y_] := Block[{t$95$0 = N[(x - N[(2.0 / x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[y, -1e+79], t$95$0, If[LessEqual[y, 1.44e+159], N[(x - y), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x - \frac{2}{x}\\
\mathbf{if}\;y \leq -1 \cdot 10^{+79}:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;y \leq 1.44 \cdot 10^{+159}:\\
\;\;\;\;x - y\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if y < -9.99999999999999967e78 or 1.43999999999999995e159 < y Initial program 99.8%
Taylor expanded in y around inf
lower-/.f64 85.3
Applied rewrites85.3%
if -9.99999999999999967e78 < y < 1.43999999999999995e159 Initial program 100.0%
Taylor expanded in y around 0
mul-1-neg N/A
unsub-neg N/A
lower--.f64 95.3
Applied rewrites95.3%
(FPCore (x y) :precision binary64 (if (<= y -5.8e+158) (/ -2.0 x) (- x y)))
/* Alternative: -2/x once y is hugely negative, plain x - y otherwise. */
double code(double x, double y) {
    if (y <= -5.8e+158) {
        return -2.0 / x;
    }
    return x - y;
}
! Alternative: -2/x once y is hugely negative, plain x - y otherwise.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  if (y <= (-5.8d+158)) then
    code = (-2.0d0) / x
  else
    code = x - y
  end if
end function
/** Alternative: -2/x once y is hugely negative, plain x - y otherwise. */
public static double code(double x, double y) {
    return (y <= -5.8e+158) ? (-2.0 / x) : (x - y);
}
def code(x, y): tmp = 0 if y <= -5.8e+158: tmp = -2.0 / x else: tmp = x - y return tmp
function code(x, y) tmp = 0.0 if (y <= -5.8e+158) tmp = Float64(-2.0 / x); else tmp = Float64(x - y); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if (y <= -5.8e+158) tmp = -2.0 / x; else tmp = x - y; end tmp_2 = tmp; end
code[x_, y_] := If[LessEqual[y, -5.8e+158], N[(-2.0 / x), $MachinePrecision], N[(x - y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -5.8 \cdot 10^{+158}:\\
\;\;\;\;\frac{-2}{x}\\
\mathbf{else}:\\
\;\;\;\;x - y\\
\end{array}
\end{array}
if y < -5.80000000000000048e158 Initial program 99.7%
Taylor expanded in y around inf
lower-/.f64 89.1
Applied rewrites89.1%
Taylor expanded in x around 0
lower-/.f64 48.5
Applied rewrites48.5%
if -5.80000000000000048e158 < y Initial program 100.0%
Taylor expanded in y around 0
mul-1-neg N/A
unsub-neg N/A
lower--.f64 84.9
Applied rewrites84.9%
(FPCore (x y) :precision binary64 (- x y))
/* Alternative: plain double-precision subtraction. */
double code(double x, double y) {
    double difference = x - y;
    return difference;
}
! Alternative: plain double-precision subtraction.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  code = x - y
end function
/** Alternative: plain double-precision subtraction. */
public static double code(double x, double y) {
    final double difference = x - y;
    return difference;
}
def code(x, y): return x - y
function code(x, y) return Float64(x - y) end
function tmp = code(x, y) tmp = x - y; end
code[x_, y_] := N[(x - y), $MachinePrecision]
\begin{array}{l}
\\
x - y
\end{array}
Initial program 99.9%
Taylor expanded in y around 0
mul-1-neg N/A
unsub-neg N/A
lower--.f64 77.0
Applied rewrites77.0%
(FPCore (x y) :precision binary64 (- y))
/* Alternative: negation of y; x is accepted for interface parity but unused. */
double code(double x, double y) {
    (void) x;  /* intentionally unused */
    double negated = -y;
    return negated;
}
! Alternative: negation of y; x is accepted for interface parity but unused.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  code = -y
end function
/** Alternative: negation of y; x is accepted for interface parity but unused. */
public static double code(double x, double y) {
    final double negated = -y;
    return negated;
}
def code(x, y): return -y
function code(x, y) return Float64(-y) end
function tmp = code(x, y) tmp = -y; end
code[x_, y_] := (-y)
\begin{array}{l}
\\
-y
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
mul-1-neg N/A
lower-neg.f64 30.7
Applied rewrites30.7%
herbie shell --seed 2024214
(FPCore (x y)
:name "Data.Number.Erf:$cinvnormcdf from erf-2.0.0.0, B"
:precision binary64
(- x (/ y (+ 1.0 (/ (* x y) 2.0)))))