
(FPCore (x y) :precision binary64 (/ (- x y) (* (* x 2.0) y)))
/* Computes (x - y) / ((x * 2.0) * y) in binary64, preserving the FPCore
 * evaluation order exactly. */
double code(double x, double y) {
    const double diff = x - y;
    const double denom = (x * 2.0) * y;
    return diff / denom;
}
! Computes (x - y) / ((x * 2.0d0) * y) in real(8) arithmetic.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: diff, denom
diff = x - y
denom = (x * 2.0d0) * y
code = diff / denom
end function
/** Computes (x - y) / ((x * 2.0) * y) in double precision. */
public static double code(double x, double y) {
    final double diff = x - y;
    final double denom = (x * 2.0) * y;
    return diff / denom;
}
def code(x, y):
    """Return (x - y) / ((x * 2.0) * y), matching the binary64 FPCore."""
    diff = x - y
    denom = (x * 2.0) * y
    return diff / denom
# (x - y) / ((x * 2.0) * y), rounded to Float64 after every operation.
function code(x, y)
    diff = Float64(x - y)
    denom = Float64(Float64(x * 2.0) * y)
    return Float64(diff / denom)
end
function tmp = code(x, y)
    % (x - y) / ((x * 2.0) * y)
    diff = x - y;
    denom = (x * 2.0) * y;
    tmp = diff / denom;
end
(* (x - y) / ((x * 2.0) * y); each N[..., $MachinePrecision] wrapper models one
   binary64 rounding step, so the nesting order must not be rearranged. *)
code[x_, y_] := N[(N[(x - y), $MachinePrecision] / N[(N[(x * 2.0), $MachinePrecision] * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - y}{\left(x \cdot 2\right) \cdot y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
| --- | --- | --- |
(FPCore (x y) :precision binary64 (/ (- x y) (* (* x 2.0) y)))
/* (x - y) divided by the scaled product (x * 2.0) * y. */
double code(double x, double y) {
    const double scaled = (x * 2.0) * y;
    return (x - y) / scaled;
}
! (x - y) divided by the scaled product (x * 2.0d0) * y.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: scaled
scaled = (x * 2.0d0) * y
code = (x - y) / scaled
end function
/** (x - y) divided by the scaled product (x * 2.0) * y. */
public static double code(double x, double y) {
    final double scaled = (x * 2.0) * y;
    return (x - y) / scaled;
}
def code(x, y):
    """Return (x - y) / ((x * 2.0) * y)."""
    scaled = (x * 2.0) * y
    return (x - y) / scaled
# (x - y) / ((x * 2.0) * y) with step-by-step Float64 rounding.
function code(x, y)
    scaled = Float64(Float64(x * 2.0) * y)
    return Float64(Float64(x - y) / scaled)
end
function tmp = code(x, y)
    % (x - y) / ((x * 2.0) * y)
    scaled = (x * 2.0) * y;
    tmp = (x - y) / scaled;
end
(* (x - y) / ((x * 2.0) * y); the N[..., $MachinePrecision] wrappers model
   per-operation binary64 rounding — keep the nesting order as-is. *)
code[x_, y_] := N[(N[(x - y), $MachinePrecision] / N[(N[(x * 2.0), $MachinePrecision] * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - y}{\left(x \cdot 2\right) \cdot y}
\end{array}
(FPCore (x y) :precision binary64 (+ (/ 0.5 y) (/ -0.5 x)))
/* Rewritten alternative: 0.5/y + (-0.5)/x. */
double code(double x, double y) {
    const double lhs = 0.5 / y;
    const double rhs = -0.5 / x;
    return lhs + rhs;
}
! Rewritten alternative: 0.5/y + (-0.5)/x in real(8) arithmetic.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: lhs, rhs
lhs = 0.5d0 / y
rhs = (-0.5d0) / x
code = lhs + rhs
end function
/** Rewritten alternative: 0.5/y + (-0.5)/x. */
public static double code(double x, double y) {
    final double lhs = 0.5 / y;
    final double rhs = -0.5 / x;
    return lhs + rhs;
}
def code(x, y):
    """Return 0.5/y + (-0.5)/x."""
    lhs = 0.5 / y
    rhs = -0.5 / x
    return lhs + rhs
# 0.5/y + (-0.5)/x, rounded to Float64 at each step.
function code(x, y)
    lhs = Float64(0.5 / y)
    rhs = Float64(-0.5 / x)
    return Float64(lhs + rhs)
end
function tmp = code(x, y)
    % 0.5/y + (-0.5)/x
    lhs = 0.5 / y;
    rhs = -0.5 / x;
    tmp = lhs + rhs;
end
(* 0.5/y + (-0.5)/x; the N[..., $MachinePrecision] wrappers model per-operation
   binary64 rounding. *)
code[x_, y_] := N[(N[(0.5 / y), $MachinePrecision] + N[(-0.5 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{y} + \frac{-0.5}{x}
\end{array}
Initial program 82.3%
remove-double-neg 82.3%
distribute-rgt-neg-out 82.3%
distribute-frac-neg2 82.3%
neg-mul-1 82.3%
div-sub 81.8%
distribute-lft-out-- 81.8%
neg-mul-1 81.8%
distribute-frac-neg2 81.8%
distribute-rgt-neg-out 81.8%
remove-double-neg 81.8%
cancel-sign-sub-inv 81.8%
associate-/r* 86.3%
associate-/r* 86.3%
*-inverses 86.3%
metadata-eval 86.3%
metadata-eval 86.3%
*-lft-identity 86.3%
distribute-rgt-neg-out 86.3%
Simplified 100.0%
(FPCore (x y)
:precision binary64
(if (or (<= x -2.2e+40)
(not
(or (<= x -1.16e-60) (and (not (<= x -3.9e-84)) (<= x 7.1e-12)))))
(/ 0.5 y)
(/ -0.5 x)))
/* Piecewise alternative: 0.5/y when x lies outside the mid band,
 * otherwise -0.5/x. */
double code(double x, double y) {
    const int mid_band =
        (x <= -1.16e-60) || (!(x <= -3.9e-84) && (x <= 7.1e-12));
    if ((x <= -2.2e+40) || !mid_band) {
        return 0.5 / y;
    }
    return -0.5 / x;
}
! Piecewise alternative: 0.5/y on the outer regimes of x, -0.5/x otherwise.
! FIX: the .not. must negate the WHOLE disjunction, as in the C/Java
! translations. Fortran precedence (.not. > .and. > .or.) made the original
! negate only the first comparison, selecting the wrong branch for inputs
! such as x = 1.0d-70.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((x <= (-2.2d+40)) .or. (.not. ((x <= (-1.16d-60)) .or. ((.not. (x <= (-3.9d-84))) .and. (x <= 7.1d-12))))) then
tmp = 0.5d0 / y
else
tmp = (-0.5d0) / x
end if
code = tmp
end function
/** Piecewise alternative: 0.5/y when x lies outside the mid band, else -0.5/x. */
public static double code(double x, double y) {
    final boolean midBand =
        (x <= -1.16e-60) || (!(x <= -3.9e-84) && (x <= 7.1e-12));
    if ((x <= -2.2e+40) || !midBand) {
        return 0.5 / y;
    }
    return -0.5 / x;
}
def code(x, y):
    """Piecewise alternative: 0.5/y on the outer regimes of x, else -0.5/x.

    FIX: the original was flattened onto one line (``def code(x, y): tmp = 0
    if ...: ... else: ... return tmp``), which is not valid Python syntax;
    restored the if/else structure of the C/Java translations.
    """
    if (x <= -2.2e+40) or not ((x <= -1.16e-60) or (not (x <= -3.9e-84) and (x <= 7.1e-12))):
        tmp = 0.5 / y
    else:
        tmp = -0.5 / x
    return tmp
# Piecewise alternative: 0.5/y on the outer regimes of x, -0.5/x otherwise.
# FIX: the original had all statements fused onto one line with no
# newlines/semicolons between them, which Julia does not parse.
function code(x, y)
    tmp = 0.0
    if (x <= -2.2e+40) || !((x <= -1.16e-60) || (!(x <= -3.9e-84) && (x <= 7.1e-12)))
        tmp = Float64(0.5 / y)
    else
        tmp = Float64(-0.5 / x)
    end
    return tmp
end
function tmp_2 = code(x, y)
    % Piecewise alternative: 0.5/y on the outer regimes of x, -0.5/x otherwise.
    % FIX: the original fused the if-condition and its statement on one line
    % with no comma/newline separator, which MATLAB does not parse.
    tmp = 0.0;
    if ((x <= -2.2e+40) || ~(((x <= -1.16e-60) || (~((x <= -3.9e-84)) && (x <= 7.1e-12)))))
        tmp = 0.5 / y;
    else
        tmp = -0.5 / x;
    end
    tmp_2 = tmp;
end
(* Piecewise alternative: 0.5/y on the outer regimes of x, -0.5/x otherwise;
   the N[..., $MachinePrecision] wrappers model binary64 rounding. *)
code[x_, y_] := If[Or[LessEqual[x, -2.2e+40], N[Not[Or[LessEqual[x, -1.16e-60], And[N[Not[LessEqual[x, -3.9e-84]], $MachinePrecision], LessEqual[x, 7.1e-12]]]], $MachinePrecision]], N[(0.5 / y), $MachinePrecision], N[(-0.5 / x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.2 \cdot 10^{+40} \lor \neg \left(x \leq -1.16 \cdot 10^{-60} \lor \neg \left(x \leq -3.9 \cdot 10^{-84}\right) \land x \leq 7.1 \cdot 10^{-12}\right):\\
\;\;\;\;\frac{0.5}{y}\\
\mathbf{else}:\\
\;\;\;\;\frac{-0.5}{x}\\
\end{array}
\end{array}
if x < -2.1999999999999999e40 or -1.16e-60 < x < -3.90000000000000023e-84 or 7.1e-12 < x

Initial program 82.4%
remove-double-neg 82.4%
distribute-rgt-neg-out 82.4%
distribute-frac-neg2 82.4%
neg-mul-1 82.4%
div-sub 82.3%
distribute-lft-out-- 82.3%
neg-mul-1 82.3%
distribute-frac-neg2 82.3%
distribute-rgt-neg-out 82.3%
remove-double-neg 82.3%
cancel-sign-sub-inv 82.3%
associate-/r* 90.4%
associate-/r* 90.4%
*-inverses 90.4%
metadata-eval 90.4%
metadata-eval 90.4%
*-lft-identity 90.4%
distribute-rgt-neg-out 90.4%
Simplified 100.0%
Taylor expanded in y around 0 82.2%
if -2.1999999999999999e40 < x < -1.16e-60 or -3.90000000000000023e-84 < x < 7.1e-12

Initial program 82.1%
remove-double-neg 82.1%
distribute-rgt-neg-out 82.1%
distribute-frac-neg2 82.1%
neg-mul-1 82.1%
div-sub 81.2%
distribute-lft-out-- 81.2%
neg-mul-1 81.2%
distribute-frac-neg2 81.2%
distribute-rgt-neg-out 81.2%
remove-double-neg 81.2%
cancel-sign-sub-inv 81.2%
associate-/r* 81.8%
associate-/r* 81.8%
*-inverses 81.8%
metadata-eval 81.8%
metadata-eval 81.8%
*-lft-identity 81.8%
distribute-rgt-neg-out 81.8%
Simplified 100.0%
Taylor expanded in y around inf 82.5%
Final simplification 82.3%
(FPCore (x y) :precision binary64 (/ -0.5 x))
/* Taylor-expanded alternative: -0.5/x (y drops out of the expansion). */
double code(double x, double y) {
    (void) y;  /* unused, kept so all alternatives share one signature */
    return -0.5 / x;
}
! Taylor-expanded alternative: -0.5/x; y is unused but kept for the interface.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: numerator
numerator = -0.5d0
code = numerator / x
end function
/** Taylor-expanded alternative: -0.5/x (y is unused but kept for the interface). */
public static double code(double x, double y) {
    final double numerator = -0.5;
    return numerator / x;
}
def code(x, y):
    """Return -0.5 / x (y is unused but kept for signature parity)."""
    numerator = -0.5
    return numerator / x
# -0.5/x; y is unused but kept so all alternatives share one signature.
function code(x, y)
    numerator = -0.5
    return Float64(numerator / x)
end
function tmp = code(x, y)
    % -0.5 / x; y is unused but kept for a uniform signature.
    numerator = -0.5;
    tmp = numerator / x;
end
(* Taylor-expanded alternative: -0.5/x rounded to machine precision;
   y is unused but kept so all alternatives share one signature. *)
code[x_, y_] := N[(-0.5 / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{-0.5}{x}
\end{array}
Initial program 82.3%
remove-double-neg 82.3%
distribute-rgt-neg-out 82.3%
distribute-frac-neg2 82.3%
neg-mul-1 82.3%
div-sub 81.8%
distribute-lft-out-- 81.8%
neg-mul-1 81.8%
distribute-frac-neg2 81.8%
distribute-rgt-neg-out 81.8%
remove-double-neg 81.8%
cancel-sign-sub-inv 81.8%
associate-/r* 86.3%
associate-/r* 86.3%
*-inverses 86.3%
metadata-eval 86.3%
metadata-eval 86.3%
*-lft-identity 86.3%
distribute-rgt-neg-out 86.3%
Simplified 100.0%
Taylor expanded in y around inf 49.8%
(FPCore (x y) :precision binary64 (- (/ 0.5 y) (/ 0.5 x)))
/* Preferred rewrite: 0.5/y - 0.5/x. */
double code(double x, double y) {
    const double lhs = 0.5 / y;
    const double rhs = 0.5 / x;
    return lhs - rhs;
}
! Preferred rewrite: 0.5/y - 0.5/x in real(8) arithmetic.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: lhs, rhs
lhs = 0.5d0 / y
rhs = 0.5d0 / x
code = lhs - rhs
end function
/** Preferred rewrite: 0.5/y - 0.5/x. */
public static double code(double x, double y) {
    final double lhs = 0.5 / y;
    final double rhs = 0.5 / x;
    return lhs - rhs;
}
def code(x, y):
    """Return 0.5/y - 0.5/x."""
    lhs = 0.5 / y
    rhs = 0.5 / x
    return lhs - rhs
# 0.5/y - 0.5/x, rounded to Float64 at each step.
function code(x, y)
    lhs = Float64(0.5 / y)
    rhs = Float64(0.5 / x)
    return Float64(lhs - rhs)
end
function tmp = code(x, y)
    % 0.5/y - 0.5/x
    lhs = 0.5 / y;
    rhs = 0.5 / x;
    tmp = lhs - rhs;
end
(* 0.5/y - 0.5/x; the N[..., $MachinePrecision] wrappers model per-operation
   binary64 rounding. *)
code[x_, y_] := N[(N[(0.5 / y), $MachinePrecision] - N[(0.5 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{y} - \frac{0.5}{x}
\end{array}
herbie shell --seed 2024096
(FPCore (x y)
:name "Linear.Projection:inversePerspective from linear-1.19.1.3, B"
:precision binary64
:alt
(- (/ 0.5 y) (/ 0.5 x))
(/ (- x y) (* (* x 2.0) y)))