
(FPCore (x y) :precision binary64 (/ (+ x y) (- x y)))
/* Direct evaluation of (x + y) / (x - y) in binary64. */
double code(double x, double y) {
    const double numer = x + y;
    const double denom = x - y;
    return numer / denom;
}
! Direct evaluation of (x + y) / (x - y) in double precision.
real(8) function code(x, y)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: numer, denom
    numer = x + y
    denom = x - y
    code = numer / denom
end function
// Direct evaluation of (x + y) / (x - y).
public static double code(double x, double y) {
    final double sum = x + y;
    final double diff = x - y;
    return sum / diff;
}
def code(x, y):
    """Return (x + y) / (x - y)."""
    numer = x + y
    denom = x - y
    return numer / denom
# (x + y) / (x - y), with each operation rounded to Float64.
function code(x, y)
    numer = Float64(x + y)
    denom = Float64(x - y)
    return Float64(numer / denom)
end
% Compute (x + y) / (x - y).
% Fix: the extracted one-line form fused the function declaration with its
% body, which is invalid MATLAB; restored to multi-line form.
function tmp = code(x, y)
  tmp = (x + y) / (x - y);
end
(* (x + y)/(x - y); each intermediate is rounded to $MachinePrecision via N. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{x - y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (/ (+ x y) (- x y)))
/* Quotient of sum and difference: (x + y) / (x - y). */
double code(double x, double y) {
    const double s = x + y;
    const double d = x - y;
    return s / d;
}
! Quotient of sum and difference: (x + y) / (x - y).
real(8) function code(x, y)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: s, d
    s = x + y
    d = x - y
    code = s / d
end function
// Quotient of sum and difference: (x + y) / (x - y).
public static double code(double x, double y) {
    double s = x + y;
    double d = x - y;
    return s / d;
}
def code(x, y):
    """Quotient of sum and difference: (x + y) / (x - y)."""
    return (x + y) / (x - y)
# Quotient of sum and difference, Float64-rounded at each step.
function code(x, y)
    s = Float64(x + y)
    d = Float64(x - y)
    return Float64(s / d)
end
% Compute (x + y) / (x - y).
% Fix: restored multi-line form; the one-line declaration+body is invalid MATLAB.
function tmp = code(x, y)
  tmp = (x + y) / (x - y);
end
(* (x + y)/(x - y); each intermediate is rounded to $MachinePrecision via N. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{x - y}
\end{array}
(FPCore (x y) :precision binary64 (+ (/ y (- x y)) (/ x (- x y))))
/* Distributed form: y/(x-y) + x/(x-y); same divisor in both terms. */
double code(double x, double y) {
    const double denom = x - y;
    return (y / denom) + (x / denom);
}
! Distributed form: y/(x-y) + x/(x-y).
real(8) function code(x, y)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: denom
    denom = x - y
    code = (y / denom) + (x / denom)
end function
// Distributed form: y/(x-y) + x/(x-y).
public static double code(double x, double y) {
    final double denom = x - y;
    return (y / denom) + (x / denom);
}
def code(x, y):
    """Distributed form: y/(x - y) + x/(x - y)."""
    denom = x - y
    return (y / denom) + (x / denom)
# Distributed form: y/(x-y) + x/(x-y), Float64-rounded per step.
function code(x, y)
    denom = Float64(x - y)
    lhs = Float64(y / denom)
    rhs = Float64(x / denom)
    return Float64(lhs + rhs)
end
% Distributed form: y/(x-y) + x/(x-y).
% Fix: restored multi-line form; the one-line declaration+body is invalid MATLAB.
function tmp = code(x, y)
  tmp = (y / (x - y)) + (x / (x - y));
end
(* y/(x - y) + x/(x - y); every intermediate rounded to $MachinePrecision. *)
code[x_, y_] := N[(N[(y / N[(x - y), $MachinePrecision]), $MachinePrecision] + N[(x / N[(x - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{y}{x - y} + \frac{x}{x - y}
\end{array}
Initial program 100.0%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
lift--.f64N/A
associate-/l/N/A
difference-of-squaresN/A
lift-+.f64N/A
lift--.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f64N/A
lower-*.f6453.0
Applied rewrites53.0%
lift-*.f64N/A
*-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
*-inversesN/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
*-inversesN/A
div-invN/A
lower-+.f64N/A
lower-/.f64N/A
lower-/.f64100.0
Applied rewrites100.0%
(FPCore (x y) :precision binary64 (if (<= (/ (+ y x) (- x y)) -0.5) (fma -2.0 (/ x y) -1.0) (+ 1.0 (/ (+ y y) x))))
double code(double x, double y) {
double tmp;
if (((y + x) / (x - y)) <= -0.5) {
tmp = fma(-2.0, (x / y), -1.0);
} else {
tmp = 1.0 + ((y + y) / x);
}
return tmp;
}
# Piecewise form: fma branch near the pole, series branch elsewhere.
# Fix: the extracted one-line form ran statements together without
# separators, which is invalid Julia; restored to multi-line form.
function code(x, y)
    tmp = 0.0
    if Float64(Float64(y + x) / Float64(x - y)) <= -0.5
        tmp = fma(-2.0, Float64(x / y), -1.0)
    else
        tmp = Float64(1.0 + Float64(Float64(y + y) / x))
    end
    return tmp
end
(* Piecewise: -2*(x/y) - 1 when (y+x)/(x-y) <= -0.5, else 1 + 2y/x. *)
code[x_, y_] := If[LessEqual[N[(N[(y + x), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision], -0.5], N[(-2.0 * N[(x / y), $MachinePrecision] + -1.0), $MachinePrecision], N[(1.0 + N[(N[(y + y), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{y + x}{x - y} \leq -0.5:\\
\;\;\;\;\mathsf{fma}\left(-2, \frac{x}{y}, -1\right)\\
\mathbf{else}:\\
\;\;\;\;1 + \frac{y + y}{x}\\
\end{array}
\end{array}
if (/.f64 (+.f64 x y) (-.f64 x y)) < -0.5
Initial program 99.9%
Taylor expanded in x around 0
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
lower-/.f6497.4
Applied rewrites97.4%
if -0.5 < (/.f64 (+.f64 x y) (-.f64 x y)) Initial program 100.0%
Taylor expanded in x around inf
associate--l+N/A
associate-*r/N/A
div-subN/A
*-lft-identityN/A
distribute-rgt-out--N/A
metadata-evalN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
metadata-evalN/A
distribute-rgt-out--N/A
*-lft-identityN/A
lower-/.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6499.6
Applied rewrites99.6%
Final simplification98.4%
(FPCore (x y) :precision binary64 (if (<= (/ (+ y x) (- x y)) -0.5) (/ (+ y x) (- y)) (+ 1.0 (/ (+ y y) x))))
/* Piecewise form: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else 1 + 2y/x. */
double code(double x, double y) {
    const double ratio = (y + x) / (x - y);
    if (ratio <= -0.5) {
        return (y + x) / -y;
    }
    return 1.0 + ((y + y) / x);
}
! Piecewise form: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else 1 + 2y/x.
real(8) function code(x, y)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: ratio
    ratio = (y + x) / (x - y)
    if (ratio <= (-0.5d0)) then
        code = (y + x) / (-y)
    else
        code = 1.0d0 + ((y + y) / x)
    end if
end function
// Piecewise form: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else 1 + 2y/x.
public static double code(double x, double y) {
    double ratio = (y + x) / (x - y);
    if (ratio <= -0.5) {
        return (y + x) / -y;
    }
    return 1.0 + ((y + y) / x);
}
def code(x, y):
    """Piecewise: (y + x)/(-y) when (y + x)/(x - y) <= -0.5, else 1 + 2y/x.

    Fix: the extracted one-line form ran the if/else statements together,
    which is invalid Python syntax; restored to multi-line form.
    """
    tmp = 0
    if ((y + x) / (x - y)) <= -0.5:
        tmp = (y + x) / -y
    else:
        tmp = 1.0 + ((y + y) / x)
    return tmp
# Piecewise: (y+x)/(-y) near the pole, else 1 + 2y/x.
# Fix: restored multi-line form; statements were fused without separators
# (invalid Julia) in the extracted one-line version.
function code(x, y)
    tmp = 0.0
    if Float64(Float64(y + x) / Float64(x - y)) <= -0.5
        tmp = Float64(Float64(y + x) / Float64(-y))
    else
        tmp = Float64(1.0 + Float64(Float64(y + y) / x))
    end
    return tmp
end
% Piecewise: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else 1 + 2y/x.
% Fix: restored multi-line form; the one-line declaration+body is invalid MATLAB.
function tmp_2 = code(x, y)
  tmp = 0.0;
  if (((y + x) / (x - y)) <= -0.5)
    tmp = (y + x) / -y;
  else
    tmp = 1.0 + ((y + y) / x);
  end
  tmp_2 = tmp;
end
(* Piecewise: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else 1 + 2y/x. *)
code[x_, y_] := If[LessEqual[N[(N[(y + x), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(y + x), $MachinePrecision] / (-y)), $MachinePrecision], N[(1.0 + N[(N[(y + y), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{y + x}{x - y} \leq -0.5:\\
\;\;\;\;\frac{y + x}{-y}\\
\mathbf{else}:\\
\;\;\;\;1 + \frac{y + y}{x}\\
\end{array}
\end{array}
if (/.f64 (+.f64 x y) (-.f64 x y)) < -0.5
Initial program 99.9%
Taylor expanded in x around 0
mul-1-negN/A
lower-neg.f6496.8
Applied rewrites96.8%
if -0.5 < (/.f64 (+.f64 x y) (-.f64 x y)) Initial program 100.0%
Taylor expanded in x around inf
associate--l+N/A
associate-*r/N/A
div-subN/A
*-lft-identityN/A
distribute-rgt-out--N/A
metadata-evalN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
metadata-evalN/A
distribute-rgt-out--N/A
*-lft-identityN/A
lower-/.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6499.6
Applied rewrites99.6%
Final simplification98.1%
(FPCore (x y) :precision binary64 (if (<= (/ (+ y x) (- x y)) -0.5) (/ (+ y x) (- y)) 1.0))
/* Piecewise form: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else the constant 1. */
double code(double x, double y) {
    const double ratio = (y + x) / (x - y);
    if (ratio <= -0.5) {
        return (y + x) / -y;
    }
    return 1.0;
}
! Piecewise form: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else 1.
real(8) function code(x, y)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: ratio
    ratio = (y + x) / (x - y)
    if (ratio <= (-0.5d0)) then
        code = (y + x) / (-y)
    else
        code = 1.0d0
    end if
end function
// Piecewise form: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else 1.
public static double code(double x, double y) {
    if (((y + x) / (x - y)) <= -0.5) {
        return (y + x) / -y;
    }
    return 1.0;
}
def code(x, y):
    """Piecewise: (y + x)/(-y) when (y + x)/(x - y) <= -0.5, else 1.0.

    Fix: the extracted one-line form ran the if/else statements together,
    which is invalid Python syntax; restored to multi-line form.
    """
    tmp = 0
    if ((y + x) / (x - y)) <= -0.5:
        tmp = (y + x) / -y
    else:
        tmp = 1.0
    return tmp
# Piecewise: (y+x)/(-y) near the pole, else the constant 1.0.
# Fix: restored multi-line form; the fused one-line version is invalid Julia.
function code(x, y)
    tmp = 0.0
    if Float64(Float64(y + x) / Float64(x - y)) <= -0.5
        tmp = Float64(Float64(y + x) / Float64(-y))
    else
        tmp = 1.0
    end
    return tmp
end
% Piecewise: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else 1.0.
% Fix: restored multi-line form; the one-line declaration+body is invalid MATLAB.
function tmp_2 = code(x, y)
  tmp = 0.0;
  if (((y + x) / (x - y)) <= -0.5)
    tmp = (y + x) / -y;
  else
    tmp = 1.0;
  end
  tmp_2 = tmp;
end
(* Piecewise: (y+x)/(-y) when (y+x)/(x-y) <= -0.5, else the constant 1.0. *)
code[x_, y_] := If[LessEqual[N[(N[(y + x), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(y + x), $MachinePrecision] / (-y)), $MachinePrecision], 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{y + x}{x - y} \leq -0.5:\\
\;\;\;\;\frac{y + x}{-y}\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if (/.f64 (+.f64 x y) (-.f64 x y)) < -0.5
Initial program 99.9%
Taylor expanded in x around 0
mul-1-negN/A
lower-neg.f6496.8
Applied rewrites96.8%
if -0.5 < (/.f64 (+.f64 x y) (-.f64 x y)) Initial program 100.0%
Taylor expanded in x around inf
Applied rewrites98.7%
Final simplification97.7%
(FPCore (x y) :precision binary64 (if (<= (/ (+ y x) (- x y)) -5e-310) -1.0 1.0))
/* Sign-only form: -1 when (y+x)/(x-y) <= -5e-310, else +1. */
double code(double x, double y) {
    return ((y + x) / (x - y)) <= -5e-310 ? -1.0 : 1.0;
}
! Sign-only form: -1 when (y+x)/(x-y) <= -5e-310, else +1.
real(8) function code(x, y)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = merge(-1.0d0, 1.0d0, ((y + x) / (x - y)) <= (-5d-310))
end function
// Sign-only form: -1 when (y+x)/(x-y) <= -5e-310, else +1.
public static double code(double x, double y) {
    return ((y + x) / (x - y)) <= -5e-310 ? -1.0 : 1.0;
}
def code(x, y):
    """Sign-only form: -1.0 when (y + x)/(x - y) <= -5e-310, else 1.0.

    Fix: the extracted one-line form ran the if/else statements together,
    which is invalid Python syntax; restored to multi-line form.
    """
    tmp = 0
    if ((y + x) / (x - y)) <= -5e-310:
        tmp = -1.0
    else:
        tmp = 1.0
    return tmp
# Sign-only form: -1.0 below the (subnormal) threshold, else 1.0.
# Fix: restored multi-line form; the fused one-line version is invalid Julia.
function code(x, y)
    tmp = 0.0
    if Float64(Float64(y + x) / Float64(x - y)) <= -5e-310
        tmp = -1.0
    else
        tmp = 1.0
    end
    return tmp
end
% Sign-only form: -1.0 when (y+x)/(x-y) <= -5e-310, else 1.0.
% Fix: restored multi-line form; the one-line declaration+body is invalid MATLAB.
function tmp_2 = code(x, y)
  tmp = 0.0;
  if (((y + x) / (x - y)) <= -5e-310)
    tmp = -1.0;
  else
    tmp = 1.0;
  end
  tmp_2 = tmp;
end
(* Sign-only form: -1.0 when (y+x)/(x-y) <= -5e-310, else 1.0. *)
code[x_, y_] := If[LessEqual[N[(N[(y + x), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision], -5e-310], -1.0, 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{y + x}{x - y} \leq -5 \cdot 10^{-310}:\\
\;\;\;\;-1\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if (/.f64 (+.f64 x y) (-.f64 x y)) < -4.999999999999985e-310
Initial program 99.9%
Taylor expanded in x around 0
Applied rewrites96.6%
if -4.999999999999985e-310 < (/.f64 (+.f64 x y) (-.f64 x y)) Initial program 100.0%
Taylor expanded in x around inf
Applied rewrites98.7%
Final simplification97.6%
(FPCore (x y) :precision binary64 (/ (+ y x) (- x y)))
/* Commuted numerator: (y + x) / (x - y). */
double code(double x, double y) {
    const double numer = y + x;
    const double denom = x - y;
    return numer / denom;
}
! Commuted numerator: (y + x) / (x - y).
real(8) function code(x, y)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: numer, denom
    numer = y + x
    denom = x - y
    code = numer / denom
end function
// Commuted numerator: (y + x) / (x - y).
public static double code(double x, double y) {
    final double numer = y + x;
    final double denom = x - y;
    return numer / denom;
}
def code(x, y):
    """Commuted numerator: (y + x) / (x - y)."""
    numer = y + x
    denom = x - y
    return numer / denom
# Commuted numerator, Float64-rounded per step.
function code(x, y)
    numer = Float64(y + x)
    denom = Float64(x - y)
    return Float64(numer / denom)
end
% Commuted numerator: (y + x) / (x - y).
% Fix: restored multi-line form; the one-line declaration+body is invalid MATLAB.
function tmp = code(x, y)
  tmp = (y + x) / (x - y);
end
(* Commuted numerator: (y + x)/(x - y), rounded to $MachinePrecision per step. *)
code[x_, y_] := N[(N[(y + x), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{y + x}{x - y}
\end{array}
Initial program 100.0%
Final simplification100.0%
(FPCore (x y) :precision binary64 -1.0)
/* Constant alternative: always -1, independent of the inputs. */
double code(double x, double y) {
    (void) x; /* inputs intentionally unused */
    (void) y;
    return -1.0;
}
! Constant alternative: always returns -1, independent of the inputs.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = -1.0d0
end function
// Constant alternative: always returns -1.0, independent of the inputs.
public static double code(double x, double y) {
return -1.0;
}
def code(x, y):
    """Constant alternative: always -1.0, independent of the inputs."""
    return -1.0
# Constant alternative: always -1.0, independent of the inputs.
function code(x, y)
    return -1.0
end
% Constant alternative: always -1.0.
% Fix: restored multi-line form; the one-line declaration+body is invalid MATLAB.
function tmp = code(x, y)
  tmp = -1.0;
end
(* Constant alternative: always -1.0, independent of the inputs. *)
code[x_, y_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites52.8%
(FPCore (x y) :precision binary64 (/ 1.0 (- (/ x (+ x y)) (/ y (+ x y)))))
/* Reciprocal form: 1 / (x/(x+y) - y/(x+y)); shared divisor x+y. */
double code(double x, double y) {
    const double total = x + y;
    return 1.0 / ((x / total) - (y / total));
}
! Reciprocal form: 1 / (x/(x+y) - y/(x+y)).
real(8) function code(x, y)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: total
    total = x + y
    code = 1.0d0 / ((x / total) - (y / total))
end function
// Reciprocal form: 1 / (x/(x+y) - y/(x+y)).
public static double code(double x, double y) {
    final double total = x + y;
    return 1.0 / ((x / total) - (y / total));
}
def code(x, y):
    """Reciprocal form: 1 / (x/(x + y) - y/(x + y))."""
    total = x + y
    return 1.0 / ((x / total) - (y / total))
# Reciprocal form: 1 / (x/(x+y) - y/(x+y)), Float64-rounded per step.
function code(x, y)
    total = Float64(x + y)
    lhs = Float64(x / total)
    rhs = Float64(y / total)
    return Float64(1.0 / Float64(lhs - rhs))
end
% Reciprocal form: 1 / (x/(x+y) - y/(x+y)).
% Fix: restored multi-line form; the one-line declaration+body is invalid MATLAB.
function tmp = code(x, y)
  tmp = 1.0 / ((x / (x + y)) - (y / (x + y)));
end
(* Reciprocal form: 1/(x/(x+y) - y/(x+y)), rounded to $MachinePrecision per step. *)
code[x_, y_] := N[(1.0 / N[(N[(x / N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(y / N[(x + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{x}{x + y} - \frac{y}{x + y}}
\end{array}
herbie shell --seed 2024233
(FPCore (x y)
:name "Linear.Projection:perspective from linear-1.19.1.3, A"
:precision binary64
:alt
(! :herbie-platform default (/ 1 (- (/ x (+ x y)) (/ y (+ x y)))))
(/ (+ x y) (- x y)))