
; Initial program: (x + y) / (x - y), evaluated in IEEE binary64.
(FPCore (x y) :precision binary64 (/ (+ x y) (- x y)))
/* Compute (x + y) / (x - y) in double precision. */
double code(double x, double y) {
    double sum = x + y;
    double diff = x - y;
    return sum / diff;
}
! Compute (x + y) / (x - y) in double precision (binary64).
! NOTE(review): cancellation in (x - y) when x is close to y.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x + y) / (x - y)
end function
// Compute (x + y) / (x - y) in double precision.
public static double code(double x, double y) {
    final double sum = x + y;
    final double diff = x - y;
    return sum / diff;
}
def code(x, y):
    """Return (x + y) / (x - y)."""
    numerator = x + y
    denominator = x - y
    return numerator / denominator
# Compute Float64((x + y) / (x - y)), each operation rounded to Float64.
function code(x, y) return Float64(Float64(x + y) / Float64(x - y)) end
% Compute (x + y) / (x - y).
function tmp = code(x, y) tmp = (x + y) / (x - y); end
(* Compute (x + y)/(x - y), rounding each intermediate to $MachinePrecision. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{x - y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative: identical to the initial program (x + y) / (x - y).
(FPCore (x y) :precision binary64 (/ (+ x y) (- x y)))
/* Alternative: same as the initial program, (x + y) / (x - y). */
double code(double x, double y) {
return (x + y) / (x - y);
}
! Alternative: same as the initial program, (x + y) / (x - y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x + y) / (x - y)
end function
// Alternative: same as the initial program, (x + y) / (x - y).
public static double code(double x, double y) {
return (x + y) / (x - y);
}
# Alternative: same as the initial program, (x + y) / (x - y).
def code(x, y): return (x + y) / (x - y)
# Alternative: same as the initial program, (x + y) / (x - y).
function code(x, y) return Float64(Float64(x + y) / Float64(x - y)) end
% Alternative: same as the initial program, (x + y) / (x - y).
function tmp = code(x, y) tmp = (x + y) / (x - y); end
(* Alternative: same as the initial program, (x + y)/(x - y). *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{x - y}
\end{array}
; Reciprocal rewriting: 1 / ((x - y) / (x + y)), algebraically equal to (x+y)/(x-y).
(FPCore (x y) :precision binary64 (/ 1.0 (/ (- x y) (+ x y))))
/* Reciprocal form: 1 / ((x - y) / (x + y)). */
double code(double x, double y) {
return 1.0 / ((x - y) / (x + y));
}
! Reciprocal form: 1 / ((x - y) / (x + y)).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 / ((x - y) / (x + y))
end function
// Reciprocal form: 1 / ((x - y) / (x + y)).
public static double code(double x, double y) {
return 1.0 / ((x - y) / (x + y));
}
def code(x, y):
    """Return 1 / ((x - y) / (x + y)), a reciprocal rewriting of (x+y)/(x-y)."""
    ratio = (x - y) / (x + y)
    return 1.0 / ratio
# Reciprocal form: 1 / ((x - y) / (x + y)).
function code(x, y) return Float64(1.0 / Float64(Float64(x - y) / Float64(x + y))) end
% Reciprocal form: 1 / ((x - y) / (x + y)).
function tmp = code(x, y) tmp = 1.0 / ((x - y) / (x + y)); end
(* Reciprocal form: 1 / ((x - y)/(x + y)), rounded at each step. *)
code[x_, y_] := N[(1.0 / N[(N[(x - y), $MachinePrecision] / N[(x + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{x - y}{x + y}}
\end{array}
Initial program 100.0%
add-cbrt-cube 100.0%
pow3 100.0%
Applied egg-rr 100.0%
rem-cbrt-cube 100.0%
clear-num 100.0%
Applied egg-rr 100.0%
Final simplification 100.0%
; Piecewise: for y <= -2.6e14 or y > 4.8e34 use the expansion -2*(x/y) - 1, else 1.
(FPCore (x y) :precision binary64 (if (or (<= y -2.6e+14) (not (<= y 4.8e+34))) (+ (* -2.0 (/ x y)) -1.0) 1.0))
/* Piecewise approximation of (x + y) / (x - y):
 * -2*(x/y) - 1 when y <= -2.6e14 or y > 4.8e34, otherwise 1. */
double code(double x, double y) {
    int large_magnitude = (y <= -2.6e+14) || !(y <= 4.8e+34);
    if (!large_magnitude) {
        return 1.0;
    }
    return (-2.0 * (x / y)) + -1.0;
}
! Piecewise approximation of (x + y) / (x - y):
! -2*(x/y) - 1 when y <= -2.6d14 or y > 4.8d34, otherwise 1.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((y <= (-2.6d+14)) .or. (.not. (y <= 4.8d+34))) then
tmp = ((-2.0d0) * (x / y)) + (-1.0d0)
else
tmp = 1.0d0
end if
code = tmp
end function
// Piecewise approximation of (x + y) / (x - y):
// -2*(x/y) - 1 when y <= -2.6e14 or y > 4.8e34, otherwise 1.
public static double code(double x, double y) {
    final boolean largeMagnitude = (y <= -2.6e+14) || !(y <= 4.8e+34);
    if (!largeMagnitude) {
        return 1.0;
    }
    return (-2.0 * (x / y)) + -1.0;
}
def code(x, y):
    """Piecewise approximation of (x + y) / (x - y).

    For y <= -2.6e14 or y > 4.8e34 the ratio is approximated by its
    expansion -2*(x/y) - 1; otherwise it is approximated by 1.
    (Fix: the generated body had been collapsed onto one line, which is
    invalid Python syntax; restored proper block structure.)
    """
    tmp = 0
    if (y <= -2.6e+14) or not (y <= 4.8e+34):
        tmp = (-2.0 * (x / y)) + -1.0
    else:
        tmp = 1.0
    return tmp
# Piecewise approximation of (x + y) / (x - y):
# -2*(x/y) - 1 when y <= -2.6e14 or y > 4.8e34, otherwise 1.0.
# (Fix: statements had been collapsed onto one line with no separators,
# which does not parse; restored proper block structure.)
function code(x, y)
    tmp = 0.0
    if (y <= -2.6e+14) || !(y <= 4.8e+34)
        tmp = Float64(Float64(-2.0 * Float64(x / y)) + -1.0)
    else
        tmp = 1.0
    end
    return tmp
end
% Piecewise approximation of (x + y) / (x - y): -2*(x/y) - 1 for large |y|, else 1.
% NOTE(review): statements appear collapsed onto one line - confirm this parses in MATLAB.
function tmp_2 = code(x, y) tmp = 0.0; if ((y <= -2.6e+14) || ~((y <= 4.8e+34))) tmp = (-2.0 * (x / y)) + -1.0; else tmp = 1.0; end tmp_2 = tmp; end
(* Piecewise: -2*(x/y) - 1 when y <= -2.6e14 or y > 4.8e34, else 1.
   NOTE(review): N[Not[...]] wraps a Boolean in numeric rounding - looks like a
   generator quirk; confirm it evaluates as intended. *)
code[x_, y_] := If[Or[LessEqual[y, -2.6e+14], N[Not[LessEqual[y, 4.8e+34]], $MachinePrecision]], N[(N[(-2.0 * N[(x / y), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision], 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -2.6 \cdot 10^{+14} \lor \neg \left(y \leq 4.8 \cdot 10^{+34}\right):\\
\;\;\;\;-2 \cdot \frac{x}{y} + -1\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if y < -2.6e14 or 4.79999999999999974e34 < y:
Initial program 100.0%
Taylor expanded in x around 0 82.4%
if -2.6e14 < y < 4.79999999999999974e34:
Initial program 100.0%
Taylor expanded in x around inf 78.5%
Final simplification 80.4%
; Alternative: identical to the initial program (x + y) / (x - y).
(FPCore (x y) :precision binary64 (/ (+ x y) (- x y)))
/* Alternative: same as the initial program, (x + y) / (x - y). */
double code(double x, double y) {
return (x + y) / (x - y);
}
! Alternative: same as the initial program, (x + y) / (x - y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x + y) / (x - y)
end function
// Alternative: same as the initial program, (x + y) / (x - y).
public static double code(double x, double y) {
return (x + y) / (x - y);
}
# Alternative: same as the initial program, (x + y) / (x - y).
def code(x, y): return (x + y) / (x - y)
# Alternative: same as the initial program, (x + y) / (x - y).
function code(x, y) return Float64(Float64(x + y) / Float64(x - y)) end
% Alternative: same as the initial program, (x + y) / (x - y).
function tmp = code(x, y) tmp = (x + y) / (x - y); end
(* Alternative: same as the initial program, (x + y)/(x - y). *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{x - y}
\end{array}
Initial program 100.0%
Final simplification 100.0%
; Constant piecewise approximation: -1 for y <= -4e14, 1 for y <= 7.4e33, else -1.
(FPCore (x y) :precision binary64 (if (<= y -4e+14) -1.0 (if (<= y 7.4e+33) 1.0 -1.0)))
/* Constant piecewise approximation of (x + y) / (x - y):
 * -1 for y <= -4e14, 1 for y <= 7.4e33, -1 otherwise. */
double code(double x, double y) {
    if (y <= -4e+14) {
        return -1.0;
    }
    if (y <= 7.4e+33) {
        return 1.0;
    }
    return -1.0;
}
! Constant piecewise approximation of (x + y) / (x - y):
! -1 for y <= -4d14, 1 for y <= 7.4d33, -1 otherwise. x is unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (y <= (-4d+14)) then
tmp = -1.0d0
else if (y <= 7.4d+33) then
tmp = 1.0d0
else
tmp = -1.0d0
end if
code = tmp
end function
// Constant piecewise approximation of (x + y) / (x - y):
// -1 for y <= -4e14, 1 for y <= 7.4e33, -1 otherwise.
public static double code(double x, double y) {
    if (y <= -4e+14) {
        return -1.0;
    }
    if (y <= 7.4e+33) {
        return 1.0;
    }
    return -1.0;
}
def code(x, y):
    """Constant piecewise approximation of (x + y) / (x - y).

    Returns -1 for y <= -4e14, 1 for y <= 7.4e33, and -1 otherwise.
    (Fix: the generated body had been collapsed onto one line, which is
    invalid Python syntax; restored proper block structure.)
    """
    tmp = 0
    if y <= -4e+14:
        tmp = -1.0
    elif y <= 7.4e+33:
        tmp = 1.0
    else:
        tmp = -1.0
    return tmp
# Constant piecewise approximation of (x + y) / (x - y):
# -1 for y <= -4e14, 1 for y <= 7.4e33, -1 otherwise.
# (Fix: statements had been collapsed onto one line with no separators,
# which does not parse; restored proper block structure.)
function code(x, y)
    tmp = 0.0
    if y <= -4e+14
        tmp = -1.0
    elseif y <= 7.4e+33
        tmp = 1.0
    else
        tmp = -1.0
    end
    return tmp
end
% Constant piecewise approximation: -1 for y <= -4e14, 1 for y <= 7.4e33, else -1.
% NOTE(review): statements appear collapsed onto one line - confirm this parses in MATLAB.
function tmp_2 = code(x, y) tmp = 0.0; if (y <= -4e+14) tmp = -1.0; elseif (y <= 7.4e+33) tmp = 1.0; else tmp = -1.0; end tmp_2 = tmp; end
(* Constant piecewise approximation: -1 for y <= -4e14, 1 for y <= 7.4e33, else -1. *)
code[x_, y_] := If[LessEqual[y, -4e+14], -1.0, If[LessEqual[y, 7.4e+33], 1.0, -1.0]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -4 \cdot 10^{+14}:\\
\;\;\;\;-1\\
\mathbf{elif}\;y \leq 7.4 \cdot 10^{+33}:\\
\;\;\;\;1\\
\mathbf{else}:\\
\;\;\;\;-1\\
\end{array}
\end{array}
if y < -4e14 or 7.3999999999999997e33 < y:
Initial program 100.0%
Taylor expanded in x around 0 81.4%
if -4e14 < y < 7.3999999999999997e33:
Initial program 100.0%
Taylor expanded in x around inf 78.5%
Final simplification 79.9%
; Degenerate alternative: the constant -1 (both arguments ignored).
(FPCore (x y) :precision binary64 -1.0)
/* Degenerate alternative: always returns -1.0; x and y are ignored. */
double code(double x, double y) {
return -1.0;
}
! Degenerate alternative: always returns -1.0; x and y are ignored.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = -1.0d0
end function
// Degenerate alternative: always returns -1.0; x and y are ignored.
public static double code(double x, double y) {
return -1.0;
}
def code(x, y):
    """Degenerate alternative: always return -1.0 (arguments ignored)."""
    result = -1.0
    return result
# Degenerate alternative: always returns -1.0; x and y are ignored.
function code(x, y) return -1.0 end
% Degenerate alternative: always returns -1.0; x and y are ignored.
function tmp = code(x, y) tmp = -1.0; end
(* Degenerate alternative: always -1.0; x and y are ignored. *)
code[x_, y_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 50.7%
Final simplification 50.7%
; Target form: 1 / (x/(x+y) - y/(x+y)), algebraically equal to (x+y)/(x-y).
(FPCore (x y) :precision binary64 (/ 1.0 (- (/ x (+ x y)) (/ y (+ x y)))))
/* Target form: 1 / (x/(x+y) - y/(x+y)), a rewriting of (x + y) / (x - y). */
double code(double x, double y) {
    double s = x + y;
    double diff = (x / s) - (y / s);
    return 1.0 / diff;
}
! Target form: 1 / (x/(x+y) - y/(x+y)), a rewriting of (x + y) / (x - y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 / ((x / (x + y)) - (y / (x + y)))
end function
// Target form: 1 / (x/(x+y) - y/(x+y)), a rewriting of (x + y) / (x - y).
public static double code(double x, double y) {
return 1.0 / ((x / (x + y)) - (y / (x + y)));
}
def code(x, y):
    """Return 1 / (x/(x+y) - y/(x+y)), a rewriting of (x + y) / (x - y)."""
    total = x + y
    return 1.0 / ((x / total) - (y / total))
# Target form: 1 / (x/(x+y) - y/(x+y)), a rewriting of (x + y) / (x - y).
function code(x, y) return Float64(1.0 / Float64(Float64(x / Float64(x + y)) - Float64(y / Float64(x + y)))) end
% Target form: 1 / (x/(x+y) - y/(x+y)), a rewriting of (x + y) / (x - y).
function tmp = code(x, y) tmp = 1.0 / ((x / (x + y)) - (y / (x + y))); end
(* Target form: 1 / (x/(x+y) - y/(x+y)), rounded at each step. *)
code[x_, y_] := N[(1.0 / N[(N[(x / N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(y / N[(x + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{x}{x + y} - \frac{y}{x + y}}
\end{array}
herbie shell --seed 2023195
; Herbie job specification: the named input program with its accuracy target
; (:herbie-target) used for comparison in the report above.
(FPCore (x y)
:name "Linear.Projection:perspective from linear-1.19.1.3, A"
:precision binary64
:herbie-target
(/ 1.0 (- (/ x (+ x y)) (/ y (+ x y))))
(/ (+ x y) (- x y)))