
(FPCore (x y) :precision binary64 (/ (- x y) (+ x y)))
double code(double x, double y) {
    /* Relative difference of x and y: (x - y) / (x + y). */
    const double num = x - y;
    const double den = x + y;
    return num / den;
}
real(8) function code(x, y)
    ! Relative difference of x and y: (x - y) / (x + y).
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = (x - y) / (x + y)
end function code
public static double code(double x, double y) {
    // Relative difference of x and y: (x - y) / (x + y).
    final double diff = x - y;
    final double sum = x + y;
    return diff / sum;
}
def code(x, y):
    """Relative difference of x and y: (x - y) / (x + y)."""
    numerator = x - y
    denominator = x + y
    return numerator / denominator
function code(x, y)
    # Relative difference of x and y, each step rounded to Float64.
    num = Float64(x - y)
    den = Float64(x + y)
    return Float64(num / den)
end
function tmp = code(x, y)
    % Relative difference of x and y: (x - y) / (x + y).
    num = x - y;
    den = x + y;
    tmp = num / den;
end
(* Relative difference (x - y)/(x + y), each operation evaluated at $MachinePrecision. *)
code[x_, y_] := N[(N[(x - y), $MachinePrecision] / N[(x + y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - y}{x + y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (/ (- x y) (+ x y)))
double code(double x, double y) {
    /* Relative difference of x and y: (x - y) / (x + y). */
    const double num = x - y;
    const double den = x + y;
    return num / den;
}
real(8) function code(x, y)
    ! Relative difference of x and y: (x - y) / (x + y).
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = (x - y) / (x + y)
end function code
public static double code(double x, double y) {
    // Relative difference of x and y: (x - y) / (x + y).
    final double diff = x - y;
    final double sum = x + y;
    return diff / sum;
}
def code(x, y):
    """Relative difference of x and y: (x - y) / (x + y)."""
    numerator = x - y
    denominator = x + y
    return numerator / denominator
function code(x, y)
    # Relative difference of x and y, each step rounded to Float64.
    num = Float64(x - y)
    den = Float64(x + y)
    return Float64(num / den)
end
function tmp = code(x, y)
    % Relative difference of x and y: (x - y) / (x + y).
    num = x - y;
    den = x + y;
    tmp = num / den;
end
(* Relative difference (x - y)/(x + y), each operation evaluated at $MachinePrecision. *)
code[x_, y_] := N[(N[(x - y), $MachinePrecision] / N[(x + y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - y}{x + y}
\end{array}
(FPCore (x y) :precision binary64 (- (/ x (+ x y)) (/ y (+ x y))))
double code(double x, double y) {
    /* Split form of (x - y)/(x + y): x/(x + y) - y/(x + y). */
    const double sum = x + y;
    return (x / sum) - (y / sum);
}
real(8) function code(x, y)
    ! Split form of (x - y)/(x + y): x/(x + y) - y/(x + y).
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = (x / (x + y)) - (y / (x + y))
end function code
public static double code(double x, double y) {
    // Split form of (x - y)/(x + y): x/(x + y) - y/(x + y).
    final double sum = x + y;
    return (x / sum) - (y / sum);
}
def code(x, y):
    """Split form of (x - y)/(x + y): x/(x + y) - y/(x + y)."""
    total = x + y
    return (x / total) - (y / total)
function code(x, y)
    # Split form: x/(x+y) - y/(x+y), each operation rounded to Float64.
    den = Float64(x + y)
    lhs = Float64(x / den)
    rhs = Float64(y / den)
    return Float64(lhs - rhs)
end
function tmp = code(x, y)
    % Split form of (x - y)/(x + y): x/(x + y) - y/(x + y).
    den = x + y;
    tmp = (x / den) - (y / den);
end
(* Split form x/(x + y) - y/(x + y), each operation evaluated at $MachinePrecision. *)
code[x_, y_] := N[(N[(x / N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(y / N[(x + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{x + y} - \frac{y}{x + y}
\end{array}
Initial program 100.0%
lift-/.f64N/A
lift--.f64N/A
div-subN/A
lower--.f64N/A
lower-/.f64N/A
lower-/.f64100.0
Applied rewrites100.0%
(FPCore (x y) :precision binary64 (if (<= (/ (- x y) (+ x y)) -0.5) (+ -1.0 (/ (+ x x) y)) (fma y (/ -2.0 x) 1.0)))
double code(double x, double y) {
double tmp;
if (((x - y) / (x + y)) <= -0.5) {
tmp = -1.0 + ((x + x) / y);
} else {
tmp = fma(y, (-2.0 / x), 1.0);
}
return tmp;
}
function code(x, y)
    # Piecewise rewrite of (x - y)/(x + y): pick the form that is
    # more accurate on each side of ratio == -0.5.
    ratio = Float64(Float64(x - y) / Float64(x + y))
    if ratio <= -0.5
        return Float64(-1.0 + Float64(Float64(x + x) / y))
    end
    return fma(y, Float64(-2.0 / x), 1.0)
end
(* Piecewise: if (x - y)/(x + y) <= -0.5, evaluate -1 + (x + x)/y; otherwise y*(-2/x) + 1. *)
code[x_, y_] := If[LessEqual[N[(N[(x - y), $MachinePrecision] / N[(x + y), $MachinePrecision]), $MachinePrecision], -0.5], N[(-1.0 + N[(N[(x + x), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision], N[(y * N[(-2.0 / x), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{x - y}{x + y} \leq -0.5:\\
\;\;\;\;-1 + \frac{x + x}{y}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(y, \frac{-2}{x}, 1\right)\\
\end{array}
\end{array}
if (/.f64 (-.f64 x y) (+.f64 x y)) < -0.5
Initial program 100.0%
Taylor expanded in x around 0
sub-negN/A
*-lft-identityN/A
associate-*l/N/A
associate-*l*N/A
metadata-evalN/A
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
associate-*l/N/A
metadata-evalN/A
distribute-rgt1-inN/A
metadata-evalN/A
cancel-sign-sub-invN/A
lower-/.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6499.0
Applied rewrites99.0%
if -0.5 < (/.f64 (-.f64 x y) (+.f64 x y))
Initial program 100.0%
Taylor expanded in x around inf
associate--l+N/A
associate-*r/N/A
div-subN/A
+-commutativeN/A
div-subN/A
associate-*r/N/A
sub-negN/A
mul-1-negN/A
distribute-rgt-outN/A
metadata-evalN/A
*-commutativeN/A
associate-*r/N/A
*-commutativeN/A
associate-*r/N/A
metadata-evalN/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
lower-fma.f64N/A
Applied rewrites99.0%
(FPCore (x y) :precision binary64 (- (/ x (+ x y)) (/ y (+ x y))))
double code(double x, double y) {
    /* Split form of (x - y)/(x + y): x/(x + y) - y/(x + y). */
    const double sum = x + y;
    return (x / sum) - (y / sum);
}
real(8) function code(x, y)
    ! Split form of (x - y)/(x + y): x/(x + y) - y/(x + y).
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = (x / (x + y)) - (y / (x + y))
end function code
public static double code(double x, double y) {
    // Split form of (x - y)/(x + y): x/(x + y) - y/(x + y).
    final double sum = x + y;
    return (x / sum) - (y / sum);
}
def code(x, y):
    """Split form of (x - y)/(x + y): x/(x + y) - y/(x + y)."""
    total = x + y
    return (x / total) - (y / total)
function code(x, y)
    # Split form: x/(x+y) - y/(x+y), each operation rounded to Float64.
    den = Float64(x + y)
    lhs = Float64(x / den)
    rhs = Float64(y / den)
    return Float64(lhs - rhs)
end
function tmp = code(x, y)
    % Split form of (x - y)/(x + y): x/(x + y) - y/(x + y).
    den = x + y;
    tmp = (x / den) - (y / den);
end
(* Split form x/(x + y) - y/(x + y), each operation evaluated at $MachinePrecision. *)
code[x_, y_] := N[(N[(x / N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(y / N[(x + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{x + y} - \frac{y}{x + y}
\end{array}
herbie shell --seed 2024223
(FPCore (x y)
:name "Data.Colour.RGB:hslsv from colour-2.3.3, D"
:precision binary64
:alt
(! :herbie-platform default (- (/ x (+ x y)) (/ y (+ x y))))
(/ (- x y) (+ x y)))