
; Input program: relative difference |x - y| / |y| in binary64.
(FPCore (x y) :precision binary64 (/ (fabs (- x y)) (fabs y)))
/* Relative difference of x from y: |x - y| / |y|, in binary64. */
double code(double x, double y) {
double numerator = fabs(x - y);
double denominator = fabs(y);
return numerator / denominator;
}
! Relative difference of x from y: |x - y| / |y|, in real(8) (binary64).
real(8) function code(x, y)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = abs(x - y) / abs(y)
end function code
/** Relative difference of {@code x} from {@code y}: |x - y| / |y|. */
public static double code(double x, double y) {
double numerator = Math.abs(x - y);
return numerator / Math.abs(y);
}
def code(x, y):
    """Relative difference of x from y: |x - y| / |y|, as a float."""
    numerator = math.fabs(x - y)
    return numerator / math.fabs(y)
# Relative difference of x from y: |x - y| / |y|, computed in Float64.
function code(x, y)
    numerator = abs(Float64(x - y))
    return Float64(numerator / abs(y))
end
% Relative difference of x from y: |x - y| / |y|.
function tmp = code(x, y) tmp = abs((x - y)) / abs(y); end
(* Relative difference |x - y| / |y|; each operation rounded to $MachinePrecision. *)
code[x_, y_] := N[(N[Abs[N[(x - y), $MachinePrecision]], $MachinePrecision] / N[Abs[y], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|x - y\right|}{\left|y\right|}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Herbie alternative: same expression |x - y| / |y| as the input program.
(FPCore (x y) :precision binary64 (/ (fabs (- x y)) (fabs y)))
/* Herbie alternative: same expression |x - y| / |y| as the input program. */
double code(double x, double y) {
return fabs((x - y)) / fabs(y);
}
! Herbie alternative: same expression |x - y| / |y| as the input program.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = abs((x - y)) / abs(y)
end function
// Herbie alternative: same expression |x - y| / |y| as the input program.
public static double code(double x, double y) {
return Math.abs((x - y)) / Math.abs(y);
}
# Herbie alternative: same expression |x - y| / |y| as the input program.
def code(x, y): return math.fabs((x - y)) / math.fabs(y)
# Herbie alternative: same expression |x - y| / |y| as the input program.
function code(x, y) return Float64(abs(Float64(x - y)) / abs(y)) end
% Herbie alternative: same expression |x - y| / |y| as the input program.
function tmp = code(x, y) tmp = abs((x - y)) / abs(y); end
(* Herbie alternative: same expression |x - y| / |y| as the input program. *)
code[x_, y_] := N[(N[Abs[N[(x - y), $MachinePrecision]], $MachinePrecision] / N[Abs[y], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|x - y\right|}{\left|y\right|}
\end{array}
; Herbie alternative: same expression |x - y| / |y| as the input program.
(FPCore (x y) :precision binary64 (/ (fabs (- x y)) (fabs y)))
/* Herbie alternative: same expression |x - y| / |y| as the input program. */
double code(double x, double y) {
return fabs((x - y)) / fabs(y);
}
! Herbie alternative: same expression |x - y| / |y| as the input program.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = abs((x - y)) / abs(y)
end function
// Herbie alternative: same expression |x - y| / |y| as the input program.
public static double code(double x, double y) {
return Math.abs((x - y)) / Math.abs(y);
}
# Herbie alternative: same expression |x - y| / |y| as the input program.
def code(x, y): return math.fabs((x - y)) / math.fabs(y)
# Herbie alternative: same expression |x - y| / |y| as the input program.
function code(x, y) return Float64(abs(Float64(x - y)) / abs(y)) end
% Herbie alternative: same expression |x - y| / |y| as the input program.
function tmp = code(x, y) tmp = abs((x - y)) / abs(y); end
(* Herbie alternative: same expression |x - y| / |y| as the input program. *)
code[x_, y_] := N[(N[Abs[N[(x - y), $MachinePrecision]], $MachinePrecision] / N[Abs[y], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|x - y\right|}{\left|y\right|}
\end{array}
Initial program 100.0%
Final simplification 100.0%
; Herbie alternative: x/y - 1. Equals |x - y|/|y| only when x/y >= 1.
(FPCore (x y) :precision binary64 (+ (/ x y) -1.0))
/* Herbie alternative: x/y - 1. Equals |x - y|/|y| only when x/y >= 1. */
double code(double x, double y) {
return (x / y) + -1.0;
}
! Herbie alternative: x/y - 1. Equals |x - y|/|y| only when x/y >= 1.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x / y) + (-1.0d0)
end function
// Herbie alternative: x/y - 1. Equals |x - y|/|y| only when x/y >= 1.
public static double code(double x, double y) {
return (x / y) + -1.0;
}
# Herbie alternative: x/y - 1. Equals |x - y|/|y| only when x/y >= 1.
def code(x, y): return (x / y) + -1.0
# Herbie alternative: x/y - 1. Equals |x - y|/|y| only when x/y >= 1.
function code(x, y) return Float64(Float64(x / y) + -1.0) end
% Herbie alternative: x/y - 1. Equals |x - y|/|y| only when x/y >= 1.
function tmp = code(x, y) tmp = (x / y) + -1.0; end
(* Herbie alternative: x/y - 1. Equals |x - y|/|y| only when x/y >= 1. *)
code[x_, y_] := N[(N[(x / y), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{y} + -1
\end{array}
Initial program 100.0%
fabs-sub 100.0%
Simplified 100.0%
Taylor expanded in y around -inf 100.0%
fabs-neg 100.0%
neg-mul-1 100.0%
sub-neg 100.0%
fabs-sub 100.0%
fabs-div 100.0%
rem-square-sqrt 78.7%
fabs-sqr 78.7%
rem-square-sqrt 79.0%
rem-square-sqrt 37.7%
fabs-sqr 37.7%
rem-square-sqrt 50.6%
fabs-sub 50.6%
unpow1 50.6%
sqr-pow 12.6%
fabs-sqr 12.6%
sqr-pow 22.1%
unpow1 22.1%
div-sub 22.1%
*-inverses 22.1%
sub-neg 22.1%
metadata-eval 22.1%
+-commutative 22.1%
Simplified 22.1%
Final simplification 22.1%
; Herbie alternative: 1 - x/y. Equals |x - y|/|y| only when x/y <= 1.
(FPCore (x y) :precision binary64 (- 1.0 (/ x y)))
/* Herbie alternative: 1 - x/y. Equals |x - y|/|y| only when x/y <= 1. */
double code(double x, double y) {
return 1.0 - (x / y);
}
! Herbie alternative: 1 - x/y. Equals |x - y|/|y| only when x/y <= 1.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 - (x / y)
end function
// Herbie alternative: 1 - x/y. Equals |x - y|/|y| only when x/y <= 1.
public static double code(double x, double y) {
return 1.0 - (x / y);
}
# Herbie alternative: 1 - x/y. Equals |x - y|/|y| only when x/y <= 1.
def code(x, y): return 1.0 - (x / y)
# Herbie alternative: 1 - x/y. Equals |x - y|/|y| only when x/y <= 1.
function code(x, y) return Float64(1.0 - Float64(x / y)) end
% Herbie alternative: 1 - x/y. Equals |x - y|/|y| only when x/y <= 1.
function tmp = code(x, y) tmp = 1.0 - (x / y); end
(* Herbie alternative: 1 - x/y. Equals |x - y|/|y| only when x/y <= 1. *)
code[x_, y_] := N[(1.0 - N[(x / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \frac{x}{y}
\end{array}
Initial program 100.0%
fabs-sub 100.0%
Simplified 100.0%
Taylor expanded in y around -inf 100.0%
fabs-neg 100.0%
neg-mul-1 100.0%
sub-neg 100.0%
fabs-sub 100.0%
fabs-div 100.0%
rem-square-sqrt 78.7%
fabs-sqr 78.7%
rem-square-sqrt 79.0%
div-sub 79.1%
*-inverses 79.1%
Simplified 79.1%
Final simplification 79.1%
herbie shell --seed 2023320
; Reproduction input: relative difference |x - y| / |y| in binary64,
; taken from hmatrix's formatSparse (see :name below).
(FPCore (x y)
:name "Numeric.LinearAlgebra.Util:formatSparse from hmatrix-0.16.1.5"
:precision binary64
(/ (fabs (- x y)) (fabs y)))