
; Relative error of x with respect to y: |x - y| / |y|, evaluated in binary64.
(FPCore (x y) :precision binary64 (/ (fabs (- x y)) (fabs y)))
/* Relative error |x - y| / |y| of x with respect to y (double precision). */
double code(double x, double y) {
    const double numerator = fabs(x - y);
    const double denominator = fabs(y);
    return numerator / denominator;
}
! Relative error |x - y| / |y| of x with respect to y (double precision).
! Fix: added implicit none so undeclared names become compile errors.
real(8) function code(x, y)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = abs(x - y) / abs(y)
end function
/** Relative error |x - y| / |y| of x with respect to y. */
public static double code(double x, double y) {
    final double numerator = Math.abs(x - y);
    final double denominator = Math.abs(y);
    return numerator / denominator;
}
def code(x, y):
    """Relative error |x - y| / |y| of x with respect to y."""
    numerator = math.fabs(x - y)
    denominator = math.fabs(y)
    return numerator / denominator
# Relative error |x - y| / |y|; subtraction and quotient rounded to Float64.
function code(x, y)
    delta = Float64(x - y)
    return Float64(abs(delta) / abs(y))
end
% Relative error |x - y| / |y| of x with respect to y.
function tmp = code(x, y)
    tmp = abs(x - y) / abs(y);
end
(* Relative error Abs[x - y] / Abs[y]; each operation is rounded via N[..., $MachinePrecision] to mirror machine arithmetic. *)
code[x_, y_] := N[(N[Abs[N[(x - y), $MachinePrecision]], $MachinePrecision] / N[Abs[y], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|x - y\right|}{\left|y\right|}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 2 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 (same as the input): |x - y| / |y| in binary64.
(FPCore (x y) :precision binary64 (/ (fabs (- x y)) (fabs y)))
/* Relative error |x - y| / |y| of x with respect to y (double precision). */
double code(double x, double y) {
    const double diff = x - y;
    return fabs(diff) / fabs(y);
}
! Relative error |x - y| / |y| of x with respect to y (double precision).
! Fix: added implicit none so undeclared names become compile errors.
real(8) function code(x, y)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = abs(x - y) / abs(y)
end function
/** Relative error |x - y| / |y| of x with respect to y. */
public static double code(double x, double y) {
    final double delta = x - y;
    return Math.abs(delta) / Math.abs(y);
}
def code(x, y):
    """Relative error |x - y| / |y| of x with respect to y."""
    delta = math.fabs(x - y)
    return delta / math.fabs(y)
# Relative error |x - y| / |y|; subtraction and quotient rounded to Float64.
function code(x, y)
    numerator = abs(Float64(x - y))
    return Float64(numerator / abs(y))
end
% Relative error |x - y| / |y| of x with respect to y.
function tmp = code(x, y)
    delta = abs(x - y);
    tmp = delta / abs(y);
end
(* Relative error Abs[x - y] / Abs[y]; each operation is rounded via N[..., $MachinePrecision] to mirror machine arithmetic. *)
code[x_, y_] := N[(N[Abs[N[(x - y), $MachinePrecision]], $MachinePrecision] / N[Abs[y], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|x - y\right|}{\left|y\right|}
\end{array}
; Alternative 2: algebraic rewrite of |x - y| / |y| as |1 - x/y|, in binary64.
(FPCore (x y) :precision binary64 (fabs (- 1.0 (/ x y))))
/* Relative error rewritten as |1 - x/y| (double precision). */
double code(double x, double y) {
    const double ratio = x / y;
    return fabs(1.0 - ratio);
}
! Relative error rewritten as |1 - x/y| (double precision).
! Fix: added implicit none so undeclared names become compile errors.
real(8) function code(x, y)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = abs(1.0d0 - (x / y))
end function
/** Relative error rewritten as |1 - x/y|. */
public static double code(double x, double y) {
    final double ratio = x / y;
    return Math.abs(1.0 - ratio);
}
def code(x, y):
    """Relative error rewritten as |1 - x/y|."""
    ratio = x / y
    return math.fabs(1.0 - ratio)
# Relative error rewritten as |1 - x/y|; quotient and subtraction rounded to Float64.
function code(x, y)
    ratio = Float64(x / y)
    return abs(Float64(1.0 - ratio))
end
% Relative error rewritten as |1 - x/y|.
function tmp = code(x, y)
    ratio = x / y;
    tmp = abs(1.0 - ratio);
end
(* Relative error rewritten as Abs[1 - x/y]; each operation is rounded via N[..., $MachinePrecision] to mirror machine arithmetic. *)
code[x_, y_] := N[Abs[N[(1.0 - N[(x / y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|1 - \frac{x}{y}\right|
\end{array}
Initial program 100.0%
neg-fabs N/A
div-fabs N/A
fabs-lowering-fabs.f64 N/A
sub-neg N/A
+-commutative N/A
distribute-neg-in N/A
remove-double-neg N/A
sub-neg N/A
div-sub N/A
*-inverses N/A
--lowering--.f64 N/A
/-lowering-/.f64 100.0%
Applied egg-rr 100.0%
; Alternative 3: constant approximation — Herbie's series expansion reduced the expression to 1.0.
(FPCore (x y) :precision binary64 1.0)
/* Constant approximation: always 1.0, independent of both arguments. */
double code(double x, double y) {
    (void)x;
    (void)y;
    return 1.0;
}
! Constant approximation: always 1.0, independent of both arguments.
! Fix: added implicit none so undeclared names become compile errors.
real(8) function code(x, y)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0
end function
/** Constant approximation: always 1.0, independent of both arguments. */
public static double code(double x, double y) {
    final double result = 1.0;
    return result;
}
def code(x, y):
    """Constant approximation: always 1.0, independent of both arguments."""
    return 1.0
# Constant approximation: always 1.0, independent of both arguments.
function code(x, y)
    return 1.0
end
% Constant approximation: always 1.0, independent of both arguments.
function tmp = code(x, y)
    tmp = 1.0;
end
(* Constant approximation: always 1.0, independent of both arguments. *)
code[x_, y_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 100.0%
neg-fabs N/A
div-fabs N/A
fabs-lowering-fabs.f64 N/A
sub-neg N/A
+-commutative N/A
distribute-neg-in N/A
remove-double-neg N/A
sub-neg N/A
div-sub N/A
*-inverses N/A
--lowering--.f64 N/A
/-lowering-/.f64 100.0%
Applied egg-rr 100.0%
Taylor expanded in x around 0
Simplified 47.1%
metadata-eval 47.1%
Applied egg-rr 47.1%
herbie shell --seed 2024139
; Original input expression: relative error |x - y| / |y| of x with respect to y.
(FPCore (x y)
:name "Numeric.LinearAlgebra.Util:formatSparse from hmatrix-0.16.1.5"
:precision binary64
(/ (fabs (- x y)) (fabs y)))