
Initial program (99.9% accurate), in binary64 precision:

FPCore:
(FPCore (x y) :precision binary64 (+ x (/ (fabs (- y x)) 2.0)))

C:
double code(double x, double y) {
	return x + (fabs((y - x)) / 2.0);
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x + (abs((y - x)) / 2.0d0)
end function

Java:
public static double code(double x, double y) {
	return x + (Math.abs((y - x)) / 2.0);
}

Python:
def code(x, y): return x + (math.fabs((y - x)) / 2.0)

Julia:
function code(x, y) return Float64(x + Float64(abs(Float64(y - x)) / 2.0)) end

MATLAB:
function tmp = code(x, y) tmp = x + (abs((y - x)) / 2.0); end

Wolfram:
code[x_, y_] := N[(x + N[(N[Abs[N[(y - x), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]

LaTeX:
x + \frac{\left|y - x\right|}{2}
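As a small algebraic aside (not part of the Herbie output), the absolute value makes this a piecewise-linear function, which helps explain the Taylor-based alternatives further down:

LaTeX:
x + \frac{\left|y - x\right|}{2} =
\begin{cases}
\frac{x + y}{2} & \text{if } y \ge x \\
\frac{3x - y}{2} & \text{if } y < x
\end{cases}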
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
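The accuracy percentages quoted in the derivations below are Herbie's own error metric. As a rough independent sanity check, note that every operation in these programs (subtract, absolute value, halving, adding, multiplying by a constant) is exact over the rationals, so any candidate can be compared against an exact reference built with Python's fractions module. A minimal sketch, with illustrative sample points rather than the ones Herbie actually sampled:

Python:
from fractions import Fraction

def exact(x, y):
    # Exact rational evaluation of x + |y - x| / 2; every step is exact over the rationals.
    xr, yr = Fraction(x), Fraction(y)
    return xr + abs(yr - xr) / 2

def candidate(x, y):
    # Double-precision candidate; swap in any of the alternatives below to compare.
    return x + abs(y - x) / 2.0

for x, y in [(1.0, 3.0), (1e16, 1.0), (-2.5, 2.5)]:
    ref = exact(x, y)
    got = Fraction(candidate(x, y))
    err = abs(got - ref) / abs(ref) if ref != 0 else abs(got - ref)
    print(x, y, float(err))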
Alternative 1:
This first alternative is the initial program itself, x + |y - x| / 2, returned unchanged.

Derivation:
Initial program 99.9%
Alternative 2:

FPCore:
(FPCore (x y) :precision binary64 (+ x (/ (fabs y) 2.0)))

C:
double code(double x, double y) {
	return x + (fabs(y) / 2.0);
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x + (abs(y) / 2.0d0)
end function

Java:
public static double code(double x, double y) {
	return x + (Math.abs(y) / 2.0);
}

Python:
def code(x, y): return x + (math.fabs(y) / 2.0)

Julia:
function code(x, y) return Float64(x + Float64(abs(y) / 2.0)) end

MATLAB:
function tmp = code(x, y) tmp = x + (abs(y) / 2.0); end

Wolfram:
code[x_, y_] := N[(x + N[(N[Abs[y], $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]

LaTeX:
x + \frac{\left|y\right|}{2}

Derivation:
Initial program 99.9%
Taylor expanded in y around inf
Simplified 59.6%
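The "Taylor expanded in y around inf" step amounts to replacing |y - x| with |y|, which is only justified when |y| dominates |x|. A quick illustration with hand-picked points (hypothetical, not taken from Herbie's samples):

Python:
import math

def original(x, y):
    return x + math.fabs(y - x) / 2.0

def alt2(x, y):
    return x + math.fabs(y) / 2.0

# |y| >> |x|: the two results differ only in the last few digits.
print(original(3.0, 1e12), alt2(3.0, 1e12))

# |y| comparable to |x|: dropping x inside the absolute value is clearly visible.
print(original(3.0, 4.0), alt2(3.0, 4.0))  # 3.5 vs 5.0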
Alternative 3:

FPCore:
(FPCore (x y) :precision binary64 (+ (* x 0.75) (* y 0.5)))

C:
double code(double x, double y) {
	return (x * 0.75) + (y * 0.5);
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (x * 0.75d0) + (y * 0.5d0)
end function

Java:
public static double code(double x, double y) {
	return (x * 0.75) + (y * 0.5);
}

Python:
def code(x, y): return (x * 0.75) + (y * 0.5)

Julia:
function code(x, y) return Float64(Float64(x * 0.75) + Float64(y * 0.5)) end

MATLAB:
function tmp = code(x, y) tmp = (x * 0.75) + (y * 0.5); end

Wolfram:
code[x_, y_] := N[(N[(x * 0.75), $MachinePrecision] + N[(y * 0.5), $MachinePrecision]), $MachinePrecision]

LaTeX:
x \cdot 0.75 + y \cdot 0.5

Derivation:
Initial program 99.9%
+-commutative N/A
flip-+ N/A
/-lowering-/.f64 N/A
--lowering--.f64 N/A
frac-times N/A
/-lowering-/.f64 N/A
sqr-abs N/A
*-lowering-*.f64 N/A
--lowering--.f64 N/A
--lowering--.f64 N/A
metadata-eval N/A
*-lowering-*.f64 N/A
--lowering--.f64 N/A
/-lowering-/.f64 N/A
fabs-lowering-fabs.f64 N/A
--lowering--.f64 54.6%
Applied egg-rr 54.6%
Taylor expanded in x around inf
mul-1-neg N/A
neg-sub0 N/A
--lowering--.f64 6.6%
Simplified 6.6%
Taylor expanded in y around 0
mul-1-neg N/A
div-sub N/A
unpow2 N/A
associate-/l* N/A
*-inverses N/A
cancel-sign-sub-inv N/A
associate-/l* N/A
unpow2 N/A
associate-/l* N/A
*-inverses N/A
*-rgt-identity N/A
*-rgt-identity N/A
mul-1-neg N/A
distribute-rgt-out N/A
metadata-eval N/A
*-commutative N/A
distribute-lft-neg-in N/A
metadata-eval N/A
Simplified 30.5%
Final simplification 30.5%
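Alternative 3 replaces the absolute-value expression with a single fused linear form. A quick spot check against the original at a few illustrative points (chosen here, not taken from the report) shows where the two diverge:

Python:
import math

def original(x, y):
    return x + math.fabs(y - x) / 2.0

def alt3(x, y):
    return (x * 0.75) + (y * 0.5)

# Print both results side by side to see how far the linear form drifts.
for x, y in [(1.0, 3.0), (1e-8, 2.0), (4.0, -4.0)]:
    print(x, y, original(x, y), alt3(x, y))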
Alternative 4:

FPCore:
(FPCore (x y) :precision binary64 (* x 0.75))

C:
double code(double x, double y) {
	return x * 0.75;
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x * 0.75d0
end function

Java:
public static double code(double x, double y) {
	return x * 0.75;
}

Python:
def code(x, y): return x * 0.75

Julia:
function code(x, y) return Float64(x * 0.75) end

MATLAB:
function tmp = code(x, y) tmp = x * 0.75; end

Wolfram:
code[x_, y_] := N[(x * 0.75), $MachinePrecision]

LaTeX:
x \cdot 0.75

Derivation:
Initial program 99.9%
+-commutative N/A
flip-+ N/A
/-lowering-/.f64 N/A
--lowering--.f64 N/A
frac-times N/A
/-lowering-/.f64 N/A
sqr-abs N/A
*-lowering-*.f64 N/A
--lowering--.f64 N/A
--lowering--.f64 N/A
metadata-eval N/A
*-lowering-*.f64 N/A
--lowering--.f64 N/A
/-lowering-/.f64 N/A
fabs-lowering-fabs.f64 N/A
--lowering--.f64 54.6%
Applied egg-rr 54.6%
Taylor expanded in x around inf
*-commutative N/A
*-lowering-*.f64 10.8%
Simplified 10.8%
Alternative 5:

FPCore:
(FPCore (x y) :precision binary64 x)

C:
double code(double x, double y) {
	return x;
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x
end function

Java:
public static double code(double x, double y) {
	return x;
}

Python:
def code(x, y): return x

Julia:
function code(x, y) return x end

MATLAB:
function tmp = code(x, y) tmp = x; end

Wolfram:
code[x_, y_] := x

LaTeX:
x

Derivation:
Initial program 99.9%
Taylor expanded in x around inf
Simplified 10.8%
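Alternatives 4 and 5 discard the y term entirely, and their derivation accuracy drops to about 10.8%, so they trade most of the accuracy for a much cheaper expression. A minimal comparison at a couple of illustrative points (again chosen here, not from the report):

Python:
import math

def original(x, y):
    return x + math.fabs(y - x) / 2.0

def alt4(x, y):
    return x * 0.75

def alt5(x, y):
    return x

# Print all three results side by side at each point.
for x, y in [(1e9, 2.0), (2.0, 1e9)]:
    print(x, y, original(x, y), alt4(x, y), alt5(x, y))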
Reproduce:
To reproduce this result, start Herbie with the same seed and paste the FPCore below at the shell prompt:

herbie shell --seed 2024139
(FPCore (x y)
  :name "Graphics.Rendering.Chart.Plot.AreaSpots:renderSpotLegend from Chart-1.5.3"
  :precision binary64
  (+ x (/ (fabs (- y x)) 2.0)))