
Initial program:

(FPCore (x y) :precision binary64 (+ x (/ (- y x) 2.0)))
double code(double x, double y) {
return x + ((y - x) / 2.0);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x + ((y - x) / 2.0d0)
end function
public static double code(double x, double y) {
return x + ((y - x) / 2.0);
}
def code(x, y): return x + ((y - x) / 2.0)
function code(x, y) return Float64(x + Float64(Float64(y - x) / 2.0)) end
function tmp = code(x, y)
    tmp = x + ((y - x) / 2.0);
end
code[x_, y_] := N[(x + N[(N[(y - x), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{y - x}{2}
\end{array}
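A note on the input expression itself: x + (y - x) / 2.0 computes the midpoint of x and y without summing the endpoints, but it has a corner case of its own. A quick Python check (ordinary Python floats are binary64; the sample values are arbitrary):

# When the endpoints are huge with opposite signs, y - x overflows
# even though the true midpoint (here 0.0) is tiny.
x, y = -1.6e308, 1.6e308
print(x + (y - x) / 2.0)   # inf: (y - x) overflows
print(0.5 * (x + y))       # 0.0: the sum form is exact on this input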
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Alternative 1:

(FPCore (x y) :precision binary64 (* (+ y x) 0.5))
double code(double x, double y) {
return (y + x) * 0.5;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (y + x) * 0.5d0
end function
public static double code(double x, double y) {
return (y + x) * 0.5;
}
def code(x, y): return (y + x) * 0.5
function code(x, y) return Float64(Float64(y + x) * 0.5) end
function tmp = code(x, y)
    tmp = (y + x) * 0.5;
end
code[x_, y_] := N[(N[(y + x), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(y + x\right) \cdot 0.5
\end{array}
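This form is algebraically the same midpoint but moves the rounding risk: summing the endpoints first can overflow where the original difference form does not. A complementary Python check (arbitrary sample values near the binary64 maximum of about 1.798e308):

# Two large same-sign endpoints: y + x overflows before the halving,
# while the original difference form stays in range.
x, y = 1.6e308, 1.7e308
print((y + x) * 0.5)       # inf
print(x + (y - x) / 2.0)   # 1.65e308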
Derivation:

Initial program 100.0%
Taylor expanded in x around 0
Simplified 100.0%
+-rgt-identity N/A
*-commutative N/A
*-lowering-*.f64 N/A
+-commutative N/A
+-lowering-+.f64 100.0%
Applied egg-rr 100.0%
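The rewrite chain amounts to a textbook identity that is exact over the reals, even though the two sides can round differently in binary64:

\begin{array}{l}
x + \frac{y - x}{2} = \frac{2x + \left(y - x\right)}{2} = \frac{x + y}{2} = \left(y + x\right) \cdot 0.5
\end{array}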
Alternative 2:

(FPCore (x y) :precision binary64 (if (<= (+ x (/ (- y x) 2.0)) -1e-308) (* x 0.5) (* y 0.5)))
double code(double x, double y) {
double tmp;
if ((x + ((y - x) / 2.0)) <= -1e-308) {
tmp = x * 0.5;
} else {
tmp = y * 0.5;
}
return tmp;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((x + ((y - x) / 2.0d0)) <= (-1d-308)) then
tmp = x * 0.5d0
else
tmp = y * 0.5d0
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if ((x + ((y - x) / 2.0)) <= -1e-308) {
tmp = x * 0.5;
} else {
tmp = y * 0.5;
}
return tmp;
}
def code(x, y):
    tmp = 0
    if (x + ((y - x) / 2.0)) <= -1e-308:
        tmp = x * 0.5
    else:
        tmp = y * 0.5
    return tmp
function code(x, y)
    tmp = 0.0
    if (Float64(x + Float64(Float64(y - x) / 2.0)) <= -1e-308)
        tmp = Float64(x * 0.5);
    else
        tmp = Float64(y * 0.5);
    end
    return tmp
end
function tmp_2 = code(x, y)
    tmp = 0.0;
    if ((x + ((y - x) / 2.0)) <= -1e-308)
        tmp = x * 0.5;
    else
        tmp = y * 0.5;
    end
    tmp_2 = tmp;
end
code[x_, y_] := If[LessEqual[N[(x + N[(N[(y - x), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision], -1e-308], N[(x * 0.5), $MachinePrecision], N[(y * 0.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x + \frac{y - x}{2} \leq -1 \cdot 10^{-308}:\\
\;\;\;\;x \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;y \cdot 0.5\\
\end{array}
\end{array}
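The branch condition tests the sign of the original midpoint (against the -1e-308 threshold, printed in full in the derivation below) and then halves only the dominant endpoint. A small Python comparison (the helper names are mine, the values arbitrary) shows when that works and when it does not:

def branch_mid(x, y):
    # Herbie's branching alternative, transcribed from above
    return x * 0.5 if (x + ((y - x) / 2.0)) <= -1e-308 else y * 0.5

def exact_mid(x, y):
    return x + (y - x) / 2.0

print(branch_mid(1.0, 1e300), exact_mid(1.0, 1e300))   # both 5e+299: y dominates
print(branch_mid(2.0, 8.0), exact_mid(2.0, 8.0))       # 4.0 vs 5.0: comparable magnitudes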
Derivation, for the branch where (+.f64 x (/.f64 (-.f64 y x) #s(literal 2 binary64))) < -9.9999999999999991e-309 (the -1e-308 threshold printed with full precision):

Initial program 100.0%
Taylor expanded in x around inf
metadata-eval N/A
associate-*r* N/A
+-rgt-identity N/A
associate-*r* N/A
metadata-eval N/A
*-commutative N/A
accelerator-lowering-fma.f64 44.1%
Simplified 44.1%
+-rgt-identity N/A
*-lowering-*.f64 44.1%
Applied egg-rr 44.1%

Derivation, for the branch where -9.9999999999999991e-309 < (+.f64 x (/.f64 (-.f64 y x) #s(literal 2 binary64))):

Initial program 100.0%
Taylor expanded in x around 0
+-rgt-identity N/A
accelerator-lowering-fma.f64 46.5%
Simplified 46.5%
+-rgt-identity N/A
*-commutative N/A
*-lowering-*.f64 46.5%
Applied egg-rr 46.5%
Alternative 3:

(FPCore (x y) :precision binary64 (* x 0.5))
double code(double x, double y) {
return x * 0.5;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * 0.5d0
end function
public static double code(double x, double y) {
return x * 0.5;
}
def code(x, y): return x * 0.5
function code(x, y) return Float64(x * 0.5) end
function tmp = code(x, y)
    tmp = x * 0.5;
end
code[x_, y_] := N[(x * 0.5), $MachinePrecision]
\begin{array}{l}
\\
x \cdot 0.5
\end{array}
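This is the large-|x| limit from the Taylor expansion: when x dominates y in magnitude the true midpoint approaches x / 2, which is why the accuracy below is only about 48.8% (the approximation holds on only part of the input space). A quick Python sanity check with arbitrary values:

print(1e300 * 0.5, 1e300 + (1.0 - 1e300) / 2.0)   # both 5e+299: fine when |x| >> |y|
print(2.0 * 0.5, 2.0 + (8.0 - 2.0) / 2.0)         # 1.0 vs 5.0: wrong otherwise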
Derivation:

Initial program 100.0%
Taylor expanded in x around inf
metadata-eval N/A
associate-*r* N/A
+-rgt-identity N/A
associate-*r* N/A
metadata-eval N/A
*-commutative N/A
accelerator-lowering-fma.f64 48.8%
Simplified 48.8%
+-rgt-identity N/A
*-lowering-*.f64 48.8%
Applied egg-rr 48.8%
Developer target (the :alt annotation in the reproduction input below; the same as Alternative 1 up to operand order):

(FPCore (x y) :precision binary64 (* 0.5 (+ x y)))
double code(double x, double y) {
return 0.5 * (x + y);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.5d0 * (x + y)
end function
public static double code(double x, double y) {
return 0.5 * (x + y);
}
def code(x, y): return 0.5 * (x + y)
function code(x, y) return Float64(0.5 * Float64(x + y)) end
function tmp = code(x, y) tmp = 0.5 * (x + y); end
code[x_, y_] := N[(0.5 * N[(x + y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \left(x + y\right)
\end{array}
Reproduce:

herbie shell --seed 2024196
(FPCore (x y)
:name "Numeric.Interval.Internal:bisect from intervals-0.7.1, A"
:precision binary64
:alt
(! :herbie-platform default (* 1/2 (+ x y)))
(+ x (/ (- y x) 2.0)))
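For context, the :name field identifies this expression as the midpoint computed by bisect in the intervals Haskell package, version 0.7.1. A minimal Python sketch of the idea (not the Haskell API; the function shape and return convention here are mine):

def bisect(lo, hi):
    # Split [lo, hi] into two halves at the midpoint expression analyzed above.
    m = lo + (hi - lo) / 2.0
    return (lo, m), (m, hi)

print(bisect(0.0, 1.0))   # ((0.0, 0.5), (0.5, 1.0))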