
;; Initial program: x + (y - x)/z in binary64 (incremental-mean update).
(FPCore (x y z) :precision binary64 (+ x (/ (- y x) z)))
double code(double x, double y, double z) {
    /* Incremental mean update: move x toward y by a 1/z fraction of the gap. */
    const double delta = y - x;
    return x + delta / z;
}
!> Incremental mean update: x + (y - x)/z in double precision.
!! Added implicit none (every unit should forbid implicit typing) and a
!! named end; the real(8) interface is kept so callers are unaffected.
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + ((y - x) / z)
end function code
public static double code(double x, double y, double z) {
    // Running-mean style update: x plus the z-th fraction of (y - x).
    final double gap = y - x;
    return x + gap / z;
}
def code(x, y, z):
    """Return x + (y - x)/z, the incremental (Welford-style) mean update."""
    delta = y - x
    return x + delta / z
function code(x, y, z)
    # Mean update x + (y - x)/z with explicit Float64 rounding at each step.
    delta = Float64(y - x)
    return Float64(x + Float64(delta / z))
end
function tmp = code(x, y, z)
  % Incremental mean update: move x toward y by a 1/z fraction of the gap.
  delta = y - x;
  tmp = x + delta / z;
end
(* Incremental mean update x + (y - x)/z, rounded to machine precision at each step. *)
code[x_, y_, z_] := N[(x + N[(N[(y - x), $MachinePrecision] / z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{y - x}{z}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative: identical to the initial program x + (y - x)/z (100% accuracy).
(FPCore (x y z) :precision binary64 (+ x (/ (- y x) z)))
/* Evaluates x + (y - x)/z directly in double precision. */
double code(double x, double y, double z) {
return x + ((y - x) / z);
}
! Evaluates x + (y - x)/z in double precision (real(8)).
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + ((y - x) / z)
end function
// Evaluates x + (y - x)/z directly in double precision.
public static double code(double x, double y, double z) {
return x + ((y - x) / z);
}
# Returns x + (y - x)/z (incremental mean update).
def code(x, y, z): return x + ((y - x) / z)
# Returns x + (y - x)/z with explicit Float64 rounding at each step.
function code(x, y, z) return Float64(x + Float64(Float64(y - x) / z)) end
% Returns x + (y - x)/z (incremental mean update).
function tmp = code(x, y, z) tmp = x + ((y - x) / z); end
(* Returns x + (y - x)/z, rounded to machine precision at each step. *)
code[x_, y_, z_] := N[(x + N[(N[(y - x), $MachinePrecision] / z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{y - x}{z}
\end{array}
;; Alternative: unchanged expression x + (y - x)/z in binary64.
(FPCore (x y z) :precision binary64 (+ x (/ (- y x) z)))
/* Evaluates x + (y - x)/z directly in double precision. */
double code(double x, double y, double z) {
return x + ((y - x) / z);
}
! Evaluates x + (y - x)/z in double precision (real(8)).
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + ((y - x) / z)
end function
// Evaluates x + (y - x)/z directly in double precision.
public static double code(double x, double y, double z) {
return x + ((y - x) / z);
}
# Returns x + (y - x)/z (incremental mean update).
def code(x, y, z): return x + ((y - x) / z)
# Returns x + (y - x)/z with explicit Float64 rounding at each step.
function code(x, y, z) return Float64(x + Float64(Float64(y - x) / z)) end
% Returns x + (y - x)/z (incremental mean update).
function tmp = code(x, y, z) tmp = x + ((y - x) / z); end
(* Returns x + (y - x)/z, rounded to machine precision at each step. *)
code[x_, y_, z_] := N[(x + N[(N[(y - x), $MachinePrecision] / z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{y - x}{z}
\end{array}
Initial program 100.0%
Final simplification 100.0%
;; Piecewise alternative: drop the -x term when |y| is large, drop y when y is tiny.
(FPCore (x y z) :precision binary64 (if (or (<= y -4.8e-89) (not (<= y 2.5e-125))) (+ x (/ y z)) (- x (/ x z))))
double code(double x, double y, double z) {
    /* Piecewise form: y outside (-4.8e-89, 2.5e-125] uses x + y/z
     * (the -x term is negligible); otherwise x - x/z (y is negligible). */
    const int big_y = (y <= -4.8e-89) || !(y <= 2.5e-125);
    return big_y ? x + y / z : x - x / z;
}
!> Piecewise approximation of x + (y - x)/z chosen by Herbie:
!! for y outside (-4.8e-89, 2.5e-125] the -x term is negligible (use x + y/z);
!! otherwise the y term is negligible (use x - x/z).
!! Added implicit none and a named end; interface is unchanged.
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8) :: tmp
if ((y <= (-4.8d-89)) .or. (.not. (y <= 2.5d-125))) then
tmp = x + (y / z)
else
tmp = x - (x / z)
end if
code = tmp
end function code
public static double code(double x, double y, double z) {
    // Piecewise form: y outside (-4.8e-89, 2.5e-125] -> x + y/z; tiny y -> x - x/z.
    final boolean largeY = (y <= -4.8e-89) || !(y <= 2.5e-125);
    return largeY ? x + (y / z) : x - (x / z);
}
def code(x, y, z):
    """Piecewise approximation of x + (y - x)/z chosen by Herbie.

    For y outside (-4.8e-89, 2.5e-125] the -x term is negligible, so use
    x + y/z; otherwise the y term is negligible, so use x - x/z.
    (The original line squashed the if/else blocks onto one line, which is
    not valid Python syntax; this is the same logic, properly formatted.)
    """
    if (y <= -4.8e-89) or not (y <= 2.5e-125):
        tmp = x + (y / z)
    else:
        tmp = x - (x / z)
    return tmp
# Piecewise approximation of x + (y - x)/z chosen by Herbie:
# y outside (-4.8e-89, 2.5e-125] -> x + y/z; tiny y -> x - x/z.
# (The original one-liner ran statements together without separators,
# which does not parse; same logic, properly formatted.)
function code(x, y, z)
    if (y <= -4.8e-89) || !(y <= 2.5e-125)
        tmp = Float64(x + Float64(y / z))
    else
        tmp = Float64(x - Float64(x / z))
    end
    return tmp
end
function tmp_2 = code(x, y, z)
  % Piecewise evaluation: y outside (-4.8e-89, 2.5e-125] -> x + y/z;
  % otherwise (tiny y) -> x - x/z.
  if (y <= -4.8e-89) || ~(y <= 2.5e-125)
    tmp_2 = x + (y / z);
  else
    tmp_2 = x - (x / z);
  end
end
(* Piecewise form: y outside (-4.8e-89, 2.5e-125] -> x + y/z; tiny y -> x - x/z. *)
code[x_, y_, z_] := If[Or[LessEqual[y, -4.8e-89], N[Not[LessEqual[y, 2.5e-125]], $MachinePrecision]], N[(x + N[(y / z), $MachinePrecision]), $MachinePrecision], N[(x - N[(x / z), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -4.8 \cdot 10^{-89} \lor \neg \left(y \leq 2.5 \cdot 10^{-125}\right):\\
\;\;\;\;x + \frac{y}{z}\\
\mathbf{else}:\\
\;\;\;\;x - \frac{x}{z}\\
\end{array}
\end{array}
if y < -4.80000000000000032e-89 or 2.49999999999999983e-125 < y Initial program 100.0%
Taylor expanded in y around inf 85.6%
if -4.80000000000000032e-89 < y < 2.49999999999999983e-125 Initial program 100.0%
Taylor expanded in y around 0 91.8%
Final simplification 87.5%
;; Alternative: x + y/z (the -x term dropped after Taylor expansion in y around inf).
(FPCore (x y z) :precision binary64 (+ x (/ y z)))
/* Approximates x + (y - x)/z by x + y/z (the -x term dropped). */
double code(double x, double y, double z) {
return x + (y / z);
}
! Approximates x + (y - x)/z by x + y/z (the -x term dropped).
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + (y / z)
end function
// Approximates x + (y - x)/z by x + y/z (the -x term dropped).
public static double code(double x, double y, double z) {
return x + (y / z);
}
# Approximates x + (y - x)/z by x + y/z (the -x term dropped).
def code(x, y, z): return x + (y / z)
# Approximates x + (y - x)/z by x + y/z (the -x term dropped).
function code(x, y, z) return Float64(x + Float64(y / z)) end
% Approximates x + (y - x)/z by x + y/z (the -x term dropped).
function tmp = code(x, y, z) tmp = x + (y / z); end
(* Approximates x + (y - x)/z by x + y/z (the -x term dropped). *)
code[x_, y_, z_] := N[(x + N[(y / z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{y}{z}
\end{array}
Initial program 100.0%
Taylor expanded in y around inf 73.7%
Final simplification 73.7%
;; Alternative: constant approximation x (Taylor expansion in z around inf).
(FPCore (x y z) :precision binary64 x)
/* Constant approximation: returns x; y and z are ignored. */
double code(double x, double y, double z) {
return x;
}
! Constant approximation: returns x; y and z are ignored.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x
end function
// Constant approximation: returns x; y and z are ignored.
public static double code(double x, double y, double z) {
return x;
}
def code(x, y, z):
    """Constant approximation: return x (y and z are ignored)."""
    return x
# Constant approximation: returns x; y and z are ignored.
function code(x, y, z) return x end
% Constant approximation: returns x; y and z are ignored.
function tmp = code(x, y, z) tmp = x; end
(* Constant approximation: returns x; y and z are ignored. *)
code[x_, y_, z_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 100.0%
Taylor expanded in z around inf 35.5%
Final simplification 35.5%
herbie shell --seed 2023202
;; Source benchmark: Welford running-mean update x + (y - x)/z.
(FPCore (x y z)
:name "Statistics.Sample:$swelfordMean from math-functions-0.1.5.2"
:precision binary64
(+ x (/ (- y x) z)))