
(FPCore (x y) :precision binary64 (/ (+ x y) (+ y y)))
/* Original Herbie expression: (x + y) / (y + y), in binary64. */
double code(double x, double y) {
    double numerator = x + y;
    double denominator = y + y;
    return numerator / denominator;
}
! Original Herbie expression: (x + y) / (y + y), evaluated in real(8).
real(8) function code(x, y)
  real(8), intent(in) :: x
  real(8), intent(in) :: y
  real(8) :: numerator, denominator
  numerator = x + y
  denominator = y + y
  code = numerator / denominator
end function code
/** Original Herbie expression: (x + y) / (y + y). */
public static double code(double x, double y) {
    final double numerator = x + y;
    final double denominator = y + y;
    return numerator / denominator;
}
def code(x, y):
    """Original Herbie expression: (x + y) / (y + y)."""
    numerator = x + y
    denominator = y + y
    return numerator / denominator
# Original Herbie expression: (x + y) / (y + y), forced to Float64 at each step.
function code(x, y)
    num = Float64(x + y)
    den = Float64(y + y)
    return Float64(num / den)
end
% Original Herbie expression: (x + y) / (y + y).
% Fix: the generated code had the function header and body fused on one
% line with no statement separator, which MATLAB rejects; restore lines.
function tmp = code(x, y)
    tmp = (x + y) / (y + y);
end
(* Original Herbie expression: (x + y)/(y + y), rounded at $MachinePrecision after each operation. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(y + y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{y + y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (/ (+ x y) (+ y y)))
/* Alternative 1: identical to the original expression (x + y) / (y + y). */
double code(double x, double y) {
return (x + y) / (y + y);
}
! Alternative 1: identical to the original expression (x + y) / (y + y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x + y) / (y + y)
end function
/** Alternative 1: identical to the original expression (x + y) / (y + y). */
public static double code(double x, double y) {
return (x + y) / (y + y);
}
# Alternative 1: identical to the original expression (x + y) / (y + y).
def code(x, y): return (x + y) / (y + y)
# Alternative 1: identical to the original expression (x + y) / (y + y).
function code(x, y) return Float64(Float64(x + y) / Float64(y + y)) end
% Alternative 1: identical to the original expression (x + y) / (y + y).
% Fix: header and body were fused on one line without a statement
% separator, which MATLAB rejects; restore the line structure.
function tmp = code(x, y)
    tmp = (x + y) / (y + y);
end
(* Alternative 1: identical to the original expression, rounded at $MachinePrecision per operation. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(y + y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{y + y}
\end{array}
(FPCore (x y) :precision binary64 (+ 0.5 (* 0.5 (/ x y))))
/* Herbie alternative 2: 0.5 + 0.5 * (x / y). */
double code(double x, double y) {
    double ratio = x / y;
    return 0.5 + 0.5 * ratio;
}
! Herbie alternative 2: 0.5 + 0.5 * (x / y), evaluated in real(8).
real(8) function code(x, y)
  real(8), intent(in) :: x
  real(8), intent(in) :: y
  real(8) :: ratio
  ratio = x / y
  code = 0.5d0 + (0.5d0 * ratio)
end function code
/** Herbie alternative 2: 0.5 + 0.5 * (x / y). */
public static double code(double x, double y) {
return 0.5 + (0.5 * (x / y));
}
def code(x, y):
    """Herbie alternative 2: 0.5 + 0.5 * (x / y)."""
    ratio = x / y
    return 0.5 + 0.5 * ratio
# Herbie alternative 2: 0.5 + 0.5 * (x / y), forced to Float64 per operation.
function code(x, y) return Float64(0.5 + Float64(0.5 * Float64(x / y))) end
% Herbie alternative 2: 0.5 + 0.5 * (x / y).
% Fix: header and body were fused on one line without a statement
% separator, which MATLAB rejects; restore the line structure.
function tmp = code(x, y)
    tmp = 0.5 + (0.5 * (x / y));
end
(* Herbie alternative 2: 0.5 + 0.5*(x/y), rounded at $MachinePrecision per operation. *)
code[x_, y_] := N[(0.5 + N[(0.5 * N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 + 0.5 \cdot \frac{x}{y}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 100.0%
(FPCore (x y) :precision binary64 (if (or (<= x -2.6e-117) (not (<= x 1.15e+36))) (* 0.5 (/ x y)) 0.5))
/* Herbie alternative 3: 0.5 * (x / y) when x is extreme, else 0.5.
   The !(x <= 1.15e+36) form (rather than x > 1.15e+36) also selects
   the ratio branch for NaN x, matching the generated FPCore. */
double code(double x, double y) {
    const int extreme = (x <= -2.6e-117) || !(x <= 1.15e+36);
    return extreme ? 0.5 * (x / y) : 0.5;
}
! Herbie alternative 3: 0.5 * (x / y) when x is extreme, else 0.5.
! The .not. (x <= ...) form also selects the ratio branch for NaN x.
real(8) function code(x, y)
  real(8), intent(in) :: x
  real(8), intent(in) :: y
  if ((x <= (-2.6d-117)) .or. (.not. (x <= 1.15d+36))) then
    code = 0.5d0 * (x / y)
  else
    code = 0.5d0
  end if
end function code
/**
 * Herbie alternative 3: 0.5 * (x / y) when x is outside
 * (-2.6e-117, 1.15e36], otherwise the constant 0.5.
 * Note: !(x <= 1.15e+36) is also true for NaN x, so NaN takes the
 * ratio branch.
 */
public static double code(double x, double y) {
double tmp;
if ((x <= -2.6e-117) || !(x <= 1.15e+36)) {
tmp = 0.5 * (x / y);
} else {
tmp = 0.5;
}
return tmp;
}
def code(x, y):
    """Herbie alternative 3: 0.5 * (x / y) for extreme x, else 0.5.

    Fix: the generated code had the if/else/return collapsed onto the
    def line, which is invalid Python; the line structure is restored.
    The dead ``tmp = 0`` initializer (overwritten on both branches) is
    dropped.
    """
    # not (x <= 1.15e+36) is also True for NaN x, matching the FPCore.
    if (x <= -2.6e-117) or not (x <= 1.15e+36):
        tmp = 0.5 * (x / y)
    else:
        tmp = 0.5
    return tmp
# Herbie alternative 3: 0.5 * (x / y) for extreme x, else 0.5.
# Fix: the generated one-liner had no separator between `tmp = 0.0` and
# the `if`, which is invalid Julia; the line structure is restored.
function code(x, y)
    tmp = 0.0
    # !(x <= 1.15e+36) is also true for NaN x, matching the FPCore.
    if (x <= -2.6e-117) || !(x <= 1.15e+36)
        tmp = Float64(0.5 * Float64(x / y))
    else
        tmp = 0.5
    end
    return tmp
end
% Herbie alternative 3: 0.5 * (x / y) for extreme x, else 0.5.
% Fix: statements were collapsed onto one line with no separators,
% which MATLAB rejects; restore the line structure.
function tmp_2 = code(x, y)
    tmp = 0.0;
    % ~(x <= 1.15e+36) is also true for NaN x, matching the FPCore.
    if ((x <= -2.6e-117) || ~((x <= 1.15e+36)))
        tmp = 0.5 * (x / y);
    else
        tmp = 0.5;
    end
    tmp_2 = tmp;
end
(* Herbie alternative 3 as Mathematica. NOTE(review): wrapping the Boolean Not[...] in N[...] looks like a code-generator artifact (N of True/False is a no-op at best) — confirm against Herbie's Mathematica backend before reuse. *)
code[x_, y_] := If[Or[LessEqual[x, -2.6e-117], N[Not[LessEqual[x, 1.15e+36]], $MachinePrecision]], N[(0.5 * N[(x / y), $MachinePrecision]), $MachinePrecision], 0.5]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.6 \cdot 10^{-117} \lor \neg \left(x \leq 1.15 \cdot 10^{+36}\right):\\
\;\;\;\;0.5 \cdot \frac{x}{y}\\
\mathbf{else}:\\
\;\;\;\;0.5\\
\end{array}
\end{array}
if x < -2.59999999999999983e-117 or 1.14999999999999998e36 < x: Initial program 100.0%
Taylor expanded in x around inf 74.6%
if -2.59999999999999983e-117 < x < 1.14999999999999998e36: Initial program 100.0%
Taylor expanded in x around 0 82.7%
Final simplification 78.0%
(FPCore (x y) :precision binary64 0.5)
/* Herbie alternative 4: the constant 0.5 (Taylor expansion in x around 0). */
double code(double x, double y) {
return 0.5;
}
! Herbie alternative 4: the constant 0.5 (Taylor expansion in x around 0).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.5d0
end function
/** Herbie alternative 4: the constant 0.5 (Taylor expansion in x around 0). */
public static double code(double x, double y) {
return 0.5;
}
# Herbie alternative 4: the constant 0.5 (Taylor expansion in x around 0).
def code(x, y): return 0.5
# Herbie alternative 4: the constant 0.5 (Taylor expansion in x around 0).
function code(x, y) return 0.5 end
% Herbie alternative 4: the constant 0.5 (Taylor expansion in x around 0).
% Fix: header and body were fused on one line without a statement
% separator, which MATLAB rejects; restore the line structure.
function tmp = code(x, y)
    tmp = 0.5;
end
(* Herbie alternative 4: the constant 0.5 (Taylor expansion in x around 0). *)
code[x_, y_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 50.9%
(FPCore (x y) :precision binary64 0.0)
/* Herbie alternative 5: the constant 0.0 (from the rewrite chain reported below). */
double code(double x, double y) {
return 0.0;
}
! Herbie alternative 5: the constant 0.0 (from the rewrite chain reported below).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.0d0
end function
/** Herbie alternative 5: the constant 0.0 (from the rewrite chain reported below). */
public static double code(double x, double y) {
return 0.0;
}
# Herbie alternative 5: the constant 0.0 (from the rewrite chain reported below).
def code(x, y): return 0.0
# Herbie alternative 5: the constant 0.0 (from the rewrite chain reported below).
function code(x, y) return 0.0 end
% Herbie alternative 5: the constant 0.0.
% Fix: header and body were fused on one line without a statement
% separator, which MATLAB rejects; restore the line structure.
function tmp = code(x, y)
    tmp = 0.0;
end
(* Herbie alternative 5: the constant 0.0. *)
code[x_, y_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 100.0%
add-log-exp 62.6%
*-un-lft-identity 62.6%
exp-prod 62.6%
flip-+ 0.0%
+-inverses 0.0%
+-inverses 0.0%
associate-/r/ 0.0%
pow-unpow 1.9%
+-inverses 2.6%
metadata-eval 2.6%
metadata-eval 2.6%
Applied egg-rr 2.6%
(FPCore (x y) :precision binary64 (+ (* 0.5 (/ x y)) 0.5))
/* Recommended alternative: (0.5 * (x / y)) + 0.5, same terms as alternative 2 reordered. */
double code(double x, double y) {
return (0.5 * (x / y)) + 0.5;
}
! Recommended alternative: (0.5 * (x / y)) + 0.5.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (0.5d0 * (x / y)) + 0.5d0
end function
/** Recommended alternative: (0.5 * (x / y)) + 0.5. */
public static double code(double x, double y) {
return (0.5 * (x / y)) + 0.5;
}
# Recommended alternative: (0.5 * (x / y)) + 0.5.
def code(x, y): return (0.5 * (x / y)) + 0.5
# Recommended alternative: (0.5 * (x / y)) + 0.5, forced to Float64 per operation.
function code(x, y) return Float64(Float64(0.5 * Float64(x / y)) + 0.5) end
% Recommended alternative: (0.5 * (x / y)) + 0.5.
% Fix: header and body were fused on one line without a statement
% separator, which MATLAB rejects; restore the line structure.
function tmp = code(x, y)
    tmp = (0.5 * (x / y)) + 0.5;
end
(* Recommended alternative: 0.5*(x/y) + 0.5, rounded at $MachinePrecision per operation. *)
code[x_, y_] := N[(N[(0.5 * N[(x / y), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \frac{x}{y} + 0.5
\end{array}
herbie shell --seed 2024097
(FPCore (x y)
:name "Data.Random.Distribution.T:$ccdf from random-fu-0.2.6.2"
:precision binary64
:alt
(+ (* 0.5 (/ x y)) 0.5)
(/ (+ x y) (+ y y)))