
; Initial program: x*y*(1-y) in binary64, evaluated as (x*y)*(1-y)
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
// Initial program: x*y*(1-y), evaluated as (x*y)*(1-y) in binary64.
double code(double x, double y) {
return (x * y) * (1.0 - y);
}
! Initial program: code = (x*y)*(1-y) in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x * y) * (1.0d0 - y)
end function
// Initial program: x*y*(1-y), evaluated as (x*y)*(1-y).
public static double code(double x, double y) {
return (x * y) * (1.0 - y);
}
def code(x, y):
    """Evaluate x*y*(1 - y) exactly as the FPCore: (x*y) * (1.0 - y)."""
    left = x * y
    right = 1.0 - y
    return left * right
function code(x, y) return Float64(Float64(x * y) * Float64(1.0 - y)) end # (x*y)*(1-y) with explicit binary64 roundings
function tmp = code(x, y) tmp = (x * y) * (1.0 - y); end % (x*y)*(1-y)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision] (* (x*y)*(1-y), rounded at $MachinePrecision *)
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative identical to the initial program: (x*y)*(1-y)
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
// Alternative identical to the initial program: (x*y)*(1-y).
double code(double x, double y) {
return (x * y) * (1.0 - y);
}
! Alternative identical to the initial program: (x*y)*(1-y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x * y) * (1.0d0 - y)
end function
// Alternative identical to the initial program: (x*y)*(1-y).
public static double code(double x, double y) {
return (x * y) * (1.0 - y);
}
def code(x, y):
    """Evaluate x*y*(1 - y) exactly as the FPCore: (x*y) * (1.0 - y)."""
    product = x * y
    complement = 1.0 - y
    return product * complement
function code(x, y) return Float64(Float64(x * y) * Float64(1.0 - y)) end # (x*y)*(1-y) with explicit binary64 roundings
function tmp = code(x, y) tmp = (x * y) * (1.0 - y); end % (x*y)*(1-y)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision] (* (x*y)*(1-y), rounded at $MachinePrecision *)
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
; Alternative rewriting: y*(x - x*y)
(FPCore (x y) :precision binary64 (* y (- x (* x y))))
// Alternative rewriting: y*(x - x*y).
double code(double x, double y) {
return y * (x - (x * y));
}
! Alternative rewriting: y*(x - x*y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y * (x - (x * y))
end function
// Alternative rewriting: y*(x - x*y).
public static double code(double x, double y) {
return y * (x - (x * y));
}
def code(x, y):
    """Evaluate y*(x - x*y), a rewriting of x*y*(1 - y)."""
    prod = x * y
    return y * (x - prod)
function code(x, y) return Float64(y * Float64(x - Float64(x * y))) end # y*(x - x*y)
function tmp = code(x, y) tmp = y * (x - (x * y)); end % y*(x - x*y)
code[x_, y_] := N[(y * N[(x - N[(x * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] (* y*(x - x*y) *)
\begin{array}{l}
\\
y \cdot \left(x - x \cdot y\right)
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*r* 99.9%
Applied egg-rr 99.9%
*-commutative 99.9%
sub-neg 99.9%
distribute-rgt-in 99.9%
*-un-lft-identity 99.9%
distribute-lft-neg-in 99.9%
*-commutative 99.9%
unsub-neg 99.9%
*-commutative 99.9%
Applied egg-rr 99.9%
Final simplification 99.9%
; Piecewise alternative: y*(x*(-y)) when y <= -1 or y > 1, else x*y
(FPCore (x y) :precision binary64 (if (or (<= y -1.0) (not (<= y 1.0))) (* y (* x (- y))) (* x y)))
// Piecewise alternative: for y <= -1 or y > 1 use y*(x*(-y))
// (Taylor expansion around infinity per the derivation log),
// otherwise the plain product x*y (expansion around 0).
double code(double x, double y) {
double tmp;
if ((y <= -1.0) || !(y <= 1.0)) {
tmp = y * (x * -y);
} else {
tmp = x * y;
}
return tmp;
}
! Piecewise alternative: for y <= -1 or y > 1 use y*(x*(-y))
! (Taylor expansion around infinity per the derivation log),
! otherwise the plain product x*y.
! Fix: the generated "x * -y" placed two operators consecutively,
! which standard Fortran forbids; parenthesize the unary minus.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((y <= (-1.0d0)) .or. (.not. (y <= 1.0d0))) then
tmp = y * (x * (-y))
else
tmp = x * y
end if
code = tmp
end function
// Piecewise alternative: for y <= -1 or y > 1 use y*(x*(-y))
// (Taylor expansion around infinity per the derivation log),
// otherwise the plain product x*y (expansion around 0).
public static double code(double x, double y) {
double tmp;
if ((y <= -1.0) || !(y <= 1.0)) {
tmp = y * (x * -y);
} else {
tmp = x * y;
}
return tmp;
}
def code(x, y):
    """Piecewise alternative to x*y*(1 - y).

    For y <= -1 or y > 1 return y*(x*(-y)); otherwise return x*y.
    Fix: the generated snippet had its newlines stripped, leaving
    syntactically invalid Python; the multi-line form is restored here.
    """
    tmp = 0
    if (y <= -1.0) or not (y <= 1.0):
        tmp = y * (x * -y)
    else:
        tmp = x * y
    return tmp
# Piecewise alternative to x*y*(1-y): y*(x*(-y)) when y <= -1 or y > 1, else x*y.
# Fix: the one-line form lacked statement separators and did not parse;
# the multi-line form is restored here.
function code(x, y)
    tmp = 0.0
    if ((y <= -1.0) || !(y <= 1.0))
        tmp = Float64(y * Float64(x * Float64(-y)))
    else
        tmp = Float64(x * y)
    end
    return tmp
end
% Piecewise alternative to x*y*(1-y): y*(x*(-y)) when y <= -1 or y > 1, else x*y.
% Fix: the flattened one-line form ran the if-condition into its body
% without a separator; the multi-line form is restored here.
function tmp_2 = code(x, y)
    tmp = 0.0;
    if ((y <= -1.0) || ~((y <= 1.0)))
        tmp = y * (x * -y);
    else
        tmp = x * y;
    end
    tmp_2 = tmp;
end
code[x_, y_] := If[Or[LessEqual[y, -1.0], N[Not[LessEqual[y, 1.0]], $MachinePrecision]], N[(y * N[(x * (-y)), $MachinePrecision]), $MachinePrecision], N[(x * y), $MachinePrecision]] (* piecewise: y*(x*(-y)) when y <= -1 or y > 1, else x*y; NOTE(review): N[Not[...]] wraps a boolean in N — generated artifact, confirm intended *)
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -1 \lor \neg \left(y \leq 1\right):\\
\;\;\;\;y \cdot \left(x \cdot \left(-y\right)\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot y\\
\end{array}
\end{array}
if y < -1 or 1 < y: Initial program 99.8%
*-commutative 99.8%
associate-*r* 99.8%
Applied egg-rr 99.8%
Taylor expanded in y around inf 98.1%
mul-1-neg 98.1%
distribute-rgt-neg-in 98.1%
Simplified 98.1%
if -1 < y < 1: Initial program 100.0%
Taylor expanded in y around 0 98.1%
Final simplification 98.1%
; Alternative rewriting: y*(x*(1-y))
(FPCore (x y) :precision binary64 (* y (* x (- 1.0 y))))
// Alternative rewriting: y*(x*(1-y)).
double code(double x, double y) {
return y * (x * (1.0 - y));
}
! Alternative rewriting: y*(x*(1-y)).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y * (x * (1.0d0 - y))
end function
// Alternative rewriting: y*(x*(1-y)).
public static double code(double x, double y) {
return y * (x * (1.0 - y));
}
def code(x, y):
    """Evaluate y*(x*(1 - y)), a reassociation of x*y*(1 - y)."""
    inner = x * (1.0 - y)
    return y * inner
function code(x, y) return Float64(y * Float64(x * Float64(1.0 - y))) end # y*(x*(1-y))
function tmp = code(x, y) tmp = y * (x * (1.0 - y)); end % y*(x*(1-y))
code[x_, y_] := N[(y * N[(x * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] (* y*(x*(1-y)) *)
\begin{array}{l}
\\
y \cdot \left(x \cdot \left(1 - y\right)\right)
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*r* 99.9%
Applied egg-rr 99.9%
Final simplification 99.9%
; Alternative rewriting: (1-y)*(x*y)
(FPCore (x y) :precision binary64 (* (- 1.0 y) (* x y)))
// Alternative rewriting: (1-y)*(x*y).
double code(double x, double y) {
return (1.0 - y) * (x * y);
}
! Alternative rewriting: (1-y)*(x*y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (1.0d0 - y) * (x * y)
end function
// Alternative rewriting: (1-y)*(x*y).
public static double code(double x, double y) {
return (1.0 - y) * (x * y);
}
def code(x, y):
    """Evaluate (1 - y)*(x*y), a commutation of x*y*(1 - y)."""
    complement = 1.0 - y
    product = x * y
    return complement * product
function code(x, y) return Float64(Float64(1.0 - y) * Float64(x * y)) end # (1-y)*(x*y)
function tmp = code(x, y) tmp = (1.0 - y) * (x * y); end % (1-y)*(x*y)
code[x_, y_] := N[(N[(1.0 - y), $MachinePrecision] * N[(x * y), $MachinePrecision]), $MachinePrecision] (* (1-y)*(x*y) *)
\begin{array}{l}
\\
\left(1 - y\right) \cdot \left(x \cdot y\right)
\end{array}
Initial program 99.9%
Final simplification 99.9%
; Truncated alternative: x*y alone (Taylor expansion in y around 0 per the log)
(FPCore (x y) :precision binary64 (* x y))
// Truncated alternative: x*y alone (Taylor expansion in y around 0).
double code(double x, double y) {
return x * y;
}
! Truncated alternative: x*y alone (Taylor expansion in y around 0).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * y
end function
// Truncated alternative: x*y alone (Taylor expansion in y around 0).
public static double code(double x, double y) {
return x * y;
}
def code(x, y):
    """Return the binary64 product x*y (truncated alternative)."""
    result = x * y
    return result
function code(x, y) return Float64(x * y) end # truncated alternative: x*y only
function tmp = code(x, y) tmp = x * y; end % truncated alternative: x*y only
code[x_, y_] := N[(x * y), $MachinePrecision] (* truncated alternative: x*y only *)
\begin{array}{l}
\\
x \cdot y
\end{array}
Initial program 99.9%
Taylor expanded in y around 0 56.3%
; Aggressively simplified alternative: -x (y unused); low reported accuracy in the log
(FPCore (x y) :precision binary64 (- x))
// Aggressively simplified alternative: -x (y unused);
// the derivation log shows low reported accuracy for this variant.
double code(double x, double y) {
return -x;
}
! Aggressively simplified alternative: -x (y unused);
! the derivation log shows low reported accuracy for this variant.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = -x
end function
// Aggressively simplified alternative: -x (y unused);
// the derivation log shows low reported accuracy for this variant.
public static double code(double x, double y) {
return -x;
}
def code(x, y):
    """Return -x; y is unused in this simplified alternative."""
    negated = -x
    return negated
function code(x, y) return Float64(-x) end # simplified alternative: -x, y unused
function tmp = code(x, y) tmp = -x; end % simplified alternative: -x, y unused
code[x_, y_] := (-x) (* simplified alternative: -x, y unused *)
\begin{array}{l}
\\
-x
\end{array}
Initial program 99.9%
flip-- 92.9%
associate-*r/ 91.5%
metadata-eval 91.5%
pow2 91.5%
+-commutative 91.5%
Applied egg-rr 91.5%
associate-*l* 87.7%
*-commutative 87.7%
associate-/l* 83.1%
sub-neg 83.1%
distribute-lft-in 83.1%
distribute-rgt-neg-in 83.1%
unpow2 83.1%
cube-mult 83.2%
unsub-neg 83.2%
*-rgt-identity 83.2%
Simplified 83.2%
Taylor expanded in y around inf 34.0%
Taylor expanded in y around 0 2.7%
associate-*r/ 11.9%
frac-2neg 11.9%
add-sqr-sqrt 5.3%
sqrt-unprod 4.1%
sqr-neg 4.1%
sqrt-unprod 12.2%
add-sqr-sqrt 24.2%
Applied egg-rr 24.2%
distribute-rgt-neg-in 24.2%
neg-mul-1 24.2%
*-commutative 24.2%
associate-/l* 4.4%
*-inverses 4.4%
*-rgt-identity 4.4%
neg-mul-1 4.4%
Simplified 4.4%
herbie shell --seed 2024096
; Reproduction input for the "herbie shell" command above
(FPCore (x y)
:name "Statistics.Distribution.Binomial:$cvariance from math-functions-0.1.5.2"
:precision binary64
(* (* x y) (- 1.0 y)))