
; Reference expression: x*y*(1 - y), evaluated in binary64.
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
double code(double x, double y) {
    /* Evaluate x*y*(1 - y); same operation order as the reference expression. */
    double product = x * y;
    double complement = 1.0 - y;
    return product * complement;
}
real(8) function code(x, y)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: product
    ! Evaluate x*y*(1 - y) with the same operation order as the reference.
    product = x * y
    code = product * (1.0d0 - y)
end function
public static double code(double x, double y) {
    // Evaluate x*y*(1 - y); same operation order as the reference expression.
    final double product = x * y;
    final double complement = 1.0 - y;
    return product * complement;
}
def code(x, y):
    """Evaluate x*y*(1 - y) in double precision."""
    product = x * y
    complement = 1.0 - y
    return product * complement
function code(x, y)
    # Evaluate x*y*(1 - y); each intermediate rounded to Float64 as in the original.
    xy = Float64(x * y)
    return Float64(xy * Float64(1.0 - y))
end
function tmp = code(x, y)
    % Evaluate x*y*(1 - y) in double precision.
    tmp = (x * y) * (1.0 - y);
end
(* Evaluate x*y*(1 - y), rounding each subexpression to machine precision. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Reference expression: x*y*(1 - y), evaluated in binary64.
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
double code(double x, double y) {
    /* Evaluate x*y*(1 - y); same operation order as the reference expression. */
    double product = x * y;
    double complement = 1.0 - y;
    return product * complement;
}
!> Evaluate x*y*(1 - y) in double precision (binary64).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
! Direct translation of the FPCore expression (* (* x y) (- 1.0 y)).
code = (x * y) * (1.0d0 - y)
end function
public static double code(double x, double y) {
    // Evaluate x*y*(1 - y); same operation order as the reference expression.
    final double product = x * y;
    final double complement = 1.0 - y;
    return product * complement;
}
def code(x, y):
    """Evaluate x*y*(1 - y) in double precision."""
    product = x * y
    complement = 1.0 - y
    return product * complement
function code(x, y)
    # Evaluate x*y*(1 - y); each intermediate rounded to Float64 as in the original.
    xy = Float64(x * y)
    return Float64(xy * Float64(1.0 - y))
end
function tmp = code(x, y)
    % Evaluate x*y*(1 - y) in double precision.
    tmp = (x * y) * (1.0 - y);
end
(* Evaluate x*y*(1 - y), rounding each subexpression to machine precision. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
; Reference expression: x*y*(1 - y), evaluated in binary64.
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
double code(double x, double y) {
    /* Evaluate x*y*(1 - y); same operation order as the reference expression. */
    double product = x * y;
    double complement = 1.0 - y;
    return product * complement;
}
!> Evaluate x*y*(1 - y) in double precision (binary64).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
! Direct translation of the FPCore expression (* (* x y) (- 1.0 y)).
code = (x * y) * (1.0d0 - y)
end function
public static double code(double x, double y) {
    // Evaluate x*y*(1 - y); same operation order as the reference expression.
    final double product = x * y;
    final double complement = 1.0 - y;
    return product * complement;
}
def code(x, y):
    """Evaluate x*y*(1 - y) in double precision."""
    product = x * y
    complement = 1.0 - y
    return product * complement
function code(x, y)
    # Evaluate x*y*(1 - y); each intermediate rounded to Float64 as in the original.
    xy = Float64(x * y)
    return Float64(xy * Float64(1.0 - y))
end
function tmp = code(x, y)
    % Evaluate x*y*(1 - y) in double precision.
    tmp = (x * y) * (1.0 - y);
end
(* Evaluate x*y*(1 - y), rounding each subexpression to machine precision. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
Initial program 99.9%
; Alternative: when |y| > 1 the (1 - y) factor is replaced by -y
; (Taylor expansion around infinity); otherwise multiply by 1.
(FPCore (x y) :precision binary64 (if (or (<= y -1.0) (not (<= y 1.0))) (* (* x y) (- y)) (* (* x y) 1.0)))
double code(double x, double y) {
    /* |y| > 1 (NaN included via the negated test): scale x*y by -y;
       otherwise scale by 1.0 (identity). */
    if (y <= -1.0 || !(y <= 1.0)) {
        return (x * y) * -y;
    }
    return (x * y) * 1.0;
}
!> Piecewise evaluation of x*y*(1 - y): for |y| > 1 the (1 - y) factor
!> is replaced by -y; otherwise the product x*y is kept (times 1).
real(8) function code(x, y)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: tmp
    if ((y <= (-1.0d0)) .or. (.not. (y <= 1.0d0))) then
        ! Fix: the original `* -y` places two operators side by side, which is
        ! not standard Fortran (only a vendor extension); parenthesize the negation.
        tmp = (x * y) * (-y)
    else
        tmp = (x * y) * 1.0d0
    end if
    code = tmp
end function
public static double code(double x, double y) {
    // |y| > 1 (NaN included via the negated test): scale x*y by -y;
    // otherwise scale by 1.0 (identity).
    if (y <= -1.0 || !(y <= 1.0)) {
        return (x * y) * -y;
    }
    return (x * y) * 1.0;
}
def code(x, y):
    """Piecewise form of x*y*(1 - y): for |y| > 1 multiply x*y by -y,
    otherwise by 1.0.

    Fix: the original rendering was collapsed onto a single line, which is
    invalid Python syntax; restored to standard multi-line form.
    """
    if (y <= -1.0) or not (y <= 1.0):
        tmp = (x * y) * -y
    else:
        tmp = (x * y) * 1.0
    return tmp
function code(x, y)
    # Piecewise form of x*y*(1 - y): for |y| > 1 multiply x*y by -y,
    # otherwise by 1.0. Reformatted: the original was collapsed onto one
    # line, which does not parse as Julia.
    tmp = 0.0
    if (y <= -1.0) || !(y <= 1.0)
        tmp = Float64(Float64(x * y) * Float64(-y))
    else
        tmp = Float64(Float64(x * y) * 1.0)
    end
    return tmp
end
function tmp_2 = code(x, y)
    % Piecewise form of x*y*(1 - y): for |y| > 1 multiply x*y by -y,
    % otherwise by 1.0. Reformatted: the single-line `if` without comma
    % separators after the condition is not valid MATLAB syntax.
    tmp = 0.0;
    if ((y <= -1.0) || ~((y <= 1.0)))
        tmp = (x * y) * -y;
    else
        tmp = (x * y) * 1.0;
    end
    tmp_2 = tmp;
end
(* Piecewise form of x*y*(1 - y): for |y| > 1 multiply x*y by -y, otherwise by 1.0. *)
code[x_, y_] := If[Or[LessEqual[y, -1.0], N[Not[LessEqual[y, 1.0]], $MachinePrecision]], N[(N[(x * y), $MachinePrecision] * (-y)), $MachinePrecision], N[(N[(x * y), $MachinePrecision] * 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -1 \lor \neg \left(y \leq 1\right):\\
\;\;\;\;\left(x \cdot y\right) \cdot \left(-y\right)\\
\mathbf{else}:\\
\;\;\;\;\left(x \cdot y\right) \cdot 1\\
\end{array}
\end{array}
if y < -1 or 1 < y: Initial program 99.8%
Taylor expanded in y around inf
mul-1-neg N/A
lower-neg.f64 97.6
Applied rewrites 97.6%
if -1 < y < 1: Initial program 100.0%
Taylor expanded in y around 0
Applied rewrites 97.7%
Final simplification 97.6%
; Alternative: when |y| > 1 use ((-y)*y)*x; otherwise (x*y)*1.
(FPCore (x y) :precision binary64 (if (or (<= y -1.0) (not (<= y 1.0))) (* (* (- y) y) x) (* (* x y) 1.0)))
double code(double x, double y) {
    /* |y| > 1 (NaN included via the negated test): use ((-y)*y)*x;
       otherwise (x*y)*1.0. */
    if (y <= -1.0 || !(y <= 1.0)) {
        return (-y * y) * x;
    }
    return (x * y) * 1.0;
}
!> Piecewise evaluation of x*y*(1 - y): for |y| > 1 the expression is
!> approximated by (-y*y)*x (Taylor expansion around infinity).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((y <= (-1.0d0)) .or. (.not. (y <= 1.0d0))) then
! Note: -y * y parses as -(y*y) in Fortran; value is identical to (-y)*y in IEEE.
tmp = (-y * y) * x
else
tmp = (x * y) * 1.0d0
end if
code = tmp
end function
public static double code(double x, double y) {
    // |y| > 1 (NaN included via the negated test): use ((-y)*y)*x;
    // otherwise (x*y)*1.0.
    if (y <= -1.0 || !(y <= 1.0)) {
        return (-y * y) * x;
    }
    return (x * y) * 1.0;
}
def code(x, y):
    """Piecewise form of x*y*(1 - y): for |y| > 1 use (-y*y)*x,
    otherwise (x*y)*1.0.

    Fix: the original rendering was collapsed onto a single line, which is
    invalid Python syntax; restored to standard multi-line form.
    """
    if (y <= -1.0) or not (y <= 1.0):
        tmp = (-y * y) * x
    else:
        tmp = (x * y) * 1.0
    return tmp
function code(x, y)
    # Piecewise form of x*y*(1 - y): for |y| > 1 use ((-y)*y)*x,
    # otherwise (x*y)*1.0. Reformatted: the original was collapsed onto
    # one line, which does not parse as Julia.
    tmp = 0.0
    if (y <= -1.0) || !(y <= 1.0)
        tmp = Float64(Float64(Float64(-y) * y) * x)
    else
        tmp = Float64(Float64(x * y) * 1.0)
    end
    return tmp
end
function tmp_2 = code(x, y)
    % Piecewise form of x*y*(1 - y): for |y| > 1 use (-y*y)*x, otherwise
    % (x*y)*1.0. Reformatted: the single-line `if` without comma
    % separators after the condition is not valid MATLAB syntax.
    tmp = 0.0;
    if ((y <= -1.0) || ~((y <= 1.0)))
        tmp = (-y * y) * x;
    else
        tmp = (x * y) * 1.0;
    end
    tmp_2 = tmp;
end
(* Piecewise form of x*y*(1 - y): for |y| > 1 use ((-y)*y)*x, otherwise (x*y)*1.0. *)
code[x_, y_] := If[Or[LessEqual[y, -1.0], N[Not[LessEqual[y, 1.0]], $MachinePrecision]], N[(N[((-y) * y), $MachinePrecision] * x), $MachinePrecision], N[(N[(x * y), $MachinePrecision] * 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -1 \lor \neg \left(y \leq 1\right):\\
\;\;\;\;\left(\left(-y\right) \cdot y\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\left(x \cdot y\right) \cdot 1\\
\end{array}
\end{array}
if y < -1 or 1 < y: Initial program 99.8%
Taylor expanded in y around inf
mul-1-neg N/A
*-commutative N/A
distribute-lft-neg-in N/A
lower-*.f64 N/A
unpow2 N/A
distribute-lft-neg-in N/A
mul-1-neg N/A
lower-*.f64 N/A
mul-1-neg N/A
lower-neg.f64 87.4
Applied rewrites 87.4%
if -1 < y < 1: Initial program 100.0%
Taylor expanded in y around 0
Applied rewrites 97.7%
Final simplification 92.9%
; Alternative: for y <= 1 keep (x*y)*1; for y > 1 use (-y)*x.
(FPCore (x y) :precision binary64 (if (<= y 1.0) (* (* x y) 1.0) (* (- y) x)))
double code(double x, double y) {
    /* Keep (x*y)*1.0 for y <= 1; for y > 1 (or NaN) use -y*x. */
    return (y <= 1.0) ? (x * y) * 1.0 : -y * x;
}
real(8) function code(x, y)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    ! Keep (x*y)*1 for y <= 1; for y > 1 use the simplified form -y*x.
    if (y <= 1.0d0) then
        code = (x * y) * 1.0d0
    else
        code = -y * x
    end if
end function
public static double code(double x, double y) {
    // Keep (x*y)*1.0 for y <= 1; for y > 1 (or NaN) use -y*x.
    return (y <= 1.0) ? (x * y) * 1.0 : -y * x;
}
def code(x, y):
    """For y <= 1 keep (x*y)*1.0; for y > 1 use -y*x.

    Fix: the original rendering was collapsed onto a single line, which is
    invalid Python syntax; restored to standard multi-line form.
    """
    if y <= 1.0:
        tmp = (x * y) * 1.0
    else:
        tmp = -y * x
    return tmp
function code(x, y)
    # For y <= 1 keep (x*y)*1.0; for y > 1 use (-y)*x. Reformatted: the
    # original was collapsed onto one line, which does not parse as Julia.
    tmp = 0.0
    if y <= 1.0
        tmp = Float64(Float64(x * y) * 1.0)
    else
        tmp = Float64(Float64(-y) * x)
    end
    return tmp
end
function tmp_2 = code(x, y)
    % For y <= 1 keep (x*y)*1.0; for y > 1 use -y*x. Reformatted: the
    % single-line `if` without comma separators after the condition is
    % not valid MATLAB syntax.
    tmp = 0.0;
    if (y <= 1.0)
        tmp = (x * y) * 1.0;
    else
        tmp = -y * x;
    end
    tmp_2 = tmp;
end
(* For y <= 1 keep (x*y)*1.0; for y > 1 use (-y)*x. *)
code[x_, y_] := If[LessEqual[y, 1.0], N[(N[(x * y), $MachinePrecision] * 1.0), $MachinePrecision], N[((-y) * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 1:\\
\;\;\;\;\left(x \cdot y\right) \cdot 1\\
\mathbf{else}:\\
\;\;\;\;\left(-y\right) \cdot x\\
\end{array}
\end{array}
if y < 1: Initial program 99.9%
Taylor expanded in y around 0
Applied rewrites 77.7%
if 1 < y: Initial program 99.8%
Applied rewrites 99.8%
Applied rewrites 92.2%
Taylor expanded in y around 0
*-commutative N/A
associate-*r* N/A
lower-*.f64 N/A
mul-1-neg N/A
lower-neg.f64 33.3
Applied rewrites 33.3%
; Alternative: fully simplified to (-y)*x.
(FPCore (x y) :precision binary64 (* (- y) x))
double code(double x, double y) {
    /* Fully simplified alternative: (-y) * x. */
    double neg_y = -y;
    return neg_y * x;
}
real(8) function code(x, y)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: neg_y
    ! Fully simplified alternative: (-y) * x.
    neg_y = -y
    code = neg_y * x
end function
public static double code(double x, double y) {
    // Fully simplified alternative: (-y) * x.
    final double negY = -y;
    return negY * x;
}
def code(x, y):
    """Fully simplified alternative: (-y) * x."""
    neg_y = -y
    return neg_y * x
function code(x, y)
    # Fully simplified alternative: (-y) * x.
    neg_y = Float64(-y)
    return Float64(neg_y * x)
end
function tmp = code(x, y)
    % Fully simplified alternative: (-y) * x.
    neg_y = -y;
    tmp = neg_y * x;
end
(* Fully simplified alternative: (-y) * x. *)
code[x_, y_] := N[((-y) * x), $MachinePrecision]
\begin{array}{l}
\\
\left(-y\right) \cdot x
\end{array}
Initial program 99.9%
Applied rewrites 99.9%
Applied rewrites 55.7%
Taylor expanded in y around 0
*-commutative N/A
associate-*r* N/A
lower-*.f64 N/A
mul-1-neg N/A
lower-neg.f64 23.2
Applied rewrites 23.2%
herbie shell --seed 2024313
; Original input program as given to Herbie.
(FPCore (x y)
:name "Statistics.Distribution.Binomial:$cvariance from math-functions-0.1.5.2"
:precision binary64
(* (* x y) (- 1.0 y)))