
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
double code(double x, double y) {
    /* Evaluate (x * y) * (1 - y) in double precision. */
    double product = x * y;
    double complement = 1.0 - y;
    return product * complement;
}
real(8) function code(x, y)
! Evaluate (x * y) * (1 - y) in double precision.
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: product
product = x * y
code = product * (1.0d0 - y)
end function
public static double code(double x, double y) {
    // Evaluate (x * y) * (1 - y) in double precision.
    final double product = x * y;
    final double complement = 1.0 - y;
    return product * complement;
}
def code(x, y):
    """Return (x * y) * (1.0 - y)."""
    product = x * y
    return product * (1.0 - y)
function code(x, y)
    # (x * y) * (1 - y), with explicit Float64 rounding at each step.
    product = Float64(x * y)
    complement = Float64(1.0 - y)
    return Float64(product * complement)
end
function tmp = code(x, y)
% Evaluate (x * y) * (1 - y).
p = x * y;
tmp = p * (1.0 - y);
end
(* (x*y)*(1-y); each intermediate is rounded to $MachinePrecision via N. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
double code(double x, double y) {
    /* Evaluate (x * y) * (1 - y) in double precision. */
    double product = x * y;
    double complement = 1.0 - y;
    return product * complement;
}
real(8) function code(x, y)
! Evaluate (x * y) * (1 - y) in double precision.
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: product
product = x * y
code = product * (1.0d0 - y)
end function
public static double code(double x, double y) {
    // Evaluate (x * y) * (1 - y) in double precision.
    final double product = x * y;
    final double complement = 1.0 - y;
    return product * complement;
}
def code(x, y):
    """Return (x * y) * (1.0 - y)."""
    product = x * y
    return product * (1.0 - y)
function code(x, y)
    # (x * y) * (1 - y), with explicit Float64 rounding at each step.
    product = Float64(x * y)
    complement = Float64(1.0 - y)
    return Float64(product * complement)
end
function tmp = code(x, y)
% Evaluate (x * y) * (1 - y).
p = x * y;
tmp = p * (1.0 - y);
end
(* (x*y)*(1-y); each intermediate is rounded to $MachinePrecision via N. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
(FPCore (x y) :precision binary64 (fma y x (* y (* y (- 0.0 x)))))
double code(double x, double y) {
return fma(y, x, (y * (y * (0.0 - x))));
}
function code(x, y)
    # fma(y, x, y * y * (0 - x)) with explicit Float64 rounding steps.
    tail = Float64(y * Float64(0.0 - x))
    return fma(y, x, Float64(y * tail))
end
(* y*x + y*(y*(0 - x)) = x*y*(1 - y), rounded stepwise to $MachinePrecision. *)
code[x_, y_] := N[(y * x + N[(y * N[(y * N[(0.0 - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y, x, y \cdot \left(y \cdot \left(0 - x\right)\right)\right)
\end{array}
Initial program: 99.9%
sub-neg: N/A
distribute-lft-in: N/A
*-rgt-identity: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: N/A
*-commutative: N/A
distribute-lft-neg-out: N/A
neg-sub0: N/A
--lowering--.f64: N/A
*-lowering-*.f64: N/A
*-commutative: N/A
*-lowering-*.f64: 99.9
Applied egg-rr: 99.9%
Final simplification: 99.9%
(FPCore (x y) :precision binary64 (if (<= y 1.0) (* y x) (* y (- 0.0 x))))
double code(double x, double y) {
    /* Piecewise: y*x when y <= 1, otherwise y*(0 - x). */
    return (y <= 1.0) ? (y * x) : (y * (0.0 - x));
}
real(8) function code(x, y)
! Piecewise: y*x when y <= 1, otherwise y*(0 - x).
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
if (y <= 1.0d0) then
    code = y * x
else
    code = y * (0.0d0 - x)
end if
end function
public static double code(double x, double y) {
    // Piecewise: y*x when y <= 1, otherwise y*(0 - x).
    return (y <= 1.0) ? (y * x) : (y * (0.0 - x));
}
def code(x, y):
    """Return y * x when y <= 1.0, otherwise y * (0.0 - x).

    Bug fix: the original collapsed an if/else statement onto the ``def``
    line (``def code(x, y): tmp = 0 if y <= 1.0: ...``), which is a
    SyntaxError in Python. Restored as a proper multi-line function with
    the same branch logic.
    """
    if y <= 1.0:
        tmp = y * x
    else:
        tmp = y * (0.0 - x)
    return tmp
function code(x, y)
    # Piecewise: y*x when y <= 1, otherwise y*(0 - x).
    tmp = 0.0
    if y <= 1.0
        tmp = Float64(y * x)
    else
        tmp = Float64(y * Float64(0.0 - x))
    end
    return tmp
end
function tmp_2 = code(x, y)
% Piecewise: y*x when y <= 1, otherwise y*(0 - x).
if (y <= 1.0)
    tmp_2 = y * x;
else
    tmp_2 = y * (0.0 - x);
end
end
(* Piecewise: y*x when y <= 1, otherwise y*(0 - x), at $MachinePrecision. *)
code[x_, y_] := If[LessEqual[y, 1.0], N[(y * x), $MachinePrecision], N[(y * N[(0.0 - x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 1:\\
\;\;\;\;y \cdot x\\
\mathbf{else}:\\
\;\;\;\;y \cdot \left(0 - x\right)\\
\end{array}
\end{array}
if y < 1: Initial program 99.9%
Taylor expanded in y around 0
+-rgt-identity: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: 76.5
Simplified: 76.5%
+-rgt-identity: N/A
*-lowering-*.f64: 76.5
Applied egg-rr: 76.5%
if 1 < y: Initial program 99.8%
Taylor expanded in y around 0
+-rgt-identity: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: 0.6
Simplified: 0.6%
Applied egg-rr: 28.6%
sub0-neg: N/A
neg-lowering-neg.f64: 28.6
Applied egg-rr: 28.6%
Final simplification: 65.6%
(FPCore (x y) :precision binary64 (fma y (- x (* y x)) 0.0))
double code(double x, double y) {
return fma(y, (x - (y * x)), 0.0);
}
function code(x, y)
    # fma(y, x - y*x, 0.0) with explicit Float64 rounding of the inner term.
    inner = Float64(x - Float64(y * x))
    return fma(y, inner, 0.0)
end
(* y*(x - y*x) + 0 = x*y*(1 - y), rounded stepwise to $MachinePrecision. *)
code[x_, y_] := N[(y * N[(x - N[(y * x), $MachinePrecision]), $MachinePrecision] + 0.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y, x - y \cdot x, 0\right)
\end{array}
Initial program: 99.9%
Taylor expanded in x around 0
*-commutative: N/A
+-rgt-identity: N/A
distribute-rgt-in: N/A
associate-*r*: N/A
distribute-lft-out--: N/A
*-rgt-identity: N/A
cancel-sign-sub-inv: N/A
mul-1-neg: N/A
distribute-rgt-in: N/A
mul0-lft: N/A
accelerator-lowering-fma.f64: N/A
mul-1-neg: N/A
unsub-neg: N/A
*-rgt-identity: N/A
distribute-lft-out--: N/A
*-commutative: N/A
+-rgt-identity: N/A
distribute-rgt-in: N/A
mul0-lft: N/A
accelerator-lowering-fma.f64: N/A
--lowering--.f64: 99.9
Simplified: 99.9%
+-rgt-identity: N/A
sub-neg: N/A
distribute-rgt-in: N/A
distribute-lft-neg-in: N/A
unsub-neg: N/A
*-lft-identity: N/A
--lowering--.f64: N/A
+-rgt-identity: N/A
accelerator-lowering-fma.f64: 99.9
Applied egg-rr: 99.9%
+-rgt-identity: N/A
*-lowering-*.f64: 99.9
Applied egg-rr: 99.9%
(FPCore (x y) :precision binary64 (* (* y x) (- 1.0 y)))
double code(double x, double y) {
    /* Evaluate (y * x) * (1 - y) in double precision. */
    double product = y * x;
    double complement = 1.0 - y;
    return product * complement;
}
real(8) function code(x, y)
! Evaluate (y * x) * (1 - y) in double precision.
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: product
product = y * x
code = product * (1.0d0 - y)
end function
public static double code(double x, double y) {
    // Evaluate (y * x) * (1 - y) in double precision.
    final double product = y * x;
    final double complement = 1.0 - y;
    return product * complement;
}
def code(x, y):
    """Return (y * x) * (1.0 - y)."""
    product = y * x
    return product * (1.0 - y)
function code(x, y)
    # (y * x) * (1 - y), with explicit Float64 rounding at each step.
    product = Float64(y * x)
    complement = Float64(1.0 - y)
    return Float64(product * complement)
end
function tmp = code(x, y)
% Evaluate (y * x) * (1 - y).
p = y * x;
tmp = p * (1.0 - y);
end
(* (y*x)*(1-y); each intermediate is rounded to $MachinePrecision via N. *)
code[x_, y_] := N[(N[(y * x), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(y \cdot x\right) \cdot \left(1 - y\right)
\end{array}
Initial program: 99.9%
Final simplification: 99.9%
(FPCore (x y) :precision binary64 (* y x))
double code(double x, double y) {
    /* Product y * x. */
    double product = y * x;
    return product;
}
real(8) function code(x, y)
! Return the product y * x.
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y * x
end function
public static double code(double x, double y) {
    // Return the product y * x.
    final double product = y * x;
    return product;
}
def code(x, y):
    """Return the product y * x."""
    return y * x
function code(x, y)
    # Product y * x, rounded to Float64.
    product = Float64(y * x)
    return product
end
function tmp = code(x, y)
% Return the product y * x.
tmp = y * x;
end
(* Product y*x at $MachinePrecision. *)
code[x_, y_] := N[(y * x), $MachinePrecision]
\begin{array}{l}
\\
y \cdot x
\end{array}
Initial program: 99.9%
Taylor expanded in y around 0
+-rgt-identity: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: 59.3
Simplified: 59.3%
+-rgt-identity: N/A
*-lowering-*.f64: 59.3
Applied egg-rr: 59.3%
herbie shell --seed 2024196
(FPCore (x y)
:name "Statistics.Distribution.Binomial:$cvariance from math-functions-0.1.5.2"
:precision binary64
(* (* x y) (- 1.0 y)))