
; Herbie input: binary64 expression (x * y) * (1 - y); operand grouping is significant for accuracy.
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
/* Computes (x * y) * (1.0 - y) in double precision.
   Herbie-generated: the evaluation order fixes the rounding behavior; do not re-associate. */
double code(double x, double y) {
return (x * y) * (1.0 - y);
}
!> Computes (x * y) * (1.0d0 - y) in double precision (binary64).
!> Herbie-generated: evaluation order is intentional; do not re-associate.
!> x, y : real(8) inputs; result : real(8).
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x * y) * (1.0d0 - y)
end function
// Computes (x * y) * (1.0 - y) in double precision; Herbie-generated, grouping is intentional.
public static double code(double x, double y) {
return (x * y) * (1.0 - y);
}
# Computes (x * y) * (1.0 - y) (Python floats are binary64); Herbie-generated, grouping is intentional.
def code(x, y): return (x * y) * (1.0 - y)
# Computes (x * y) * (1.0 - y) with an explicit Float64 rounding after each step; intentional.
function code(x, y) return Float64(Float64(x * y) * Float64(1.0 - y)) end
% Computes (x * y) * (1.0 - y) in double precision; Herbie-generated, grouping is intentional.
function tmp = code(x, y) tmp = (x * y) * (1.0 - y); end
(* Computes (x * y) * (1 - y), rounding each intermediate to $MachinePrecision; intentional. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative: binary64 expression (x * y) * (1 - y); operand grouping is significant for accuracy.
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
/* Computes (x * y) * (1.0 - y) in double precision.
   Herbie-generated alternative: evaluation order fixes the rounding; do not re-associate. */
double code(double x, double y) {
return (x * y) * (1.0 - y);
}
!> Computes (x * y) * (1.0d0 - y) in double precision (binary64).
!> Herbie-generated alternative: evaluation order is intentional; do not re-associate.
!> x, y : real(8) inputs; result : real(8).
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x * y) * (1.0d0 - y)
end function
// Computes (x * y) * (1.0 - y) in double precision; Herbie-generated, grouping is intentional.
public static double code(double x, double y) {
return (x * y) * (1.0 - y);
}
# Computes (x * y) * (1.0 - y) (Python floats are binary64); Herbie-generated, grouping is intentional.
def code(x, y): return (x * y) * (1.0 - y)
# Computes (x * y) * (1.0 - y) with an explicit Float64 rounding after each step; intentional.
function code(x, y) return Float64(Float64(x * y) * Float64(1.0 - y)) end
% Computes (x * y) * (1.0 - y) in double precision; Herbie-generated, grouping is intentional.
function tmp = code(x, y) tmp = (x * y) * (1.0 - y); end
(* Computes (x * y) * (1 - y), rounding each intermediate to $MachinePrecision; intentional. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
; Alternative: binary64 expression (x * y) * (1 - y); operand grouping is significant for accuracy.
(FPCore (x y) :precision binary64 (* (* x y) (- 1.0 y)))
/* Computes (x * y) * (1.0 - y) in double precision.
   Herbie-generated alternative: evaluation order fixes the rounding; do not re-associate. */
double code(double x, double y) {
return (x * y) * (1.0 - y);
}
!> Computes (x * y) * (1.0d0 - y) in double precision (binary64).
!> Herbie-generated alternative: evaluation order is intentional; do not re-associate.
!> x, y : real(8) inputs; result : real(8).
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x * y) * (1.0d0 - y)
end function
// Computes (x * y) * (1.0 - y) in double precision; Herbie-generated, grouping is intentional.
public static double code(double x, double y) {
return (x * y) * (1.0 - y);
}
# Computes (x * y) * (1.0 - y) (Python floats are binary64); Herbie-generated, grouping is intentional.
def code(x, y): return (x * y) * (1.0 - y)
# Computes (x * y) * (1.0 - y) with an explicit Float64 rounding after each step; intentional.
function code(x, y) return Float64(Float64(x * y) * Float64(1.0 - y)) end
% Computes (x * y) * (1.0 - y) in double precision; Herbie-generated, grouping is intentional.
function tmp = code(x, y) tmp = (x * y) * (1.0 - y); end
(* Computes (x * y) * (1 - y), rounding each intermediate to $MachinePrecision; intentional. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(1 - y\right)
\end{array}
Initial program 99.9%
Final simplification 99.9%
; Alternative (re-associated by Herbie): x * (y * (1 - y)); grouping is significant for accuracy.
(FPCore (x y) :precision binary64 (* x (* y (- 1.0 y))))
/* Computes x * (y * (1.0 - y)) in double precision.
   Re-associated alternative from the Herbie report; do not regroup the products. */
double code(double x, double y) {
return x * (y * (1.0 - y));
}
!> Computes x * (y * (1.0d0 - y)) in double precision (binary64).
!> Re-associated alternative from the Herbie report; do not regroup the products.
!> x, y : real(8) inputs; result : real(8).
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * (y * (1.0d0 - y))
end function
// Computes x * (y * (1.0 - y)); re-associated alternative, grouping is intentional.
public static double code(double x, double y) {
return x * (y * (1.0 - y));
}
# Computes x * (y * (1.0 - y)); re-associated alternative from the report, grouping is intentional.
def code(x, y): return x * (y * (1.0 - y))
# Computes x * (y * (1.0 - y)) with an explicit Float64 rounding after each step; intentional.
function code(x, y) return Float64(x * Float64(y * Float64(1.0 - y))) end
% Computes x * (y * (1.0 - y)); re-associated alternative, grouping is intentional.
function tmp = code(x, y) tmp = x * (y * (1.0 - y)); end
(* Computes x * (y * (1 - y)), rounding each intermediate to $MachinePrecision; intentional. *)
code[x_, y_] := N[(x * N[(y * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(y \cdot \left(1 - y\right)\right)
\end{array}
Initial program 99.9%
associate-*l* 93.0%
Simplified 93.0%
Final simplification 93.0%
; Alternative: x * y — per the report's derivation, a Taylor truncation around y = 0 (lower accuracy).
(FPCore (x y) :precision binary64 (* x y))
/* Computes x * y. Per the report's derivation this alternative drops the (1 - y)
   factor via Taylor expansion around y = 0, trading accuracy for speed. */
double code(double x, double y) {
return x * y;
}
!> Computes x * y in double precision (binary64).
!> Per the report's derivation this alternative drops the (1 - y) factor via
!> Taylor expansion around y = 0, trading accuracy for speed.
!> x, y : real(8) inputs; result : real(8).
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * y
end function
// Computes x * y; Taylor-truncated alternative (drops the (1 - y) factor per the report).
public static double code(double x, double y) {
return x * y;
}
# Computes x * y; Taylor-truncated alternative (drops the (1 - y) factor per the report).
def code(x, y): return x * y
# Computes Float64(x * y); Taylor-truncated alternative (drops the (1 - y) factor per the report).
function code(x, y) return Float64(x * y) end
% Computes x * y; Taylor-truncated alternative (drops the (1 - y) factor per the report).
function tmp = code(x, y) tmp = x * y; end
(* Computes x * y rounded to $MachinePrecision; Taylor-truncated alternative. *)
code[x_, y_] := N[(x * y), $MachinePrecision]
\begin{array}{l}
\\
x \cdot y
\end{array}
Initial program 99.9%
associate-*l* 93.0%
Simplified 93.0%
Taylor expanded in y around 0 53.7%
Final simplification 53.7%
herbie shell --seed 2024073
; Original input expression: (x * y) * (1 - y), from the named Haskell source below.
(FPCore (x y)
:name "Statistics.Distribution.Binomial:$cvariance from math-functions-0.1.5.2"
:precision binary64
(* (* x y) (- 1.0 y)))