
; Original expression: (x - 1)*log(y) + (z - 1)*log(1 - y) - t.
; Both logarithms are finite only for 0 < y < 1.
(FPCore (x y z t) :precision binary64 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
! Evaluates (x - 1)*log(y) + (z - 1)*log(1 - y) - t in double precision.
! Both logarithms are finite only for 0 < y < 1.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
    // (x - 1)*ln(y) + (z - 1)*ln(1 - y), then subtract t.
    // Both logs are finite only for 0 < y < 1.
    final double first = (x - 1.0) * Math.log(y);
    final double second = (z - 1.0) * Math.log(1.0 - y);
    return first + second - t;
}
def code(x, y, z, t):
    """Return (x - 1)*log(y) + (z - 1)*log(1 - y) - t.

    Both logarithms are finite only for 0 < y < 1.
    """
    left = (x - 1.0) * math.log(y)
    right = (z - 1.0) * math.log(1.0 - y)
    return left + right - t
# (x - 1)*log(y) + (z - 1)*log(1 - y) - t; the Float64() wrappers force
# binary64 rounding after every intermediate operation.
function code(x, y, z, t) return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t) end
% (x - 1)*log(y) + (z - 1)*log(1 - y) - t; logs are finite only for 0 < y < 1.
function tmp = code(x, y, z, t) tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t; end
(* (x - 1)*Log[y] + (z - 1)*Log[1 - y] - t; each N[..., $MachinePrecision]
   rounds an intermediate to machine precision, mirroring binary64. *)
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the original program.
(FPCore (x y z t) :precision binary64 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
! Evaluates (x - 1)*log(y) + (z - 1)*log(1 - y) - t in double precision.
! Both logarithms are finite only for 0 < y < 1.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
    // (x - 1)*ln(y) + (z - 1)*ln(1 - y) minus t; logs need 0 < y < 1.
    double sum = (x - 1.0) * Math.log(y)
               + (z - 1.0) * Math.log(1.0 - y);
    return sum - t;
}
def code(x, y, z, t):
    """Return (x - 1)*log(y) + (z - 1)*log(1 - y) - t (logs need 0 < y < 1)."""
    weighted_log_y = (x - 1.0) * math.log(y)
    weighted_log_1my = (z - 1.0) * math.log(1.0 - y)
    return weighted_log_y + weighted_log_1my - t
# (x - 1)*log(y) + (z - 1)*log(1 - y) - t; Float64() wrappers force binary64
# rounding after every intermediate operation.
function code(x, y, z, t) return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t) end
% (x - 1)*log(y) + (z - 1)*log(1 - y) - t; logs are finite only for 0 < y < 1.
function tmp = code(x, y, z, t) tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t; end
(* (x - 1)*Log[y] + (z - 1)*Log[1 - y] - t; each N[..., $MachinePrecision]
   rounds an intermediate to machine precision, mirroring binary64. *)
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}
; Alternative: log(1 - y) replaced by its 2nd-order Taylor series around y = 0,
; -y - y^2/2 = y * fma(y, -0.5, -1.0)  (see "Taylor expanded in y around 0" below).
(FPCore (x y z t) :precision binary64 (- (+ (* (+ x -1.0) (log y)) (* (+ z -1.0) (* y (fma y -0.5 -1.0)))) t))
double code(double x, double y, double z, double t) {
return (((x + -1.0) * log(y)) + ((z + -1.0) * (y * fma(y, -0.5, -1.0)))) - t;
}
# Alternative: log(1 - y) replaced by its Taylor series y * fma(y, -0.5, -1.0);
# Float64() wrappers force binary64 rounding at each step.
function code(x, y, z, t) return Float64(Float64(Float64(Float64(x + -1.0) * log(y)) + Float64(Float64(z + -1.0) * Float64(y * fma(y, -0.5, -1.0)))) - t) end
(* Alternative: Log[1 - y] replaced by its Taylor series y*(y*-0.5 + -1);
   each N[..., $MachinePrecision] mirrors binary64 intermediate rounding. *)
code[x_, y_, z_, t_] := N[(N[(N[(N[(x + -1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z + -1.0), $MachinePrecision] * N[(y * N[(y * -0.5 + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x + -1\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \mathsf{fma}\left(y, -0.5, -1\right)\right)\right) - t
\end{array}
Initial program 85.9%
Taylor expanded in y around 0
lower-*.f64: N/A
sub-neg: N/A
*-commutative: N/A
metadata-eval: N/A
lower-fma.f64: 99.7
Applied rewrites99.7%
Final simplification99.7%
; Branch-specialized alternative: the rewritten form fma(y, 1-z, -log y) - t is
; used only on the middle regime -5e15 < x - 1 <= -0.2; elsewhere the direct
; form t_1 = fma(y, -z, x*log y) - t is returned.
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (- (fma y (- z) (* x (log y))) t)))
(if (<= (+ x -1.0) -5e+15)
t_1
(if (<= (+ x -1.0) -0.2) (- (fma y (- 1.0 z) (- (log y))) t) t_1))))
double code(double x, double y, double z, double t) {
double t_1 = fma(y, -z, (x * log(y))) - t;
double tmp;
if ((x + -1.0) <= -5e+15) {
tmp = t_1;
} else if ((x + -1.0) <= -0.2) {
tmp = fma(y, (1.0 - z), -log(y)) - t;
} else {
tmp = t_1;
}
return tmp;
}
# Branch-specialized form: fma(y, 1-z, -log y) - t on the middle regime
# -5e15 < x - 1 <= -0.2, the direct form t_1 elsewhere; Float64() wrappers
# force binary64 rounding at each step.
function code(x, y, z, t) t_1 = Float64(fma(y, Float64(-z), Float64(x * log(y))) - t) tmp = 0.0 if (Float64(x + -1.0) <= -5e+15) tmp = t_1; elseif (Float64(x + -1.0) <= -0.2) tmp = Float64(fma(y, Float64(1.0 - z), Float64(-log(y))) - t); else tmp = t_1; end return tmp end
(* Branch-specialized form: the rewritten expression is used only when
   -5e15 < x - 1 <= -0.2; otherwise the direct form t$95$1 is returned. *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(y * (-z) + N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[N[(x + -1.0), $MachinePrecision], -5e+15], t$95$1, If[LessEqual[N[(x + -1.0), $MachinePrecision], -0.2], N[(N[(y * N[(1.0 - z), $MachinePrecision] + (-N[Log[y], $MachinePrecision])), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \mathsf{fma}\left(y, -z, x \cdot \log y\right) - t\\
\mathbf{if}\;x + -1 \leq -5 \cdot 10^{+15}:\\
\;\;\;\;t_1\\
\mathbf{elif}\;x + -1 \leq -0.2:\\
\;\;\;\;\mathsf{fma}\left(y, 1 - z, -\log y\right) - t\\
\mathbf{else}:\\
\;\;\;\;t_1\\
\end{array}
\end{array}
if (-.f64 x #s(literal 1 binary64)) < -5e15 or -0.20000000000000001 < (-.f64 x #s(literal 1 binary64))
Initial program 94.4%
Taylor expanded in y around 0
mul-1-neg: N/A
distribute-rgt-neg-in: N/A
mul-1-neg: N/A
lower-fma.f64: N/A
mul-1-neg: N/A
neg-sub0: N/A
sub-neg: N/A
metadata-eval: N/A
+-commutative: N/A
associate--r+: N/A
metadata-eval: N/A
lower--.f64: N/A
lower-*.f64: N/A
lower-log.f64: N/A
sub-neg: N/A
metadata-eval: N/A
+-commutative: N/A
lower-+.f64: 99.2
Applied rewrites99.2%
Taylor expanded in x around inf
Applied rewrites98.8%
Taylor expanded in z around inf
Applied rewrites98.8%
if -5e15 < (-.f64 x #s(literal 1 binary64)) < -0.20000000000000001
Initial program 84.5%
Taylor expanded in y around 0
mul-1-neg: N/A
distribute-rgt-neg-in: N/A
mul-1-neg: N/A
lower-fma.f64: N/A
mul-1-neg: N/A
neg-sub0: N/A
sub-neg: N/A
metadata-eval: N/A
+-commutative: N/A
associate--r+: N/A
metadata-eval: N/A
lower--.f64: N/A
lower-*.f64: N/A
lower-log.f64: N/A
sub-neg: N/A
metadata-eval: N/A
+-commutative: N/A
lower-+.f64: 99.0
Applied rewrites99.0%
Taylor expanded in x around 0
Applied rewrites97.3%
Final simplification98.0%
herbie shell --seed 2024226
; Named original benchmark: the density helper from
; Statistics.Distribution.Beta in math-functions-0.1.5.2 (per :name below).
(FPCore (x y z t)
:name "Statistics.Distribution.Beta:$cdensity from math-functions-0.1.5.2"
:precision binary64
(- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))