
(FPCore (x y z t) :precision binary64 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
! Beta-distribution log-density kernel in double precision:
!   code = (x-1)*log(y) + (z-1)*log(1-y) - t
! Requires 0 < y < 1 so both log arguments are positive.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
/**
 * Beta-distribution log-density kernel in binary64:
 * (x-1)*log(y) + (z-1)*log(1-y) - t. Requires 0 &lt; y &lt; 1.
 */
public static double code(double x, double y, double z, double t) {
    double logY = Math.log(y);
    double logOneMinusY = Math.log(1.0 - y);
    double density = (x - 1.0) * logY + (z - 1.0) * logOneMinusY;
    return density - t;
}
def code(x, y, z, t):
    """Beta log-density kernel: (x-1)*log(y) + (z-1)*log(1-y) - t.

    Requires 0 < y < 1 so both logarithms are defined.
    """
    alpha_term = (x - 1.0) * math.log(y)
    beta_term = (z - 1.0) * math.log(1.0 - y)
    return (alpha_term + beta_term) - t
# Beta log-density kernel: (x-1)*log(y) + (z-1)*log(1-y) - t.
# Explicit Float64() wrappers pin every intermediate to binary64.
function code(x, y, z, t) return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t) end
% Beta log-density kernel (double precision): (x-1)*log(y) + (z-1)*log(1-y) - t.
function tmp = code(x, y, z, t) tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t; end
(* Beta log-density kernel: (x-1) Log[y] + (z-1) Log[1-y] - t, with every intermediate rounded to $MachinePrecision to mimic binary64. *)
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
/* Duplicate listing of the initial program (C):
 * (x-1)*log(y) + (z-1)*log(1-y) - t. */
double code(double x, double y, double z, double t) {
return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
! Duplicate listing of the initial program (Fortran):
!   code = (x-1)*log(y) + (z-1)*log(1-y) - t
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
// Duplicate listing of the initial program (Java):
// (x-1)*log(y) + (z-1)*log(1-y) - t.
public static double code(double x, double y, double z, double t) {
return (((x - 1.0) * Math.log(y)) + ((z - 1.0) * Math.log((1.0 - y)))) - t;
}
# Duplicate listing of the initial program (Python): (x-1)*log(y) + (z-1)*log(1-y) - t.
def code(x, y, z, t): return (((x - 1.0) * math.log(y)) + ((z - 1.0) * math.log((1.0 - y)))) - t
# Duplicate listing of the initial program (Julia): (x-1)*log(y) + (z-1)*log(1-y) - t.
function code(x, y, z, t) return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t) end
% Duplicate listing of the initial program (MATLAB): (x-1)*log(y) + (z-1)*log(1-y) - t.
function tmp = code(x, y, z, t) tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t; end
(* Duplicate listing of the initial program (Mathematica). *)
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}
(FPCore (x y z t) :precision binary64 (- (+ (* (* (fma (fma -0.3333333333333333 y -0.5) y -1.0) y) (- z 1.0)) (* (log y) (- x 1.0))) t))
/* Alternative: log(1-y) replaced by its cubic Taylor series around y=0,
 * y*(-1 - y/2 - y^2/3), evaluated with nested fma for a single rounding
 * per step. Intended for small y (see derivation trace below). */
double code(double x, double y, double z, double t) {
return (((fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * y) * (z - 1.0)) + (log(y) * (x - 1.0))) - t;
}
# Julia rendering of the same series alternative: fma-evaluated cubic series of log(1-y) times (z-1), plus log(y)*(x-1), minus t.
function code(x, y, z, t) return Float64(Float64(Float64(Float64(fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * y) * Float64(z - 1.0)) + Float64(log(y) * Float64(x - 1.0))) - t) end
(* Mathematica rendering of the series alternative; fma is spelled as a*b+c under $MachinePrecision rounding. *)
code[x_, y_, z_, t_] := N[(N[(N[(N[(N[(N[(-0.3333333333333333 * y + -0.5), $MachinePrecision] * y + -1.0), $MachinePrecision] * y), $MachinePrecision] * N[(z - 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[Log[y], $MachinePrecision] * N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right), y, -1\right) \cdot y\right) \cdot \left(z - 1\right) + \log y \cdot \left(x - 1\right)\right) - t
\end{array}
Initial program 90.5%
Taylor expanded in y around 0
*-commutative N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
lower-fma.f64 99.7
Applied rewrites 99.7%
Final simplification 99.7%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (- (* (log y) (- x 1.0)) t)))
(if (<= (- x 1.0) -1.0000000000002)
t_1
(if (<= (- x 1.0) -1.0) (- (fma (- z) y (- (log y))) t) t_1))))
/* Regime-split alternative. t_1 = log(y)*(x-1) - t covers most of the
 * x range; only in the narrow band -1.0000000000002 < x-1 <= -1
 * (x just at/below 0) is the fused form -z*y - log(y) - t selected. */
double code(double x, double y, double z, double t) {
double t_1 = (log(y) * (x - 1.0)) - t;
double tmp;
if ((x - 1.0) <= -1.0000000000002) {
tmp = t_1;
} else if ((x - 1.0) <= -1.0) {
/* narrow band: fma(-z, y, -log(y)) gives -z*y - log(y) with one rounding */
tmp = fma(-z, y, -log(y)) - t;
} else {
tmp = t_1;
}
return tmp;
}
# Julia rendering of the regime-split alternative: t_1 = log(y)*(x-1) - t everywhere except the band -1.0000000000002 < x-1 <= -1, where fma(-z, y, -log(y)) - t is used.
function code(x, y, z, t) t_1 = Float64(Float64(log(y) * Float64(x - 1.0)) - t) tmp = 0.0 if (Float64(x - 1.0) <= -1.0000000000002) tmp = t_1; elseif (Float64(x - 1.0) <= -1.0) tmp = Float64(fma(Float64(-z), y, Float64(-log(y))) - t); else tmp = t_1; end return tmp end
(* Mathematica rendering of the regime-split alternative (same branch structure as the C version above it). *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(N[Log[y], $MachinePrecision] * N[(x - 1.0), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[N[(x - 1.0), $MachinePrecision], -1.0000000000002], t$95$1, If[LessEqual[N[(x - 1.0), $MachinePrecision], -1.0], N[(N[((-z) * y + (-N[Log[y], $MachinePrecision])), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \log y \cdot \left(x - 1\right) - t\\
\mathbf{if}\;x - 1 \leq -1.0000000000002:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x - 1 \leq -1:\\
\;\;\;\;\mathsf{fma}\left(-z, y, -\log y\right) - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (-.f64 x #s(literal 1 binary64)) < -1.00000000000020006 or -1 < (-.f64 x #s(literal 1 binary64))
Initial program 95.9%
Taylor expanded in y around 0
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
lower-log.f6494.9
Applied rewrites94.9%
if -1.00000000000020006 < (-.f64 x #s(literal 1 binary64)) < -1
Initial program 85.7%
Taylor expanded in y around 0
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
mul-1-negN/A
neg-sub0N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
associate--r+N/A
metadata-evalN/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
lower-log.f6498.9
Applied rewrites98.9%
Applied rewrites98.9%
Taylor expanded in x around 0
Applied rewrites98.9%
Taylor expanded in z around inf
Applied rewrites98.9%
Final simplification97.0%
(FPCore (x y z t)
:precision binary64
(if (<= (- z 1.0) -4e+198)
(- (* (* (fma (fma -0.3333333333333333 y -0.5) y -1.0) z) y) t)
(if (<= (- z 1.0) 5e+204)
(- (* (log y) (- x 1.0)) t)
(- (* (log1p (- y)) z) t))))
/* Three-regime alternative keyed on z-1:
 *   z-1 <= -4e198 : cubic Taylor series of log(1-y) (nested fma) times z,
 *   z-1 <= 5e204  : log(y)*(x-1) dominates,
 *   otherwise     : log1p(-y)*z dominates (log1p keeps accuracy for small y). */
double code(double x, double y, double z, double t) {
double tmp;
if ((z - 1.0) <= -4e+198) {
tmp = ((fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * z) * y) - t;
} else if ((z - 1.0) <= 5e+204) {
tmp = (log(y) * (x - 1.0)) - t;
} else {
tmp = (log1p(-y) * z) - t;
}
return tmp;
}
# Julia rendering of the three-regime alternative keyed on z-1 (series / log(y)*(x-1) / log1p(-y)*z).
function code(x, y, z, t) tmp = 0.0 if (Float64(z - 1.0) <= -4e+198) tmp = Float64(Float64(Float64(fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * z) * y) - t); elseif (Float64(z - 1.0) <= 5e+204) tmp = Float64(Float64(log(y) * Float64(x - 1.0)) - t); else tmp = Float64(Float64(log1p(Float64(-y)) * z) - t); end return tmp end
(* Mathematica rendering of the three-regime alternative; Log[1 + (-y)] plays the role of log1p(-y). *)
code[x_, y_, z_, t_] := If[LessEqual[N[(z - 1.0), $MachinePrecision], -4e+198], N[(N[(N[(N[(N[(-0.3333333333333333 * y + -0.5), $MachinePrecision] * y + -1.0), $MachinePrecision] * z), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[N[(z - 1.0), $MachinePrecision], 5e+204], N[(N[(N[Log[y], $MachinePrecision] * N[(x - 1.0), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(N[Log[1 + (-y)], $MachinePrecision] * z), $MachinePrecision] - t), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;z - 1 \leq -4 \cdot 10^{+198}:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right), y, -1\right) \cdot z\right) \cdot y - t\\
\mathbf{elif}\;z - 1 \leq 5 \cdot 10^{+204}:\\
\;\;\;\;\log y \cdot \left(x - 1\right) - t\\
\mathbf{else}:\\
\;\;\;\;\mathsf{log1p}\left(-y\right) \cdot z - t\\
\end{array}
\end{array}
if (-.f64 z #s(literal 1 binary64)) < -4.00000000000000007e198Initial program 54.3%
Taylor expanded in y around 0
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites99.9%
Taylor expanded in z around inf
Applied rewrites69.3%
if -4.00000000000000007e198 < (-.f64 z #s(literal 1 binary64)) < 5.00000000000000008e204Initial program 97.6%
Taylor expanded in y around 0
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
lower-log.f6497.1
Applied rewrites97.1%
if 5.00000000000000008e204 < (-.f64 z #s(literal 1 binary64)) Initial program 61.0%
Taylor expanded in z around inf
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6481.5
Applied rewrites81.5%
Final simplification93.2%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (- (* (log y) x) t)))
(if (<= x -6.5e-34)
t_1
(if (<= x 74000000.0) (- (* (log1p (- y)) z) t) t_1))))
/* Regime split on x: outside (-6.5e-34, 7.4e7] the x*log(y) term
 * dominates (t_1); inside that band z*log1p(-y) is used instead.
 * Note (x-1) has been simplified to x in the dominant regimes. */
double code(double x, double y, double z, double t) {
double t_1 = (log(y) * x) - t;
double tmp;
if (x <= -6.5e-34) {
tmp = t_1;
} else if (x <= 74000000.0) {
tmp = (log1p(-y) * z) - t;
} else {
tmp = t_1;
}
return tmp;
}
// Java rendering of the regime split on x: t_1 = log(y)*x - t outside
// (-6.5e-34, 7.4e7]; Math.log1p(-y)*z - t inside that band.
public static double code(double x, double y, double z, double t) {
double t_1 = (Math.log(y) * x) - t;
double tmp;
if (x <= -6.5e-34) {
tmp = t_1;
} else if (x <= 74000000.0) {
tmp = (Math.log1p(-y) * z) - t;
} else {
tmp = t_1;
}
return tmp;
}
def code(x, y, z, t):
    """Regime-split approximation of (x-1)*log(y) + (z-1)*log(1-y) - t.

    Outside x in (-6.5e-34, 7.4e7] the x*log(y) term dominates (t_1);
    inside that band z*log1p(-y) is used instead.

    The original line was whitespace-flattened onto a single line (a
    Python syntax error); this restores the control flow that the C and
    Java renderings of the same alternative use.
    """
    t_1 = (math.log(y) * x) - t
    tmp = 0
    if x <= -6.5e-34:
        tmp = t_1
    elif x <= 74000000.0:
        tmp = (math.log1p(-y) * z) - t
    else:
        tmp = t_1
    return tmp
# Julia rendering of the regime split on x: log(y)*x - t outside (-6.5e-34, 7.4e7], log1p(-y)*z - t inside.
function code(x, y, z, t) t_1 = Float64(Float64(log(y) * x) - t) tmp = 0.0 if (x <= -6.5e-34) tmp = t_1; elseif (x <= 74000000.0) tmp = Float64(Float64(log1p(Float64(-y)) * z) - t); else tmp = t_1; end return tmp end
(* Mathematica rendering of the regime split on x (same branches as the C/Java versions above). *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -6.5e-34], t$95$1, If[LessEqual[x, 74000000.0], N[(N[(N[Log[1 + (-y)], $MachinePrecision] * z), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \log y \cdot x - t\\
\mathbf{if}\;x \leq -6.5 \cdot 10^{-34}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 74000000:\\
\;\;\;\;\mathsf{log1p}\left(-y\right) \cdot z - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -6.49999999999999985e-34 or 7.4e7 < x Initial program 96.7%
Taylor expanded in x around inf
lower-*.f64N/A
lower-log.f6492.4
Applied rewrites92.4%
if -6.49999999999999985e-34 < x < 7.4e7Initial program 85.0%
Taylor expanded in z around inf
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6460.8
Applied rewrites60.8%
Final simplification75.8%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (- (* (log y) x) t)))
(if (<= x -6.5e-34)
t_1
(if (<= x 74000000.0)
(- (* (* (fma (fma -0.3333333333333333 y -0.5) y -1.0) z) y) t)
t_1))))
/* Same regime split on x as the previous alternative, but the middle
 * band uses the cubic Taylor series of log(1-y) (nested fma) times z
 * rather than log1p(-y)*z. */
double code(double x, double y, double z, double t) {
double t_1 = (log(y) * x) - t;
double tmp;
if (x <= -6.5e-34) {
tmp = t_1;
} else if (x <= 74000000.0) {
tmp = ((fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * z) * y) - t;
} else {
tmp = t_1;
}
return tmp;
}
# Julia rendering: regime split on x with the fma-series middle band.
function code(x, y, z, t) t_1 = Float64(Float64(log(y) * x) - t) tmp = 0.0 if (x <= -6.5e-34) tmp = t_1; elseif (x <= 74000000.0) tmp = Float64(Float64(Float64(fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * z) * y) - t); else tmp = t_1; end return tmp end
(* Mathematica rendering: regime split on x with the series middle band. *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -6.5e-34], t$95$1, If[LessEqual[x, 74000000.0], N[(N[(N[(N[(N[(-0.3333333333333333 * y + -0.5), $MachinePrecision] * y + -1.0), $MachinePrecision] * z), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \log y \cdot x - t\\
\mathbf{if}\;x \leq -6.5 \cdot 10^{-34}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 74000000:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right), y, -1\right) \cdot z\right) \cdot y - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -6.49999999999999985e-34 or 7.4e7 < x Initial program 96.7%
Taylor expanded in x around inf
lower-*.f64N/A
lower-log.f6492.4
Applied rewrites92.4%
if -6.49999999999999985e-34 < x < 7.4e7Initial program 85.0%
Taylor expanded in y around 0
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites99.7%
Taylor expanded in z around inf
Applied rewrites60.5%
Final simplification75.6%
(FPCore (x y z t) :precision binary64 (- (fma (- 1.0 z) y (* (log y) (- x 1.0))) t))
/* Alternative: (z-1)*log(1-y) replaced by its first-order series -(z-1)*y,
 * fused with the log(y)*(x-1) term as fma(1-z, y, log(y)*(x-1)). */
double code(double x, double y, double z, double t) {
return fma((1.0 - z), y, (log(y) * (x - 1.0))) - t;
}
# Julia rendering of the fma(1-z, y, log(y)*(x-1)) - t alternative.
function code(x, y, z, t) return Float64(fma(Float64(1.0 - z), y, Float64(log(y) * Float64(x - 1.0))) - t) end
(* Mathematica rendering of the fma(1-z, y, log(y)*(x-1)) - t alternative. *)
code[x_, y_, z_, t_] := N[(N[(N[(1.0 - z), $MachinePrecision] * y + N[(N[Log[y], $MachinePrecision] * N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(1 - z, y, \log y \cdot \left(x - 1\right)\right) - t
\end{array}
Initial program 90.5%
Taylor expanded in y around 0
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
mul-1-negN/A
neg-sub0N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
associate--r+N/A
metadata-evalN/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
lower-log.f6499.0
Applied rewrites99.0%
Final simplification99.0%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (* (log y) x)))
(if (<= x -1.12e+18)
t_1
(if (<= x 1.1e+76)
(- (* (* (fma (fma -0.3333333333333333 y -0.5) y -1.0) z) y) t)
t_1))))
/* Split on x with t dropped in the outer regimes: t_1 = log(y)*x only
 * (note: no "- t"), while the middle band keeps the fma-series-times-z
 * form minus t. Accuracy of this variant is low (65.7% overall). */
double code(double x, double y, double z, double t) {
double t_1 = log(y) * x;
double tmp;
if (x <= -1.12e+18) {
tmp = t_1;
} else if (x <= 1.1e+76) {
tmp = ((fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * z) * y) - t;
} else {
tmp = t_1;
}
return tmp;
}
# Julia rendering: split on x, t_1 = log(y)*x (t dropped) in the outer regimes.
function code(x, y, z, t) t_1 = Float64(log(y) * x) tmp = 0.0 if (x <= -1.12e+18) tmp = t_1; elseif (x <= 1.1e+76) tmp = Float64(Float64(Float64(fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * z) * y) - t); else tmp = t_1; end return tmp end
(* Mathematica rendering: split on x with t dropped in the outer regimes. *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision]}, If[LessEqual[x, -1.12e+18], t$95$1, If[LessEqual[x, 1.1e+76], N[(N[(N[(N[(N[(-0.3333333333333333 * y + -0.5), $MachinePrecision] * y + -1.0), $MachinePrecision] * z), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \log y \cdot x\\
\mathbf{if}\;x \leq -1.12 \cdot 10^{+18}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 1.1 \cdot 10^{+76}:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right), y, -1\right) \cdot z\right) \cdot y - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -1.12e18 or 1.1e76 < x Initial program 96.0%
Taylor expanded in x around inf
lower-*.f64N/A
lower-log.f6475.3
Applied rewrites75.3%
if -1.12e18 < x < 1.1e76Initial program 87.1%
Taylor expanded in y around 0
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites99.7%
Taylor expanded in z around inf
Applied rewrites59.6%
Final simplification65.7%
(FPCore (x y z t) :precision binary64 (- (* (* (fma (fma -0.3333333333333333 y -0.5) y -1.0) z) y) t))
/* Alternative: cubic series of log(1-y) (nested fma) times z, minus t.
 * The (x-1)*log(y) term is dropped entirely, so x is unused. */
double code(double x, double y, double z, double t) {
return ((fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * z) * y) - t;
}
# Julia rendering: series-times-z alternative; x is unused.
function code(x, y, z, t) return Float64(Float64(Float64(fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * z) * y) - t) end
(* Mathematica rendering: series-times-z alternative; x is unused. *)
code[x_, y_, z_, t_] := N[(N[(N[(N[(N[(-0.3333333333333333 * y + -0.5), $MachinePrecision] * y + -1.0), $MachinePrecision] * z), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right), y, -1\right) \cdot z\right) \cdot y - t
\end{array}
Initial program 90.5%
Taylor expanded in y around 0
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites99.7%
Taylor expanded in z around inf
Applied rewrites46.0%
(FPCore (x y z t) :precision binary64 (- (fma (- y) z y) t))
double code(double x, double y, double z, double t) {
return fma(-y, z, y) - t;
}
# Julia rendering: fma(-y, z, y) - t, i.e. y*(1-z) - t with one rounding.
function code(x, y, z, t) return Float64(fma(Float64(-y), z, y) - t) end
(* Mathematica rendering: (-y)*z + y - t under $MachinePrecision rounding. *)
code[x_, y_, z_, t_] := N[(N[((-y) * z + y), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-y, z, y\right) - t
\end{array}
Initial program 90.5%
Taylor expanded in y around 0
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
mul-1-negN/A
neg-sub0N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
associate--r+N/A
metadata-evalN/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
lower-log.f6499.0
Applied rewrites99.0%
Taylor expanded in y around inf
Applied rewrites45.6%
(FPCore (x y z t) :precision binary64 (- (* (- y) z) t))
/* Alternative for very large z: the whole expression collapses to
 * (-y)*z - t; x is unused but kept for interface compatibility. */
double code(double x, double y, double z, double t) {
    double scaled = -y * z;
    return scaled - t;
}
! Alternative for very large z: code = (-y)*z - t.
! x is unused here but retained so the interface matches the original.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (-y * z) - t
end function
// Java rendering of the large-z alternative: (-y)*z - t (x unused).
public static double code(double x, double y, double z, double t) {
return (-y * z) - t;
}
def code(x, y, z, t):
    """Large-z alternative: the expression collapses to (-y)*z - t (x unused)."""
    scaled = -y * z
    return scaled - t
# Julia rendering of the large-z alternative: (-y)*z - t (x unused).
function code(x, y, z, t) return Float64(Float64(Float64(-y) * z) - t) end
% MATLAB rendering of the large-z alternative: (-y)*z - t (x unused).
function tmp = code(x, y, z, t) tmp = (-y * z) - t; end
(* Mathematica rendering of the large-z alternative: (-y)*z - t. *)
code[x_, y_, z_, t_] := N[(N[((-y) * z), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(-y\right) \cdot z - t
\end{array}
Initial program 90.5%
Taylor expanded in y around 0
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
mul-1-negN/A
neg-sub0N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
associate--r+N/A
metadata-evalN/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
lower-log.f6499.0
Applied rewrites99.0%
Taylor expanded in z around inf
Applied rewrites45.4%
Final simplification45.4%
(FPCore (x y z t) :precision binary64 (- t))
/* Degenerate alternative: every non-t term dropped, leaving -t.
 * x, y, z are unused but kept for interface compatibility. */
double code(double x, double y, double z, double t) {
    double negated = -t;
    return negated;
}
! Degenerate alternative: every non-t term dropped; code = -t.
! x, y, z are unused but retained for interface compatibility.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = -t
end function
// Degenerate alternative: all non-t terms dropped; returns -t.
public static double code(double x, double y, double z, double t) {
return -t;
}
def code(x, y, z, t):
    """Degenerate alternative: all non-t terms dropped, leaving -t."""
    negated = -t
    return negated
# Julia rendering of the degenerate alternative: -t.
function code(x, y, z, t) return Float64(-t) end
% MATLAB rendering of the degenerate alternative: -t.
function tmp = code(x, y, z, t) tmp = -t; end
(* Mathematica rendering of the degenerate alternative: -t. *)
code[x_, y_, z_, t_] := (-t)
\begin{array}{l}
\\
-t
\end{array}
Initial program 90.5%
Taylor expanded in t around inf
mul-1-neg N/A
lower-neg.f64 35.8
Applied rewrites 35.8%
herbie shell --seed 2024244
(FPCore (x y z t)
:name "Statistics.Distribution.Beta:$cdensity from math-functions-0.1.5.2"
:precision binary64
(- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))