
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Computes x*log(y) + z*log(1 - y) - t in double precision.
! NOTE(review): both logarithms are finite only for 0 < y < 1;
! y outside that range produces -Inf/NaN.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
/** Evaluates x*log(y) + z*log(1 - y) - t in double precision (finite only for 0 < y < 1). */
public static double code(double x, double y, double z, double t) {
    double logTerm = x * Math.log(y);
    double complementTerm = z * Math.log(1.0 - y);
    return (logTerm + complementTerm) - t;
}
def code(x, y, z, t):
    """Return x*log(y) + z*log(1 - y) - t (finite only for 0 < y < 1)."""
    log_term = x * math.log(y)
    complement_term = z * math.log(1.0 - y)
    return (log_term + complement_term) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (z * math.log((1.0 - y)))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (- (log1p (* (* y y) (- y))) (log1p (fma y y y))))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * (log1p(((y * y) * -y)) - log1p(fma(y, y, y))))) - t;
}
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(log1p(Float64(Float64(y * y) * Float64(-y))) - log1p(fma(y, y, y))))) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(N[Log[1 + N[(N[(y * y), $MachinePrecision] * (-y)), $MachinePrecision]], $MachinePrecision] - N[Log[1 + N[(y * y + y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\left(y \cdot y\right) \cdot \left(-y\right)\right) - \mathsf{log1p}\left(\mathsf{fma}\left(y, y, y\right)\right)\right)\right) - t
\end{array}
Initial program 84.1%
lift-log.f64N/A
lift--.f64N/A
flip3--N/A
log-divN/A
lower--.f64N/A
metadata-evalN/A
sub-negN/A
cube-negN/A
metadata-evalN/A
metadata-evalN/A
lower-log1p.f64N/A
cube-negN/A
lower-neg.f64N/A
cube-multN/A
lower-*.f64N/A
lower-*.f64N/A
metadata-evalN/A
lower-log1p.f64N/A
*-lft-identityN/A
lower-fma.f6499.7
Applied rewrites99.7%
Final simplification99.7%
(FPCore (x y z t) :precision binary64 (/ 1.0 (/ 1.0 (fma x (log y) (fma z (log1p (- y)) (- t))))))
double code(double x, double y, double z, double t) {
return 1.0 / (1.0 / fma(x, log(y), fma(z, log1p(-y), -t)));
}
function code(x, y, z, t) return Float64(1.0 / Float64(1.0 / fma(x, log(y), fma(z, log1p(Float64(-y)), Float64(-t))))) end
code[x_, y_, z_, t_] := N[(1.0 / N[(1.0 / N[(x * N[Log[y], $MachinePrecision] + N[(z * N[Log[1 + (-y)], $MachinePrecision] + (-t)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{1}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)}}
\end{array}
Initial program 84.1%
lift--.f64N/A
flip--N/A
clear-numN/A
lower-/.f64N/A
clear-numN/A
flip--N/A
lift--.f64N/A
lower-/.f6483.9
Applied rewrites99.5%
(FPCore (x y z t) :precision binary64 (fma y (- (* (* y z) (fma y -0.3333333333333333 -0.5)) z) (fma x (log y) (- t))))
double code(double x, double y, double z, double t) {
return fma(y, (((y * z) * fma(y, -0.3333333333333333, -0.5)) - z), fma(x, log(y), -t));
}
function code(x, y, z, t) return fma(y, Float64(Float64(Float64(y * z) * fma(y, -0.3333333333333333, -0.5)) - z), fma(x, log(y), Float64(-t))) end
code[x_, y_, z_, t_] := N[(y * N[(N[(N[(y * z), $MachinePrecision] * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision] + (-t)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y, \left(y \cdot z\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right)
\end{array}
Initial program 84.1%
Taylor expanded in y around 0
+-commutativeN/A
associate--l+N/A
lower-fma.f64N/A
Applied rewrites99.1%
Final simplification99.1%
(FPCore (x y z t) :precision binary64 (fma x (log y) (fma y (fma (* y z) -0.5 (- z)) (- t))))
double code(double x, double y, double z, double t) {
return fma(x, log(y), fma(y, fma((y * z), -0.5, -z), -t));
}
function code(x, y, z, t) return fma(x, log(y), fma(y, fma(Float64(y * z), -0.5, Float64(-z)), Float64(-t))) end
code[x_, y_, z_, t_] := N[(x * N[Log[y], $MachinePrecision] + N[(y * N[(N[(y * z), $MachinePrecision] * -0.5 + (-z)), $MachinePrecision] + (-t)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(y \cdot z, -0.5, -z\right), -t\right)\right)
\end{array}
Initial program 84.1%
lift-log.f64N/A
lift--.f64N/A
flip3--N/A
log-divN/A
lower--.f64N/A
metadata-evalN/A
sub-negN/A
cube-negN/A
metadata-evalN/A
metadata-evalN/A
lower-log1p.f64N/A
cube-negN/A
lower-neg.f64N/A
cube-multN/A
lower-*.f64N/A
lower-*.f64N/A
metadata-evalN/A
lower-log1p.f64N/A
*-lft-identityN/A
lower-fma.f6499.7
Applied rewrites99.7%
Taylor expanded in y around 0
associate--l+N/A
lower-fma.f64N/A
lower-log.f64N/A
sub-negN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lower-neg.f6498.9
Applied rewrites98.9%
Final simplification98.9%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (- (* x (log y)) t))) (if (<= x -4.8e+57) t_1 (if (<= x 3.3e-43) (- (* z (log1p (- y))) t) t_1))))
double code(double x, double y, double z, double t) {
double t_1 = (x * log(y)) - t;
double tmp;
if (x <= -4.8e+57) {
tmp = t_1;
} else if (x <= 3.3e-43) {
tmp = (z * log1p(-y)) - t;
} else {
tmp = t_1;
}
return tmp;
}
public static double code(double x, double y, double z, double t) {
double t_1 = (x * Math.log(y)) - t;
double tmp;
if (x <= -4.8e+57) {
tmp = t_1;
} else if (x <= 3.3e-43) {
tmp = (z * Math.log1p(-y)) - t;
} else {
tmp = t_1;
}
return tmp;
}
def code(x, y, z, t):
    """Branch-selected evaluation of x*log(y) + z*log(1-y) - t.

    For x in (-4.8e+57, 3.3e-43] the z*log1p(-y) - t form is used
    (per the report, the x*log(y) term was Taylor-expanded away there);
    everywhere else x*log(y) - t is used.

    Bug fixed: the original crammed assignments and an if/elif/else
    suite onto one physical line after the ``def``, which is a Python
    SyntaxError; the logic itself is unchanged.
    """
    t_1 = (x * math.log(y)) - t
    tmp = 0
    if x <= -4.8e+57:
        tmp = t_1
    elif x <= 3.3e-43:
        tmp = (z * math.log1p(-y)) - t
    else:
        tmp = t_1
    return tmp
function code(x, y, z, t) t_1 = Float64(Float64(x * log(y)) - t) tmp = 0.0 if (x <= -4.8e+57) tmp = t_1; elseif (x <= 3.3e-43) tmp = Float64(Float64(z * log1p(Float64(-y))) - t); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -4.8e+57], t$95$1, If[LessEqual[x, 3.3e-43], N[(N[(z * N[Log[1 + (-y)], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -4.8 \cdot 10^{+57}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 3.3 \cdot 10^{-43}:\\
\;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -4.80000000000000009e57 or 3.30000000000000016e-43 < x Initial program 91.2%
Taylor expanded in x around inf
remove-double-negN/A
mul-1-negN/A
mul-1-negN/A
mul-1-negN/A
log-recN/A
lower-*.f64N/A
log-recN/A
mul-1-negN/A
mul-1-negN/A
mul-1-negN/A
remove-double-negN/A
lower-log.f6490.3
Applied rewrites90.3%
if -4.80000000000000009e57 < x < 3.30000000000000016e-43Initial program 75.7%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6492.0
Applied rewrites92.0%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (- (* x (log y)) t)))
(if (<= x -4.8e+57)
t_1
(if (<= x 3.3e-43)
(-
(* z (* y (fma y (fma y (fma y -0.25 -0.3333333333333333) -0.5) -1.0)))
t)
t_1))))
double code(double x, double y, double z, double t) {
double t_1 = (x * log(y)) - t;
double tmp;
if (x <= -4.8e+57) {
tmp = t_1;
} else if (x <= 3.3e-43) {
tmp = (z * (y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t;
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = Float64(Float64(x * log(y)) - t) tmp = 0.0 if (x <= -4.8e+57) tmp = t_1; elseif (x <= 3.3e-43) tmp = Float64(Float64(z * Float64(y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -4.8e+57], t$95$1, If[LessEqual[x, 3.3e-43], N[(N[(z * N[(y * N[(y * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -4.8 \cdot 10^{+57}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 3.3 \cdot 10^{-43}:\\
\;\;\;\;z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right) - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -4.80000000000000009e57 or 3.30000000000000016e-43 < x Initial program 91.2%
Taylor expanded in x around inf
remove-double-negN/A
mul-1-negN/A
mul-1-negN/A
mul-1-negN/A
log-recN/A
lower-*.f64N/A
log-recN/A
mul-1-negN/A
mul-1-negN/A
mul-1-negN/A
remove-double-negN/A
lower-log.f6490.3
Applied rewrites90.3%
if -4.80000000000000009e57 < x < 3.30000000000000016e-43Initial program 75.7%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6492.0
Applied rewrites92.0%
Taylor expanded in y around 0
Applied rewrites91.2%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (* x (log y))))
(if (<= x -1.82e+81)
t_1
(if (<= x 5.8e+26)
(-
(* z (* y (fma y (fma y (fma y -0.25 -0.3333333333333333) -0.5) -1.0)))
t)
t_1))))
double code(double x, double y, double z, double t) {
double t_1 = x * log(y);
double tmp;
if (x <= -1.82e+81) {
tmp = t_1;
} else if (x <= 5.8e+26) {
tmp = (z * (y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t;
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = Float64(x * log(y)) tmp = 0.0 if (x <= -1.82e+81) tmp = t_1; elseif (x <= 5.8e+26) tmp = Float64(Float64(z * Float64(y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -1.82e+81], t$95$1, If[LessEqual[x, 5.8e+26], N[(N[(z * N[(y * N[(y * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x \cdot \log y\\
\mathbf{if}\;x \leq -1.82 \cdot 10^{+81}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 5.8 \cdot 10^{+26}:\\
\;\;\;\;z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right) - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -1.82000000000000003e81 or 5.8e26 < x Initial program 92.1%
Taylor expanded in x around inf
remove-double-negN/A
mul-1-negN/A
mul-1-negN/A
mul-1-negN/A
log-recN/A
lower-*.f64N/A
log-recN/A
mul-1-negN/A
mul-1-negN/A
mul-1-negN/A
remove-double-negN/A
lower-log.f6471.3
Applied rewrites71.3%
if -1.82000000000000003e81 < x < 5.8e26Initial program 77.6%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6488.7
Applied rewrites88.7%
Taylor expanded in y around 0
Applied rewrites88.0%
(FPCore (x y z t) :precision binary64 (- (* x (log y)) (fma z y t)))
double code(double x, double y, double z, double t) {
return (x * log(y)) - fma(z, y, t);
}
function code(x, y, z, t) return Float64(Float64(x * log(y)) - fma(z, y, t)) end
code[x_, y_, z_, t_] := N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(z * y + t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \log y - \mathsf{fma}\left(z, y, t\right)
\end{array}
Initial program 84.1%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
neg-mul-1N/A
mul-1-negN/A
log-recN/A
associate--l-N/A
lower--.f64N/A
Applied rewrites98.7%
(FPCore (x y z t) :precision binary64 (- (* z (* y (fma y (fma y (fma y -0.25 -0.3333333333333333) -0.5) -1.0))) t))
double code(double x, double y, double z, double t) {
return (z * (y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t;
}
function code(x, y, z, t) return Float64(Float64(z * Float64(y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t) end
code[x_, y_, z_, t_] := N[(N[(z * N[(y * N[(y * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right) - t
\end{array}
Initial program 84.1%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6462.2
Applied rewrites62.2%
Taylor expanded in y around 0
Applied rewrites61.6%
(FPCore (x y z t) :precision binary64 (- (* z (fma (fma y -0.3333333333333333 -0.5) (* y y) (- y))) t))
double code(double x, double y, double z, double t) {
return (z * fma(fma(y, -0.3333333333333333, -0.5), (y * y), -y)) - t;
}
function code(x, y, z, t) return Float64(Float64(z * fma(fma(y, -0.3333333333333333, -0.5), Float64(y * y), Float64(-y))) - t) end
code[x_, y_, z_, t_] := N[(N[(z * N[(N[(y * -0.3333333333333333 + -0.5), $MachinePrecision] * N[(y * y), $MachinePrecision] + (-y)), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
z \cdot \mathsf{fma}\left(\mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), y \cdot y, -y\right) - t
\end{array}
Initial program 84.1%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6462.2
Applied rewrites62.2%
Taylor expanded in y around 0
Applied rewrites61.5%
Applied rewrites61.5%
(FPCore (x y z t) :precision binary64 (- (* y (fma y (* z (fma y -0.3333333333333333 -0.5)) (- z))) t))
double code(double x, double y, double z, double t) {
return (y * fma(y, (z * fma(y, -0.3333333333333333, -0.5)), -z)) - t;
}
function code(x, y, z, t) return Float64(Float64(y * fma(y, Float64(z * fma(y, -0.3333333333333333, -0.5)), Float64(-z))) - t) end
code[x_, y_, z_, t_] := N[(N[(y * N[(y * N[(z * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + (-z)), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
y \cdot \mathsf{fma}\left(y, z \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -z\right) - t
\end{array}
Initial program 84.1%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6462.2
Applied rewrites62.2%
Taylor expanded in y around 0
Applied rewrites61.1%
Taylor expanded in y around 0
Applied rewrites61.5%
(FPCore (x y z t) :precision binary64 (- (* z (* y (fma y (fma y -0.3333333333333333 -0.5) -1.0))) t))
double code(double x, double y, double z, double t) {
return (z * (y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0))) - t;
}
function code(x, y, z, t) return Float64(Float64(z * Float64(y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0))) - t) end
code[x_, y_, z_, t_] := N[(N[(z * N[(y * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right)\right) - t
\end{array}
Initial program 84.1%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6462.2
Applied rewrites62.2%
Taylor expanded in y around 0
Applied rewrites61.5%
(FPCore (x y z t) :precision binary64 (if (<= t -1.1e-45) (- t) (if (<= t 4.8e-78) (* z (- y)) (- t))))
/* Piecewise result selected on t: z*(-y) when t lies in
 * (-1.1e-45, 4.8e-78], and -t everywhere else.
 * x is accepted but unused (interface kept for the report's callers). */
double code(double x, double y, double z, double t) {
    if (t > -1.1e-45 && t <= 4.8e-78) {
        return z * -y;
    }
    return -t;
}
! Piecewise result selected on t: z*(-y) when t lies in
! (-1.1d-45, 4.8d-78], and -t everywhere else.
! Bug fixed: "z * -y" places two operators in a row, which standard
! Fortran forbids (gfortran rejects it); the negation must be
! parenthesized as "z * (-y)".
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if (t <= (-1.1d-45)) then
tmp = -t
else if (t <= 4.8d-78) then
tmp = z * (-y)
else
tmp = -t
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if (t <= -1.1e-45) {
tmp = -t;
} else if (t <= 4.8e-78) {
tmp = z * -y;
} else {
tmp = -t;
}
return tmp;
}
def code(x, y, z, t):
    """Piecewise result selected on t: z*(-y) for t in
    (-1.1e-45, 4.8e-78], and -t everywhere else. x is unused
    (interface kept for the report's callers).

    Bug fixed: the original placed assignments and an if/elif/else
    suite on one physical line after the ``def``, which is a Python
    SyntaxError; the logic itself is unchanged.
    """
    tmp = 0
    if t <= -1.1e-45:
        tmp = -t
    elif t <= 4.8e-78:
        tmp = z * -y
    else:
        tmp = -t
    return tmp
function code(x, y, z, t) tmp = 0.0 if (t <= -1.1e-45) tmp = Float64(-t); elseif (t <= 4.8e-78) tmp = Float64(z * Float64(-y)); else tmp = Float64(-t); end return tmp end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if (t <= -1.1e-45) tmp = -t; elseif (t <= 4.8e-78) tmp = z * -y; else tmp = -t; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[t, -1.1e-45], (-t), If[LessEqual[t, 4.8e-78], N[(z * (-y)), $MachinePrecision], (-t)]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t \leq -1.1 \cdot 10^{-45}:\\
\;\;\;\;-t\\
\mathbf{elif}\;t \leq 4.8 \cdot 10^{-78}:\\
\;\;\;\;z \cdot \left(-y\right)\\
\mathbf{else}:\\
\;\;\;\;-t\\
\end{array}
\end{array}
if t < -1.09999999999999997e-45 or 4.79999999999999999e-78 < t Initial program 93.0%
Taylor expanded in t around inf
mul-1-negN/A
lower-neg.f6465.6
Applied rewrites65.6%
if -1.09999999999999997e-45 < t < 4.79999999999999999e-78Initial program 69.7%
lift-log.f64N/A
lift--.f64N/A
flip3--N/A
log-divN/A
lower--.f64N/A
metadata-evalN/A
sub-negN/A
cube-negN/A
metadata-evalN/A
metadata-evalN/A
lower-log1p.f64N/A
cube-negN/A
lower-neg.f64N/A
cube-multN/A
lower-*.f64N/A
lower-*.f64N/A
metadata-evalN/A
lower-log1p.f64N/A
*-lft-identityN/A
lower-fma.f6499.6
Applied rewrites99.6%
Taylor expanded in y around 0
associate--l+N/A
mul-1-negN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-fma.f64N/A
lower-neg.f64N/A
sub-negN/A
lower-fma.f64N/A
lower-log.f64N/A
lower-neg.f6497.6
Applied rewrites97.6%
Taylor expanded in z around inf
Applied rewrites31.4%
(FPCore (x y z t) :precision binary64 (- (* z (* y (fma y -0.5 -1.0))) t))
double code(double x, double y, double z, double t) {
return (z * (y * fma(y, -0.5, -1.0))) - t;
}
function code(x, y, z, t) return Float64(Float64(z * Float64(y * fma(y, -0.5, -1.0))) - t) end
code[x_, y_, z_, t_] := N[(N[(z * N[(y * N[(y * -0.5 + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
z \cdot \left(y \cdot \mathsf{fma}\left(y, -0.5, -1\right)\right) - t
\end{array}
Initial program 84.1%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6462.2
Applied rewrites62.2%
Taylor expanded in y around 0
Applied rewrites61.4%
(FPCore (x y z t) :precision binary64 (- (fma z y t)))
double code(double x, double y, double z, double t) {
return -fma(z, y, t);
}
function code(x, y, z, t) return Float64(-fma(z, y, t)) end
code[x_, y_, z_, t_] := (-N[(z * y + t), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(z, y, t\right)
\end{array}
Initial program 84.1%
lift-log.f64N/A
lift--.f64N/A
flip3--N/A
log-divN/A
lower--.f64N/A
metadata-evalN/A
sub-negN/A
cube-negN/A
metadata-evalN/A
metadata-evalN/A
lower-log1p.f64N/A
cube-negN/A
lower-neg.f64N/A
cube-multN/A
lower-*.f64N/A
lower-*.f64N/A
metadata-evalN/A
lower-log1p.f64N/A
*-lft-identityN/A
lower-fma.f6499.7
Applied rewrites99.7%
Taylor expanded in y around 0
associate--l+N/A
mul-1-negN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-fma.f64N/A
lower-neg.f64N/A
sub-negN/A
lower-fma.f64N/A
lower-log.f64N/A
lower-neg.f6498.6
Applied rewrites98.6%
Taylor expanded in x around 0
Applied rewrites61.1%
(FPCore (x y z t) :precision binary64 (- t))
double code(double x, double y, double z, double t) {
return -t;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = -t
end function
public static double code(double x, double y, double z, double t) {
return -t;
}
def code(x, y, z, t): return -t
function code(x, y, z, t) return Float64(-t) end
function tmp = code(x, y, z, t) tmp = -t; end
code[x_, y_, z_, t_] := (-t)
\begin{array}{l}
\\
-t
\end{array}
Initial program 84.1%
Taylor expanded in t around inf
mul-1-negN/A
lower-neg.f6445.8
Applied rewrites45.8%
(FPCore (x y z t) :precision binary64 t)
double code(double x, double y, double z, double t) {
return t;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = t
end function
public static double code(double x, double y, double z, double t) {
return t;
}
def code(x, y, z, t): return t
function code(x, y, z, t) return t end
function tmp = code(x, y, z, t) tmp = t; end
code[x_, y_, z_, t_] := t
\begin{array}{l}
\\
t
\end{array}
Initial program 84.1%
Taylor expanded in t around inf
mul-1-negN/A
lower-neg.f6445.8
Applied rewrites45.8%
Applied rewrites14.3%
Applied rewrites2.0%
(FPCore (x y z t)
:precision binary64
(-
(*
(- z)
(+
(+ (* 0.5 (* y y)) y)
(* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
(- t (* x (log y)))))
double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
end function
public static double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * Math.log(y)));
}
def code(x, y, z, t): return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * math.log(y)))
function code(x, y, z, t) return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y)))) end
function tmp = code(x, y, z, t) tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y))); end
code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
\end{array}
herbie shell --seed 2024233
(FPCore (x y z t)
:name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
:precision binary64
:alt
(! :herbie-platform default (- (* (- z) (+ (+ (* 1/2 (* y y)) y) (* (/ 1/3 (* 1 (* 1 1))) (* y (* y y))))) (- t (* x (log y)))))
(- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))