
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Original expression: x*log(y) + z*log(1 - y) - t in double precision.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: left, right
left = x * log(y)
right = z * log(1.0d0 - y)
code = (left + right) - t
end function
/** Original expression: x*log(y) + z*log(1 - y) - t, in binary64. */
public static double code(double x, double y, double z, double t) {
    double left = x * Math.log(y);
    double right = z * Math.log(1.0 - y);
    return (left + right) - t;
}
def code(x, y, z, t):
    """Binary64 evaluation of x*log(y) + z*log(1 - y) - t."""
    left = x * math.log(y)
    right = z * math.log(1.0 - y)
    return (left + right) - t
# Binary64 evaluation of x*log(y) + z*log(1 - y) - t.
function code(x, y, z, t)
    left = Float64(x * log(y))
    right = Float64(z * log(Float64(1.0 - y)))
    return Float64(Float64(left + right) - t)
end
% Original expression: x*log(y) + z*log(1 - y) - t.
function tmp = code(x, y, z, t)
  left = x * log(y);
  right = z * log(1.0 - y);
  tmp = (left + right) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (z * math.log((1.0 - y)))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
(FPCore (x y z t) :precision binary64 (- (+ (* (- (log1p (pow (- y) 3.0)) (log1p (fma y y y))) z) (* (log y) x)) t))
double code(double x, double y, double z, double t) {
return (((log1p(pow(-y, 3.0)) - log1p(fma(y, y, y))) * z) + (log(y) * x)) - t;
}
function code(x, y, z, t) return Float64(Float64(Float64(Float64(log1p((Float64(-y) ^ 3.0)) - log1p(fma(y, y, y))) * z) + Float64(log(y) * x)) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(N[(N[Log[1 + N[Power[(-y), 3.0], $MachinePrecision]], $MachinePrecision] - N[Log[1 + N[(y * y + y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] + N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\mathsf{log1p}\left({\left(-y\right)}^{3}\right) - \mathsf{log1p}\left(\mathsf{fma}\left(y, y, y\right)\right)\right) \cdot z + \log y \cdot x\right) - t
\end{array}
Initial program 85.9%
lift-log.f64N/A
lift--.f64N/A
flip3--N/A
log-divN/A
lower--.f64N/A
metadata-evalN/A
sub-negN/A
cube-negN/A
metadata-evalN/A
metadata-evalN/A
lower-log1p.f64N/A
lower-pow.f64N/A
lower-neg.f64N/A
metadata-evalN/A
lower-log1p.f64N/A
*-lft-identityN/A
lower-fma.f6499.9
Applied rewrites99.9%
Final simplification99.9%
(FPCore (x y z t) :precision binary64 (fma (log1p (- y)) z (fma (log y) x (- t))))
double code(double x, double y, double z, double t) {
return fma(log1p(-y), z, fma(log(y), x, -t));
}
function code(x, y, z, t) return fma(log1p(Float64(-y)), z, fma(log(y), x, Float64(-t))) end
code[x_, y_, z_, t_] := N[(N[Log[1 + (-y)], $MachinePrecision] * z + N[(N[Log[y], $MachinePrecision] * x + (-t)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{log1p}\left(-y\right), z, \mathsf{fma}\left(\log y, x, -t\right)\right)
\end{array}
Initial program 85.9%
lift--.f64N/A
lift-+.f64N/A
+-commutativeN/A
associate--l+N/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lift-log.f64N/A
lift--.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f64N/A
sub-negN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lower-neg.f6499.9
Applied rewrites99.9%
(FPCore (x y z t)
:precision binary64
(-
(+
(*
(fma (fma (* (fma -0.25 y -0.3333333333333333) z) y (* -0.5 z)) y (- z))
y)
(* (log y) x))
t))
double code(double x, double y, double z, double t) {
return ((fma(fma((fma(-0.25, y, -0.3333333333333333) * z), y, (-0.5 * z)), y, -z) * y) + (log(y) * x)) - t;
}
function code(x, y, z, t) return Float64(Float64(Float64(fma(fma(Float64(fma(-0.25, y, -0.3333333333333333) * z), y, Float64(-0.5 * z)), y, Float64(-z)) * y) + Float64(log(y) * x)) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(N[(N[(N[(N[(-0.25 * y + -0.3333333333333333), $MachinePrecision] * z), $MachinePrecision] * y + N[(-0.5 * z), $MachinePrecision]), $MachinePrecision] * y + (-z)), $MachinePrecision] * y), $MachinePrecision] + N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25, y, -0.3333333333333333\right) \cdot z, y, -0.5 \cdot z\right), y, -z\right) \cdot y + \log y \cdot x\right) - t
\end{array}
Initial program 85.9%
Taylor expanded in y around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.6%
Final simplification99.6%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (fma (log y) x (- t))))
(if (<= x -3.8e-90)
t_1
(if (<= x 1.25e-26) (- (* (log1p (- y)) z) t) t_1))))
double code(double x, double y, double z, double t) {
double t_1 = fma(log(y), x, -t);
double tmp;
if (x <= -3.8e-90) {
tmp = t_1;
} else if (x <= 1.25e-26) {
tmp = (log1p(-y) * z) - t;
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = fma(log(y), x, Float64(-t)) tmp = 0.0 if (x <= -3.8e-90) tmp = t_1; elseif (x <= 1.25e-26) tmp = Float64(Float64(log1p(Float64(-y)) * z) - t); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[Log[y], $MachinePrecision] * x + (-t)), $MachinePrecision]}, If[LessEqual[x, -3.8e-90], t$95$1, If[LessEqual[x, 1.25e-26], N[(N[(N[Log[1 + (-y)], $MachinePrecision] * z), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \mathsf{fma}\left(\log y, x, -t\right)\\
\mathbf{if}\;x \leq -3.8 \cdot 10^{-90}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 1.25 \cdot 10^{-26}:\\
\;\;\;\;\mathsf{log1p}\left(-y\right) \cdot z - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -3.8e-90 or 1.25000000000000005e-26 < x Initial program 92.5%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
associate--l-N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-log.f64N/A
*-commutativeN/A
lower-fma.f6499.3
Applied rewrites99.3%
Applied rewrites99.3%
Taylor expanded in t around inf
Applied rewrites91.7%
if -3.8e-90 < x < 1.25000000000000005e-26 Initial program 75.7%
Taylor expanded in z around inf
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6492.6
Applied rewrites92.6%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (fma (log y) x (- t))))
(if (<= x -3.8e-90)
t_1
(if (<= x 1.25e-26)
(-
(*
(fma
(fma (* (fma -0.25 y -0.3333333333333333) z) y (* -0.5 z))
y
(- z))
y)
t)
t_1))))
double code(double x, double y, double z, double t) {
double t_1 = fma(log(y), x, -t);
double tmp;
if (x <= -3.8e-90) {
tmp = t_1;
} else if (x <= 1.25e-26) {
tmp = (fma(fma((fma(-0.25, y, -0.3333333333333333) * z), y, (-0.5 * z)), y, -z) * y) - t;
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = fma(log(y), x, Float64(-t)) tmp = 0.0 if (x <= -3.8e-90) tmp = t_1; elseif (x <= 1.25e-26) tmp = Float64(Float64(fma(fma(Float64(fma(-0.25, y, -0.3333333333333333) * z), y, Float64(-0.5 * z)), y, Float64(-z)) * y) - t); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[Log[y], $MachinePrecision] * x + (-t)), $MachinePrecision]}, If[LessEqual[x, -3.8e-90], t$95$1, If[LessEqual[x, 1.25e-26], N[(N[(N[(N[(N[(N[(-0.25 * y + -0.3333333333333333), $MachinePrecision] * z), $MachinePrecision] * y + N[(-0.5 * z), $MachinePrecision]), $MachinePrecision] * y + (-z)), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \mathsf{fma}\left(\log y, x, -t\right)\\
\mathbf{if}\;x \leq -3.8 \cdot 10^{-90}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 1.25 \cdot 10^{-26}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25, y, -0.3333333333333333\right) \cdot z, y, -0.5 \cdot z\right), y, -z\right) \cdot y - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -3.8e-90 or 1.25000000000000005e-26 < x Initial program 92.5%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
associate--l-N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-log.f64N/A
*-commutativeN/A
lower-fma.f6499.3
Applied rewrites99.3%
Applied rewrites99.3%
Taylor expanded in t around inf
Applied rewrites91.7%
if -3.8e-90 < x < 1.25000000000000005e-26 Initial program 75.7%
Taylor expanded in z around inf
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6492.6
Applied rewrites92.6%
Taylor expanded in y around 0
Applied rewrites90.2%
Taylor expanded in y around 0
Applied rewrites92.0%
Final simplification91.9%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (* (log y) x)))
(if (<= x -7e+176)
t_1
(if (<= x 5.3e+138)
(-
(*
(fma
(fma (* (fma -0.25 y -0.3333333333333333) z) y (* -0.5 z))
y
(- z))
y)
t)
t_1))))
double code(double x, double y, double z, double t) {
double t_1 = log(y) * x;
double tmp;
if (x <= -7e+176) {
tmp = t_1;
} else if (x <= 5.3e+138) {
tmp = (fma(fma((fma(-0.25, y, -0.3333333333333333) * z), y, (-0.5 * z)), y, -z) * y) - t;
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = Float64(log(y) * x) tmp = 0.0 if (x <= -7e+176) tmp = t_1; elseif (x <= 5.3e+138) tmp = Float64(Float64(fma(fma(Float64(fma(-0.25, y, -0.3333333333333333) * z), y, Float64(-0.5 * z)), y, Float64(-z)) * y) - t); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision]}, If[LessEqual[x, -7e+176], t$95$1, If[LessEqual[x, 5.3e+138], N[(N[(N[(N[(N[(N[(-0.25 * y + -0.3333333333333333), $MachinePrecision] * z), $MachinePrecision] * y + N[(-0.5 * z), $MachinePrecision]), $MachinePrecision] * y + (-z)), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \log y \cdot x\\
\mathbf{if}\;x \leq -7 \cdot 10^{+176}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 5.3 \cdot 10^{+138}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25, y, -0.3333333333333333\right) \cdot z, y, -0.5 \cdot z\right), y, -z\right) \cdot y - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -7.00000000000000005e176 or 5.29999999999999984e138 < x Initial program 99.7%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
lower-log.f6483.6
Applied rewrites83.6%
if -7.00000000000000005e176 < x < 5.29999999999999984e138 Initial program 80.4%
Taylor expanded in z around inf
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6477.0
Applied rewrites77.0%
Taylor expanded in y around 0
Applied rewrites75.2%
Taylor expanded in y around 0
Applied rewrites76.6%
Final simplification78.6%
(FPCore (x y z t) :precision binary64 (fma (log y) x (- (fma z y t))))
double code(double x, double y, double z, double t) {
return fma(log(y), x, -fma(z, y, t));
}
function code(x, y, z, t) return fma(log(y), x, Float64(-fma(z, y, t))) end
code[x_, y_, z_, t_] := N[(N[Log[y], $MachinePrecision] * x + (-N[(z * y + t), $MachinePrecision])), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\log y, x, -\mathsf{fma}\left(z, y, t\right)\right)
\end{array}
Initial program 85.9%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
associate--l-N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-log.f64N/A
*-commutativeN/A
lower-fma.f6498.6
Applied rewrites98.6%
Applied rewrites98.6%
(FPCore (x y z t) :precision binary64 (- (* (log y) x) (fma z y t)))
double code(double x, double y, double z, double t) {
return (log(y) * x) - fma(z, y, t);
}
function code(x, y, z, t) return Float64(Float64(log(y) * x) - fma(z, y, t)) end
code[x_, y_, z_, t_] := N[(N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision] - N[(z * y + t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log y \cdot x - \mathsf{fma}\left(z, y, t\right)
\end{array}
Initial program 85.9%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
associate--l-N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-log.f64N/A
*-commutativeN/A
lower-fma.f6498.6
Applied rewrites98.6%
(FPCore (x y z t) :precision binary64 (- (* (fma (fma (* (fma -0.25 y -0.3333333333333333) z) y (* -0.5 z)) y (- z)) y) t))
double code(double x, double y, double z, double t) {
return (fma(fma((fma(-0.25, y, -0.3333333333333333) * z), y, (-0.5 * z)), y, -z) * y) - t;
}
function code(x, y, z, t) return Float64(Float64(fma(fma(Float64(fma(-0.25, y, -0.3333333333333333) * z), y, Float64(-0.5 * z)), y, Float64(-z)) * y) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(N[(N[(N[(-0.25 * y + -0.3333333333333333), $MachinePrecision] * z), $MachinePrecision] * y + N[(-0.5 * z), $MachinePrecision]), $MachinePrecision] * y + (-z)), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25, y, -0.3333333333333333\right) \cdot z, y, -0.5 \cdot z\right), y, -z\right) \cdot y - t
\end{array}
Initial program 85.9%
Taylor expanded in z around inf
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6459.5
Applied rewrites59.5%
Taylor expanded in y around 0
Applied rewrites58.2%
Taylor expanded in y around 0
Applied rewrites59.2%
Final simplification59.2%
(FPCore (x y z t) :precision binary64 (- (* (* (fma (fma (fma -0.25 y -0.3333333333333333) y -0.5) y -1.0) y) z) t))
double code(double x, double y, double z, double t) {
return ((fma(fma(fma(-0.25, y, -0.3333333333333333), y, -0.5), y, -1.0) * y) * z) - t;
}
function code(x, y, z, t) return Float64(Float64(Float64(fma(fma(fma(-0.25, y, -0.3333333333333333), y, -0.5), y, -1.0) * y) * z) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(N[(N[(N[(-0.25 * y + -0.3333333333333333), $MachinePrecision] * y + -0.5), $MachinePrecision] * y + -1.0), $MachinePrecision] * y), $MachinePrecision] * z), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25, y, -0.3333333333333333\right), y, -0.5\right), y, -1\right) \cdot y\right) \cdot z - t
\end{array}
Initial program 85.9%
Taylor expanded in z around inf
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6459.5
Applied rewrites59.5%
Taylor expanded in y around 0
Applied rewrites59.2%
(FPCore (x y z t) :precision binary64 (- (* (* (fma (fma -0.3333333333333333 y -0.5) y -1.0) y) z) t))
double code(double x, double y, double z, double t) {
return ((fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * y) * z) - t;
}
function code(x, y, z, t) return Float64(Float64(Float64(fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * y) * z) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(N[(N[(-0.3333333333333333 * y + -0.5), $MachinePrecision] * y + -1.0), $MachinePrecision] * y), $MachinePrecision] * z), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right), y, -1\right) \cdot y\right) \cdot z - t
\end{array}
Initial program 85.9%
Taylor expanded in z around inf
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6459.5
Applied rewrites59.5%
Taylor expanded in y around 0
Applied rewrites59.1%
(FPCore (x y z t) :precision binary64 (- (* (fma -0.5 (* z y) (- z)) y) t))
double code(double x, double y, double z, double t) {
return (fma(-0.5, (z * y), -z) * y) - t;
}
function code(x, y, z, t) return Float64(Float64(fma(-0.5, Float64(z * y), Float64(-z)) * y) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(-0.5 * N[(z * y), $MachinePrecision] + (-z)), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5, z \cdot y, -z\right) \cdot y - t
\end{array}
Initial program 85.9%
Taylor expanded in z around inf
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6459.5
Applied rewrites59.5%
Taylor expanded in y around 0
Applied rewrites58.8%
(FPCore (x y z t) :precision binary64 (if (<= t -3.4e-147) (- t) (if (<= t 3e-175) (* (- z) y) (- t))))
/* Piecewise on t: outside (-3.4e-147, 3e-175] return -t; inside, -z*y.
   NaN t takes neither <= branch and yields -t, matching the original. */
double code(double x, double y, double z, double t) {
    if (t <= -3.4e-147)
        return -t;
    if (t <= 3e-175)
        return -z * y;
    return -t;
}
! Piecewise on t: outside (-3.4e-147, 3e-175] return -t; inside, -z*y.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
if (t <= (-3.4d-147)) then
code = -t
else if (t <= 3d-175) then
code = -z * y
else
code = -t
end if
end function
/** Piecewise on t: outside (-3.4e-147, 3e-175] return -t; inside, -z*y. */
public static double code(double x, double y, double z, double t) {
    if (t <= -3.4e-147) {
        return -t;
    } else if (t <= 3e-175) {
        return -z * y;
    }
    return -t;
}
def code(x, y, z, t):
    """Piecewise on t: outside (-3.4e-147, 3e-175] return -t; inside, -z*y.

    The generated one-line form was a SyntaxError (statement-level
    if/elif/return cannot follow a def on a single line); this is the
    same logic as the C/Fortran/Java renderings, written legally.
    """
    if t <= -3.4e-147:
        tmp = -t
    elif t <= 3e-175:
        tmp = -z * y
    else:
        tmp = -t
    return tmp
function code(x, y, z, t) tmp = 0.0 if (t <= -3.4e-147) tmp = Float64(-t); elseif (t <= 3e-175) tmp = Float64(Float64(-z) * y); else tmp = Float64(-t); end return tmp end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if (t <= -3.4e-147) tmp = -t; elseif (t <= 3e-175) tmp = -z * y; else tmp = -t; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[t, -3.4e-147], (-t), If[LessEqual[t, 3e-175], N[((-z) * y), $MachinePrecision], (-t)]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t \leq -3.4 \cdot 10^{-147}:\\
\;\;\;\;-t\\
\mathbf{elif}\;t \leq 3 \cdot 10^{-175}:\\
\;\;\;\;\left(-z\right) \cdot y\\
\mathbf{else}:\\
\;\;\;\;-t\\
\end{array}
\end{array}
if t < -3.39999999999999996e-147 or 3e-175 < t Initial program 91.1%
Taylor expanded in t around inf
mul-1-negN/A
lower-neg.f6456.8
Applied rewrites56.8%
if -3.39999999999999996e-147 < t < 3e-175 Initial program 70.2%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
associate--l-N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-log.f64N/A
*-commutativeN/A
lower-fma.f6497.5
Applied rewrites97.5%
Taylor expanded in z around inf
Applied rewrites34.4%
(FPCore (x y z t) :precision binary64 (- (* (* (fma -0.5 y -1.0) y) z) t))
double code(double x, double y, double z, double t) {
return ((fma(-0.5, y, -1.0) * y) * z) - t;
}
function code(x, y, z, t) return Float64(Float64(Float64(fma(-0.5, y, -1.0) * y) * z) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(N[(-0.5 * y + -1.0), $MachinePrecision] * y), $MachinePrecision] * z), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(-0.5, y, -1\right) \cdot y\right) \cdot z - t
\end{array}
Initial program 85.9%
Taylor expanded in z around inf
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
lower-log1p.f64N/A
lower-neg.f6459.5
Applied rewrites59.5%
Taylor expanded in y around 0
Applied rewrites58.8%
(FPCore (x y z t) :precision binary64 (- (fma z y t)))
double code(double x, double y, double z, double t) {
return -fma(z, y, t);
}
function code(x, y, z, t) return Float64(-fma(z, y, t)) end
code[x_, y_, z_, t_] := (-N[(z * y + t), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(z, y, t\right)
\end{array}
Initial program 85.9%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
associate--l-N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-log.f64N/A
*-commutativeN/A
lower-fma.f6498.6
Applied rewrites98.6%
Taylor expanded in x around 0
Applied rewrites58.2%
(FPCore (x y z t) :precision binary64 (- t))
/* Constant-order approximation: simply negate t (x, y, z are ignored). */
double code(double x, double y, double z, double t) {
    double negated = -t;
    return negated;
}
! Constant-order approximation of the original expression: returns -t.
! x, y and z are accepted to keep the common four-argument interface
! but are intentionally unused in this alternative.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = -t
end function
/** Constant-order approximation: negate t; x, y, z are ignored. */
public static double code(double x, double y, double z, double t) {
    final double negated = -t;
    return negated;
}
def code(x, y, z, t):
    """Constant-order approximation: negate t; x, y, z are ignored."""
    return -t
function code(x, y, z, t) return Float64(-t) end
function tmp = code(x, y, z, t) tmp = -t; end
code[x_, y_, z_, t_] := (-t)
\begin{array}{l}
\\
-t
\end{array}
Initial program 85.9%
Taylor expanded in t around inf
mul-1-negN/A
lower-neg.f6443.4
Applied rewrites43.4%
(FPCore (x y z t)
:precision binary64
(-
(*
(- z)
(+
(+ (* 0.5 (* y y)) y)
(* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
(- t (* x (log y)))))
double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
end function
public static double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * Math.log(y)));
}
def code(x, y, z, t): return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * math.log(y)))
function code(x, y, z, t) return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y)))) end
function tmp = code(x, y, z, t) tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y))); end
code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
\end{array}
herbie shell --seed 2024263
(FPCore (x y z t)
:name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
:precision binary64
:alt
(! :herbie-platform default (- (* (- z) (+ (+ (* 1/2 (* y y)) y) (* (/ 1/3 (* 1 (* 1 1))) (* y (* y y))))) (- t (* x (log y)))))
(- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))