
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Compute x*log(y) + z*log(1 - y) - t in double precision.
! Both logarithms are real only when 0 < y < 1.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
    // Direct binary64 evaluation of x*log(y) + z*log(1 - y) - t.
    // Logarithms are finite only for 0 < y < 1.
    final double logY = Math.log(y);
    final double logOneMinusY = Math.log(1.0 - y);
    return x * logY + z * logOneMinusY - t;
}
def code(x, y, z, t):
    """Evaluate x*log(y) + z*log(1 - y) - t in double precision.

    Both logarithms are real only for 0 < y < 1.
    """
    log_y = math.log(y)
    log_one_minus_y = math.log(1.0 - y)
    return (x * log_y + z * log_one_minus_y) - t
# x*log(y) + z*log(1 - y) - t with every intermediate explicitly rounded to Float64.
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t; end % x*log(y) + z*log(1 - y) - t; logs are real only for 0 < y < 1
(* x*Log[y] + z*Log[1 - y] - t, with each intermediate rounded via N[..., $MachinePrecision]. *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 19 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (z * math.log((1.0 - y)))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log1p (- 0.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log1p((0.0 - y)))) - t;
}
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * Math.log1p((0.0 - y)))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (z * math.log1p((0.0 - y)))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log1p(Float64(0.0 - y)))) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[1 + N[(0.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \mathsf{log1p}\left(0 - y\right)\right) - t
\end{array}
Initial program 83.8%
sub-negN/A
accelerator-lowering-log1p.f64N/A
neg-sub0N/A
--lowering--.f6499.8%
Applied egg-rr99.8%
(FPCore (x y z t)
:precision binary64
(-
(+
(* x (log y))
(*
y
(fma
y
(fma z (* y (fma y -0.25 -0.3333333333333333)) (* z -0.5))
(- 0.0 z))))
t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (y * fma(y, fma(z, (y * fma(y, -0.25, -0.3333333333333333)), (z * -0.5)), (0.0 - z)))) - t;
}
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(y * fma(y, fma(z, Float64(y * fma(y, -0.25, -0.3333333333333333)), Float64(z * -0.5)), Float64(0.0 - z)))) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(y * N[(y * N[(z * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision]), $MachinePrecision] + N[(z * -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), z \cdot -0.5\right), 0 - z\right)\right) - t
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.7%
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (* y (fma y (fma y (fma y -0.25 -0.3333333333333333) -0.5) -1.0)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * (y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0)))) - t;
}
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0)))) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(y * N[(y * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right)\right) - t
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f6499.7%
Simplified99.7%
(FPCore (x y z t) :precision binary64 (fma y (fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z)) (fma x (log y) (- 0.0 t))))
double code(double x, double y, double z, double t) {
return fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), fma(x, log(y), (0.0 - t)));
}
function code(x, y, z, t) return fma(y, fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z)), fma(x, log(y), Float64(0.0 - t))) end
code[x_, y_, z_, t_] := N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
Simplified99.6%
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (* y (fma y (fma y -0.3333333333333333 -0.5) -1.0)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * (y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)))) - t;
}
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)))) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(y * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right)\right)\right) - t
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f6499.6%
Simplified99.6%
(FPCore (x y z t) :precision binary64 (fma (* y z) (fma y -0.5 -1.0) (fma x (log y) (- 0.0 t))))
double code(double x, double y, double z, double t) {
return fma((y * z), fma(y, -0.5, -1.0), fma(x, log(y), (0.0 - t)));
}
function code(x, y, z, t) return fma(Float64(y * z), fma(y, -0.5, -1.0), fma(x, log(y), Float64(0.0 - t))) end
code[x_, y_, z_, t_] := N[(N[(y * z), $MachinePrecision] * N[(y * -0.5 + -1.0), $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y \cdot z, \mathsf{fma}\left(y, -0.5, -1\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
+-commutativeN/A
associate--l+N/A
associate-*r*N/A
distribute-rgt-outN/A
+-commutativeN/A
metadata-evalN/A
sub-negN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
remove-double-negN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
Simplified99.4%
Final simplification99.4%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (- (* x (log y)) (* y z))))
(if (<= z -1.45e+218)
t_1
(if (<= z 1.3e+150) (fma x (log y) (- 0.0 t)) t_1))))
/* Piecewise alternative selected on z:
   moderate z -> fma(x, log(y), -t); extreme z -> x*log(y) - y*z.
   Thresholds (-1.45e218, 1.3e150) come from the regime split above. */
double code(double x, double y, double z, double t) {
double t_1 = (x * log(y)) - (y * z); /* value shared by both extreme-z branches */
double tmp;
if (z <= -1.45e+218) {
tmp = t_1;
} else if (z <= 1.3e+150) {
tmp = fma(x, log(y), (0.0 - t));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = Float64(Float64(x * log(y)) - Float64(y * z)) tmp = 0.0 if (z <= -1.45e+218) tmp = t_1; elseif (z <= 1.3e+150) tmp = fma(x, log(y), Float64(0.0 - t)); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(y * z), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -1.45e+218], t$95$1, If[LessEqual[z, 1.3e+150], N[(x * N[Log[y], $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x \cdot \log y - y \cdot z\\
\mathbf{if}\;z \leq -1.45 \cdot 10^{+218}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;z \leq 1.3 \cdot 10^{+150}:\\
\;\;\;\;\mathsf{fma}\left(x, \log y, 0 - t\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if z < -1.45e218 or 1.30000000000000003e150 < z Initial program 42.9%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
neg-mul-1N/A
mul-1-negN/A
log-recN/A
associate--l-N/A
--lowering--.f64N/A
Simplified97.8%
Taylor expanded in t around 0
--lowering--.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
*-commutativeN/A
*-lowering-*.f6488.7%
Simplified88.7%
if -1.45e218 < z < 1.30000000000000003e150Initial program 94.4%
Taylor expanded in y around 0
sub-negN/A
remove-double-negN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
mul-1-negN/A
log-recN/A
accelerator-lowering-fma.f64N/A
log-recN/A
mul-1-negN/A
mul-1-negN/A
mul-1-negN/A
remove-double-negN/A
log-lowering-log.f64N/A
neg-sub0N/A
--lowering--.f6493.6%
Simplified93.6%
sub0-negN/A
neg-lowering-neg.f6493.6%
Applied egg-rr93.6%
Final simplification92.6%
(FPCore (x y z t)
:precision binary64
(if (<= x -1.95e-146)
(- (* x (log y)) t)
(if (<= x 7e-101)
(fma y (fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z)) (- 0.0 t))
(fma x (log y) (- 0.0 t)))))
double code(double x, double y, double z, double t) {
double tmp;
if (x <= -1.95e-146) {
tmp = (x * log(y)) - t;
} else if (x <= 7e-101) {
tmp = fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), (0.0 - t));
} else {
tmp = fma(x, log(y), (0.0 - t));
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if (x <= -1.95e-146) tmp = Float64(Float64(x * log(y)) - t); elseif (x <= 7e-101) tmp = fma(y, fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z)), Float64(0.0 - t)); else tmp = fma(x, log(y), Float64(0.0 - t)); end return tmp end
code[x_, y_, z_, t_] := If[LessEqual[x, -1.95e-146], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[x, 7e-101], N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision], N[(x * N[Log[y], $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.95 \cdot 10^{-146}:\\
\;\;\;\;x \cdot \log y - t\\
\mathbf{elif}\;x \leq 7 \cdot 10^{-101}:\\
\;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, \log y, 0 - t\right)\\
\end{array}
\end{array}
if x < -1.95000000000000001e-146Initial program 90.9%
sub-negN/A
accelerator-lowering-log1p.f64N/A
neg-sub0N/A
--lowering--.f6499.8%
Applied egg-rr99.8%
Taylor expanded in x around inf
*-lowering-*.f64N/A
log-lowering-log.f6489.9%
Simplified89.9%
if -1.95000000000000001e-146 < x < 6.99999999999999989e-101Initial program 68.9%
Taylor expanded in y around 0
Simplified100.0%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f6493.7%
Simplified93.7%
if 6.99999999999999989e-101 < x Initial program 93.5%
Taylor expanded in y around 0
sub-negN/A
remove-double-negN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
mul-1-negN/A
log-recN/A
accelerator-lowering-fma.f64N/A
log-recN/A
mul-1-negN/A
mul-1-negN/A
mul-1-negN/A
remove-double-negN/A
log-lowering-log.f64N/A
neg-sub0N/A
--lowering--.f6492.1%
Simplified92.1%
sub0-negN/A
neg-lowering-neg.f6492.1%
Applied egg-rr92.1%
Final simplification91.9%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (- (* x (log y)) t)))
(if (<= x -1.2e-146)
t_1
(if (<= x 5.4e-101)
(fma
y
(fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z))
(- 0.0 t))
t_1))))
double code(double x, double y, double z, double t) {
double t_1 = (x * log(y)) - t;
double tmp;
if (x <= -1.2e-146) {
tmp = t_1;
} else if (x <= 5.4e-101) {
tmp = fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), (0.0 - t));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = Float64(Float64(x * log(y)) - t) tmp = 0.0 if (x <= -1.2e-146) tmp = t_1; elseif (x <= 5.4e-101) tmp = fma(y, fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z)), Float64(0.0 - t)); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -1.2e-146], t$95$1, If[LessEqual[x, 5.4e-101], N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -1.2 \cdot 10^{-146}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 5.4 \cdot 10^{-101}:\\
\;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -1.2000000000000001e-146 or 5.4000000000000003e-101 < x Initial program 92.1%
sub-negN/A
accelerator-lowering-log1p.f64N/A
neg-sub0N/A
--lowering--.f6499.8%
Applied egg-rr99.8%
Taylor expanded in x around inf
*-lowering-*.f64N/A
log-lowering-log.f6490.9%
Simplified90.9%
if -1.2000000000000001e-146 < x < 5.4000000000000003e-101Initial program 68.9%
Taylor expanded in y around 0
Simplified100.0%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f6493.7%
Simplified93.7%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (* x (log y))))
(if (<= x -3.9e+49)
t_1
(if (<= x 5e+53)
(fma
y
(fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z))
(- 0.0 t))
t_1))))
double code(double x, double y, double z, double t) {
double t_1 = x * log(y);
double tmp;
if (x <= -3.9e+49) {
tmp = t_1;
} else if (x <= 5e+53) {
tmp = fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), (0.0 - t));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = Float64(x * log(y)) tmp = 0.0 if (x <= -3.9e+49) tmp = t_1; elseif (x <= 5e+53) tmp = fma(y, fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z)), Float64(0.0 - t)); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -3.9e+49], t$95$1, If[LessEqual[x, 5e+53], N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x \cdot \log y\\
\mathbf{if}\;x \leq -3.9 \cdot 10^{+49}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 5 \cdot 10^{+53}:\\
\;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -3.9000000000000001e49 or 5.0000000000000004e53 < x Initial program 96.7%
sub-negN/A
accelerator-lowering-log1p.f64N/A
neg-sub0N/A
--lowering--.f6499.8%
Applied egg-rr99.8%
Taylor expanded in x around inf
*-lowering-*.f64N/A
log-lowering-log.f6482.3%
Simplified82.3%
if -3.9000000000000001e49 < x < 5.0000000000000004e53Initial program 76.3%
Taylor expanded in y around 0
Simplified99.7%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f6483.7%
Simplified83.7%
(FPCore (x y z t) :precision binary64 (- (fma x (log y) 0.0) (fma z y t)))
double code(double x, double y, double z, double t) {
return fma(x, log(y), 0.0) - fma(z, y, t);
}
function code(x, y, z, t) return Float64(fma(x, log(y), 0.0) - fma(z, y, t)) end
code[x_, y_, z_, t_] := N[(N[(x * N[Log[y], $MachinePrecision] + 0.0), $MachinePrecision] - N[(z * y + t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \log y, 0\right) - \mathsf{fma}\left(z, y, t\right)
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
neg-mul-1N/A
mul-1-negN/A
log-recN/A
associate--l-N/A
--lowering--.f64N/A
Simplified99.0%
(FPCore (x y z t) :precision binary64 (fma (* y (fma y -0.3333333333333333 -0.5)) (* y z) (- 0.0 (fma y z t))))
double code(double x, double y, double z, double t) {
return fma((y * fma(y, -0.3333333333333333, -0.5)), (y * z), (0.0 - fma(y, z, t)));
}
function code(x, y, z, t) return fma(Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(y * z), Float64(0.0 - fma(y, z, t))) end
code[x_, y_, z_, t_] := N[(N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] * N[(y * z), $MachinePrecision] + N[(0.0 - N[(y * z + t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), y \cdot z, 0 - \mathsf{fma}\left(y, z, t\right)\right)
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
Simplified99.6%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f6459.0%
Simplified59.0%
distribute-rgt-inN/A
sub0-negN/A
distribute-lft-neg-inN/A
associate-+l+N/A
*-commutativeN/A
associate-*l*N/A
neg-sub0N/A
distribute-neg-inN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
neg-sub0N/A
--lowering--.f64N/A
*-commutativeN/A
accelerator-lowering-fma.f6459.0%
Applied egg-rr59.0%
(FPCore (x y z t) :precision binary64 (fma y (fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z)) (- 0.0 t)))
double code(double x, double y, double z, double t) {
return fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), (0.0 - t));
}
function code(x, y, z, t) return fma(y, fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z)), Float64(0.0 - t)) end
code[x_, y_, z_, t_] := N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
Simplified99.6%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f6459.0%
Simplified59.0%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (* y (- 0.0 z)))) (if (<= z -8e+217) t_1 (if (<= z 3.95e+155) (- 0.0 t) t_1))))
/* Piecewise alternative selected on z: returns y*(0 - z) when z is extreme,
   otherwise 0 - t.  x is unused in this alternative. */
double code(double x, double y, double z, double t) {
double t_1 = y * (0.0 - z); /* value returned in both extreme-z branches */
double tmp;
if (z <= -8e+217) {
tmp = t_1;
} else if (z <= 3.95e+155) {
tmp = 0.0 - t;
} else {
tmp = t_1;
}
return tmp;
}
! Piecewise alternative selected on z: returns y*(0 - z) when z is extreme,
! otherwise 0 - t.  x is unused in this alternative.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: t_1
real(8) :: tmp
! Value returned in both extreme-z branches.
t_1 = y * (0.0d0 - z)
if (z <= (-8d+217)) then
tmp = t_1
else if (z <= 3.95d+155) then
tmp = 0.0d0 - t
else
tmp = t_1
end if
code = tmp
end function
// Piecewise alternative selected on z: returns y*(0 - z) when z is extreme,
// otherwise 0 - t.  x is unused in this alternative.
public static double code(double x, double y, double z, double t) {
double t_1 = y * (0.0 - z); // value returned in both extreme-z branches
double tmp;
if (z <= -8e+217) {
tmp = t_1;
} else if (z <= 3.95e+155) {
tmp = 0.0 - t;
} else {
tmp = t_1;
}
return tmp;
}
def code(x, y, z, t):
    """Piecewise alternative selected on z.

    Returns y * (0.0 - z) when z is extreme (z <= -8e217 or z > 3.95e155),
    otherwise 0.0 - t.  The x argument is unused in this alternative.

    Fix: the original line collapsed the whole if/elif/else body onto one
    physical line, which is a Python syntax error; restored proper layout.
    """
    t_1 = y * (0.0 - z)
    tmp = 0
    if z <= -8e+217:
        tmp = t_1
    elif z <= 3.95e+155:
        tmp = 0.0 - t
    else:
        tmp = t_1
    return tmp
function code(x, y, z, t) t_1 = Float64(y * Float64(0.0 - z)) tmp = 0.0 if (z <= -8e+217) tmp = t_1; elseif (z <= 3.95e+155) tmp = Float64(0.0 - t); else tmp = t_1; end return tmp end
function tmp_2 = code(x, y, z, t) t_1 = y * (0.0 - z); tmp = 0.0; if (z <= -8e+217) tmp = t_1; elseif (z <= 3.95e+155) tmp = 0.0 - t; else tmp = t_1; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(y * N[(0.0 - z), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -8e+217], t$95$1, If[LessEqual[z, 3.95e+155], N[(0.0 - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := y \cdot \left(0 - z\right)\\
\mathbf{if}\;z \leq -8 \cdot 10^{+217}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;z \leq 3.95 \cdot 10^{+155}:\\
\;\;\;\;0 - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if z < -7.99999999999999968e217 or 3.94999999999999992e155 < z Initial program 42.5%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
neg-mul-1N/A
mul-1-negN/A
log-recN/A
associate--l-N/A
--lowering--.f64N/A
Simplified97.7%
Taylor expanded in y around inf
mul-1-negN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
*-lowering-*.f64N/A
mul-1-negN/A
neg-sub0N/A
--lowering--.f6461.7%
Simplified61.7%
if -7.99999999999999968e217 < z < 3.94999999999999992e155Initial program 94.0%
Taylor expanded in t around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6449.8%
Simplified49.8%
sub0-negN/A
neg-lowering-neg.f6449.8%
Applied egg-rr49.8%
Final simplification52.1%
(FPCore (x y z t) :precision binary64 (fma y (* z (fma y -0.5 -1.0)) (- 0.0 t)))
double code(double x, double y, double z, double t) {
return fma(y, (z * fma(y, -0.5, -1.0)), (0.0 - t));
}
function code(x, y, z, t) return fma(y, Float64(z * fma(y, -0.5, -1.0)), Float64(0.0 - t)) end
code[x_, y_, z_, t_] := N[(y * N[(z * N[(y * -0.5 + -1.0), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y, z \cdot \mathsf{fma}\left(y, -0.5, -1\right), 0 - t\right)
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
Simplified99.6%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f6459.0%
Simplified59.0%
Taylor expanded in y around 0
sub-negN/A
associate-*r*N/A
mul-1-negN/A
distribute-rgt-outN/A
*-lowering-*.f64N/A
*-commutativeN/A
accelerator-lowering-fma.f6458.8%
Simplified58.8%
(FPCore (x y z t) :precision binary64 (- (fma (* y z) (fma y -0.5 -1.0) 0.0) t))
double code(double x, double y, double z, double t) {
return fma((y * z), fma(y, -0.5, -1.0), 0.0) - t;
}
function code(x, y, z, t) return Float64(fma(Float64(y * z), fma(y, -0.5, -1.0), 0.0) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(y * z), $MachinePrecision] * N[(y * -0.5 + -1.0), $MachinePrecision] + 0.0), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y \cdot z, \mathsf{fma}\left(y, -0.5, -1\right), 0\right) - t
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
Simplified99.6%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f6459.0%
Simplified59.0%
Taylor expanded in y around 0
sub-negN/A
neg-sub0N/A
associate-+r-N/A
--lowering--.f64N/A
distribute-lft-inN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-outN/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
accelerator-lowering-fma.f6458.8%
Simplified58.8%
(FPCore (x y z t) :precision binary64 (- 0.0 (fma z y t)))
double code(double x, double y, double z, double t) {
return 0.0 - fma(z, y, t);
}
function code(x, y, z, t) return Float64(0.0 - fma(z, y, t)) end
code[x_, y_, z_, t_] := N[(0.0 - N[(z * y + t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0 - \mathsf{fma}\left(z, y, t\right)
\end{array}
Initial program 83.8%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
neg-mul-1N/A
mul-1-negN/A
log-recN/A
associate--l-N/A
--lowering--.f64N/A
Simplified99.0%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f6458.4%
Simplified58.4%
(FPCore (x y z t) :precision binary64 (- 0.0 t))
double code(double x, double y, double z, double t) {
    /* Degenerate alternative: every term except t has been dropped.
       Computed as 0.0 - t (not -t) to preserve signed-zero behavior.
       x, y, z are unused. */
    (void)x;
    (void)y;
    (void)z;
    return 0.0 - t;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = 0.0d0 - t
end function
public static double code(double x, double y, double z, double t) {
return 0.0 - t;
}
def code(x, y, z, t):
    """Degenerate alternative: every term except t has been dropped.

    Returns 0.0 - t; x, y, z are unused.
    """
    result = 0.0 - t
    return result
function code(x, y, z, t) return Float64(0.0 - t) end
function tmp = code(x, y, z, t) tmp = 0.0 - t; end
code[x_, y_, z_, t_] := N[(0.0 - t), $MachinePrecision]
\begin{array}{l}
\\
0 - t
\end{array}
Initial program 83.8%
Taylor expanded in t around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6441.9%
Simplified41.9%
sub0-negN/A
neg-lowering-neg.f6441.9%
Applied egg-rr41.9%
Final simplification41.9%
(FPCore (x y z t) :precision binary64 t)
double code(double x, double y, double z, double t) {
return t;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = t
end function
public static double code(double x, double y, double z, double t) {
return t;
}
def code(x, y, z, t):
    """Trivial alternative: returns t unchanged; x, y, z are ignored."""
    value = t
    return value
function code(x, y, z, t) return t end
function tmp = code(x, y, z, t) tmp = t; end
code[x_, y_, z_, t_] := t
\begin{array}{l}
\\
t
\end{array}
Initial program 83.8%
Taylor expanded in t around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6441.9%
Simplified41.9%
flip3--N/A
metadata-evalN/A
sub0-negN/A
cube-negN/A
sub0-negN/A
sqr-powN/A
unpow-prod-downN/A
sub0-negN/A
sub0-negN/A
sqr-negN/A
unpow-prod-downN/A
sqr-powN/A
metadata-evalN/A
+-lft-identityN/A
distribute-rgt-outN/A
+-commutativeN/A
+-lft-identityN/A
pow2N/A
pow-divN/A
metadata-evalN/A
unpow12.3%
Applied egg-rr2.3%
(FPCore (x y z t)
:precision binary64
(-
(*
(- z)
(+
(+ (* 0.5 (* y y)) y)
(* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
(- t (* x (log y)))))
double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
end function
public static double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * Math.log(y)));
}
def code(x, y, z, t): return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * math.log(y)))
function code(x, y, z, t) return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y)))) end
function tmp = code(x, y, z, t) tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y))); end
code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
\end{array}
herbie shell --seed 2024193
(FPCore (x y z t)
:name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
:precision binary64
:alt
(! :herbie-platform default (- (* (- z) (+ (+ (* 1/2 (* y y)) y) (* (/ 1/3 (* 1 (* 1 1))) (* y (* y y))))) (- t (* x (log y)))))
(- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))