Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B

Percentage Accurate: 85.1% → 99.6%
Time: 15.3s
Alternatives: 10
Speedup: 1.9×

Specification

?
\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
;; Herbie input: x*log(y) + z*log(1 - y) - t, evaluated in binary64.
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Reference program: x*log(y) + z*log(1 - y) - t in double precision.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
/** Reference program: x*Math.log(y) + z*Math.log(1 - y) - t in double precision. */
public static double code(double x, double y, double z, double t) {
	return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	"""Reference program: x*log(y) + z*log(1 - y) - t (binary64)."""
	first = x * math.log(y)
	second = z * math.log(1.0 - y)
	return (first + second) - t
# Reference program: x*log(y) + z*log(1 - y) - t, with each intermediate
# explicitly rounded to Float64.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t)
end
function tmp = code(x, y, z, t)
	% Reference program: x*log(y) + z*log(1 - y) - t.
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 10 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 85.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
;; Initial program (85.1% accurate): x*log(y) + z*log(1 - y) - t in binary64.
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Initial program: x*log(y) + z*log(1 - y) - t in double precision.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
/** Initial program: x*Math.log(y) + z*Math.log(1 - y) - t in double precision. */
public static double code(double x, double y, double z, double t) {
	return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	"""Initial program: x*log(y) + z*log(1 - y) - t, before rewriting."""
	total = (x * math.log(y)) + (z * math.log(1.0 - y))
	return total - t
# Initial program: x*log(y) + z*log(1 - y) - t, with each intermediate
# explicitly rounded to Float64.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t)
end
function tmp = code(x, y, z, t)
	% Initial program: x*log(y) + z*log(1 - y) - t.
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Alternative 1: 99.6% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \frac{1}{\frac{1}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)}} \end{array} \]
;; Alternative 1: fma(x, log(y), fma(z, log1p(-y), -t)) wrapped in a double
;; reciprocal, as produced by Herbie's rewrite chain.
(FPCore (x y z t)
 :precision binary64
 (/ 1.0 (/ 1.0 (fma x (log y) (fma z (log1p (- y)) (- t))))))
double code(double x, double y, double z, double t) {
	return 1.0 / (1.0 / fma(x, log(y), fma(z, log1p(-y), -t)));
}
# Alternative 1: fma(x, log(y), fma(z, log1p(-y), -t)), wrapped in a double
# reciprocal (1/(1/v)) as produced by Herbie's rewrite chain.
function code(x, y, z, t)
	return Float64(1.0 / Float64(1.0 / fma(x, log(y), fma(z, log1p(Float64(-y)), Float64(-t)))))
end
code[x_, y_, z_, t_] := N[(1.0 / N[(1.0 / N[(x * N[Log[y], $MachinePrecision] + N[(z * N[Log[1 + (-y)], $MachinePrecision] + (-t)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1}{\frac{1}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)}}
\end{array}
Derivation
  1. Initial program 85.4%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. flip--N/A

      \[\leadsto \color{blue}{\frac{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) \cdot \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \cdot t}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) + t}} \]
    2. clear-numN/A

      \[\leadsto \color{blue}{\frac{1}{\frac{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) + t}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) \cdot \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \cdot t}}} \]
    3. /-lowering-/.f64N/A

      \[\leadsto \color{blue}{\frac{1}{\frac{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) + t}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) \cdot \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \cdot t}}} \]
    4. clear-numN/A

      \[\leadsto \frac{1}{\color{blue}{\frac{1}{\frac{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) \cdot \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \cdot t}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) + t}}}} \]
    5. flip--N/A

      \[\leadsto \frac{1}{\frac{1}{\color{blue}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t}}} \]
    6. /-lowering-/.f64N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{1}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t}}} \]
    7. associate--l+N/A

      \[\leadsto \frac{1}{\frac{1}{\color{blue}{x \cdot \log y + \left(z \cdot \log \left(1 - y\right) - t\right)}}} \]
    8. accelerator-lowering-fma.f64N/A

      \[\leadsto \frac{1}{\frac{1}{\color{blue}{\mathsf{fma}\left(x, \log y, z \cdot \log \left(1 - y\right) - t\right)}}} \]
  4. Applied egg-rr99.7%

    \[\leadsto \color{blue}{\frac{1}{\frac{1}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)}}} \]
  5. Add Preprocessing

Alternative 2: 99.6% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(y, \left(y \cdot z\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right) \end{array} \]
;; Alternative 2: z*log(1 - y) replaced by a cubic Taylor polynomial in y,
;; combined with fma(x, log(y), -t).
(FPCore (x y z t)
 :precision binary64
 (fma
  y
  (- (* (* y z) (fma y -0.3333333333333333 -0.5)) z)
  (fma x (log y) (- t))))
double code(double x, double y, double z, double t) {
	return fma(y, (((y * z) * fma(y, -0.3333333333333333, -0.5)) - z), fma(x, log(y), -t));
}
# Alternative 2: cubic Taylor polynomial in y for z*log(1 - y), combined with
# fma(x, log(y), -t).
function code(x, y, z, t)
	return fma(y, Float64(Float64(Float64(y * z) * fma(y, -0.3333333333333333, -0.5)) - z), fma(x, log(y), Float64(-t)))
end
code[x_, y_, z_, t_] := N[(y * N[(N[(N[(y * z), $MachinePrecision] * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision] + (-t)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(y, \left(y \cdot z\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right)
\end{array}
Derivation
  1. Initial program 85.4%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
  4. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \color{blue}{\left(y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + x \cdot \log y\right)} - t \]
    2. associate--l+N/A

      \[\leadsto \color{blue}{y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + \left(x \cdot \log y - t\right)} \]
    3. accelerator-lowering-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(y, -1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right), x \cdot \log y - t\right)} \]
  5. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, \left(z \cdot y\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right)} \]
  6. Final simplification99.6%

    \[\leadsto \mathsf{fma}\left(y, \left(y \cdot z\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right) \]
  7. Add Preprocessing

Alternative 3: 88.9% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y - t\\ \mathbf{if}\;x \leq -2.5 \cdot 10^{+31}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 4.5 \cdot 10^{-7}:\\ \;\;\;\;\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
;; Alternative 3: regime split on x. For -2.5e31 < x <= 4.5e-7 the x*log(y)
;; term is dropped and fma(z, log1p(-y), -t) is used; otherwise x*log(y) - t.
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* x (log y)) t)))
   (if (<= x -2.5e+31)
     t_1
     (if (<= x 4.5e-7) (fma z (log1p (- y)) (- t)) t_1))))
double code(double x, double y, double z, double t) {
	double t_1 = (x * log(y)) - t;
	double tmp;
	if (x <= -2.5e+31) {
		tmp = t_1;
	} else if (x <= 4.5e-7) {
		tmp = fma(z, log1p(-y), -t);
	} else {
		tmp = t_1;
	}
	return tmp;
}
# Alternative 3: regime split on x. For -2.5e31 < x <= 4.5e-7 the x*log(y)
# term is dropped and fma(z, log1p(-y), -t) is used; otherwise x*log(y) - t.
function code(x, y, z, t)
	t_1 = Float64(Float64(x * log(y)) - t)
	tmp = 0.0
	if (x <= -2.5e+31)
		tmp = t_1;
	elseif (x <= 4.5e-7)
		tmp = fma(z, log1p(Float64(-y)), Float64(-t));
	else
		tmp = t_1;
	end
	return tmp
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -2.5e+31], t$95$1, If[LessEqual[x, 4.5e-7], N[(z * N[Log[1 + (-y)], $MachinePrecision] + (-t)), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -2.5 \cdot 10^{+31}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 4.5 \cdot 10^{-7}:\\
\;\;\;\;\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -2.50000000000000013e31 or 4.4999999999999998e-7 < x

    1. Initial program 96.4%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \log y} - t \]
    4. Step-by-step derivation
      1. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} - t \]
      2. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) - t \]
      3. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \left(-1 \cdot \log y\right)\right)} - t \]
      4. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - t \]
      5. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) - t \]
      6. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{y}\right)\right)} - t \]
      7. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - t \]
      8. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(-1 \cdot \log y\right)}\right) - t \]
      9. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(-1 \cdot \log y\right)\right)} - t \]
      10. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right)\right) - t \]
      11. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\log y} - t \]
      12. log-lowering-log.f6496.0

        \[\leadsto x \cdot \color{blue}{\log y} - t \]
    5. Simplified96.0%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]

    if -2.50000000000000013e31 < x < 4.4999999999999998e-7

    1. Initial program 74.3%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right) - t} \]
    4. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right) + \left(\mathsf{neg}\left(t\right)\right)} \]
      2. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), \mathsf{neg}\left(t\right)\right)} \]
      3. sub-negN/A

        \[\leadsto \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)}, \mathsf{neg}\left(t\right)\right) \]
      4. accelerator-lowering-log1p.f64N/A

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)}, \mathsf{neg}\left(t\right)\right) \]
      5. neg-lowering-neg.f64N/A

        \[\leadsto \mathsf{fma}\left(z, \mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left(y\right)}\right), \mathsf{neg}\left(t\right)\right) \]
      6. neg-lowering-neg.f6489.9

        \[\leadsto \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), \color{blue}{-t}\right) \]
    5. Simplified89.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 4: 88.9% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y - t\\ \mathbf{if}\;x \leq -1.6 \cdot 10^{+27}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 3.8 \cdot 10^{-7}:\\ \;\;\;\;\mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right), -t\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
;; Alternative 4: regime split on x. For -1.6e27 < x <= 3.8e-7 a cubic Taylor
;; series in y replaces z*log(1 - y); otherwise x*log(y) - t.
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* x (log y)) t)))
   (if (<= x -1.6e+27)
     t_1
     (if (<= x 3.8e-7)
       (fma z (* y (fma y (fma y -0.3333333333333333 -0.5) -1.0)) (- t))
       t_1))))
double code(double x, double y, double z, double t) {
	double t_1 = (x * log(y)) - t;
	double tmp;
	if (x <= -1.6e+27) {
		tmp = t_1;
	} else if (x <= 3.8e-7) {
		tmp = fma(z, (y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)), -t);
	} else {
		tmp = t_1;
	}
	return tmp;
}
# Alternative 4: regime split on x. For -1.6e27 < x <= 3.8e-7 a cubic Taylor
# series in y replaces z*log(1 - y); otherwise x*log(y) - t.
function code(x, y, z, t)
	t_1 = Float64(Float64(x * log(y)) - t)
	tmp = 0.0
	if (x <= -1.6e+27)
		tmp = t_1;
	elseif (x <= 3.8e-7)
		tmp = fma(z, Float64(y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)), Float64(-t));
	else
		tmp = t_1;
	end
	return tmp
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -1.6e+27], t$95$1, If[LessEqual[x, 3.8e-7], N[(z * N[(y * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + (-t)), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -1.6 \cdot 10^{+27}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 3.8 \cdot 10^{-7}:\\
\;\;\;\;\mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right), -t\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -1.60000000000000008e27 or 3.80000000000000015e-7 < x

    1. Initial program 96.4%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \log y} - t \]
    4. Step-by-step derivation
      1. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} - t \]
      2. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) - t \]
      3. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \left(-1 \cdot \log y\right)\right)} - t \]
      4. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - t \]
      5. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) - t \]
      6. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{y}\right)\right)} - t \]
      7. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - t \]
      8. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(-1 \cdot \log y\right)}\right) - t \]
      9. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(-1 \cdot \log y\right)\right)} - t \]
      10. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right)\right) - t \]
      11. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\log y} - t \]
      12. log-lowering-log.f6496.0

        \[\leadsto x \cdot \color{blue}{\log y} - t \]
    5. Simplified96.0%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]

    if -1.60000000000000008e27 < x < 3.80000000000000015e-7

    1. Initial program 74.3%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + x \cdot \log y\right)} - t \]
      2. associate--l+N/A

        \[\leadsto \color{blue}{y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + \left(x \cdot \log y - t\right)} \]
      3. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(y, -1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right), x \cdot \log y - t\right)} \]
    5. Simplified99.5%

      \[\leadsto \color{blue}{\mathsf{fma}\left(y, \left(z \cdot y\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right)} \]
    6. Taylor expanded in x around 0

      \[\leadsto \color{blue}{y \cdot \left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) - z\right) - t} \]
    7. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto \color{blue}{y \cdot \left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) - z\right) + \left(\mathsf{neg}\left(t\right)\right)} \]
      2. *-commutativeN/A

        \[\leadsto \color{blue}{\left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) - z\right) \cdot y} + \left(\mathsf{neg}\left(t\right)\right) \]
      3. sub-negN/A

        \[\leadsto \color{blue}{\left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) + \left(\mathsf{neg}\left(z\right)\right)\right)} \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      4. *-commutativeN/A

        \[\leadsto \left(y \cdot \color{blue}{\left(\left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) \cdot z\right)} + \left(\mathsf{neg}\left(z\right)\right)\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      5. associate-*r*N/A

        \[\leadsto \left(\color{blue}{\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) \cdot z} + \left(\mathsf{neg}\left(z\right)\right)\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      6. mul-1-negN/A

        \[\leadsto \left(\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) \cdot z + \color{blue}{-1 \cdot z}\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      7. distribute-rgt-inN/A

        \[\leadsto \color{blue}{\left(z \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) + -1\right)\right)} \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      8. metadata-evalN/A

        \[\leadsto \left(z \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) + \color{blue}{\left(\mathsf{neg}\left(1\right)\right)}\right)\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      9. sub-negN/A

        \[\leadsto \left(z \cdot \color{blue}{\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)}\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      10. associate-*l*N/A

        \[\leadsto \color{blue}{z \cdot \left(\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right) \cdot y\right)} + \left(\mathsf{neg}\left(t\right)\right) \]
      11. *-commutativeN/A

        \[\leadsto z \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)\right)} + \left(\mathsf{neg}\left(t\right)\right) \]
      12. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, y \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right), \mathsf{neg}\left(t\right)\right)} \]
    8. Simplified89.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right), -t\right)} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 5: 77.8% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y\\ \mathbf{if}\;x \leq -7 \cdot 10^{+92}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 4.7 \cdot 10^{+97}:\\ \;\;\;\;\mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right), -t\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
;; Alternative 5: regime split on x. For -7e92 < x <= 4.7e97 a cubic Taylor
;; series in y replaces z*log(1 - y); otherwise just x*log(y) (t dropped).
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (* x (log y))))
   (if (<= x -7e+92)
     t_1
     (if (<= x 4.7e+97)
       (fma z (* y (fma y (fma y -0.3333333333333333 -0.5) -1.0)) (- t))
       t_1))))
double code(double x, double y, double z, double t) {
	double t_1 = x * log(y);
	double tmp;
	if (x <= -7e+92) {
		tmp = t_1;
	} else if (x <= 4.7e+97) {
		tmp = fma(z, (y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)), -t);
	} else {
		tmp = t_1;
	}
	return tmp;
}
# Alternative 5: regime split on x. For -7e92 < x <= 4.7e97 a cubic Taylor
# series in y replaces z*log(1 - y); otherwise just x*log(y) (t dropped).
function code(x, y, z, t)
	t_1 = Float64(x * log(y))
	tmp = 0.0
	if (x <= -7e+92)
		tmp = t_1;
	elseif (x <= 4.7e+97)
		tmp = fma(z, Float64(y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)), Float64(-t));
	else
		tmp = t_1;
	end
	return tmp
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -7e+92], t$95$1, If[LessEqual[x, 4.7e+97], N[(z * N[(y * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + (-t)), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y\\
\mathbf{if}\;x \leq -7 \cdot 10^{+92}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 4.7 \cdot 10^{+97}:\\
\;\;\;\;\mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right), -t\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -6.99999999999999972e92 or 4.6999999999999997e97 < x

    1. Initial program 98.7%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \log y} \]
    4. Step-by-step derivation
      1. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} \]
      2. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) \]
      3. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \left(-1 \cdot \log y\right)\right)} \]
      4. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) \]
      5. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) \]
      6. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{y}\right)\right)} \]
      7. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) \]
      8. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(-1 \cdot \log y\right)}\right) \]
      9. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(-1 \cdot \log y\right)\right)} \]
      10. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right)\right) \]
      11. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\log y} \]
      12. log-lowering-log.f6482.9

        \[\leadsto x \cdot \color{blue}{\log y} \]
    5. Simplified82.9%

      \[\leadsto \color{blue}{x \cdot \log y} \]

    if -6.99999999999999972e92 < x < 4.6999999999999997e97

    1. Initial program 77.3%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + x \cdot \log y\right)} - t \]
      2. associate--l+N/A

        \[\leadsto \color{blue}{y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + \left(x \cdot \log y - t\right)} \]
      3. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(y, -1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right), x \cdot \log y - t\right)} \]
    5. Simplified99.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(y, \left(z \cdot y\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right)} \]
    6. Taylor expanded in x around 0

      \[\leadsto \color{blue}{y \cdot \left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) - z\right) - t} \]
    7. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto \color{blue}{y \cdot \left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) - z\right) + \left(\mathsf{neg}\left(t\right)\right)} \]
      2. *-commutativeN/A

        \[\leadsto \color{blue}{\left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) - z\right) \cdot y} + \left(\mathsf{neg}\left(t\right)\right) \]
      3. sub-negN/A

        \[\leadsto \color{blue}{\left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) + \left(\mathsf{neg}\left(z\right)\right)\right)} \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      4. *-commutativeN/A

        \[\leadsto \left(y \cdot \color{blue}{\left(\left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) \cdot z\right)} + \left(\mathsf{neg}\left(z\right)\right)\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      5. associate-*r*N/A

        \[\leadsto \left(\color{blue}{\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) \cdot z} + \left(\mathsf{neg}\left(z\right)\right)\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      6. mul-1-negN/A

        \[\leadsto \left(\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) \cdot z + \color{blue}{-1 \cdot z}\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      7. distribute-rgt-inN/A

        \[\leadsto \color{blue}{\left(z \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) + -1\right)\right)} \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      8. metadata-evalN/A

        \[\leadsto \left(z \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) + \color{blue}{\left(\mathsf{neg}\left(1\right)\right)}\right)\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      9. sub-negN/A

        \[\leadsto \left(z \cdot \color{blue}{\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)}\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
      10. associate-*l*N/A

        \[\leadsto \color{blue}{z \cdot \left(\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right) \cdot y\right)} + \left(\mathsf{neg}\left(t\right)\right) \]
      11. *-commutativeN/A

        \[\leadsto z \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)\right)} + \left(\mathsf{neg}\left(t\right)\right) \]
      12. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, y \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right), \mathsf{neg}\left(t\right)\right)} \]
    8. Simplified82.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right), -t\right)} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 6: 99.1% accurate, 1.9× speedup?

\[\begin{array}{l} \\ x \cdot \log y - \mathsf{fma}\left(z, y, t\right) \end{array} \]
(FPCore (x y z t) :precision binary64 (- (* x (log y)) (fma z y t)))
double code(double x, double y, double z, double t) {
	return (x * log(y)) - fma(z, y, t);
}
# Alternative 6: x*log(y) - fma(z, y, t); log(1 - y) replaced by its
# first-order series -y.
function code(x, y, z, t)
	return Float64(Float64(x * log(y)) - fma(z, y, t))
end
code[x_, y_, z_, t_] := N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(z * y + t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \log y - \mathsf{fma}\left(z, y, t\right)
\end{array}
Derivation
  1. Initial program 85.4%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
  4. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \color{blue}{\left(x \cdot \log y + -1 \cdot \left(y \cdot z\right)\right)} - t \]
    2. mul-1-negN/A

      \[\leadsto \left(x \cdot \log y + \color{blue}{\left(\mathsf{neg}\left(y \cdot z\right)\right)}\right) - t \]
    3. unsub-negN/A

      \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
    4. remove-double-negN/A

      \[\leadsto \left(x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} - y \cdot z\right) - t \]
    5. mul-1-negN/A

      \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) - y \cdot z\right) - t \]
    6. distribute-rgt-neg-inN/A

      \[\leadsto \left(\color{blue}{\left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right)} - y \cdot z\right) - t \]
    7. neg-mul-1N/A

      \[\leadsto \left(\color{blue}{-1 \cdot \left(x \cdot \left(-1 \cdot \log y\right)\right)} - y \cdot z\right) - t \]
    8. mul-1-negN/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - y \cdot z\right) - t \]
    9. log-recN/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) - y \cdot z\right) - t \]
    10. associate--l-N/A

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \left(y \cdot z + t\right)} \]
    11. --lowering--.f64N/A

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \left(y \cdot z + t\right)} \]
  5. Simplified99.1%

    \[\leadsto \color{blue}{x \cdot \log y - \mathsf{fma}\left(z, y, t\right)} \]
  6. Add Preprocessing

Alternative 7: 57.8% accurate, 8.5× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right), -t\right) \end{array} \]
;; Alternative 7: x*log(y) dropped (Taylor expansion in x around 0); cubic
;; series in y for z*log(1 - y), minus t.
(FPCore (x y z t)
 :precision binary64
 (fma z (* y (fma y (fma y -0.3333333333333333 -0.5) -1.0)) (- t)))
double code(double x, double y, double z, double t) {
	return fma(z, (y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)), -t);
}
# Herbie alternative 7 for (x*log(y) + z*log(1-y)) - t: cubic Taylor
# polynomial of the z*log(1-y) term in y, fused via fma; the x*log(y)
# term was dropped by the expansion, so x is unused.
function code(x, y, z, t)
	return fma(z, Float64(y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)), Float64(-t))
end
code[x_, y_, z_, t_] := N[(z * N[(y * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + (-t)), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right), -t\right)
\end{array}
Derivation
  1. Initial program 85.4%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
  4. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \color{blue}{\left(y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + x \cdot \log y\right)} - t \]
    2. associate--l+N/A

      \[\leadsto \color{blue}{y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + \left(x \cdot \log y - t\right)} \]
    3. accelerator-lowering-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(y, -1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right), x \cdot \log y - t\right)} \]
  5. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, \left(z \cdot y\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{y \cdot \left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) - z\right) - t} \]
  7. Step-by-step derivation
    1. sub-negN/A

      \[\leadsto \color{blue}{y \cdot \left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) - z\right) + \left(\mathsf{neg}\left(t\right)\right)} \]
    2. *-commutativeN/A

      \[\leadsto \color{blue}{\left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) - z\right) \cdot y} + \left(\mathsf{neg}\left(t\right)\right) \]
    3. sub-negN/A

      \[\leadsto \color{blue}{\left(y \cdot \left(z \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) + \left(\mathsf{neg}\left(z\right)\right)\right)} \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
    4. *-commutativeN/A

      \[\leadsto \left(y \cdot \color{blue}{\left(\left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) \cdot z\right)} + \left(\mathsf{neg}\left(z\right)\right)\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
    5. associate-*r*N/A

      \[\leadsto \left(\color{blue}{\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) \cdot z} + \left(\mathsf{neg}\left(z\right)\right)\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
    6. mul-1-negN/A

      \[\leadsto \left(\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right)\right) \cdot z + \color{blue}{-1 \cdot z}\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
    7. distribute-rgt-inN/A

      \[\leadsto \color{blue}{\left(z \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) + -1\right)\right)} \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
    8. metadata-evalN/A

      \[\leadsto \left(z \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) + \color{blue}{\left(\mathsf{neg}\left(1\right)\right)}\right)\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
    9. sub-negN/A

      \[\leadsto \left(z \cdot \color{blue}{\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)}\right) \cdot y + \left(\mathsf{neg}\left(t\right)\right) \]
    10. associate-*l*N/A

      \[\leadsto \color{blue}{z \cdot \left(\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right) \cdot y\right)} + \left(\mathsf{neg}\left(t\right)\right) \]
    11. *-commutativeN/A

      \[\leadsto z \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)\right)} + \left(\mathsf{neg}\left(t\right)\right) \]
    12. accelerator-lowering-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, y \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right), \mathsf{neg}\left(t\right)\right)} \]
  8. Simplified58.2%

    \[\leadsto \color{blue}{\mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right), -t\right)} \]
  9. Add Preprocessing

Alternative 8: 44.0% accurate, 15.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq 1.15 \cdot 10^{+156}:\\ \;\;\;\;-t\\ \mathbf{else}:\\ \;\;\;\;-y \cdot z\\ \end{array} \end{array} \]
(FPCore (x y z t) :precision binary64 (if (<= z 1.15e+156) (- t) (- (* y z))))
/* Herbie alternative 8 for (x*log(y) + z*log(1-y)) - t: regime split on z.
 * For moderate z the expression is approximated by -t; for very large z
 * the y*z term dominates instead. x is unused in both regimes. */
double code(double x, double y, double z, double t) {
	if (z <= 1.15e+156)
		return -t;
	return -(y * z);
}
real(8) function code(x, y, z, t)
    ! Herbie alternative 8 for (x*log(y) + z*log(1-y)) - t: regime split
    ! on z -- -t for moderate z, -(y*z) when the y*z term dominates.
    ! x is unused in both regimes.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (z <= 1.15d+156) then
        tmp = -t
    else
        tmp = -(y * z)
    end if
    code = tmp
end function
/**
 * Herbie alternative 8 for (x*log(y) + z*log(1-y)) - t: regime split on z.
 * Returns -t for moderate z, -(y*z) when the y*z term dominates; x is unused.
 */
public static double code(double x, double y, double z, double t) {
	return (z <= 1.15e+156) ? -t : -(y * z);
}
def code(x, y, z, t):
	"""Herbie alternative 8 for (x*log(y) + z*log(1-y)) - t.

	Regime split on z: -t for moderate z, -(y*z) when the y*z term
	dominates. x is unused in both regimes.
	"""
	return -t if z <= 1.15e+156 else -(y * z)
# Herbie alternative 8 for (x*log(y) + z*log(1-y)) - t: regime split on z
# -- -t for moderate z, -(y*z) when the y*z term dominates; x is unused.
function code(x, y, z, t)
	tmp = 0.0
	if (z <= 1.15e+156)
		tmp = Float64(-t);
	else
		tmp = Float64(-Float64(y * z));
	end
	return tmp
end
% Herbie alternative 8 for (x*log(y) + z*log(1-y)) - t: regime split on z
% -- -t for moderate z, -(y*z) when the y*z term dominates; x is unused.
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if (z <= 1.15e+156)
		tmp = -t;
	else
		tmp = -(y * z);
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[LessEqual[z, 1.15e+156], (-t), (-N[(y * z), $MachinePrecision])]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq 1.15 \cdot 10^{+156}:\\
\;\;\;\;-t\\

\mathbf{else}:\\
\;\;\;\;-y \cdot z\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < 1.1499999999999999e156

    1. Initial program 90.4%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in t around inf

      \[\leadsto \color{blue}{-1 \cdot t} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(t\right)} \]
      2. neg-lowering-neg.f64 (48.2%)

        \[\leadsto \color{blue}{-t} \]
    5. Simplified48.2%

      \[\leadsto \color{blue}{-t} \]

    if 1.1499999999999999e156 < z

    1. Initial program 56.2%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(x \cdot \log y + -1 \cdot \left(y \cdot z\right)\right)} - t \]
      2. mul-1-negN/A

        \[\leadsto \left(x \cdot \log y + \color{blue}{\left(\mathsf{neg}\left(y \cdot z\right)\right)}\right) - t \]
      3. unsub-negN/A

        \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
      4. remove-double-negN/A

        \[\leadsto \left(x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} - y \cdot z\right) - t \]
      5. mul-1-negN/A

        \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) - y \cdot z\right) - t \]
      6. distribute-rgt-neg-inN/A

        \[\leadsto \left(\color{blue}{\left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right)} - y \cdot z\right) - t \]
      7. neg-mul-1N/A

        \[\leadsto \left(\color{blue}{-1 \cdot \left(x \cdot \left(-1 \cdot \log y\right)\right)} - y \cdot z\right) - t \]
      8. mul-1-negN/A

        \[\leadsto \left(-1 \cdot \left(x \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - y \cdot z\right) - t \]
      9. log-recN/A

        \[\leadsto \left(-1 \cdot \left(x \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) - y \cdot z\right) - t \]
      10. associate--l-N/A

        \[\leadsto \color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \left(y \cdot z + t\right)} \]
      11. --lowering--.f64N/A

        \[\leadsto \color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \left(y \cdot z + t\right)} \]
    5. Simplified97.8%

      \[\leadsto \color{blue}{x \cdot \log y - \mathsf{fma}\left(z, y, t\right)} \]
    6. Taylor expanded in y around inf

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right)} \]
    7. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(y \cdot z\right)} \]
      2. *-commutativeN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{z \cdot y}\right) \]
      3. distribute-rgt-neg-inN/A

        \[\leadsto \color{blue}{z \cdot \left(\mathsf{neg}\left(y\right)\right)} \]
      4. mul-1-negN/A

        \[\leadsto z \cdot \color{blue}{\left(-1 \cdot y\right)} \]
      5. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{z \cdot \left(-1 \cdot y\right)} \]
      6. mul-1-negN/A

        \[\leadsto z \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
      7. neg-lowering-neg.f64 (43.7%)

        \[\leadsto z \cdot \color{blue}{\left(-y\right)} \]
    8. Simplified43.7%

      \[\leadsto \color{blue}{z \cdot \left(-y\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification47.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq 1.15 \cdot 10^{+156}:\\ \;\;\;\;-t\\ \mathbf{else}:\\ \;\;\;\;-y \cdot z\\ \end{array} \]
  5. Add Preprocessing

Alternative 9: 57.3% accurate, 24.4× speedup?

\[\begin{array}{l} \\ -\mathsf{fma}\left(z, y, t\right) \end{array} \]
(FPCore (x y z t) :precision binary64 (- (fma z y t)))
double code(double x, double y, double z, double t) {
	return -fma(z, y, t);
}
# Herbie alternative 9 for (x*log(y) + z*log(1-y)) - t: approximated by
# -(z*y + t) with a single rounding via fma; x is unused.
function code(x, y, z, t)
	return Float64(-fma(z, y, t))
end
code[x_, y_, z_, t_] := (-N[(z * y + t), $MachinePrecision])
\begin{array}{l}

\\
-\mathsf{fma}\left(z, y, t\right)
\end{array}
Derivation
  1. Initial program 85.4%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
  4. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \color{blue}{\left(x \cdot \log y + -1 \cdot \left(y \cdot z\right)\right)} - t \]
    2. mul-1-negN/A

      \[\leadsto \left(x \cdot \log y + \color{blue}{\left(\mathsf{neg}\left(y \cdot z\right)\right)}\right) - t \]
    3. unsub-negN/A

      \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
    4. remove-double-negN/A

      \[\leadsto \left(x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} - y \cdot z\right) - t \]
    5. mul-1-negN/A

      \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) - y \cdot z\right) - t \]
    6. distribute-rgt-neg-inN/A

      \[\leadsto \left(\color{blue}{\left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right)} - y \cdot z\right) - t \]
    7. neg-mul-1N/A

      \[\leadsto \left(\color{blue}{-1 \cdot \left(x \cdot \left(-1 \cdot \log y\right)\right)} - y \cdot z\right) - t \]
    8. mul-1-negN/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - y \cdot z\right) - t \]
    9. log-recN/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) - y \cdot z\right) - t \]
    10. associate--l-N/A

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \left(y \cdot z + t\right)} \]
    11. --lowering--.f64N/A

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \left(y \cdot z + t\right)} \]
  5. Simplified99.1%

    \[\leadsto \color{blue}{x \cdot \log y - \mathsf{fma}\left(z, y, t\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{-1 \cdot \left(t + y \cdot z\right)} \]
  7. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\left(t + y \cdot z\right)\right)} \]
    2. neg-lowering-neg.f64N/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\left(t + y \cdot z\right)\right)} \]
    3. +-commutativeN/A

      \[\leadsto \mathsf{neg}\left(\color{blue}{\left(y \cdot z + t\right)}\right) \]
    4. *-commutativeN/A

      \[\leadsto \mathsf{neg}\left(\left(\color{blue}{z \cdot y} + t\right)\right) \]
    5. accelerator-lowering-fma.f64 (57.7%)

      \[\leadsto -\color{blue}{\mathsf{fma}\left(z, y, t\right)} \]
  8. Simplified57.7%

    \[\leadsto \color{blue}{-\mathsf{fma}\left(z, y, t\right)} \]
  9. Add Preprocessing

Alternative 10: 42.8% accurate, 73.3× speedup?

\[\begin{array}{l} \\ -t \end{array} \]
(FPCore (x y z t) :precision binary64 (- t))
/* Herbie alternative 10: (x*log(y) + z*log(1-y)) - t collapses to -t
 * (Taylor expansion in t around infinity); cheapest rewrite but only
 * 42.8% accurate per the report. x, y, z are unused. */
double code(double x, double y, double z, double t) {
	return -t;
}
real(8) function code(x, y, z, t)
    ! Herbie alternative 10: (x*log(y) + z*log(1-y)) - t approximated by
    ! -t (Taylor expansion in t around infinity); x, y, z are unused.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = -t
end function
// Herbie alternative 10: (x*log(y) + z*log(1-y)) - t approximated by -t
// (Taylor expansion in t around infinity); x, y, z are unused.
public static double code(double x, double y, double z, double t) {
	return -t;
}
def code(x, y, z, t):
	"""Herbie alternative 10: whole expression approximated by -t.

	(x*log(y) + z*log(1-y)) - t, Taylor-expanded in t around infinity;
	x, y, z are unused.
	"""
	return -t
# Herbie alternative 10: (x*log(y) + z*log(1-y)) - t approximated by -t;
# x, y, z are unused.
function code(x, y, z, t)
	return Float64(-t)
end
% Herbie alternative 10: (x*log(y) + z*log(1-y)) - t approximated by -t;
% x, y, z are unused.
function tmp = code(x, y, z, t)
	tmp = -t;
end
code[x_, y_, z_, t_] := (-t)
\begin{array}{l}

\\
-t
\end{array}
Derivation
  1. Initial program 85.4%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in t around inf

    \[\leadsto \color{blue}{-1 \cdot t} \]
  4. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(t\right)} \]
    2. neg-lowering-neg.f64 (42.8%)

      \[\leadsto \color{blue}{-t} \]
  5. Simplified42.8%

    \[\leadsto \color{blue}{-t} \]
  6. Add Preprocessing

Developer Target 1: 99.6% accurate, 1.3× speedup?

\[\begin{array}{l} \\ \left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (-
  (*
   (- z)
   (+
    (+ (* 0.5 (* y y)) y)
    (* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
  (- t (* x (log y)))))
double code(double x, double y, double z, double t) {
	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
}
real(8) function code(x, y, z, t)
    ! Developer target: -z*(y + y**2/2 + y**3/3) - (t - x*log(y)), the
    ! series form from math-functions' invIncompleteBetaWorker; the
    ! 1.0*(1.0*1.0) denominator is kept verbatim from the generated report.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
end function
/**
 * Developer target: -z * (y + y^2/2 + y^3/3) - (t - x*log(y)), the series
 * form from math-functions' invIncompleteBetaWorker. The 1.0*(1.0*1.0)
 * denominator is kept verbatim so the operation sequence is unchanged.
 */
public static double code(double x, double y, double z, double t) {
	double ysq = y * y;
	double quadratic = (0.5 * ysq) + y;
	double coeff = 0.3333333333333333 / (1.0 * (1.0 * 1.0));
	double cubic = coeff * (y * ysq);
	return (-z * (quadratic + cubic)) - (t - (x * Math.log(y)));
}
def code(x, y, z, t):
	"""Developer target: -z*(y + y**2/2 + y**3/3) - (t - x*log(y)).

	Series form from math-functions' invIncompleteBetaWorker; the
	1.0*(1.0*1.0) denominator is kept verbatim from the generated
	report so the floating-point operation sequence is unchanged.
	"""
	ysq = y * y
	quadratic = (0.5 * ysq) + y
	coeff = 0.3333333333333333 / (1.0 * (1.0 * 1.0))
	cubic = coeff * (y * ysq)
	return (-z * (quadratic + cubic)) - (t - (x * math.log(y)))
# Developer target: -z*(y + y^2/2 + y^3/3) - (t - x*log(y)), the series
# form from math-functions' invIncompleteBetaWorker; the 1.0*(1.0*1.0)
# denominator is kept verbatim from the generated report.
function code(x, y, z, t)
	return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y))))
end
% Developer target: -z*(y + y^2/2 + y^3/3) - (t - x*log(y)), the series
% form from math-functions' invIncompleteBetaWorker; the 1.0*(1.0*1.0)
% denominator is kept verbatim from the generated report.
function tmp = code(x, y, z, t)
	tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
end
code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
\end{array}

Reproduce

?
herbie shell --seed 2024204 
(FPCore (x y z t)
  :name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
  :precision binary64

  :alt
  (! :herbie-platform default (- (* (- z) (+ (+ (* 1/2 (* y y)) y) (* (/ 1/3 (* 1 (* 1 1))) (* y (* y y))))) (- t (* x (log y)))))

  (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))