Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B

Percentage Accurate: 84.5% → 99.8%
Time: 13.8s
Alternatives: 11
Speedup: 1.9×

Specification

?
\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Evaluates x*log(y) + z*log(1 - y) - t in double precision,
! matching the binary64 FPCore specification above.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    ! Direct transcription; log(1 - y) loses accuracy for small y
    ! (see the log1p-based alternative later in this report).
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
/**
 * Evaluates x*log(y) + z*log(1 - y) - t in binary64, with the same
 * operation order as the FPCore specification above.
 */
public static double code(double x, double y, double z, double t) {
	final double logY = Math.log(y);
	final double logOneMinusY = Math.log(1.0 - y);
	return ((x * logY) + (z * logOneMinusY)) - t;
}
def code(x, y, z, t):
	"""Evaluate x*log(y) + z*log(1 - y) - t.

	Same floating-point operations, in the same order, as the
	FPCore specification above; intermediates are merely named.
	"""
	first = x * math.log(y)
	second = z * math.log(1.0 - y)
	return (first + second) - t
# Evaluates x*log(y) + z*log(1 - y) - t; the Float64(...) wrappers pin
# every intermediate to binary64 so results match the other languages.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t)
end
% Evaluates x*log(y) + z*log(1 - y) - t (natural log, double precision).
function tmp = code(x, y, z, t)
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
(* x*Log[y] + z*Log[1 - y] - t; each intermediate is rounded with
   N[..., $MachinePrecision] to model the binary64 evaluation above. *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 11 alternatives:

AlternativeAccuracySpeedup
AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 84.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
	return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	return ((x * math.log(y)) + (z * math.log((1.0 - y)))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t)
end
function tmp = code(x, y, z, t)
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Alternative 1: 99.8% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (fma z (log1p (- y)) (* x (log y))) t))
double code(double x, double y, double z, double t) {
	return fma(z, log1p(-y), (x * log(y))) - t;
}
# Alternative 1: fma(z, log1p(-y), x*log(y)) - t. Uses log1p instead of
# log(1 - y) (the accuracy-critical rewrite in the derivation) and fma
# for a fused multiply-add; Float64 wrappers pin binary64 rounding.
function code(x, y, z, t)
	return Float64(fma(z, log1p(Float64(-y)), Float64(x * log(y))) - t)
end
code[x_, y_, z_, t_] := N[(N[(z * N[Log[1 + (-y)], $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t
\end{array}
Derivation
  1. Initial program 83.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. +-commutative83.1%

      \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
    2. fma-def83.1%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), x \cdot \log y\right)} - t \]
    3. sub-neg83.1%

      \[\leadsto \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, x \cdot \log y\right) - t \]
    4. log1p-def99.8%

      \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, x \cdot \log y\right) - t \]
  3. Simplified99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t} \]
  4. Final simplification99.8%

    \[\leadsto \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t \]

Alternative 2: 99.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(z, -0.5 \cdot \left(y \cdot y\right) - y, x \cdot \log y - t\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (fma z (- (* -0.5 (* y y)) y) (- (* x (log y)) t)))
/* Alternative 2: fma(z, -0.5*y*y - y, x*log(y) - t).
 * The factor (-0.5*y^2 - y) is the Taylor expansion of log(1 - y)
 * about y = 0 (see the derivation below), so this form is accurate
 * when |y| is small. */
double code(double x, double y, double z, double t) {
	return fma(z, ((-0.5 * (y * y)) - y), ((x * log(y)) - t));
}
function code(x, y, z, t)
	return fma(z, Float64(Float64(-0.5 * Float64(y * y)) - y), Float64(Float64(x * log(y)) - t))
end
code[x_, y_, z_, t_] := N[(z * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision] + N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(z, -0.5 \cdot \left(y \cdot y\right) - y, x \cdot \log y - t\right)
\end{array}
Derivation
  1. Initial program 83.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 99.5%

    \[\leadsto \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + \left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)\right)} - t \]
  3. Step-by-step derivation
    1. associate-+r+99.5%

      \[\leadsto \color{blue}{\left(\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right) + \log y \cdot x\right)} - t \]
    2. associate-*r*99.5%

      \[\leadsto \left(\left(\color{blue}{\left(-0.5 \cdot {y}^{2}\right) \cdot z} + -1 \cdot \left(y \cdot z\right)\right) + \log y \cdot x\right) - t \]
    3. associate-*r*99.5%

      \[\leadsto \left(\left(\left(-0.5 \cdot {y}^{2}\right) \cdot z + \color{blue}{\left(-1 \cdot y\right) \cdot z}\right) + \log y \cdot x\right) - t \]
    4. distribute-rgt-out99.5%

      \[\leadsto \left(\color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)} + \log y \cdot x\right) - t \]
    5. mul-1-neg99.5%

      \[\leadsto \left(z \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right) + \log y \cdot x\right) - t \]
    6. unsub-neg99.5%

      \[\leadsto \left(z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} + \log y \cdot x\right) - t \]
    7. *-commutative99.5%

      \[\leadsto \left(z \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right) + \log y \cdot x\right) - t \]
    8. unpow299.5%

      \[\leadsto \left(z \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right) + \log y \cdot x\right) - t \]
    9. associate-*l*99.5%

      \[\leadsto \left(z \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right) + \log y \cdot x\right) - t \]
  4. Simplified99.5%

    \[\leadsto \color{blue}{\left(z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right) + \log y \cdot x\right)} - t \]
  5. Taylor expanded in z around 0 99.5%

    \[\leadsto \color{blue}{\left(\log y \cdot x + \left(-0.5 \cdot {y}^{2} - y\right) \cdot z\right) - t} \]
  6. Step-by-step derivation
    1. sub-neg99.5%

      \[\leadsto \color{blue}{\left(\log y \cdot x + \left(-0.5 \cdot {y}^{2} - y\right) \cdot z\right) + \left(-t\right)} \]
    2. +-commutative99.5%

      \[\leadsto \color{blue}{\left(-t\right) + \left(\log y \cdot x + \left(-0.5 \cdot {y}^{2} - y\right) \cdot z\right)} \]
    3. associate-+r+99.5%

      \[\leadsto \color{blue}{\left(\left(-t\right) + \log y \cdot x\right) + \left(-0.5 \cdot {y}^{2} - y\right) \cdot z} \]
    4. +-commutative99.5%

      \[\leadsto \color{blue}{\left(\log y \cdot x + \left(-t\right)\right)} + \left(-0.5 \cdot {y}^{2} - y\right) \cdot z \]
    5. sub-neg99.5%

      \[\leadsto \color{blue}{\left(\log y \cdot x - t\right)} + \left(-0.5 \cdot {y}^{2} - y\right) \cdot z \]
    6. *-commutative99.5%

      \[\leadsto \left(\log y \cdot x - t\right) + \color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} - y\right)} \]
    7. *-commutative99.5%

      \[\leadsto \left(\log y \cdot x - t\right) + z \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right) \]
    8. fma-neg99.5%

      \[\leadsto \left(\log y \cdot x - t\right) + z \cdot \color{blue}{\mathsf{fma}\left({y}^{2}, -0.5, -y\right)} \]
    9. unpow299.5%

      \[\leadsto \left(\log y \cdot x - t\right) + z \cdot \mathsf{fma}\left(\color{blue}{y \cdot y}, -0.5, -y\right) \]
    10. +-commutative99.5%

      \[\leadsto \color{blue}{z \cdot \mathsf{fma}\left(y \cdot y, -0.5, -y\right) + \left(\log y \cdot x - t\right)} \]
    11. fma-def99.5%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{fma}\left(y \cdot y, -0.5, -y\right), \log y \cdot x - t\right)} \]
    12. unpow299.5%

      \[\leadsto \mathsf{fma}\left(z, \mathsf{fma}\left(\color{blue}{{y}^{2}}, -0.5, -y\right), \log y \cdot x - t\right) \]
    13. fma-neg99.5%

      \[\leadsto \mathsf{fma}\left(z, \color{blue}{{y}^{2} \cdot -0.5 - y}, \log y \cdot x - t\right) \]
    14. *-commutative99.5%

      \[\leadsto \mathsf{fma}\left(z, \color{blue}{-0.5 \cdot {y}^{2}} - y, \log y \cdot x - t\right) \]
    15. unpow299.5%

      \[\leadsto \mathsf{fma}\left(z, -0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y, \log y \cdot x - t\right) \]
    16. *-commutative99.5%

      \[\leadsto \mathsf{fma}\left(z, -0.5 \cdot \left(y \cdot y\right) - y, \color{blue}{x \cdot \log y} - t\right) \]
  7. Simplified99.5%

    \[\leadsto \color{blue}{\mathsf{fma}\left(z, -0.5 \cdot \left(y \cdot y\right) - y, x \cdot \log y - t\right)} \]
  8. Final simplification99.5%

    \[\leadsto \mathsf{fma}\left(z, -0.5 \cdot \left(y \cdot y\right) - y, x \cdot \log y - t\right) \]

Alternative 3: 99.5% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \left(z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right) + x \cdot \log y\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* z (- (* y (* y -0.5)) y)) (* x (log y))) t))
double code(double x, double y, double z, double t) {
	return ((z * ((y * (y * -0.5)) - y)) + (x * log(y))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((z * ((y * (y * (-0.5d0))) - y)) + (x * log(y))) - t
end function
public static double code(double x, double y, double z, double t) {
	return ((z * ((y * (y * -0.5)) - y)) + (x * Math.log(y))) - t;
}
def code(x, y, z, t):
	"""Alternative 3: z*(y*(y*-0.5) - y) + x*log(y) - t.

	The inner polynomial is the Taylor expansion of log(1 - y) about
	y = 0 (per the derivation); op order matches the FPCore exactly.
	"""
	poly = (y * (y * -0.5)) - y
	series = z * poly
	return (series + (x * math.log(y))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(z * Float64(Float64(y * Float64(y * -0.5)) - y)) + Float64(x * log(y))) - t)
end
function tmp = code(x, y, z, t)
	tmp = ((z * ((y * (y * -0.5)) - y)) + (x * log(y))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(z * N[(N[(y * N[(y * -0.5), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right) + x \cdot \log y\right) - t
\end{array}
Derivation
  1. Initial program 83.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 99.5%

    \[\leadsto \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + \left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)\right)} - t \]
  3. Step-by-step derivation
    1. associate-+r+99.5%

      \[\leadsto \color{blue}{\left(\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right) + \log y \cdot x\right)} - t \]
    2. associate-*r*99.5%

      \[\leadsto \left(\left(\color{blue}{\left(-0.5 \cdot {y}^{2}\right) \cdot z} + -1 \cdot \left(y \cdot z\right)\right) + \log y \cdot x\right) - t \]
    3. associate-*r*99.5%

      \[\leadsto \left(\left(\left(-0.5 \cdot {y}^{2}\right) \cdot z + \color{blue}{\left(-1 \cdot y\right) \cdot z}\right) + \log y \cdot x\right) - t \]
    4. distribute-rgt-out99.5%

      \[\leadsto \left(\color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)} + \log y \cdot x\right) - t \]
    5. mul-1-neg99.5%

      \[\leadsto \left(z \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right) + \log y \cdot x\right) - t \]
    6. unsub-neg99.5%

      \[\leadsto \left(z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} + \log y \cdot x\right) - t \]
    7. *-commutative99.5%

      \[\leadsto \left(z \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right) + \log y \cdot x\right) - t \]
    8. unpow299.5%

      \[\leadsto \left(z \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right) + \log y \cdot x\right) - t \]
    9. associate-*l*99.5%

      \[\leadsto \left(z \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right) + \log y \cdot x\right) - t \]
  4. Simplified99.5%

    \[\leadsto \color{blue}{\left(z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right) + \log y \cdot x\right)} - t \]
  5. Final simplification99.5%

    \[\leadsto \left(z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right) + x \cdot \log y\right) - t \]

Alternative 4: 72.7% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.65 \cdot 10^{-76} \lor \neg \left(x \leq 1.2 \cdot 10^{-7} \lor \neg \left(x \leq 7.6 \cdot 10^{+56}\right) \land x \leq 1.5 \cdot 10^{+154}\right):\\ \;\;\;\;x \cdot \log y\\ \mathbf{else}:\\ \;\;\;\;z \cdot \left(-y\right) - t\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (if (or (<= x -1.65e-76)
         (not (or (<= x 1.2e-7) (and (not (<= x 7.6e+56)) (<= x 1.5e+154)))))
   (* x (log y))
   (- (* z (- y)) t)))
double code(double x, double y, double z, double t) {
	double tmp;
	if ((x <= -1.65e-76) || !((x <= 1.2e-7) || (!(x <= 7.6e+56) && (x <= 1.5e+154)))) {
		tmp = x * log(y);
	} else {
		tmp = (z * -y) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((x <= (-1.65d-76)) .or. (.not. (x <= 1.2d-7) .or. (.not. (x <= 7.6d+56)) .and. (x <= 1.5d+154))) then
        tmp = x * log(y)
    else
        tmp = (z * -y) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double tmp;
	if ((x <= -1.65e-76) || !((x <= 1.2e-7) || (!(x <= 7.6e+56) && (x <= 1.5e+154)))) {
		tmp = x * Math.log(y);
	} else {
		tmp = (z * -y) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	"""Alternative 4: regime split on x.

	For x in the outer regimes (per the derivation's split points),
	only the x*log(y) term survives the Taylor expansion; otherwise
	the z*(-y) - t linearization is used. Predicates are byte-for-byte
	the ones from the FPCore above.
	"""
	outer_regime = (x <= -1.65e-76) or not (
		(x <= 1.2e-7) or (not (x <= 7.6e+56) and (x <= 1.5e+154))
	)
	if outer_regime:
		return x * math.log(y)
	return (z * -y) - t
function code(x, y, z, t)
	tmp = 0.0
	if ((x <= -1.65e-76) || !((x <= 1.2e-7) || (!(x <= 7.6e+56) && (x <= 1.5e+154))))
		tmp = Float64(x * log(y));
	else
		tmp = Float64(Float64(z * Float64(-y)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if ((x <= -1.65e-76) || ~(((x <= 1.2e-7) || (~((x <= 7.6e+56)) && (x <= 1.5e+154)))))
		tmp = x * log(y);
	else
		tmp = (z * -y) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[Or[LessEqual[x, -1.65e-76], N[Not[Or[LessEqual[x, 1.2e-7], And[N[Not[LessEqual[x, 7.6e+56]], $MachinePrecision], LessEqual[x, 1.5e+154]]]], $MachinePrecision]], N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision], N[(N[(z * (-y)), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.65 \cdot 10^{-76} \lor \neg \left(x \leq 1.2 \cdot 10^{-7} \lor \neg \left(x \leq 7.6 \cdot 10^{+56}\right) \land x \leq 1.5 \cdot 10^{+154}\right):\\
\;\;\;\;x \cdot \log y\\

\mathbf{else}:\\
\;\;\;\;z \cdot \left(-y\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -1.64999999999999992e-76 or 1.19999999999999989e-7 < x < 7.59999999999999991e56 or 1.50000000000000013e154 < x

    1. Initial program 92.3%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.0%

      \[\leadsto \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + \left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)\right)} - t \]
    3. Step-by-step derivation
      1. associate-+r+99.0%

        \[\leadsto \color{blue}{\left(\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right) + \log y \cdot x\right)} - t \]
      2. associate-*r*99.0%

        \[\leadsto \left(\left(\color{blue}{\left(-0.5 \cdot {y}^{2}\right) \cdot z} + -1 \cdot \left(y \cdot z\right)\right) + \log y \cdot x\right) - t \]
      3. associate-*r*99.0%

        \[\leadsto \left(\left(\left(-0.5 \cdot {y}^{2}\right) \cdot z + \color{blue}{\left(-1 \cdot y\right) \cdot z}\right) + \log y \cdot x\right) - t \]
      4. distribute-rgt-out99.0%

        \[\leadsto \left(\color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)} + \log y \cdot x\right) - t \]
      5. mul-1-neg99.0%

        \[\leadsto \left(z \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right) + \log y \cdot x\right) - t \]
      6. unsub-neg99.0%

        \[\leadsto \left(z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} + \log y \cdot x\right) - t \]
      7. *-commutative99.0%

        \[\leadsto \left(z \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right) + \log y \cdot x\right) - t \]
      8. unpow299.0%

        \[\leadsto \left(z \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right) + \log y \cdot x\right) - t \]
      9. associate-*l*99.0%

        \[\leadsto \left(z \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right) + \log y \cdot x\right) - t \]
    4. Simplified99.0%

      \[\leadsto \color{blue}{\left(z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right) + \log y \cdot x\right)} - t \]
    5. Taylor expanded in x around inf 72.5%

      \[\leadsto \color{blue}{\log y \cdot x} \]

    if -1.64999999999999992e-76 < x < 1.19999999999999989e-7 or 7.59999999999999991e56 < x < 1.50000000000000013e154

    1. Initial program 77.0%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.5%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)} - t \]
    3. Step-by-step derivation
      1. +-commutative99.5%

        \[\leadsto \color{blue}{\left(\log y \cdot x + -1 \cdot \left(y \cdot z\right)\right)} - t \]
      2. *-commutative99.5%

        \[\leadsto \left(\color{blue}{x \cdot \log y} + -1 \cdot \left(y \cdot z\right)\right) - t \]
      3. mul-1-neg99.5%

        \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-y \cdot z\right)}\right) - t \]
      4. unsub-neg99.5%

        \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
      5. *-commutative99.5%

        \[\leadsto \left(\color{blue}{\log y \cdot x} - y \cdot z\right) - t \]
    4. Simplified99.5%

      \[\leadsto \color{blue}{\left(\log y \cdot x - y \cdot z\right)} - t \]
    5. Step-by-step derivation
      1. *-commutative99.5%

        \[\leadsto \left(\color{blue}{x \cdot \log y} - y \cdot z\right) - t \]
      2. add-sqr-sqrt37.7%

        \[\leadsto \left(\color{blue}{\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}} - y \cdot z\right) - t \]
      3. unpow237.7%

        \[\leadsto \left(\color{blue}{{\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      4. add-cbrt-cube34.4%

        \[\leadsto \left(\color{blue}{\sqrt[3]{\left({\left(\sqrt{x \cdot \log y}\right)}^{2} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}\right) \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}}} - y \cdot z\right) - t \]
      5. pow-prod-down34.4%

        \[\leadsto \left(\sqrt[3]{\color{blue}{{\left(\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}\right)}^{2}} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      6. add-sqr-sqrt34.4%

        \[\leadsto \left(\sqrt[3]{{\color{blue}{\left(x \cdot \log y\right)}}^{2} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      7. unpow234.4%

        \[\leadsto \left(\sqrt[3]{\color{blue}{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right)} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      8. unpow234.4%

        \[\leadsto \left(\sqrt[3]{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right) \cdot \color{blue}{\left(\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}\right)}} - y \cdot z\right) - t \]
      9. add-sqr-sqrt80.8%

        \[\leadsto \left(\sqrt[3]{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right) \cdot \color{blue}{\left(x \cdot \log y\right)}} - y \cdot z\right) - t \]
      10. pow380.8%

        \[\leadsto \left(\sqrt[3]{\color{blue}{{\left(x \cdot \log y\right)}^{3}}} - y \cdot z\right) - t \]
      11. *-commutative80.8%

        \[\leadsto \left(\sqrt[3]{{\color{blue}{\left(\log y \cdot x\right)}}^{3}} - y \cdot z\right) - t \]
    6. Applied egg-rr80.8%

      \[\leadsto \left(\color{blue}{\sqrt[3]{{\left(\log y \cdot x\right)}^{3}}} - y \cdot z\right) - t \]
    7. Taylor expanded in y around inf 81.0%

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right)} - t \]
    8. Step-by-step derivation
      1. mul-1-neg81.0%

        \[\leadsto \color{blue}{\left(-y \cdot z\right)} - t \]
      2. distribute-rgt-neg-in81.0%

        \[\leadsto \color{blue}{y \cdot \left(-z\right)} - t \]
    9. Simplified81.0%

      \[\leadsto \color{blue}{y \cdot \left(-z\right)} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification77.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.65 \cdot 10^{-76} \lor \neg \left(x \leq 1.2 \cdot 10^{-7} \lor \neg \left(x \leq 7.6 \cdot 10^{+56}\right) \land x \leq 1.5 \cdot 10^{+154}\right):\\ \;\;\;\;x \cdot \log y\\ \mathbf{else}:\\ \;\;\;\;z \cdot \left(-y\right) - t\\ \end{array} \]

Alternative 5: 86.4% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -1 \cdot 10^{+240} \lor \neg \left(z \leq 1.15 \cdot 10^{+133}\right):\\ \;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y - t\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (if (or (<= z -1e+240) (not (<= z 1.15e+133)))
   (- (* z (log1p (- y))) t)
   (- (* x (log y)) t)))
double code(double x, double y, double z, double t) {
	double tmp;
	if ((z <= -1e+240) || !(z <= 1.15e+133)) {
		tmp = (z * log1p(-y)) - t;
	} else {
		tmp = (x * log(y)) - t;
	}
	return tmp;
}
public static double code(double x, double y, double z, double t) {
	double tmp;
	if ((z <= -1e+240) || !(z <= 1.15e+133)) {
		tmp = (z * Math.log1p(-y)) - t;
	} else {
		tmp = (x * Math.log(y)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	"""Alternative 5: regime split on z.

	In the extreme-z regimes (thresholds from the derivation) the
	z*log1p(-y) term dominates; otherwise only x*log(y) - t is kept.
	Predicates match the FPCore above exactly.
	"""
	if (z <= -1e+240) or not (z <= 1.15e+133):
		return (z * math.log1p(-y)) - t
	return (x * math.log(y)) - t
function code(x, y, z, t)
	tmp = 0.0
	if ((z <= -1e+240) || !(z <= 1.15e+133))
		tmp = Float64(Float64(z * log1p(Float64(-y))) - t);
	else
		tmp = Float64(Float64(x * log(y)) - t);
	end
	return tmp
end
code[x_, y_, z_, t_] := If[Or[LessEqual[z, -1e+240], N[Not[LessEqual[z, 1.15e+133]], $MachinePrecision]], N[(N[(z * N[Log[1 + (-y)], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq -1 \cdot 10^{+240} \lor \neg \left(z \leq 1.15 \cdot 10^{+133}\right):\\
\;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\

\mathbf{else}:\\
\;\;\;\;x \cdot \log y - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -1.00000000000000001e240 or 1.14999999999999995e133 < z

    1. Initial program 41.9%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in x around 0 30.2%

      \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
    3. Step-by-step derivation
      1. sub-neg30.2%

        \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)} - t \]
      2. mul-1-neg30.2%

        \[\leadsto z \cdot \log \left(1 + \color{blue}{-1 \cdot y}\right) - t \]
      3. log1p-def85.8%

        \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(-1 \cdot y\right)} - t \]
      4. mul-1-neg85.8%

        \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
    4. Simplified85.8%

      \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]

    if -1.00000000000000001e240 < z < 1.14999999999999995e133

    1. Initial program 93.4%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. +-commutative93.4%

        \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
      2. fma-def93.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), x \cdot \log y\right)} - t \]
      3. sub-neg93.4%

        \[\leadsto \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, x \cdot \log y\right) - t \]
      4. log1p-def99.8%

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, x \cdot \log y\right) - t \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t} \]
    4. Taylor expanded in z around 0 92.9%

      \[\leadsto \color{blue}{\log y \cdot x - t} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification91.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -1 \cdot 10^{+240} \lor \neg \left(z \leq 1.15 \cdot 10^{+133}\right):\\ \;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y - t\\ \end{array} \]

Alternative 6: 85.8% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y\\ \mathbf{if}\;z \leq -3.4 \cdot 10^{+121}:\\ \;\;\;\;t_1 - z \cdot y\\ \mathbf{elif}\;z \leq 4.2 \cdot 10^{+132}:\\ \;\;\;\;t_1 - t\\ \mathbf{else}:\\ \;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (* x (log y))))
   (if (<= z -3.4e+121)
     (- t_1 (* z y))
     (if (<= z 4.2e+132) (- t_1 t) (- (* z (log1p (- y))) t)))))
double code(double x, double y, double z, double t) {
	double t_1 = x * log(y);
	double tmp;
	if (z <= -3.4e+121) {
		tmp = t_1 - (z * y);
	} else if (z <= 4.2e+132) {
		tmp = t_1 - t;
	} else {
		tmp = (z * log1p(-y)) - t;
	}
	return tmp;
}
public static double code(double x, double y, double z, double t) {
	double t_1 = x * Math.log(y);
	double tmp;
	if (z <= -3.4e+121) {
		tmp = t_1 - (z * y);
	} else if (z <= 4.2e+132) {
		tmp = t_1 - t;
	} else {
		tmp = (z * Math.log1p(-y)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	"""Alternative 6: three regimes on z, sharing t_1 = x*log(y).

	Large-negative z keeps the linearized z*y term; the middle regime
	drops the z term entirely; large-positive z uses z*log1p(-y) - t.
	Thresholds come from the derivation's regime split.
	"""
	t_1 = x * math.log(y)
	if z <= -3.4e+121:
		return t_1 - (z * y)
	if z <= 4.2e+132:
		return t_1 - t
	return (z * math.log1p(-y)) - t
function code(x, y, z, t)
	t_1 = Float64(x * log(y))
	tmp = 0.0
	if (z <= -3.4e+121)
		tmp = Float64(t_1 - Float64(z * y));
	elseif (z <= 4.2e+132)
		tmp = Float64(t_1 - t);
	else
		tmp = Float64(Float64(z * log1p(Float64(-y))) - t);
	end
	return tmp
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -3.4e+121], N[(t$95$1 - N[(z * y), $MachinePrecision]), $MachinePrecision], If[LessEqual[z, 4.2e+132], N[(t$95$1 - t), $MachinePrecision], N[(N[(z * N[Log[1 + (-y)], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y\\
\mathbf{if}\;z \leq -3.4 \cdot 10^{+121}:\\
\;\;\;\;t_1 - z \cdot y\\

\mathbf{elif}\;z \leq 4.2 \cdot 10^{+132}:\\
\;\;\;\;t_1 - t\\

\mathbf{else}:\\
\;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if z < -3.4000000000000001e121

    1. Initial program 60.3%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 97.6%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)} - t \]
    3. Step-by-step derivation
      1. +-commutative97.6%

        \[\leadsto \color{blue}{\left(\log y \cdot x + -1 \cdot \left(y \cdot z\right)\right)} - t \]
      2. *-commutative97.6%

        \[\leadsto \left(\color{blue}{x \cdot \log y} + -1 \cdot \left(y \cdot z\right)\right) - t \]
      3. mul-1-neg97.6%

        \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-y \cdot z\right)}\right) - t \]
      4. unsub-neg97.6%

        \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
      5. *-commutative97.6%

        \[\leadsto \left(\color{blue}{\log y \cdot x} - y \cdot z\right) - t \]
    4. Simplified97.6%

      \[\leadsto \color{blue}{\left(\log y \cdot x - y \cdot z\right)} - t \]
    5. Taylor expanded in t around 0 75.7%

      \[\leadsto \color{blue}{\log y \cdot x - y \cdot z} \]

    if -3.4000000000000001e121 < z < 4.19999999999999987e132

    1. Initial program 96.7%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. +-commutative96.7%

        \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
      2. fma-def96.7%

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), x \cdot \log y\right)} - t \]
      3. sub-neg96.7%

        \[\leadsto \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, x \cdot \log y\right) - t \]
      4. log1p-def99.8%

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, x \cdot \log y\right) - t \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t} \]
    4. Taylor expanded in z around 0 96.7%

      \[\leadsto \color{blue}{\log y \cdot x - t} \]

    if 4.19999999999999987e132 < z

    1. Initial program 41.8%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in x around 0 31.2%

      \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
    3. Step-by-step derivation
      1. sub-neg31.2%

        \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)} - t \]
      2. mul-1-neg31.2%

        \[\leadsto z \cdot \log \left(1 + \color{blue}{-1 \cdot y}\right) - t \]
      3. log1p-def86.4%

        \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(-1 \cdot y\right)} - t \]
      4. mul-1-neg86.4%

        \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
    4. Simplified86.4%

      \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
  3. Recombined 3 regimes into one program.
  4. Final simplification92.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -3.4 \cdot 10^{+121}:\\ \;\;\;\;x \cdot \log y - z \cdot y\\ \mathbf{elif}\;z \leq 4.2 \cdot 10^{+132}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{else}:\\ \;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\ \end{array} \]

Alternative 7: 86.1% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -8.2 \cdot 10^{+238} \lor \neg \left(z \leq 2.2 \cdot 10^{+132}\right):\\ \;\;\;\;z \cdot \left(-y\right) - t\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y - t\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (if (or (<= z -8.2e+238) (not (<= z 2.2e+132)))
   (- (* z (- y)) t)
   (- (* x (log y)) t)))
double code(double x, double y, double z, double t) {
	double tmp;
	if ((z <= -8.2e+238) || !(z <= 2.2e+132)) {
		tmp = (z * -y) - t;
	} else {
		tmp = (x * log(y)) - t;
	}
	return tmp;
}
! Herbie alternative 7 for (x*log(y) + z*log(1-y)) - t:
! for extreme z the z*(-y) term dominates, otherwise x*log(y) does.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((z <= (-8.2d+238)) .or. (.not. (z <= 2.2d+132))) then
        ! "z * -y" is invalid Fortran (operand after a binary operator
        ! may not begin with a sign); parenthesizing is value-identical.
        tmp = (z * (-y)) - t
    else
        tmp = (x * log(y)) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	// Piecewise form of x*log(y) + z*log(1-y) - t: for extreme z the
	// z*(-y) term dominates; otherwise the x*log(y) term does.
	// NaN z falls into the first branch, exactly as the original.
	final boolean extremeZ = (z <= -8.2e+238) || !(z <= 2.2e+132);
	return extremeZ ? (z * -y) - t : (x * Math.log(y)) - t;
}
def code(x, y, z, t):
	# Piecewise form of x*log(y) + z*log(1-y) - t: for extreme z the
	# z*(-y) term dominates; otherwise the x*log(y) term does.
	# `not (z <= ...)` is kept verbatim so NaN z takes the first branch.
	if (z <= -8.2e+238) or not (z <= 2.2e+132):
		return (z * -y) - t
	return (x * math.log(y)) - t
# Herbie alternative 7 for (x*log(y) + z*log(1-y)) - t:
# for extreme z the z*(-y) term dominates, otherwise x*log(y) does.
function code(x, y, z, t)
	tmp = 0.0
	if ((z <= -8.2e+238) || !(z <= 2.2e+132))
		tmp = Float64(Float64(z * Float64(-y)) - t);
	else
		tmp = Float64(Float64(x * log(y)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	% Herbie alternative 7 for (x*log(y) + z*log(1-y)) - t:
	% for extreme z the z*(-y) term dominates, otherwise x*log(y) does.
	tmp = 0.0;
	if ((z <= -8.2e+238) || ~((z <= 2.2e+132)))
		tmp = (z * -y) - t;
	else
		tmp = (x * log(y)) - t;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 7 for (x*log(y) + z*log(1-y)) - t.
   NOTE(review): the N[...] wrapped around the Boolean Not[...] is a no-op
   on True/False — presumably emitter noise; confirm before relying on it. *)
code[x_, y_, z_, t_] := If[Or[LessEqual[z, -8.2e+238], N[Not[LessEqual[z, 2.2e+132]], $MachinePrecision]], N[(N[(z * (-y)), $MachinePrecision] - t), $MachinePrecision], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq -8.2 \cdot 10^{+238} \lor \neg \left(z \leq 2.2 \cdot 10^{+132}\right):\\
\;\;\;\;z \cdot \left(-y\right) - t\\

\mathbf{else}:\\
\;\;\;\;x \cdot \log y - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -8.1999999999999998e238 or 2.19999999999999989e132 < z

    1. Initial program 41.9%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 98.5%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)} - t \]
    3. Step-by-step derivation
      1. +-commutative98.5%

        \[\leadsto \color{blue}{\left(\log y \cdot x + -1 \cdot \left(y \cdot z\right)\right)} - t \]
      2. *-commutative98.5%

        \[\leadsto \left(\color{blue}{x \cdot \log y} + -1 \cdot \left(y \cdot z\right)\right) - t \]
      3. mul-1-neg98.5%

        \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-y \cdot z\right)}\right) - t \]
      4. unsub-neg98.5%

        \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
      5. *-commutative98.5%

        \[\leadsto \left(\color{blue}{\log y \cdot x} - y \cdot z\right) - t \]
    4. Simplified98.5%

      \[\leadsto \color{blue}{\left(\log y \cdot x - y \cdot z\right)} - t \]
    5. Step-by-step derivation
      1. *-commutative98.5%

        \[\leadsto \left(\color{blue}{x \cdot \log y} - y \cdot z\right) - t \]
      2. add-sqr-sqrt50.7%

        \[\leadsto \left(\color{blue}{\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}} - y \cdot z\right) - t \]
      3. unpow250.7%

        \[\leadsto \left(\color{blue}{{\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      4. add-cbrt-cube33.6%

        \[\leadsto \left(\color{blue}{\sqrt[3]{\left({\left(\sqrt{x \cdot \log y}\right)}^{2} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}\right) \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}}} - y \cdot z\right) - t \]
      5. pow-prod-down33.6%

        \[\leadsto \left(\sqrt[3]{\color{blue}{{\left(\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}\right)}^{2}} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      6. add-sqr-sqrt33.6%

        \[\leadsto \left(\sqrt[3]{{\color{blue}{\left(x \cdot \log y\right)}}^{2} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      7. unpow233.6%

        \[\leadsto \left(\sqrt[3]{\color{blue}{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right)} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      8. unpow233.6%

        \[\leadsto \left(\sqrt[3]{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right) \cdot \color{blue}{\left(\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}\right)}} - y \cdot z\right) - t \]
      9. add-sqr-sqrt69.4%

        \[\leadsto \left(\sqrt[3]{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right) \cdot \color{blue}{\left(x \cdot \log y\right)}} - y \cdot z\right) - t \]
      10. pow369.4%

        \[\leadsto \left(\sqrt[3]{\color{blue}{{\left(x \cdot \log y\right)}^{3}}} - y \cdot z\right) - t \]
      11. *-commutative69.4%

        \[\leadsto \left(\sqrt[3]{{\color{blue}{\left(\log y \cdot x\right)}}^{3}} - y \cdot z\right) - t \]
    6. Applied egg-rr69.4%

      \[\leadsto \left(\color{blue}{\sqrt[3]{{\left(\log y \cdot x\right)}^{3}}} - y \cdot z\right) - t \]
    7. Taylor expanded in y around inf 85.0%

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right)} - t \]
    8. Step-by-step derivation
      1. mul-1-neg85.0%

        \[\leadsto \color{blue}{\left(-y \cdot z\right)} - t \]
      2. distribute-rgt-neg-in85.0%

        \[\leadsto \color{blue}{y \cdot \left(-z\right)} - t \]
    9. Simplified85.0%

      \[\leadsto \color{blue}{y \cdot \left(-z\right)} - t \]

    if -8.1999999999999998e238 < z < 2.19999999999999989e132

    1. Initial program 93.4%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. +-commutative93.4%

        \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
      2. fma-def93.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), x \cdot \log y\right)} - t \]
      3. sub-neg93.4%

        \[\leadsto \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, x \cdot \log y\right) - t \]
      4. log1p-def99.8%

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, x \cdot \log y\right) - t \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t} \]
    4. Taylor expanded in z around 0 92.9%

      \[\leadsto \color{blue}{\log y \cdot x - t} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification91.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -8.2 \cdot 10^{+238} \lor \neg \left(z \leq 2.2 \cdot 10^{+132}\right):\\ \;\;\;\;z \cdot \left(-y\right) - t\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y - t\\ \end{array} \]

Alternative 8: 99.1% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y - z \cdot y\right) - t \end{array} \]
;; Herbie alternative 8: z*log(1-y) replaced by its leading Taylor term -z*y.
(FPCore (x y z t) :precision binary64 (- (- (* x (log y)) (* z y)) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) - (z * y)) - t;
}
! Herbie alternative 8: (x*log(y) - z*y) - t,
! with log(1-y) replaced by its leading Taylor term -y.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((x * log(y)) - (z * y)) - t
end function
public static double code(double x, double y, double z, double t) {
	// (x*log(y) - z*y) - t: log(1-y) replaced by its leading term -y.
	final double logTerm = x * Math.log(y);
	final double linearTerm = z * y;
	return (logTerm - linearTerm) - t;
}
def code(x, y, z, t):
	# (x*log(y) - z*y) - t: log(1-y) replaced by its leading term -y.
	log_term = x * math.log(y)
	linear_term = z * y
	return (log_term - linear_term) - t
# Herbie alternative 8: (x*log(y) - z*y) - t,
# with log(1-y) replaced by its leading Taylor term -y.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) - Float64(z * y)) - t)
end
function tmp = code(x, y, z, t)
	% Herbie alternative 8: (x*log(y) - z*y) - t,
	% with log(1-y) replaced by its leading Taylor term -y.
	tmp = ((x * log(y)) - (z * y)) - t;
end
(* Herbie alternative 8: machine-precision (x*log(y) - z*y) - t. *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(z * y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y - z \cdot y\right) - t
\end{array}
Derivation
  1. Initial program 83.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 99.2%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)} - t \]
  3. Step-by-step derivation
    1. +-commutative99.2%

      \[\leadsto \color{blue}{\left(\log y \cdot x + -1 \cdot \left(y \cdot z\right)\right)} - t \]
    2. *-commutative99.2%

      \[\leadsto \left(\color{blue}{x \cdot \log y} + -1 \cdot \left(y \cdot z\right)\right) - t \]
    3. mul-1-neg99.2%

      \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-y \cdot z\right)}\right) - t \]
    4. unsub-neg99.2%

      \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
    5. *-commutative99.2%

      \[\leadsto \left(\color{blue}{\log y \cdot x} - y \cdot z\right) - t \]
  4. Simplified99.2%

    \[\leadsto \color{blue}{\left(\log y \cdot x - y \cdot z\right)} - t \]
  5. Final simplification99.2%

    \[\leadsto \left(x \cdot \log y - z \cdot y\right) - t \]

Alternative 9: 48.7% accurate, 26.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -7.2 \cdot 10^{-71}:\\ \;\;\;\;-t\\ \mathbf{elif}\;t \leq 2.5 \cdot 10^{-128}:\\ \;\;\;\;z \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \end{array} \]
;; Herbie alternative 9: three t-regimes; outside a narrow middle band the
;; -t term dominates, inside it z*(-y) does.
(FPCore (x y z t)
 :precision binary64
 (if (<= t -7.2e-71) (- t) (if (<= t 2.5e-128) (* z (- y)) (- t))))
double code(double x, double y, double z, double t) {
	/* Three t-regimes; outside the narrow band (-7.2e-71, 2.5e-128]
	   the -t term dominates.  NaN t yields -t, as in the original. */
	if (t > -7.2e-71 && t <= 2.5e-128) {
		return z * -y;
	}
	return -t;
}
! Herbie alternative 9 for (x*log(y) + z*log(1-y)) - t:
! three t-regimes; outside the narrow middle band -t dominates.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (t <= (-7.2d-71)) then
        tmp = -t
    else if (t <= 2.5d-128) then
        ! "z * -y" is invalid Fortran (operand after a binary operator
        ! may not begin with a sign); parenthesizing is value-identical.
        tmp = z * (-y)
    else
        tmp = -t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	// Three t-regimes; outside the narrow band (-7.2e-71, 2.5e-128]
	// the -t term dominates.  NaN t yields -t, as in the original.
	if (t > -7.2e-71 && t <= 2.5e-128) {
		return z * -y;
	}
	return -t;
}
def code(x, y, z, t):
	# Three t-regimes; outside the narrow band (-7.2e-71, 2.5e-128]
	# the -t term dominates.
	if t <= -7.2e-71:
		return -t
	if t <= 2.5e-128:
		return z * -y
	return -t
# Herbie alternative 9 for (x*log(y) + z*log(1-y)) - t:
# three t-regimes; outside the narrow middle band -t dominates.
function code(x, y, z, t)
	tmp = 0.0
	if (t <= -7.2e-71)
		tmp = Float64(-t);
	elseif (t <= 2.5e-128)
		tmp = Float64(z * Float64(-y));
	else
		tmp = Float64(-t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	% Herbie alternative 9 for (x*log(y) + z*log(1-y)) - t:
	% three t-regimes; outside the narrow middle band -t dominates.
	tmp = 0.0;
	if (t <= -7.2e-71)
		tmp = -t;
	elseif (t <= 2.5e-128)
		tmp = z * -y;
	else
		tmp = -t;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 9: three t-regimes; outside the middle band -t dominates. *)
code[x_, y_, z_, t_] := If[LessEqual[t, -7.2e-71], (-t), If[LessEqual[t, 2.5e-128], N[(z * (-y)), $MachinePrecision], (-t)]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -7.2 \cdot 10^{-71}:\\
\;\;\;\;-t\\

\mathbf{elif}\;t \leq 2.5 \cdot 10^{-128}:\\
\;\;\;\;z \cdot \left(-y\right)\\

\mathbf{else}:\\
\;\;\;\;-t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -7.2e-71 or 2.5000000000000001e-128 < t

    1. Initial program 92.0%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. +-commutative92.0%

        \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
      2. fma-def92.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), x \cdot \log y\right)} - t \]
      3. sub-neg92.0%

        \[\leadsto \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, x \cdot \log y\right) - t \]
      4. log1p-def99.8%

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, x \cdot \log y\right) - t \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t} \]
    4. Taylor expanded in t around inf 61.6%

      \[\leadsto \color{blue}{-1 \cdot t} \]
    5. Step-by-step derivation
      1. neg-mul-161.6%

        \[\leadsto \color{blue}{-t} \]
    6. Simplified61.6%

      \[\leadsto \color{blue}{-t} \]

    if -7.2e-71 < t < 2.5000000000000001e-128

    1. Initial program 67.8%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.3%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)} - t \]
    3. Step-by-step derivation
      1. +-commutative99.3%

        \[\leadsto \color{blue}{\left(\log y \cdot x + -1 \cdot \left(y \cdot z\right)\right)} - t \]
      2. *-commutative99.3%

        \[\leadsto \left(\color{blue}{x \cdot \log y} + -1 \cdot \left(y \cdot z\right)\right) - t \]
      3. mul-1-neg99.3%

        \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-y \cdot z\right)}\right) - t \]
      4. unsub-neg99.3%

        \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
      5. *-commutative99.3%

        \[\leadsto \left(\color{blue}{\log y \cdot x} - y \cdot z\right) - t \]
    4. Simplified99.3%

      \[\leadsto \color{blue}{\left(\log y \cdot x - y \cdot z\right)} - t \]
    5. Step-by-step derivation
      1. *-commutative99.3%

        \[\leadsto \left(\color{blue}{x \cdot \log y} - y \cdot z\right) - t \]
      2. add-sqr-sqrt55.0%

        \[\leadsto \left(\color{blue}{\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}} - y \cdot z\right) - t \]
      3. unpow255.0%

        \[\leadsto \left(\color{blue}{{\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      4. add-cbrt-cube36.5%

        \[\leadsto \left(\color{blue}{\sqrt[3]{\left({\left(\sqrt{x \cdot \log y}\right)}^{2} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}\right) \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}}} - y \cdot z\right) - t \]
      5. pow-prod-down36.5%

        \[\leadsto \left(\sqrt[3]{\color{blue}{{\left(\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}\right)}^{2}} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      6. add-sqr-sqrt36.6%

        \[\leadsto \left(\sqrt[3]{{\color{blue}{\left(x \cdot \log y\right)}}^{2} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      7. unpow236.6%

        \[\leadsto \left(\sqrt[3]{\color{blue}{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right)} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
      8. unpow236.6%

        \[\leadsto \left(\sqrt[3]{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right) \cdot \color{blue}{\left(\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}\right)}} - y \cdot z\right) - t \]
      9. add-sqr-sqrt66.1%

        \[\leadsto \left(\sqrt[3]{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right) \cdot \color{blue}{\left(x \cdot \log y\right)}} - y \cdot z\right) - t \]
      10. pow366.0%

        \[\leadsto \left(\sqrt[3]{\color{blue}{{\left(x \cdot \log y\right)}^{3}}} - y \cdot z\right) - t \]
      11. *-commutative66.0%

        \[\leadsto \left(\sqrt[3]{{\color{blue}{\left(\log y \cdot x\right)}}^{3}} - y \cdot z\right) - t \]
    6. Applied egg-rr66.0%

      \[\leadsto \left(\color{blue}{\sqrt[3]{{\left(\log y \cdot x\right)}^{3}}} - y \cdot z\right) - t \]
    7. Taylor expanded in y around inf 99.3%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + -1 \cdot \left(\log \left(\frac{1}{y}\right) \cdot x\right)\right)} - t \]
    8. Step-by-step derivation
      1. distribute-lft-out99.3%

        \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z + \log \left(\frac{1}{y}\right) \cdot x\right)} - t \]
      2. mul-1-neg99.3%

        \[\leadsto \color{blue}{\left(-\left(y \cdot z + \log \left(\frac{1}{y}\right) \cdot x\right)\right)} - t \]
      3. fma-def99.3%

        \[\leadsto \left(-\color{blue}{\mathsf{fma}\left(y, z, \log \left(\frac{1}{y}\right) \cdot x\right)}\right) - t \]
      4. log-rec99.3%

        \[\leadsto \left(-\mathsf{fma}\left(y, z, \color{blue}{\left(-\log y\right)} \cdot x\right)\right) - t \]
      5. distribute-lft-neg-in99.3%

        \[\leadsto \left(-\mathsf{fma}\left(y, z, \color{blue}{-\log y \cdot x}\right)\right) - t \]
      6. distribute-rgt-neg-in99.3%

        \[\leadsto \left(-\mathsf{fma}\left(y, z, \color{blue}{\log y \cdot \left(-x\right)}\right)\right) - t \]
    9. Simplified99.3%

      \[\leadsto \color{blue}{\left(-\mathsf{fma}\left(y, z, \log y \cdot \left(-x\right)\right)\right)} - t \]
    10. Taylor expanded in y around inf 35.0%

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right)} \]
    11. Step-by-step derivation
      1. mul-1-neg35.0%

        \[\leadsto \color{blue}{-y \cdot z} \]
    12. Simplified35.0%

      \[\leadsto \color{blue}{-y \cdot z} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification51.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -7.2 \cdot 10^{-71}:\\ \;\;\;\;-t\\ \mathbf{elif}\;t \leq 2.5 \cdot 10^{-128}:\\ \;\;\;\;z \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \]

Alternative 10: 57.4% accurate, 35.2× speedup?

\[\begin{array}{l} \\ z \cdot \left(-y\right) - t \end{array} \]
;; Herbie alternative 10: both log terms dropped; x is unused by design.
(FPCore (x y z t) :precision binary64 (- (* z (- y)) t))
double code(double x, double y, double z, double t) {
	/* z*(-y) - t: both log terms dropped; x is unused by design. */
	(void) x;
	double neg_y = -y;
	return (z * neg_y) - t;
}
! Herbie alternative 10: z*(-y) - t; both log terms dropped and x unused.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    ! "z * -y" is invalid Fortran (operand after a binary operator
    ! may not begin with a sign); parenthesizing is value-identical.
    code = (z * (-y)) - t
end function
public static double code(double x, double y, double z, double t) {
	// z*(-y) - t: both log terms dropped; x is unused by design.
	final double negY = -y;
	return (z * negY) - t;
}
def code(x, y, z, t):
	# z*(-y) - t: both log terms dropped; x is unused by design.
	neg_y = -y
	return (z * neg_y) - t
# Herbie alternative 10: z*(-y) - t; both log terms dropped and x unused.
function code(x, y, z, t)
	return Float64(Float64(z * Float64(-y)) - t)
end
function tmp = code(x, y, z, t)
	% Herbie alternative 10: z*(-y) - t; both log terms dropped, x unused.
	tmp = (z * -y) - t;
end
(* Herbie alternative 10: machine-precision z*(-y) - t; x is unused. *)
code[x_, y_, z_, t_] := N[(N[(z * (-y)), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
z \cdot \left(-y\right) - t
\end{array}
Derivation
  1. Initial program 83.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 99.2%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)} - t \]
  3. Step-by-step derivation
    1. +-commutative99.2%

      \[\leadsto \color{blue}{\left(\log y \cdot x + -1 \cdot \left(y \cdot z\right)\right)} - t \]
    2. *-commutative99.2%

      \[\leadsto \left(\color{blue}{x \cdot \log y} + -1 \cdot \left(y \cdot z\right)\right) - t \]
    3. mul-1-neg99.2%

      \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-y \cdot z\right)}\right) - t \]
    4. unsub-neg99.2%

      \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
    5. *-commutative99.2%

      \[\leadsto \left(\color{blue}{\log y \cdot x} - y \cdot z\right) - t \]
  4. Simplified99.2%

    \[\leadsto \color{blue}{\left(\log y \cdot x - y \cdot z\right)} - t \]
  5. Step-by-step derivation
    1. *-commutative99.2%

      \[\leadsto \left(\color{blue}{x \cdot \log y} - y \cdot z\right) - t \]
    2. add-sqr-sqrt48.6%

      \[\leadsto \left(\color{blue}{\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}} - y \cdot z\right) - t \]
    3. unpow248.6%

      \[\leadsto \left(\color{blue}{{\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
    4. add-cbrt-cube32.6%

      \[\leadsto \left(\color{blue}{\sqrt[3]{\left({\left(\sqrt{x \cdot \log y}\right)}^{2} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}\right) \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}}} - y \cdot z\right) - t \]
    5. pow-prod-down32.6%

      \[\leadsto \left(\sqrt[3]{\color{blue}{{\left(\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}\right)}^{2}} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
    6. add-sqr-sqrt32.6%

      \[\leadsto \left(\sqrt[3]{{\color{blue}{\left(x \cdot \log y\right)}}^{2} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
    7. unpow232.6%

      \[\leadsto \left(\sqrt[3]{\color{blue}{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right)} \cdot {\left(\sqrt{x \cdot \log y}\right)}^{2}} - y \cdot z\right) - t \]
    8. unpow232.6%

      \[\leadsto \left(\sqrt[3]{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right) \cdot \color{blue}{\left(\sqrt{x \cdot \log y} \cdot \sqrt{x \cdot \log y}\right)}} - y \cdot z\right) - t \]
    9. add-sqr-sqrt64.7%

      \[\leadsto \left(\sqrt[3]{\left(\left(x \cdot \log y\right) \cdot \left(x \cdot \log y\right)\right) \cdot \color{blue}{\left(x \cdot \log y\right)}} - y \cdot z\right) - t \]
    10. pow364.7%

      \[\leadsto \left(\sqrt[3]{\color{blue}{{\left(x \cdot \log y\right)}^{3}}} - y \cdot z\right) - t \]
    11. *-commutative64.7%

      \[\leadsto \left(\sqrt[3]{{\color{blue}{\left(\log y \cdot x\right)}}^{3}} - y \cdot z\right) - t \]
  6. Applied egg-rr64.7%

    \[\leadsto \left(\color{blue}{\sqrt[3]{{\left(\log y \cdot x\right)}^{3}}} - y \cdot z\right) - t \]
  7. Taylor expanded in y around inf 59.1%

    \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right)} - t \]
  8. Step-by-step derivation
    1. mul-1-neg59.1%

      \[\leadsto \color{blue}{\left(-y \cdot z\right)} - t \]
    2. distribute-rgt-neg-in59.1%

      \[\leadsto \color{blue}{y \cdot \left(-z\right)} - t \]
  9. Simplified59.1%

    \[\leadsto \color{blue}{y \cdot \left(-z\right)} - t \]
  10. Final simplification59.1%

    \[\leadsto z \cdot \left(-y\right) - t \]

Alternative 11: 42.2% accurate, 105.5× speedup?

\[\begin{array}{l} \\ -t \end{array} \]
;; Herbie alternative 11: crudest form — only the dominant -t term survives.
(FPCore (x y z t) :precision binary64 (- t))
double code(double x, double y, double z, double t) {
	/* Crudest approximation: the -t term dominates; x, y, z are ignored. */
	return -t;
}
! Herbie alternative 11: the -t term dominates; x, y, z are ignored.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = -t
end function
public static double code(double x, double y, double z, double t) {
	// Crudest approximation: the -t term dominates; x, y, z are ignored.
	return -t;
}
def code(x, y, z, t):
	# Crudest approximation: the -t term dominates; x, y, z are ignored.
	return -t
# Herbie alternative 11: the -t term dominates; x, y, z are ignored.
function code(x, y, z, t)
	return Float64(-t)
end
function tmp = code(x, y, z, t)
	% Herbie alternative 11: the -t term dominates; x, y, z are ignored.
	tmp = -t;
end
(* Herbie alternative 11: the -t term dominates; x, y, z are ignored. *)
code[x_, y_, z_, t_] := (-t)
\begin{array}{l}

\\
-t
\end{array}
Derivation
  1. Initial program 83.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. +-commutative83.1%

      \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
    2. fma-def83.1%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), x \cdot \log y\right)} - t \]
    3. sub-neg83.1%

      \[\leadsto \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, x \cdot \log y\right) - t \]
    4. log1p-def99.8%

      \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, x \cdot \log y\right) - t \]
  3. Simplified99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t} \]
  4. Taylor expanded in t around inf 42.7%

    \[\leadsto \color{blue}{-1 \cdot t} \]
  5. Step-by-step derivation
    1. neg-mul-142.7%

      \[\leadsto \color{blue}{-t} \]
  6. Simplified42.7%

    \[\leadsto \color{blue}{-t} \]
  7. Final simplification42.7%

    \[\leadsto -t \]

Developer target: 99.6% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right) \end{array} \]
;; Developer target: (-z)*(y + y^2/2 + y^3/3) - (t - x*log(y)),
;; i.e. z*log(1-y) replaced by its cubic Taylor polynomial.
(FPCore (x y z t)
 :precision binary64
 (-
  (*
   (- z)
   (+
    (+ (* 0.5 (* y y)) y)
    (* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
  (- t (* x (log y)))))
double code(double x, double y, double z, double t) {
	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
}
! Developer target: (-z)*(y + y^2/2 + y^3/3) - (t - x*log(y)),
! i.e. z*log(1-y) replaced by its cubic Taylor polynomial.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
end function
public static double code(double x, double y, double z, double t) {
	// Developer target: (-z)*(y + y^2/2 + y^3/3) - (t - x*log(y)),
	// i.e. z*log(1-y) replaced by its cubic Taylor polynomial.
	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * Math.log(y)));
}
def code(x, y, z, t):
	# (-z)*(y + y^2/2 + y^3/3) - (t - x*log(y)): cubic Taylor polynomial
	# of -log(1-y).  Operation order matches the original exactly.
	y_sq = y * y
	poly = ((0.5 * y_sq) + y) \
		+ ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * y_sq))
	return (-z * poly) - (t - (x * math.log(y)))
# Developer target: (-z)*(y + y^2/2 + y^3/3) - (t - x*log(y)),
# i.e. z*log(1-y) replaced by its cubic Taylor polynomial.
function code(x, y, z, t)
	return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y))))
end
function tmp = code(x, y, z, t)
	% Developer target: (-z)*(y + y^2/2 + y^3/3) - (t - x*log(y)),
	% i.e. z*log(1-y) replaced by its cubic Taylor polynomial.
	tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
end
(* Developer target: (-z)*(y + y^2/2 + y^3/3) - (t - x*log(y)). *)
code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
\end{array}

Reproduce

?
herbie shell --seed 2023196 
;; Input FPCore for reproducing this report with the herbie shell command above.
(FPCore (x y z t)
  :name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
  :precision binary64

  :herbie-target
  (- (* (- z) (+ (+ (* 0.5 (* y y)) y) (* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y))))) (- t (* x (log y))))

  (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))