Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B

Percentage Accurate: 85.1% → 99.8%
Time: 12.8s
Alternatives: 10
Speedup: 1.9×

Specification

?
\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
;; Herbie input: (x*log(y) + z*log(1 - y)) - t, evaluated in binary64.
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Computes (x*log(y) + z*log(1 - y)) - t in double precision.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
// Computes (x*log(y) + z*log(1 - y)) - t, preserving the original
// floating-point evaluation order.
public static double code(double x, double y, double z, double t) {
	double termX = x * Math.log(y);
	double termZ = z * Math.log(1.0 - y);
	return (termX + termZ) - t;
}
def code(x, y, z, t):
	"""Return (x*log(y) + z*log(1 - y)) - t, in the original operation order."""
	term_x = x * math.log(y)
	term_z = z * math.log(1.0 - y)
	return (term_x + term_z) - t
# (x*log(y) + z*log(1 - y)) - t with explicit Float64 rounding at each step,
# matching the original operation order exactly.
function code(x, y, z, t)
	term_x = Float64(x * log(y))
	term_z = Float64(z * log(Float64(1.0 - y)))
	return Float64(Float64(term_x + term_z) - t)
end
% Computes (x*log(y) + z*log(1 - y)) - t.
function tmp = code(x, y, z, t)
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 10 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 85.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
;; Initial (unmodified) program: (x*log(y) + z*log(1 - y)) - t in binary64.
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
/* Initial program: (x*log(y) + z*log(1 - y)) - t. */
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Initial program: (x*log(y) + z*log(1 - y)) - t in double precision.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
// Initial program: (x*log(y) + z*log(1 - y)) - t.
public static double code(double x, double y, double z, double t) {
	return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	"""Initial program: (x*log(y) + z*log(1 - y)) - t."""
	return ((x * math.log(y)) + (z * math.log((1.0 - y)))) - t
# Initial program: (x*log(y) + z*log(1 - y)) - t with explicit Float64 rounding.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t)
end
% Initial program: (x*log(y) + z*log(1 - y)) - t.
function tmp = code(x, y, z, t)
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Alternative 1: 99.8% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t \end{array} \]
;; Alternative 1: fma(z, log1p(-y), x*log(y)) - t; log1p avoids the
;; cancellation in computing 1 - y before the log.
(FPCore (x y z t)
 :precision binary64
 (- (fma z (log1p (- y)) (* x (log y))) t))
double code(double x, double y, double z, double t) {
	return fma(z, log1p(-y), (x * log(y))) - t;
}
# Alternative 1: fma(z, log1p(-y), x*log(y)) - t with explicit Float64 rounding.
function code(x, y, z, t)
	return Float64(fma(z, log1p(Float64(-y)), Float64(x * log(y))) - t)
end
code[x_, y_, z_, t_] := N[(N[(z * N[Log[1 + (-y)], $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t
\end{array}
Derivation
  1. Initial program 81.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. +-commutative81.8%

      \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
    2. fma-def81.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), x \cdot \log y\right)} - t \]
    3. sub-neg81.8%

      \[\leadsto \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, x \cdot \log y\right) - t \]
    4. log1p-def99.8%

      \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, x \cdot \log y\right) - t \]
  3. Simplified99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t} \]
  4. Final simplification99.8%

    \[\leadsto \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y\right) - t \]

Alternative 2: 99.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\mathsf{fma}\left(-0.5, z \cdot \left(y \cdot y\right), z \cdot \left(-y\right)\right) + x \cdot \log y\right) - t \end{array} \]
;; Alternative 2: z*log(1 - y) replaced by its quadratic Taylor polynomial
;; -z*y - 0.5*z*y^2, fused with fma.
(FPCore (x y z t)
 :precision binary64
 (- (+ (fma -0.5 (* z (* y y)) (* z (- y))) (* x (log y))) t))
/* Alternative 2: quadratic Taylor form of z*log(1 - y), i.e.
 * fma(-0.5, z*y*y, -z*y), plus x*log(y), minus t. */
double code(double x, double y, double z, double t) {
	return (fma(-0.5, (z * (y * y)), (z * -y)) + (x * log(y))) - t;
}
# Alternative 2: fma(-0.5, z*y*y, -z*y) + x*log(y) - t with Float64 rounding.
function code(x, y, z, t)
	return Float64(Float64(fma(-0.5, Float64(z * Float64(y * y)), Float64(z * Float64(-y))) + Float64(x * log(y))) - t)
end
code[x_, y_, z_, t_] := N[(N[(N[(-0.5 * N[(z * N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(z * (-y)), $MachinePrecision]), $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\mathsf{fma}\left(-0.5, z \cdot \left(y \cdot y\right), z \cdot \left(-y\right)\right) + x \cdot \log y\right) - t
\end{array}
Derivation
  1. Initial program 81.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 99.5%

    \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
  3. Step-by-step derivation
    1. fma-def99.5%

      \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, {y}^{2} \cdot z, -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
    2. unpow299.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \color{blue}{\left(y \cdot y\right)} \cdot z, -1 \cdot \left(y \cdot z\right)\right)\right) - t \]
    3. associate-*r*99.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-1 \cdot y\right) \cdot z}\right)\right) - t \]
    4. mul-1-neg99.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-y\right)} \cdot z\right)\right) - t \]
  4. Simplified99.5%

    \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \left(-y\right) \cdot z\right)}\right) - t \]
  5. Final simplification99.5%

    \[\leadsto \left(\mathsf{fma}\left(-0.5, z \cdot \left(y \cdot y\right), z \cdot \left(-y\right)\right) + x \cdot \log y\right) - t \]

Alternative 3: 99.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(y, -z, x \cdot \log y\right) - t \end{array} \]
(FPCore (x y z t) :precision binary64 (- (fma y (- z) (* x (log y))) t))
/* Alternative 3: fma(y, -z, x*log(y)) - t; first-order Taylor replacement
 * of z*log(1 - y) by -z*y. */
double code(double x, double y, double z, double t) {
	return fma(y, -z, (x * log(y))) - t;
}
# Alternative 3: fma(y, -z, x*log(y)) - t with Float64 rounding.
function code(x, y, z, t)
	return Float64(fma(y, Float64(-z), Float64(x * log(y))) - t)
end
code[x_, y_, z_, t_] := N[(N[(y * (-z) + N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(y, -z, x \cdot \log y\right) - t
\end{array}
Derivation
  1. Initial program 81.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 99.5%

    \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
  3. Step-by-step derivation
    1. fma-def99.5%

      \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, {y}^{2} \cdot z, -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
    2. unpow299.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \color{blue}{\left(y \cdot y\right)} \cdot z, -1 \cdot \left(y \cdot z\right)\right)\right) - t \]
    3. associate-*r*99.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-1 \cdot y\right) \cdot z}\right)\right) - t \]
    4. mul-1-neg99.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-y\right)} \cdot z\right)\right) - t \]
  4. Simplified99.5%

    \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \left(-y\right) \cdot z\right)}\right) - t \]
  5. Taylor expanded in y around 0 98.8%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)} - t \]
  6. Step-by-step derivation
    1. remove-double-neg98.8%

      \[\leadsto \left(-1 \cdot \left(y \cdot z\right) + \color{blue}{\left(-\left(-\log y\right)\right)} \cdot x\right) - t \]
    2. log-rec98.8%

      \[\leadsto \left(-1 \cdot \left(y \cdot z\right) + \left(-\color{blue}{\log \left(\frac{1}{y}\right)}\right) \cdot x\right) - t \]
    3. distribute-lft-neg-in98.8%

      \[\leadsto \left(-1 \cdot \left(y \cdot z\right) + \color{blue}{\left(-\log \left(\frac{1}{y}\right) \cdot x\right)}\right) - t \]
    4. unsub-neg98.8%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) - \log \left(\frac{1}{y}\right) \cdot x\right)} - t \]
    5. mul-1-neg98.8%

      \[\leadsto \left(\color{blue}{\left(-y \cdot z\right)} - \log \left(\frac{1}{y}\right) \cdot x\right) - t \]
    6. distribute-rgt-neg-in98.8%

      \[\leadsto \left(\color{blue}{y \cdot \left(-z\right)} - \log \left(\frac{1}{y}\right) \cdot x\right) - t \]
    7. mul-1-neg98.8%

      \[\leadsto \left(y \cdot \color{blue}{\left(-1 \cdot z\right)} - \log \left(\frac{1}{y}\right) \cdot x\right) - t \]
    8. fma-neg98.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(y, -1 \cdot z, -\log \left(\frac{1}{y}\right) \cdot x\right)} - t \]
    9. mul-1-neg98.8%

      \[\leadsto \mathsf{fma}\left(y, \color{blue}{-z}, -\log \left(\frac{1}{y}\right) \cdot x\right) - t \]
    10. distribute-lft-neg-in98.8%

      \[\leadsto \mathsf{fma}\left(y, -z, \color{blue}{\left(-\log \left(\frac{1}{y}\right)\right) \cdot x}\right) - t \]
    11. log-rec98.8%

      \[\leadsto \mathsf{fma}\left(y, -z, \left(-\color{blue}{\left(-\log y\right)}\right) \cdot x\right) - t \]
    12. remove-double-neg98.8%

      \[\leadsto \mathsf{fma}\left(y, -z, \color{blue}{\log y} \cdot x\right) - t \]
  7. Simplified98.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, -z, \log y \cdot x\right)} - t \]
  8. Final simplification98.8%

    \[\leadsto \mathsf{fma}\left(y, -z, x \cdot \log y\right) - t \]

Alternative 4: 90.3% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -8.5 \cdot 10^{-84} \lor \neg \left(x \leq 9 \cdot 10^{-20}\right):\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{else}:\\ \;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\ \end{array} \end{array} \]
;; Alternative 4: regime split on x.  Outside (-8.5e-84, 9e-20] the x*log(y)
;; term is kept; inside it the x term is dropped and z*log1p(-y) is used.
(FPCore (x y z t)
 :precision binary64
 (if (or (<= x -8.5e-84) (not (<= x 9e-20)))
   (- (* x (log y)) t)
   (- (* z (log1p (- y))) t)))
double code(double x, double y, double z, double t) {
	double tmp;
	if ((x <= -8.5e-84) || !(x <= 9e-20)) {
		tmp = (x * log(y)) - t;
	} else {
		tmp = (z * log1p(-y)) - t;
	}
	return tmp;
}
// Alternative 4: regime split on x — x*log(y) - t outside (-8.5e-84, 9e-20],
// z*log1p(-y) - t for tiny x where the x term is dropped.
public static double code(double x, double y, double z, double t) {
	double tmp;
	if ((x <= -8.5e-84) || !(x <= 9e-20)) {
		tmp = (x * Math.log(y)) - t;
	} else {
		tmp = (z * Math.log1p(-y)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	"""Regime split on x: keep x*log(y) outside (-8.5e-84, 9e-20], otherwise
	drop the x term and use z*log1p(-y) instead of z*log(1 - y)."""
	use_x_term = (x <= -8.5e-84) or not (x <= 9e-20)
	if use_x_term:
		result = x * math.log(y) - t
	else:
		result = z * math.log1p(-y) - t
	return result
# Alternative 4: regime split on x; see the FPCore form above each listing.
function code(x, y, z, t)
	tmp = 0.0
	if ((x <= -8.5e-84) || !(x <= 9e-20))
		tmp = Float64(Float64(x * log(y)) - t);
	else
		tmp = Float64(Float64(z * log1p(Float64(-y))) - t);
	end
	return tmp
end
code[x_, y_, z_, t_] := If[Or[LessEqual[x, -8.5e-84], N[Not[LessEqual[x, 9e-20]], $MachinePrecision]], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(z * N[Log[1 + (-y)], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -8.5 \cdot 10^{-84} \lor \neg \left(x \leq 9 \cdot 10^{-20}\right):\\
\;\;\;\;x \cdot \log y - t\\

\mathbf{else}:\\
\;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -8.4999999999999994e-84 or 9.0000000000000003e-20 < x

    1. Initial program 91.1%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. +-commutative91.1%

        \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
      2. associate--l+91.1%

        \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right) + \left(x \cdot \log y - t\right)} \]
      3. +-commutative91.1%

        \[\leadsto \color{blue}{\left(x \cdot \log y - t\right) + z \cdot \log \left(1 - y\right)} \]
      4. associate-+l-91.1%

        \[\leadsto \color{blue}{x \cdot \log y - \left(t - z \cdot \log \left(1 - y\right)\right)} \]
      5. fma-neg91.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, -\left(t - z \cdot \log \left(1 - y\right)\right)\right)} \]
      6. sub0-neg91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{0 - \left(t - z \cdot \log \left(1 - y\right)\right)}\right) \]
      7. associate-+l-91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(0 - t\right) + z \cdot \log \left(1 - y\right)}\right) \]
      8. neg-sub091.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(-t\right)} + z \cdot \log \left(1 - y\right)\right) \]
      9. +-commutative91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{z \cdot \log \left(1 - y\right) + \left(-t\right)}\right) \]
      10. fma-def91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), -t\right)}\right) \]
      11. sub-neg91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, -t\right)\right) \]
      12. log1p-def99.7%

        \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, -t\right)\right) \]
    3. Simplified99.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)} \]
    4. Taylor expanded in y around 0 90.7%

      \[\leadsto \color{blue}{\log y \cdot x - t} \]

    if -8.4999999999999994e-84 < x < 9.0000000000000003e-20

    1. Initial program 72.0%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in x around 0 61.5%

      \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
    3. Step-by-step derivation
      1. sub-neg61.5%

        \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)} - t \]
      2. mul-1-neg61.5%

        \[\leadsto z \cdot \log \left(1 + \color{blue}{-1 \cdot y}\right) - t \]
      3. log1p-def89.3%

        \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(-1 \cdot y\right)} - t \]
      4. mul-1-neg89.3%

        \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
    4. Simplified89.3%

      \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification90.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -8.5 \cdot 10^{-84} \lor \neg \left(x \leq 9 \cdot 10^{-20}\right):\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{else}:\\ \;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\ \end{array} \]

Alternative 5: 90.2% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.15 \cdot 10^{-84} \lor \neg \left(x \leq 3.7 \cdot 10^{-24}\right):\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{else}:\\ \;\;\;\;\left(\left(y \cdot y\right) \cdot \left(z \cdot -0.5\right) - z \cdot y\right) - t\\ \end{array} \end{array} \]
;; Alternative 5: regime split on x; the tiny-x branch uses the quadratic
;; Taylor polynomial of z*log(1 - y): -0.5*z*y^2 - z*y.
(FPCore (x y z t)
 :precision binary64
 (if (or (<= x -1.15e-84) (not (<= x 3.7e-24)))
   (- (* x (log y)) t)
   (- (- (* (* y y) (* z -0.5)) (* z y)) t)))
/* Alternative 5: regime split on x — x*log(y) - t outside (-1.15e-84, 3.7e-24],
 * quadratic Taylor form of z*log(1 - y) minus t for tiny x. */
double code(double x, double y, double z, double t) {
	double tmp;
	if ((x <= -1.15e-84) || !(x <= 3.7e-24)) {
		tmp = (x * log(y)) - t;
	} else {
		tmp = (((y * y) * (z * -0.5)) - (z * y)) - t;
	}
	return tmp;
}
! Alternative 5: regime split on x; tiny-x branch uses the quadratic Taylor
! polynomial of z*log(1 - y).
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((x <= (-1.15d-84)) .or. (.not. (x <= 3.7d-24))) then
        tmp = (x * log(y)) - t
    else
        tmp = (((y * y) * (z * (-0.5d0))) - (z * y)) - t
    end if
    code = tmp
end function
// Alternative 5: regime split on x; tiny-x branch uses the quadratic Taylor
// polynomial of z*log(1 - y).
public static double code(double x, double y, double z, double t) {
	double tmp;
	if ((x <= -1.15e-84) || !(x <= 3.7e-24)) {
		tmp = (x * Math.log(y)) - t;
	} else {
		tmp = (((y * y) * (z * -0.5)) - (z * y)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	"""Regime split on x; tiny-x branch uses the quadratic Taylor
	polynomial of z*log(1 - y)."""
	tmp = 0
	if (x <= -1.15e-84) or not (x <= 3.7e-24):
		tmp = (x * math.log(y)) - t
	else:
		tmp = (((y * y) * (z * -0.5)) - (z * y)) - t
	return tmp
# Alternative 5: regime split on x with explicit Float64 rounding.
function code(x, y, z, t)
	tmp = 0.0
	if ((x <= -1.15e-84) || !(x <= 3.7e-24))
		tmp = Float64(Float64(x * log(y)) - t);
	else
		tmp = Float64(Float64(Float64(Float64(y * y) * Float64(z * -0.5)) - Float64(z * y)) - t);
	end
	return tmp
end
% Alternative 5: regime split on x; tiny-x branch uses the quadratic Taylor
% polynomial of z*log(1 - y).
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if ((x <= -1.15e-84) || ~((x <= 3.7e-24)))
		tmp = (x * log(y)) - t;
	else
		tmp = (((y * y) * (z * -0.5)) - (z * y)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[Or[LessEqual[x, -1.15e-84], N[Not[LessEqual[x, 3.7e-24]], $MachinePrecision]], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(N[(N[(y * y), $MachinePrecision] * N[(z * -0.5), $MachinePrecision]), $MachinePrecision] - N[(z * y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.15 \cdot 10^{-84} \lor \neg \left(x \leq 3.7 \cdot 10^{-24}\right):\\
\;\;\;\;x \cdot \log y - t\\

\mathbf{else}:\\
\;\;\;\;\left(\left(y \cdot y\right) \cdot \left(z \cdot -0.5\right) - z \cdot y\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -1.1499999999999999e-84 or 3.69999999999999981e-24 < x

    1. Initial program 91.1%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. +-commutative91.1%

        \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
      2. associate--l+91.1%

        \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right) + \left(x \cdot \log y - t\right)} \]
      3. +-commutative91.1%

        \[\leadsto \color{blue}{\left(x \cdot \log y - t\right) + z \cdot \log \left(1 - y\right)} \]
      4. associate-+l-91.1%

        \[\leadsto \color{blue}{x \cdot \log y - \left(t - z \cdot \log \left(1 - y\right)\right)} \]
      5. fma-neg91.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, -\left(t - z \cdot \log \left(1 - y\right)\right)\right)} \]
      6. sub0-neg91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{0 - \left(t - z \cdot \log \left(1 - y\right)\right)}\right) \]
      7. associate-+l-91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(0 - t\right) + z \cdot \log \left(1 - y\right)}\right) \]
      8. neg-sub091.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(-t\right)} + z \cdot \log \left(1 - y\right)\right) \]
      9. +-commutative91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{z \cdot \log \left(1 - y\right) + \left(-t\right)}\right) \]
      10. fma-def91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), -t\right)}\right) \]
      11. sub-neg91.0%

        \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, -t\right)\right) \]
      12. log1p-def99.7%

        \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, -t\right)\right) \]
    3. Simplified99.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)} \]
    4. Taylor expanded in y around 0 90.7%

      \[\leadsto \color{blue}{\log y \cdot x - t} \]

    if -1.1499999999999999e-84 < x < 3.69999999999999981e-24

    1. Initial program 72.0%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.2%

      \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
    3. Step-by-step derivation
      1. fma-def99.2%

        \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, {y}^{2} \cdot z, -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
      2. unpow299.2%

        \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \color{blue}{\left(y \cdot y\right)} \cdot z, -1 \cdot \left(y \cdot z\right)\right)\right) - t \]
      3. associate-*r*99.2%

        \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-1 \cdot y\right) \cdot z}\right)\right) - t \]
      4. mul-1-neg99.2%

        \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-y\right)} \cdot z\right)\right) - t \]
    4. Simplified99.2%

      \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \left(-y\right) \cdot z\right)}\right) - t \]
    5. Taylor expanded in x around 0 88.6%

      \[\leadsto \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right)} - t \]
    6. Step-by-step derivation
      1. *-commutative88.6%

        \[\leadsto \left(\color{blue}{\left({y}^{2} \cdot z\right) \cdot -0.5} + -1 \cdot \left(y \cdot z\right)\right) - t \]
      2. associate-*l*88.6%

        \[\leadsto \left(\color{blue}{{y}^{2} \cdot \left(z \cdot -0.5\right)} + -1 \cdot \left(y \cdot z\right)\right) - t \]
      3. fma-def88.6%

        \[\leadsto \color{blue}{\mathsf{fma}\left({y}^{2}, z \cdot -0.5, -1 \cdot \left(y \cdot z\right)\right)} - t \]
      4. mul-1-neg88.6%

        \[\leadsto \mathsf{fma}\left({y}^{2}, z \cdot -0.5, \color{blue}{-y \cdot z}\right) - t \]
      5. fma-neg88.6%

        \[\leadsto \color{blue}{\left({y}^{2} \cdot \left(z \cdot -0.5\right) - y \cdot z\right)} - t \]
      6. unpow288.6%

        \[\leadsto \left(\color{blue}{\left(y \cdot y\right)} \cdot \left(z \cdot -0.5\right) - y \cdot z\right) - t \]
    7. Simplified88.6%

      \[\leadsto \color{blue}{\left(\left(y \cdot y\right) \cdot \left(z \cdot -0.5\right) - y \cdot z\right)} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification89.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.15 \cdot 10^{-84} \lor \neg \left(x \leq 3.7 \cdot 10^{-24}\right):\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{else}:\\ \;\;\;\;\left(\left(y \cdot y\right) \cdot \left(z \cdot -0.5\right) - z \cdot y\right) - t\\ \end{array} \]

Alternative 6: 99.2% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y - z \cdot y\right) - t \end{array} \]
(FPCore (x y z t) :precision binary64 (- (- (* x (log y)) (* z y)) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) - (z * y)) - t;
}
! Alternative 6: (x*log(y) - z*y) - t in double precision.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((x * log(y)) - (z * y)) - t
end function
// Alternative 6: (x*log(y) - z*y) - t.
public static double code(double x, double y, double z, double t) {
	return ((x * Math.log(y)) - (z * y)) - t;
}
def code(x, y, z, t):
	"""Alternative 6: (x*log(y) - z*y) - t."""
	return ((x * math.log(y)) - (z * y)) - t
# Alternative 6: (x*log(y) - z*y) - t with Float64 rounding.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) - Float64(z * y)) - t)
end
% Alternative 6: (x*log(y) - z*y) - t.
function tmp = code(x, y, z, t)
	tmp = ((x * log(y)) - (z * y)) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(z * y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y - z \cdot y\right) - t
\end{array}
Derivation
  1. Initial program 81.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 98.8%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + \log y \cdot x\right)} - t \]
  3. Step-by-step derivation
    1. +-commutative98.8%

      \[\leadsto \color{blue}{\left(\log y \cdot x + -1 \cdot \left(y \cdot z\right)\right)} - t \]
    2. fma-def98.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log y, x, -1 \cdot \left(y \cdot z\right)\right)} - t \]
    3. mul-1-neg98.8%

      \[\leadsto \mathsf{fma}\left(\log y, x, \color{blue}{-y \cdot z}\right) - t \]
    4. fma-neg98.8%

      \[\leadsto \color{blue}{\left(\log y \cdot x - y \cdot z\right)} - t \]
  4. Simplified98.8%

    \[\leadsto \color{blue}{\left(\log y \cdot x - y \cdot z\right)} - t \]
  5. Final simplification98.8%

    \[\leadsto \left(x \cdot \log y - z \cdot y\right) - t \]

Alternative 7: 56.9% accurate, 16.2× speedup?

\[\begin{array}{l} \\ \left(\left(y \cdot y\right) \cdot \left(z \cdot -0.5\right) - z \cdot y\right) - t \end{array} \]
(FPCore (x y z t) :precision binary64 (- (- (* (* y y) (* z -0.5)) (* z y)) t))
/* Alternative 7: quadratic Taylor form of z*log(1 - y) minus t; x is unused. */
double code(double x, double y, double z, double t) {
	return (((y * y) * (z * -0.5)) - (z * y)) - t;
}
! Alternative 7: quadratic Taylor form of z*log(1 - y) minus t; x is unused.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (((y * y) * (z * (-0.5d0))) - (z * y)) - t
end function
// Alternative 7: quadratic Taylor form of z*log(1 - y) minus t; x is unused.
public static double code(double x, double y, double z, double t) {
	return (((y * y) * (z * -0.5)) - (z * y)) - t;
}
def code(x, y, z, t):
	"""Quadratic Taylor form of z*log(1 - y) minus t; x is unused here."""
	quadratic = (y * y) * (z * -0.5)
	linear = z * y
	return (quadratic - linear) - t
# Alternative 7: quadratic Taylor form of z*log(1 - y) minus t; x is unused.
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(y * y) * Float64(z * -0.5)) - Float64(z * y)) - t)
end
% Alternative 7: quadratic Taylor form of z*log(1 - y) minus t; x is unused.
function tmp = code(x, y, z, t)
	tmp = (((y * y) * (z * -0.5)) - (z * y)) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(y * y), $MachinePrecision] * N[(z * -0.5), $MachinePrecision]), $MachinePrecision] - N[(z * y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(y \cdot y\right) \cdot \left(z \cdot -0.5\right) - z \cdot y\right) - t
\end{array}
Derivation
  1. Initial program 81.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 99.5%

    \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
  3. Step-by-step derivation
    1. fma-def99.5%

      \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, {y}^{2} \cdot z, -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
    2. unpow299.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \color{blue}{\left(y \cdot y\right)} \cdot z, -1 \cdot \left(y \cdot z\right)\right)\right) - t \]
    3. associate-*r*99.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-1 \cdot y\right) \cdot z}\right)\right) - t \]
    4. mul-1-neg99.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-y\right)} \cdot z\right)\right) - t \]
  4. Simplified99.5%

    \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \left(-y\right) \cdot z\right)}\right) - t \]
  5. Taylor expanded in x around 0 63.2%

    \[\leadsto \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right)} - t \]
  6. Step-by-step derivation
    1. *-commutative63.2%

      \[\leadsto \left(\color{blue}{\left({y}^{2} \cdot z\right) \cdot -0.5} + -1 \cdot \left(y \cdot z\right)\right) - t \]
    2. associate-*l*63.2%

      \[\leadsto \left(\color{blue}{{y}^{2} \cdot \left(z \cdot -0.5\right)} + -1 \cdot \left(y \cdot z\right)\right) - t \]
    3. fma-def63.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left({y}^{2}, z \cdot -0.5, -1 \cdot \left(y \cdot z\right)\right)} - t \]
    4. mul-1-neg63.2%

      \[\leadsto \mathsf{fma}\left({y}^{2}, z \cdot -0.5, \color{blue}{-y \cdot z}\right) - t \]
    5. fma-neg63.2%

      \[\leadsto \color{blue}{\left({y}^{2} \cdot \left(z \cdot -0.5\right) - y \cdot z\right)} - t \]
    6. unpow263.2%

      \[\leadsto \left(\color{blue}{\left(y \cdot y\right)} \cdot \left(z \cdot -0.5\right) - y \cdot z\right) - t \]
  7. Simplified63.2%

    \[\leadsto \color{blue}{\left(\left(y \cdot y\right) \cdot \left(z \cdot -0.5\right) - y \cdot z\right)} - t \]
  8. Final simplification63.2%

    \[\leadsto \left(\left(y \cdot y\right) \cdot \left(z \cdot -0.5\right) - z \cdot y\right) - t \]

Alternative 8: 56.9% accurate, 19.2× speedup?

\[\begin{array}{l} \\ z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right) - t \end{array} \]
(FPCore (x y z t) :precision binary64 (- (* z (- (* y (* y -0.5)) y)) t))
/* Alternative 8: z*(y*(y*-0.5) - y) - t (z factored out); x is unused. */
double code(double x, double y, double z, double t) {
	return (z * ((y * (y * -0.5)) - y)) - t;
}
! Alternative 8: z*(y*(y*-0.5) - y) - t (z factored out); x is unused.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (z * ((y * (y * (-0.5d0))) - y)) - t
end function
// Alternative 8: z*(y*(y*-0.5) - y) - t (z factored out); x is unused.
public static double code(double x, double y, double z, double t) {
	return (z * ((y * (y * -0.5)) - y)) - t;
}
def code(x, y, z, t):
	"""Alternative 8: z*(y*(y*-0.5) - y) - t (z factored out); x is unused."""
	return (z * ((y * (y * -0.5)) - y)) - t
# Alternative 8: z*(y*(y*-0.5) - y) - t with Float64 rounding; x is unused.
function code(x, y, z, t)
	return Float64(Float64(z * Float64(Float64(y * Float64(y * -0.5)) - y)) - t)
end
% Herbie alternative 8: quadratic approximation z*(-0.5*y*y - y) - t of
% x*log(y) + z*log(1-y) - t; the x*log(y) term is dropped (x is unused).
function tmp = code(x, y, z, t)
	tmp = (z * ((y * (y * -0.5)) - y)) - t;
end
code[x_, y_, z_, t_] := N[(N[(z * N[(N[(y * N[(y * -0.5), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right) - t
\end{array}
Derivation
  1. Initial program 81.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 99.5%

    \[\leadsto \left(x \cdot \log y + \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
  3. Step-by-step derivation
    1. fma-def99.5%

      \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, {y}^{2} \cdot z, -1 \cdot \left(y \cdot z\right)\right)}\right) - t \]
    2. unpow299.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \color{blue}{\left(y \cdot y\right)} \cdot z, -1 \cdot \left(y \cdot z\right)\right)\right) - t \]
    3. associate-*r*99.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-1 \cdot y\right) \cdot z}\right)\right) - t \]
    4. mul-1-neg99.5%

      \[\leadsto \left(x \cdot \log y + \mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \color{blue}{\left(-y\right)} \cdot z\right)\right) - t \]
  4. Simplified99.5%

    \[\leadsto \left(x \cdot \log y + \color{blue}{\mathsf{fma}\left(-0.5, \left(y \cdot y\right) \cdot z, \left(-y\right) \cdot z\right)}\right) - t \]
  5. Taylor expanded in x around 0 63.2%

    \[\leadsto \color{blue}{\left(-0.5 \cdot \left({y}^{2} \cdot z\right) + -1 \cdot \left(y \cdot z\right)\right)} - t \]
  6. Step-by-step derivation
    1. associate-*r*63.2%

      \[\leadsto \left(\color{blue}{\left(-0.5 \cdot {y}^{2}\right) \cdot z} + -1 \cdot \left(y \cdot z\right)\right) - t \]
    2. associate-*r*63.2%

      \[\leadsto \left(\left(-0.5 \cdot {y}^{2}\right) \cdot z + \color{blue}{\left(-1 \cdot y\right) \cdot z}\right) - t \]
    3. distribute-rgt-in63.2%

      \[\leadsto \color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)} - t \]
    4. mul-1-neg63.2%

      \[\leadsto z \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right) - t \]
    5. unsub-neg63.2%

      \[\leadsto z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} - t \]
    6. *-commutative63.2%

      \[\leadsto z \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right) - t \]
    7. unpow263.2%

      \[\leadsto z \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right) - t \]
    8. associate-*l*63.2%

      \[\leadsto z \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right) - t \]
  7. Simplified63.2%

    \[\leadsto \color{blue}{z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right)} - t \]
  8. Final simplification63.2%

    \[\leadsto z \cdot \left(y \cdot \left(y \cdot -0.5\right) - y\right) - t \]

Alternative 9: 56.7% accurate, 35.2× speedup?

\[\begin{array}{l} \\ z \cdot \left(-y\right) - t \end{array} \]
(FPCore (x y z t) :precision binary64 (- (* z (- y)) t))
/* Herbie alternative 9 (56.7% accurate): first-order approximation
 * -y*z - t of x*log(y) + z*log(1-y) - t; both log terms are linearized
 * and the x*log(y) term is dropped. Parameter x is unused. */
double code(double x, double y, double z, double t) {
	return (z * -y) - t;
}
real(8) function code(x, y, z, t)
    ! Herbie alternative 9: first-order approximation -y*z - t of
    ! x*log(y) + z*log(1-y) - t; the x*log(y) term is dropped (x unused).
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    ! Fix: standard Fortran forbids two consecutive operators ("z * -y");
    ! the unary minus must be parenthesized, as the other snippets already do.
    code = (z * (-y)) - t
end function
// Herbie alternative 9: first-order approximation -y*z - t of
// x*log(y) + z*log(1-y) - t; the x*log(y) term is dropped (x is unused).
public static double code(double x, double y, double z, double t) {
	return (z * -y) - t;
}
def code(x, y, z, t):
	"""Herbie alternative 9: first-order approximation -y*z - t of
	x*log(y) + z*log(1-y) - t; the x*log(y) term is dropped (x unused)."""
	return (z * -y) - t
# Herbie alternative 9: first-order approximation -y*z - t of
# x*log(y) + z*log(1-y) - t; the x*log(y) term is dropped (x is unused).
function code(x, y, z, t)
	return Float64(Float64(z * Float64(-y)) - t)
end
% Herbie alternative 9: first-order approximation -y*z - t of
% x*log(y) + z*log(1-y) - t; the x*log(y) term is dropped (x is unused).
function tmp = code(x, y, z, t)
	tmp = (z * -y) - t;
end
code[x_, y_, z_, t_] := N[(N[(z * (-y)), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
z \cdot \left(-y\right) - t
\end{array}
Derivation
  1. Initial program 81.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. +-commutative81.8%

      \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
    2. associate--l+81.8%

      \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right) + \left(x \cdot \log y - t\right)} \]
    3. +-commutative81.8%

      \[\leadsto \color{blue}{\left(x \cdot \log y - t\right) + z \cdot \log \left(1 - y\right)} \]
    4. associate-+l-81.8%

      \[\leadsto \color{blue}{x \cdot \log y - \left(t - z \cdot \log \left(1 - y\right)\right)} \]
    5. fma-neg81.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, -\left(t - z \cdot \log \left(1 - y\right)\right)\right)} \]
    6. sub0-neg81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{0 - \left(t - z \cdot \log \left(1 - y\right)\right)}\right) \]
    7. associate-+l-81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(0 - t\right) + z \cdot \log \left(1 - y\right)}\right) \]
    8. neg-sub081.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(-t\right)} + z \cdot \log \left(1 - y\right)\right) \]
    9. +-commutative81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{z \cdot \log \left(1 - y\right) + \left(-t\right)}\right) \]
    10. fma-def81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), -t\right)}\right) \]
    11. sub-neg81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, -t\right)\right) \]
    12. log1p-def99.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, -t\right)\right) \]
  3. Simplified99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)} \]
  4. Taylor expanded in y around 0 98.8%

    \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{-1 \cdot t + -1 \cdot \left(y \cdot z\right)}\right) \]
  5. Step-by-step derivation
    1. mul-1-neg98.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(-t\right)} + -1 \cdot \left(y \cdot z\right)\right) \]
    2. +-commutative98.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{-1 \cdot \left(y \cdot z\right) + \left(-t\right)}\right) \]
    3. unsub-neg98.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{-1 \cdot \left(y \cdot z\right) - t}\right) \]
    4. mul-1-neg98.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(-y \cdot z\right)} - t\right) \]
    5. distribute-rgt-neg-in98.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{y \cdot \left(-z\right)} - t\right) \]
  6. Simplified98.8%

    \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{y \cdot \left(-z\right) - t}\right) \]
  7. Taylor expanded in x around 0 62.6%

    \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right) - t} \]
  8. Step-by-step derivation
    1. associate-*r*62.6%

      \[\leadsto \color{blue}{\left(-1 \cdot y\right) \cdot z} - t \]
    2. mul-1-neg62.6%

      \[\leadsto \color{blue}{\left(-y\right)} \cdot z - t \]
  9. Simplified62.6%

    \[\leadsto \color{blue}{\left(-y\right) \cdot z - t} \]
  10. Final simplification62.6%

    \[\leadsto z \cdot \left(-y\right) - t \]

Alternative 10: 41.9% accurate, 105.5× speedup?

\[\begin{array}{l} \\ -t \end{array} \]
(FPCore (x y z t) :precision binary64 (- t))
/* Herbie alternative 10 (41.9% accurate): degenerate approximation -t of
 * x*log(y) + z*log(1-y) - t, from Taylor expansion in t around infinity;
 * all non-t terms are dropped, so x, y, z are unused. */
double code(double x, double y, double z, double t) {
	return -t;
}
real(8) function code(x, y, z, t)
    ! Herbie alternative 10: degenerate approximation -t (Taylor expansion
    ! in t around infinity); x, y and z are unused.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = -t
end function
// Herbie alternative 10: degenerate approximation -t (Taylor expansion in
// t around infinity); parameters x, y and z are unused.
public static double code(double x, double y, double z, double t) {
	return -t;
}
def code(x, y, z, t):
	"""Herbie alternative 10: degenerate approximation -t (Taylor expansion
	in t around infinity); x, y and z are unused."""
	return -t
# Herbie alternative 10: degenerate approximation -t (Taylor expansion in
# t around infinity); x, y and z are unused.
function code(x, y, z, t)
	return Float64(-t)
end
% Herbie alternative 10: degenerate approximation -t (Taylor expansion in
% t around infinity); x, y and z are unused.
function tmp = code(x, y, z, t)
	tmp = -t;
end
code[x_, y_, z_, t_] := (-t)
\begin{array}{l}

\\
-t
\end{array}
Derivation
  1. Initial program 81.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. +-commutative81.8%

      \[\leadsto \color{blue}{\left(z \cdot \log \left(1 - y\right) + x \cdot \log y\right)} - t \]
    2. associate--l+81.8%

      \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right) + \left(x \cdot \log y - t\right)} \]
    3. +-commutative81.8%

      \[\leadsto \color{blue}{\left(x \cdot \log y - t\right) + z \cdot \log \left(1 - y\right)} \]
    4. associate-+l-81.8%

      \[\leadsto \color{blue}{x \cdot \log y - \left(t - z \cdot \log \left(1 - y\right)\right)} \]
    5. fma-neg81.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, -\left(t - z \cdot \log \left(1 - y\right)\right)\right)} \]
    6. sub0-neg81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{0 - \left(t - z \cdot \log \left(1 - y\right)\right)}\right) \]
    7. associate-+l-81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(0 - t\right) + z \cdot \log \left(1 - y\right)}\right) \]
    8. neg-sub081.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\left(-t\right)} + z \cdot \log \left(1 - y\right)\right) \]
    9. +-commutative81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{z \cdot \log \left(1 - y\right) + \left(-t\right)}\right) \]
    10. fma-def81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\mathsf{fma}\left(z, \log \left(1 - y\right), -t\right)}\right) \]
    11. sub-neg81.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \log \color{blue}{\left(1 + \left(-y\right)\right)}, -t\right)\right) \]
    12. log1p-def99.8%

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \color{blue}{\mathsf{log1p}\left(-y\right)}, -t\right)\right) \]
  3. Simplified99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)} \]
  4. Taylor expanded in t around inf 44.5%

    \[\leadsto \color{blue}{-1 \cdot t} \]
  5. Step-by-step derivation
    1. mul-1-neg44.5%

      \[\leadsto \color{blue}{-t} \]
  6. Simplified44.5%

    \[\leadsto \color{blue}{-t} \]
  7. Final simplification44.5%

    \[\leadsto -t \]

Developer target: 99.6% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (-
  (*
   (- z)
   (+
    (+ (* 0.5 (* y y)) y)
    (* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
  (- t (* x (log y)))))
/* Developer target (99.6% accurate): z*log(1-y) replaced by the cubic
 * series -z*(y + y*y/2 + y*y*y/3), with x*log(y) kept exact. The
 * 0.333.../(1*(1*1)) factor is the series coefficient 1/3 as emitted by
 * the original source's term generator. */
double code(double x, double y, double z, double t) {
	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
}
real(8) function code(x, y, z, t)
    ! Developer target: z*log(1-y) replaced by the cubic series
    ! -z*(y + y**2/2 + y**3/3); the x*log(y) term is kept exact.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
end function
// Developer target: z*log(1-y) replaced by the cubic series
// -z*(y + y*y/2 + y*y*y/3); the x*log(y) term is kept exact.
public static double code(double x, double y, double z, double t) {
	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * Math.log(y)));
}
def code(x, y, z, t):
	"""Developer target: z*log(1-y) replaced by the cubic series
	-z*(y + y*y/2 + y*y*y/3); the x*log(y) term is kept exact."""
	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * math.log(y)))
# Developer target: z*log(1-y) replaced by the cubic series
# -z*(y + y*y/2 + y*y*y/3); the x*log(y) term is kept exact.
function code(x, y, z, t)
	return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y))))
end
% Developer target: z*log(1-y) replaced by the cubic series
% -z*(y + y*y/2 + y*y*y/3); the x*log(y) term is kept exact.
function tmp = code(x, y, z, t)
	tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
end
code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
\end{array}

Reproduce

?
herbie shell --seed 2023229 
(FPCore (x y z t)
  :name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
  :precision binary64

  :herbie-target
  (- (* (- z) (+ (+ (* 0.5 (* y y)) y) (* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y))))) (- t (* x (log y))))

  (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))