Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B

Percentage Accurate: 85.4% → 99.8%
Time: 16.9s
Alternatives: 17
Speedup: 1.9×

Specification

?
\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Initial program: (x*log(y) + z*log(1 - y)) - t in double precision.
! Both logs are real only for 0 < y < 1.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x   ! coefficient of log(y)
    real(8), intent (in) :: y   ! log argument
    real(8), intent (in) :: z   ! coefficient of log(1 - y)
    real(8), intent (in) :: t   ! subtracted offset
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
// Initial program: (x*log(y) + z*log(1 - y)) - t in double precision.
// Both logs are finite only for 0 < y < 1.
public static double code(double x, double y, double z, double t) {
	final double logYTerm = x * Math.log(y);
	final double log1mYTerm = z * Math.log(1.0 - y);
	return (logYTerm + log1mYTerm) - t;
}
def code(x, y, z, t):
	"""Initial program: (x*log(y) + z*log(1 - y)) - t.

	Both logs are real only for 0 < y < 1.
	"""
	log_y = math.log(y)
	log_1my = math.log(1.0 - y)
	return (x * log_y + z * log_1my) - t
# Initial program: (x*log(y) + z*log(1 - y)) - t, with explicit Float64
# rounding of every intermediate to mirror binary64 evaluation order.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t)
end
% Initial program: (x*log(y) + z*log(1 - y)) - t in double precision.
function tmp = code(x, y, z, t)
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
(* Initial program: (x*Log[y] + z*Log[1 - y]) - t, rounding each
   intermediate with N[..., $MachinePrecision] to mimic binary64. *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 17 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternatives. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative.The line shows the best available speed-accuracy tradeoffs.

Initial Program: 85.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Initial program listing: (x*log(y) + z*log(1 - y)) - t in double precision.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x   ! coefficient of log(y)
    real(8), intent (in) :: y   ! log argument; logs are real only for 0 < y < 1
    real(8), intent (in) :: z   ! coefficient of log(1 - y)
    real(8), intent (in) :: t   ! subtracted offset
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
// Initial program: x*log(y) + z*log(1 - y) - t, evaluated directly.
public static double code(double x, double y, double z, double t) {
	double sum = x * Math.log(y) + z * Math.log(1.0 - y);
	return sum - t;
}
def code(x, y, z, t):
	"""Initial program: x*log(y) + z*log(1 - y) - t."""
	total = x * math.log(y) + z * math.log(1.0 - y)
	return total - t
# Initial program listing: (x*log(y) + z*log(1 - y)) - t with explicit
# Float64 rounding of each intermediate.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t)
end
% Initial program listing: (x*log(y) + z*log(1 - y)) - t.
function tmp = code(x, y, z, t)
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
(* Initial program listing: (x*Log[y] + z*Log[1 - y]) - t with
   machine-precision rounding at every step. *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Alternative 1: 99.8% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\left(y \cdot y\right) \cdot \left(-y\right)\right) - \mathsf{log1p}\left(\mathsf{fma}\left(y, y, y\right)\right)\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (-
  (+ (* x (log y)) (* z (- (log1p (* (* y y) (- y))) (log1p (fma y y y)))))
  t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * (log1p(((y * y) * -y)) - log1p(fma(y, y, y))))) - t;
}
# Alternative 1: z*log(1-y) rewritten as z*(log1p(-y^3) - log1p(y^2 + y)),
# valid because (1-y) = (1-y^3)/(1+y+y^2); Float64 rounding is explicit.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(log1p(Float64(Float64(y * y) * Float64(-y))) - log1p(fma(y, y, y))))) - t)
end
(* Alternative 1: z*Log[1-y] rewritten via (1-y) = (1-y^3)/(1+y+y^2)
   as z*(Log[1 - y^3] - Log[1 + y^2 + y]). *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(N[Log[1 + N[(N[(y * y), $MachinePrecision] * (-y)), $MachinePrecision]], $MachinePrecision] - N[Log[1 + N[(y * y + y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\left(y \cdot y\right) \cdot \left(-y\right)\right) - \mathsf{log1p}\left(\mathsf{fma}\left(y, y, y\right)\right)\right)\right) - t
\end{array}
Derivation
  1. Initial program 84.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-log.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\log \left(1 - y\right)}\right) - t \]
    2. lift--.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \log \color{blue}{\left(1 - y\right)}\right) - t \]
    3. flip3--N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \log \color{blue}{\left(\frac{{1}^{3} - {y}^{3}}{1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)}\right)}\right) - t \]
    4. log-divN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\log \left({1}^{3} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)}\right) - t \]
    5. lower--.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\log \left({1}^{3} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)}\right) - t \]
    6. metadata-evalN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{1} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    7. sub-negN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \color{blue}{\left(1 + \left(\mathsf{neg}\left({y}^{3}\right)\right)\right)} - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    8. cube-negN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(1 + \color{blue}{{\left(\mathsf{neg}\left(y\right)\right)}^{3}}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    9. metadata-evalN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{{1}^{3}} + {\left(\mathsf{neg}\left(y\right)\right)}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    10. metadata-evalN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{1} + {\left(\mathsf{neg}\left(y\right)\right)}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    11. lower-log1p.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\color{blue}{\mathsf{log1p}\left({\left(\mathsf{neg}\left(y\right)\right)}^{3}\right)} - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    12. cube-negN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left({y}^{3}\right)}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    13. lower-neg.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left({y}^{3}\right)}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    14. cube-multN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(\color{blue}{y \cdot \left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    15. lower-*.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(\color{blue}{y \cdot \left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    16. lower-*.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \color{blue}{\left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    17. metadata-evalN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \log \left(\color{blue}{1} + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    18. lower-log1p.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \color{blue}{\mathsf{log1p}\left(y \cdot y + 1 \cdot y\right)}\right)\right) - t \]
    19. *-lft-identityN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \mathsf{log1p}\left(y \cdot y + \color{blue}{y}\right)\right)\right) - t \]
    20. lower-fma.f6499.7

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(-y \cdot \left(y \cdot y\right)\right) - \mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(y, y, y\right)}\right)\right)\right) - t \]
  4. Applied rewrites99.7%

    \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\mathsf{log1p}\left(-y \cdot \left(y \cdot y\right)\right) - \mathsf{log1p}\left(\mathsf{fma}\left(y, y, y\right)\right)\right)}\right) - t \]
  5. Final simplification99.7%

    \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\left(y \cdot y\right) \cdot \left(-y\right)\right) - \mathsf{log1p}\left(\mathsf{fma}\left(y, y, y\right)\right)\right)\right) - t \]
  6. Add Preprocessing

Alternative 2: 99.6% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \frac{1}{\frac{1}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)}} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (/ 1.0 (/ 1.0 (fma x (log y) (fma z (log1p (- y)) (- t))))))
double code(double x, double y, double z, double t) {
	return 1.0 / (1.0 / fma(x, log(y), fma(z, log1p(-y), -t)));
}
# Alternative 2: fused fma(x, log(y), fma(z, log1p(-y), -t)), wrapped in a
# double reciprocal 1/(1/v) exactly as produced by the rewrite.
function code(x, y, z, t)
	return Float64(1.0 / Float64(1.0 / fma(x, log(y), fma(z, log1p(Float64(-y)), Float64(-t)))))
end
(* Alternative 2: x*Log[y] + z*Log[1-y] - t in fused form, wrapped in a
   double reciprocal 1/(1/v) kept from the generated rewrite. *)
code[x_, y_, z_, t_] := N[(1.0 / N[(1.0 / N[(x * N[Log[y], $MachinePrecision] + N[(z * N[Log[1 + (-y)], $MachinePrecision] + (-t)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1}{\frac{1}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)}}
\end{array}
Derivation
  1. Initial program 84.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \color{blue}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t} \]
    2. flip--N/A

      \[\leadsto \color{blue}{\frac{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) \cdot \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \cdot t}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) + t}} \]
    3. clear-numN/A

      \[\leadsto \color{blue}{\frac{1}{\frac{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) + t}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) \cdot \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \cdot t}}} \]
    4. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{1}{\frac{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) + t}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) \cdot \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \cdot t}}} \]
    5. clear-numN/A

      \[\leadsto \frac{1}{\color{blue}{\frac{1}{\frac{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) \cdot \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \cdot t}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) + t}}}} \]
    6. flip--N/A

      \[\leadsto \frac{1}{\frac{1}{\color{blue}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t}}} \]
    7. lift--.f64N/A

      \[\leadsto \frac{1}{\frac{1}{\color{blue}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t}}} \]
    8. lower-/.f6483.9

      \[\leadsto \frac{1}{\color{blue}{\frac{1}{\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t}}} \]
  4. Applied rewrites99.5%

    \[\leadsto \color{blue}{\frac{1}{\frac{1}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), -t\right)\right)}}} \]
  5. Add Preprocessing

Alternative 3: 99.6% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(y, \left(y \cdot z\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (fma
  y
  (- (* (* y z) (fma y -0.3333333333333333 -0.5)) z)
  (fma x (log y) (- t))))
double code(double x, double y, double z, double t) {
	return fma(y, (((y * z) * fma(y, -0.3333333333333333, -0.5)) - z), fma(x, log(y), -t));
}
# Alternative 3: series expansion of log(1-y) around 0 up to y^3,
# log(1-y) ~ -y - y^2/2 - y^3/3, expressed with fused multiply-adds.
function code(x, y, z, t)
	return fma(y, Float64(Float64(Float64(y * z) * fma(y, -0.3333333333333333, -0.5)) - z), fma(x, log(y), Float64(-t)))
end
(* Alternative 3: Taylor series of Log[1-y] around 0 up to y^3,
   replacing z*Log[1-y] with the cubic polynomial in y. *)
code[x_, y_, z_, t_] := N[(y * N[(N[(N[(y * z), $MachinePrecision] * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision] + (-t)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(y, \left(y \cdot z\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right)
\end{array}
Derivation
  1. Initial program 84.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
  4. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \color{blue}{\left(y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + x \cdot \log y\right)} - t \]
    2. associate--l+N/A

      \[\leadsto \color{blue}{y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right) + \left(x \cdot \log y - t\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(y, -1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right), x \cdot \log y - t\right)} \]
  5. Applied rewrites99.1%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, \left(z \cdot y\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right)} \]
  6. Final simplification99.1%

    \[\leadsto \mathsf{fma}\left(y, \left(y \cdot z\right) \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right) - z, \mathsf{fma}\left(x, \log y, -t\right)\right) \]
  7. Add Preprocessing

Alternative 4: 99.4% accurate, 1.7× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(y \cdot z, -0.5, -z\right), -t\right)\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (fma x (log y) (fma y (fma (* y z) -0.5 (- z)) (- t))))
double code(double x, double y, double z, double t) {
	return fma(x, log(y), fma(y, fma((y * z), -0.5, -z), -t));
}
# Alternative 4: quadratic Taylor truncation of z*log(1-y) around 0,
# z*(-y - y^2/2), expressed entirely with fused multiply-adds.
function code(x, y, z, t)
	return fma(x, log(y), fma(y, fma(Float64(y * z), -0.5, Float64(-z)), Float64(-t)))
end
(* Alternative 4: quadratic Taylor truncation of z*Log[1-y] around 0,
   z*(-y - y^2/2). *)
code[x_, y_, z_, t_] := N[(x * N[Log[y], $MachinePrecision] + N[(y * N[(N[(y * z), $MachinePrecision] * -0.5 + (-z)), $MachinePrecision] + (-t)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(y \cdot z, -0.5, -z\right), -t\right)\right)
\end{array}
Derivation
  1. Initial program 84.1%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-log.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\log \left(1 - y\right)}\right) - t \]
    2. lift--.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \log \color{blue}{\left(1 - y\right)}\right) - t \]
    3. flip3--N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \log \color{blue}{\left(\frac{{1}^{3} - {y}^{3}}{1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)}\right)}\right) - t \]
    4. log-divN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\log \left({1}^{3} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)}\right) - t \]
    5. lower--.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\log \left({1}^{3} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)}\right) - t \]
    6. metadata-evalN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{1} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    7. sub-negN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \color{blue}{\left(1 + \left(\mathsf{neg}\left({y}^{3}\right)\right)\right)} - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    8. cube-negN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(1 + \color{blue}{{\left(\mathsf{neg}\left(y\right)\right)}^{3}}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    9. metadata-evalN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{{1}^{3}} + {\left(\mathsf{neg}\left(y\right)\right)}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    10. metadata-evalN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{1} + {\left(\mathsf{neg}\left(y\right)\right)}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    11. lower-log1p.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\color{blue}{\mathsf{log1p}\left({\left(\mathsf{neg}\left(y\right)\right)}^{3}\right)} - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    12. cube-negN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left({y}^{3}\right)}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    13. lower-neg.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left({y}^{3}\right)}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    14. cube-multN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(\color{blue}{y \cdot \left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    15. lower-*.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(\color{blue}{y \cdot \left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    16. lower-*.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \color{blue}{\left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    17. metadata-evalN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \log \left(\color{blue}{1} + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
    18. lower-log1p.f64N/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \color{blue}{\mathsf{log1p}\left(y \cdot y + 1 \cdot y\right)}\right)\right) - t \]
    19. *-lft-identityN/A

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \mathsf{log1p}\left(y \cdot y + \color{blue}{y}\right)\right)\right) - t \]
    20. lower-fma.f6499.7

      \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(-y \cdot \left(y \cdot y\right)\right) - \mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(y, y, y\right)}\right)\right)\right) - t \]
  4. Applied rewrites99.7%

    \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\mathsf{log1p}\left(-y \cdot \left(y \cdot y\right)\right) - \mathsf{log1p}\left(\mathsf{fma}\left(y, y, y\right)\right)\right)}\right) - t \]
  5. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right)\right) - t} \]
  6. Step-by-step derivation
    1. associate--l+N/A

      \[\leadsto \color{blue}{x \cdot \log y + \left(y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) - t\right)} \]
    2. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) - t\right)} \]
    3. lower-log.f64N/A

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log y}, y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) - t\right) \]
    4. sub-negN/A

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) + \left(\mathsf{neg}\left(t\right)\right)}\right) \]
    5. lower-fma.f64N/A

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{\mathsf{fma}\left(y, -1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right), \mathsf{neg}\left(t\right)\right)}\right) \]
    6. +-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \color{blue}{\frac{-1}{2} \cdot \left(y \cdot z\right) + -1 \cdot z}, \mathsf{neg}\left(t\right)\right)\right) \]
    7. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \color{blue}{\left(y \cdot z\right) \cdot \frac{-1}{2}} + -1 \cdot z, \mathsf{neg}\left(t\right)\right)\right) \]
    8. lower-fma.f64N/A

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \color{blue}{\mathsf{fma}\left(y \cdot z, \frac{-1}{2}, -1 \cdot z\right)}, \mathsf{neg}\left(t\right)\right)\right) \]
    9. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(\color{blue}{z \cdot y}, \frac{-1}{2}, -1 \cdot z\right), \mathsf{neg}\left(t\right)\right)\right) \]
    10. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(\color{blue}{z \cdot y}, \frac{-1}{2}, -1 \cdot z\right), \mathsf{neg}\left(t\right)\right)\right) \]
    11. mul-1-negN/A

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(z \cdot y, \frac{-1}{2}, \color{blue}{\mathsf{neg}\left(z\right)}\right), \mathsf{neg}\left(t\right)\right)\right) \]
    12. lower-neg.f64N/A

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(z \cdot y, \frac{-1}{2}, \color{blue}{\mathsf{neg}\left(z\right)}\right), \mathsf{neg}\left(t\right)\right)\right) \]
    13. lower-neg.f6498.9

      \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(z \cdot y, -0.5, -z\right), \color{blue}{-t}\right)\right) \]
  7. Applied rewrites98.9%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(z \cdot y, -0.5, -z\right), -t\right)\right)} \]
  8. Final simplification98.9%

    \[\leadsto \mathsf{fma}\left(x, \log y, \mathsf{fma}\left(y, \mathsf{fma}\left(y \cdot z, -0.5, -z\right), -t\right)\right) \]
  9. Add Preprocessing

Alternative 5: 88.3% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y - t\\ \mathbf{if}\;x \leq -4.8 \cdot 10^{+57}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{-43}:\\ \;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* x (log y)) t)))
   (if (<= x -4.8e+57) t_1 (if (<= x 3.3e-43) (- (* z (log1p (- y))) t) t_1))))
double code(double x, double y, double z, double t) {
	double t_1 = (x * log(y)) - t;
	double tmp;
	if (x <= -4.8e+57) {
		tmp = t_1;
	} else if (x <= 3.3e-43) {
		tmp = (z * log1p(-y)) - t;
	} else {
		tmp = t_1;
	}
	return tmp;
}
// Alternative 5: regime split on x. In the middle regime
// (-4.8e57, 3.3e-43] the x*log(y) term is dropped and z*log1p(-y) - t
// is used; elsewhere (including NaN x, for which both comparisons are
// false) the x*log(y) - t form is returned.
public static double code(double x, double y, double z, double t) {
	final double defaultVal = (x * Math.log(y)) - t;
	final boolean middleRegime = (x > -4.8e+57) && (x <= 3.3e-43);
	return middleRegime ? (z * Math.log1p(-y)) - t : defaultVal;
}
def code(x, y, z, t):
	"""Alternative 5: regime split on x.

	For -4.8e57 < x <= 3.3e-43 the x*log(y) term is dropped and
	z*log1p(-y) - t is used; elsewhere (including NaN x, for which the
	chained comparison is False) x*log(y) - t is returned.
	"""
	t_1 = (x * math.log(y)) - t
	if -4.8e+57 < x <= 3.3e-43:
		return (z * math.log1p(-y)) - t
	return t_1
# Alternative 5: regime split on x. In (-4.8e57, 3.3e-43] the x*log(y)
# term is dropped and z*log1p(-y) - t is used; elsewhere x*log(y) - t.
function code(x, y, z, t)
	t_1 = Float64(Float64(x * log(y)) - t)
	tmp = 0.0
	if (x <= -4.8e+57)
		tmp = t_1;
	elseif (x <= 3.3e-43)
		tmp = Float64(Float64(z * log1p(Float64(-y))) - t);
	else
		tmp = t_1;
	end
	return tmp
end
(* Alternative 5: regime split on x; middle regime uses z*Log[1-y] - t,
   outer regimes use x*Log[y] - t. *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -4.8e+57], t$95$1, If[LessEqual[x, 3.3e-43], N[(N[(z * N[Log[1 + (-y)], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -4.8 \cdot 10^{+57}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 3.3 \cdot 10^{-43}:\\
\;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -4.80000000000000009e57 or 3.30000000000000016e-43 < x

    1. Initial program 91.2%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \log y} - t \]
    4. Step-by-step derivation
      1. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} - t \]
      2. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) - t \]
      3. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \left(-1 \cdot \log y\right)\right)} - t \]
      4. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - t \]
      5. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) - t \]
      6. lower-*.f64N/A

        \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{y}\right)\right)} - t \]
      7. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - t \]
      8. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(-1 \cdot \log y\right)}\right) - t \]
      9. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(-1 \cdot \log y\right)\right)} - t \]
      10. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right)\right) - t \]
      11. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\log y} - t \]
      12. lower-log.f6490.3

        \[\leadsto x \cdot \color{blue}{\log y} - t \]
    5. Applied rewrites90.3%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]

    if -4.80000000000000009e57 < x < 3.30000000000000016e-43

    1. Initial program 75.7%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
      2. sub-negN/A

        \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} - t \]
      3. lower-log1p.f64N/A

        \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} - t \]
      4. lower-neg.f6492.0

        \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
    5. Applied rewrites92.0%

      \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 6: 88.2% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y - t\\ \mathbf{if}\;x \leq -4.8 \cdot 10^{+57}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{-43}:\\ \;\;\;\;z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* x (log y)) t)))
   (if (<= x -4.8e+57)
     t_1
     (if (<= x 3.3e-43)
       (-
        (* z (* y (fma y (fma y (fma y -0.25 -0.3333333333333333) -0.5) -1.0)))
        t)
       t_1))))
double code(double x, double y, double z, double t) {
	double t_1 = (x * log(y)) - t;
	double tmp;
	if (x <= -4.8e+57) {
		tmp = t_1;
	} else if (x <= 3.3e-43) {
		tmp = (z * (y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t;
	} else {
		tmp = t_1;
	}
	return tmp;
}
# Herbie alternative 6 for (x*log(y) + z*log(1 - y)) - t.
# Regime split on x: the outer regimes keep x*log(y) - t; the middle regime
# replaces z*log1p(-y) with a degree-4 Taylor polynomial in y (fma-evaluated).
function code(x, y, z, t)
	t_1 = Float64(Float64(x * log(y)) - t)
	tmp = 0.0
	if (x <= -4.8e+57)
		tmp = t_1;
	elseif (x <= 3.3e-43)
		# z*y*(-1 - y/2 - y^2/3 - y^3/4) approximates z*log1p(-y) near y = 0
		tmp = Float64(Float64(z * Float64(y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t);
	else
		tmp = t_1;
	end
	return tmp
end
(* Herbie alternative 6: regime split on x; t$95$1 = x*Log[y] - t serves both outer regimes *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -4.8e+57], t$95$1, If[LessEqual[x, 3.3e-43], N[(N[(z * N[(y * N[(y * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -4.8 \cdot 10^{+57}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 3.3 \cdot 10^{-43}:\\
\;\;\;\;z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right) - t\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -4.80000000000000009e57 or 3.30000000000000016e-43 < x

    1. Initial program 91.2%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \log y} - t \]
    4. Step-by-step derivation
      1. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} - t \]
      2. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) - t \]
      3. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \left(-1 \cdot \log y\right)\right)} - t \]
      4. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - t \]
      5. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) - t \]
      6. lower-*.f64N/A

        \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{y}\right)\right)} - t \]
      7. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - t \]
      8. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(-1 \cdot \log y\right)}\right) - t \]
      9. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(-1 \cdot \log y\right)\right)} - t \]
      10. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right)\right) - t \]
      11. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\log y} - t \]
      12. lower-log.f6490.3

        \[\leadsto x \cdot \color{blue}{\log y} - t \]
    5. Applied rewrites90.3%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]

    if -4.80000000000000009e57 < x < 3.30000000000000016e-43

    1. Initial program 75.7%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
      2. sub-negN/A

        \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} - t \]
      3. lower-log1p.f64N/A

        \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} - t \]
      4. lower-neg.f6492.0

        \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
    5. Applied rewrites92.0%

      \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
    6. Taylor expanded in y around 0

      \[\leadsto z \cdot \left(y \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) - 1\right)}\right) - t \]
    7. Step-by-step derivation
      1. Applied rewrites91.2%

        \[\leadsto z \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)}\right) - t \]
    8. Recombined 2 regimes into one program.
    9. Add Preprocessing

    Alternative 7: 77.6% accurate, 1.9× speedup?

    \[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y\\ \mathbf{if}\;x \leq -1.82 \cdot 10^{+81}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 5.8 \cdot 10^{+26}:\\ \;\;\;\;z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
    (FPCore (x y z t)
     :precision binary64
     (let* ((t_1 (* x (log y))))
       (if (<= x -1.82e+81)
         t_1
         (if (<= x 5.8e+26)
           (-
            (* z (* y (fma y (fma y (fma y -0.25 -0.3333333333333333) -0.5) -1.0)))
            t)
           t_1))))
    /* Herbie alternative 7: regime split on x at -1.82e81 and 5.8e26.
       The outer regimes keep only x*log(y) — note that t is NOT subtracted
       there; the middle regime uses the degree-4 Taylor polynomial of
       z*log1p(-y), minus t. */
    double code(double x, double y, double z, double t) {
    	double t_1 = x * log(y);	/* outer-regime value; t dropped */
    	double tmp;
    	if (x <= -1.82e+81) {
    		tmp = t_1;
    	} else if (x <= 5.8e+26) {
    		/* z*y*(-1 - y/2 - y^2/3 - y^3/4) ~ z*log1p(-y) near y = 0 */
    		tmp = (z * (y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t;
    	} else {
    		tmp = t_1;
    	}
    	return tmp;
    }
    
    # Herbie alternative 7: like alternative 6 but the outer regimes also drop t,
    # returning just x*log(y); the middle regime uses the degree-4 Taylor
    # polynomial of z*log1p(-y), minus t.
    function code(x, y, z, t)
    	t_1 = Float64(x * log(y))
    	tmp = 0.0
    	if (x <= -1.82e+81)
    		tmp = t_1;
    	elseif (x <= 5.8e+26)
    		# z*y*(-1 - y/2 - y^2/3 - y^3/4) approximates z*log1p(-y) near y = 0
    		tmp = Float64(Float64(z * Float64(y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t);
    	else
    		tmp = t_1;
    	end
    	return tmp
    end
    
    (* Herbie alternative 7: outer regimes return x*Log[y] only; t is dropped there *)
    code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -1.82e+81], t$95$1, If[LessEqual[x, 5.8e+26], N[(N[(z * N[(y * N[(y * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    t_1 := x \cdot \log y\\
    \mathbf{if}\;x \leq -1.82 \cdot 10^{+81}:\\
    \;\;\;\;t\_1\\
    
    \mathbf{elif}\;x \leq 5.8 \cdot 10^{+26}:\\
    \;\;\;\;z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right) - t\\
    
    \mathbf{else}:\\
    \;\;\;\;t\_1\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if x < -1.82000000000000003e81 or 5.8e26 < x

      1. Initial program 92.1%

        \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
      2. Add Preprocessing
      3. Taylor expanded in x around inf

        \[\leadsto \color{blue}{x \cdot \log y} \]
      4. Step-by-step derivation
        1. remove-double-negN/A

          \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} \]
        2. mul-1-negN/A

          \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) \]
        3. mul-1-negN/A

          \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \left(-1 \cdot \log y\right)\right)} \]
        4. mul-1-negN/A

          \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) \]
        5. log-recN/A

          \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) \]
        6. lower-*.f64N/A

          \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{y}\right)\right)} \]
        7. log-recN/A

          \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) \]
        8. mul-1-negN/A

          \[\leadsto x \cdot \left(-1 \cdot \color{blue}{\left(-1 \cdot \log y\right)}\right) \]
        9. mul-1-negN/A

          \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(-1 \cdot \log y\right)\right)} \]
        10. mul-1-negN/A

          \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right)\right) \]
        11. remove-double-negN/A

          \[\leadsto x \cdot \color{blue}{\log y} \]
        12. lower-log.f6471.3

          \[\leadsto x \cdot \color{blue}{\log y} \]
      5. Applied rewrites71.3%

        \[\leadsto \color{blue}{x \cdot \log y} \]

      if -1.82000000000000003e81 < x < 5.8e26

      1. Initial program 77.6%

        \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
      2. Add Preprocessing
      3. Taylor expanded in x around 0

        \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
      4. Step-by-step derivation
        1. lower-*.f64N/A

          \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
        2. sub-negN/A

          \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} - t \]
        3. lower-log1p.f64N/A

          \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} - t \]
        4. lower-neg.f6488.7

          \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
      5. Applied rewrites88.7%

        \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
      6. Taylor expanded in y around 0

        \[\leadsto z \cdot \left(y \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) - 1\right)}\right) - t \]
      7. Step-by-step derivation
        1. Applied rewrites88.0%

          \[\leadsto z \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)}\right) - t \]
      8. Recombined 2 regimes into one program.
      9. Add Preprocessing

      Alternative 8: 99.1% accurate, 1.9× speedup?

      \[\begin{array}{l} \\ x \cdot \log y - \mathsf{fma}\left(z, y, t\right) \end{array} \]
      (FPCore (x y z t) :precision binary64 (- (* x (log y)) (fma z y t)))
      double code(double x, double y, double z, double t) {
      	return (x * log(y)) - fma(z, y, t);
      }
      
      # Herbie alternative 8: x*log(y) - (z*y + t), affine part via a single fma.
      function code(x, y, z, t)
      	log_part = Float64(x * log(y))
      	return Float64(log_part - fma(z, y, t))
      end
      
      (* Herbie alternative 8: x*Log[y] - (z*y + t) *)
      code[x_, y_, z_, t_] := N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(z * y + t), $MachinePrecision]), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      x \cdot \log y - \mathsf{fma}\left(z, y, t\right)
      \end{array}
      
      Derivation
      1. Initial program 84.1%

        \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
      2. Add Preprocessing
      3. Taylor expanded in y around 0

        \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
      4. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \color{blue}{\left(x \cdot \log y + -1 \cdot \left(y \cdot z\right)\right)} - t \]
        2. mul-1-negN/A

          \[\leadsto \left(x \cdot \log y + \color{blue}{\left(\mathsf{neg}\left(y \cdot z\right)\right)}\right) - t \]
        3. unsub-negN/A

          \[\leadsto \color{blue}{\left(x \cdot \log y - y \cdot z\right)} - t \]
        4. remove-double-negN/A

          \[\leadsto \left(x \cdot \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right)} - y \cdot z\right) - t \]
        5. mul-1-negN/A

          \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \log y}\right)\right) - y \cdot z\right) - t \]
        6. distribute-rgt-neg-inN/A

          \[\leadsto \left(\color{blue}{\left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right)} - y \cdot z\right) - t \]
        7. neg-mul-1N/A

          \[\leadsto \left(\color{blue}{-1 \cdot \left(x \cdot \left(-1 \cdot \log y\right)\right)} - y \cdot z\right) - t \]
        8. mul-1-negN/A

          \[\leadsto \left(-1 \cdot \left(x \cdot \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) - y \cdot z\right) - t \]
        9. log-recN/A

          \[\leadsto \left(-1 \cdot \left(x \cdot \color{blue}{\log \left(\frac{1}{y}\right)}\right) - y \cdot z\right) - t \]
        10. associate--l-N/A

          \[\leadsto \color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \left(y \cdot z + t\right)} \]
        11. lower--.f64N/A

          \[\leadsto \color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \left(y \cdot z + t\right)} \]
      5. Applied rewrites98.7%

        \[\leadsto \color{blue}{x \cdot \log y - \mathsf{fma}\left(z, y, t\right)} \]
      6. Add Preprocessing

      Alternative 9: 57.3% accurate, 6.9× speedup?

      \[\begin{array}{l} \\ z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right) - t \end{array} \]
      (FPCore (x y z t)
       :precision binary64
       (- (* z (* y (fma y (fma y (fma y -0.25 -0.3333333333333333) -0.5) -1.0))) t))
      double code(double x, double y, double z, double t) {
      	return (z * (y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t;
      }
      
      # Herbie alternative 9: degree-4 Taylor polynomial of z*log1p(-y) about
      # y = 0, minus t; x is unused.
      function code(x, y, z, t)
      	return Float64(Float64(z * Float64(y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0))) - t)
      end
      
      (* Herbie alternative 9: Horner form of the degree-4 series of z*Log[1-y], minus t *)
      code[x_, y_, z_, t_] := N[(N[(z * N[(y * N[(y * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right) - t
      \end{array}
      
      Derivation
      1. Initial program 84.1%

        \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
      2. Add Preprocessing
      3. Taylor expanded in x around 0

        \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
      4. Step-by-step derivation
        1. lower-*.f64N/A

          \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
        2. sub-negN/A

          \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} - t \]
        3. lower-log1p.f64N/A

          \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} - t \]
        4. lower-neg.f6462.2

          \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
      5. Applied rewrites62.2%

        \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
      6. Taylor expanded in y around 0

        \[\leadsto z \cdot \left(y \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) - 1\right)}\right) - t \]
      7. Step-by-step derivation
        1. Applied rewrites61.6%

          \[\leadsto z \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)}\right) - t \]
        2. Add Preprocessing

        Alternative 10: 57.2% accurate, 7.9× speedup?

        \[\begin{array}{l} \\ z \cdot \mathsf{fma}\left(\mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), y \cdot y, -y\right) - t \end{array} \]
        (FPCore (x y z t)
         :precision binary64
         (- (* z (fma (fma y -0.3333333333333333 -0.5) (* y y) (- y))) t))
        double code(double x, double y, double z, double t) {
        	return (z * fma(fma(y, -0.3333333333333333, -0.5), (y * y), -y)) - t;
        }
        
        # Herbie alternative 10: cubic Taylor approximation of z*log1p(-y),
        # minus t; x is unused.
        function code(x, y, z, t)
        	return Float64(Float64(z * fma(fma(y, -0.3333333333333333, -0.5), Float64(y * y), Float64(-y))) - t)
        end
        
        (* Herbie alternative 10: cubic series of z*Log[1-y] with fused quadratic part, minus t *)
        code[x_, y_, z_, t_] := N[(N[(z * N[(N[(y * -0.3333333333333333 + -0.5), $MachinePrecision] * N[(y * y), $MachinePrecision] + (-y)), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        z \cdot \mathsf{fma}\left(\mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), y \cdot y, -y\right) - t
        \end{array}
        
        Derivation
        1. Initial program 84.1%

          \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
        2. Add Preprocessing
        3. Taylor expanded in x around 0

          \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
        4. Step-by-step derivation
          1. lower-*.f64N/A

            \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
          2. sub-negN/A

            \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} - t \]
          3. lower-log1p.f64N/A

            \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} - t \]
          4. lower-neg.f6462.2

            \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
        5. Applied rewrites62.2%

          \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
        6. Taylor expanded in y around 0

          \[\leadsto z \cdot \left(y \cdot \color{blue}{\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)}\right) - t \]
        7. Step-by-step derivation
          1. Applied rewrites61.5%

            \[\leadsto z \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right)}\right) - t \]
          2. Step-by-step derivation
            1. Applied rewrites61.5%

              \[\leadsto z \cdot \mathsf{fma}\left(\mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), y \cdot \color{blue}{y}, -y\right) - t \]
            2. Add Preprocessing

            Alternative 11: 57.2% accurate, 7.9× speedup?

            \[\begin{array}{l} \\ y \cdot \mathsf{fma}\left(y, z \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -z\right) - t \end{array} \]
            (FPCore (x y z t)
             :precision binary64
             (- (* y (fma y (* z (fma y -0.3333333333333333 -0.5)) (- z))) t))
            double code(double x, double y, double z, double t) {
            	return (y * fma(y, (z * fma(y, -0.3333333333333333, -0.5)), -z)) - t;
            }
            
            # Herbie alternative 11: cubic series of z*log1p(-y) with z folded
            # into the inner coefficient, minus t; x is unused.
            function code(x, y, z, t)
            	return Float64(Float64(y * fma(y, Float64(z * fma(y, -0.3333333333333333, -0.5)), Float64(-z))) - t)
            end
            
            (* Herbie alternative 11: cubic series of z*Log[1-y] with z distributed inward, minus t *)
            code[x_, y_, z_, t_] := N[(N[(y * N[(y * N[(z * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + (-z)), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
            
            \begin{array}{l}
            
            \\
            y \cdot \mathsf{fma}\left(y, z \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -z\right) - t
            \end{array}
            
            Derivation
            1. Initial program 84.1%

              \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
            2. Add Preprocessing
            3. Taylor expanded in x around 0

              \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
            4. Step-by-step derivation
              1. lower-*.f64N/A

                \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
              2. sub-negN/A

                \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} - t \]
              3. lower-log1p.f64N/A

                \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} - t \]
              4. lower-neg.f6462.2

                \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
            5. Applied rewrites62.2%

              \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
            6. Taylor expanded in y around 0

              \[\leadsto z \cdot \left(-1 \cdot \color{blue}{y}\right) - t \]
            7. Step-by-step derivation
              1. Applied rewrites61.1%

                \[\leadsto z \cdot \left(-y\right) - t \]
              2. Taylor expanded in y around 0

                \[\leadsto y \cdot \color{blue}{\left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)} - t \]
              3. Step-by-step derivation
                1. Applied rewrites61.5%

                  \[\leadsto y \cdot \color{blue}{\mathsf{fma}\left(y, z \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -z\right)} - t \]
                2. Add Preprocessing

                Alternative 12: 57.2% accurate, 8.5× speedup?

                \[\begin{array}{l} \\ z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right)\right) - t \end{array} \]
                (FPCore (x y z t)
                 :precision binary64
                 (- (* z (* y (fma y (fma y -0.3333333333333333 -0.5) -1.0))) t))
                double code(double x, double y, double z, double t) {
                	return (z * (y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0))) - t;
                }
                
                # Herbie alternative 12: cubic Taylor polynomial of z*log1p(-y)
                # in Horner form, minus t; x is unused.
                function code(x, y, z, t)
                	return Float64(Float64(z * Float64(y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0))) - t)
                end
                
                (* Herbie alternative 12: Horner form of the cubic series of z*Log[1-y], minus t *)
                code[x_, y_, z_, t_] := N[(N[(z * N[(y * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
                
                \begin{array}{l}
                
                \\
                z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right)\right) - t
                \end{array}
                
                Derivation
                1. Initial program 84.1%

                  \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
                2. Add Preprocessing
                3. Taylor expanded in x around 0

                  \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
                4. Step-by-step derivation
                  1. lower-*.f64N/A

                    \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
                  2. sub-negN/A

                    \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} - t \]
                  3. lower-log1p.f64N/A

                    \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} - t \]
                  4. lower-neg.f6462.2

                    \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
                5. Applied rewrites62.2%

                  \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
                6. Taylor expanded in y around 0

                  \[\leadsto z \cdot \left(y \cdot \color{blue}{\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)}\right) - t \]
                7. Step-by-step derivation
                  1. Applied rewrites61.5%

                    \[\leadsto z \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right)}\right) - t \]
                  2. Add Preprocessing

                  Alternative 13: 48.7% accurate, 11.0× speedup?

                  \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -1.1 \cdot 10^{-45}:\\ \;\;\;\;-t\\ \mathbf{elif}\;t \leq 4.8 \cdot 10^{-78}:\\ \;\;\;\;z \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \end{array} \]
                  (FPCore (x y z t)
                   :precision binary64
                   (if (<= t -1.1e-45) (- t) (if (<= t 4.8e-78) (* z (- y)) (- t))))
                  /* Herbie alternative 13: regime split on t. In the narrow band
                     -1.1e-45 < t <= 4.8e-78 the result is approximated by z*(-y);
                     in both outer regimes it collapses to -t (NaN t also yields -t,
                     matching the original branch chain). */
                  double code(double x, double y, double z, double t) {
                  	if (t > -1.1e-45 && t <= 4.8e-78) {
                  		return z * -y;
                  	}
                  	return -t;
                  }
                  
                  ! Herbie alternative 13: regime split on t. In the band
                  ! -1.1d-45 < t <= 4.8d-78 the result is approximated by z*(-y);
                  ! in both outer regimes it collapses to -t.
                  ! Fix: the generated "z * -y" is invalid Fortran (a binary
                  ! operator may not be immediately followed by a unary minus);
                  ! the unary minus must be parenthesized.
                  real(8) function code(x, y, z, t)
                      real(8), intent (in) :: x
                      real(8), intent (in) :: y
                      real(8), intent (in) :: z
                      real(8), intent (in) :: t
                      real(8) :: tmp
                      if (t <= (-1.1d-45)) then
                          tmp = -t
                      else if (t <= 4.8d-78) then
                          tmp = z * (-y)
                      else
                          tmp = -t
                      end if
                      code = tmp
                  end function
                  
                  // Herbie alternative 13: regime split on t. The narrow band
                  // -1.1e-45 < t <= 4.8e-78 is approximated by z*(-y); both outer
                  // regimes (and NaN t) collapse to -t, as in the branch chain form.
                  public static double code(double x, double y, double z, double t) {
                  	if (t > -1.1e-45 && t <= 4.8e-78) {
                  		return z * -y;
                  	}
                  	return -t;
                  }
                  
                  def code(x, y, z, t):
                  	"""Herbie alternative 13: piecewise approximation split on t.

                  	In the band -1.1e-45 < t <= 4.8e-78 the value is approximated
                  	by z * -y; both outer regimes (and NaN t, for which every
                  	comparison is false) collapse to -t.
                  	"""
                  	if -1.1e-45 < t <= 4.8e-78:
                  		return z * -y
                  	return -t
                  
                  # Herbie alternative 13: regime split on t; the band
                  # -1.1e-45 < t <= 4.8e-78 uses z*(-y), the outer regimes return -t.
                  function code(x, y, z, t)
                  	tmp = 0.0
                  	if (t <= -1.1e-45)
                  		tmp = Float64(-t);
                  	elseif (t <= 4.8e-78)
                  		tmp = Float64(z * Float64(-y));
                  	else
                  		tmp = Float64(-t);
                  	end
                  	return tmp
                  end
                  
                  % Herbie alternative 13: regime split on t; the band
                  % -1.1e-45 < t <= 4.8e-78 uses z*(-y), the outer regimes return -t.
                  function tmp_2 = code(x, y, z, t)
                  	tmp = 0.0;
                  	if (t <= -1.1e-45)
                  		tmp = -t;
                  	elseif (t <= 4.8e-78)
                  		tmp = z * -y;
                  	else
                  		tmp = -t;
                  	end
                  	tmp_2 = tmp;
                  end
                  
                  (* Herbie alternative 13: piecewise in t; the middle band returns z*(-y), else -t *)
                  code[x_, y_, z_, t_] := If[LessEqual[t, -1.1e-45], (-t), If[LessEqual[t, 4.8e-78], N[(z * (-y)), $MachinePrecision], (-t)]]
                  
                  \begin{array}{l}
                  
                  \\
                  \begin{array}{l}
                  \mathbf{if}\;t \leq -1.1 \cdot 10^{-45}:\\
                  \;\;\;\;-t\\
                  
                  \mathbf{elif}\;t \leq 4.8 \cdot 10^{-78}:\\
                  \;\;\;\;z \cdot \left(-y\right)\\
                  
                  \mathbf{else}:\\
                  \;\;\;\;-t\\
                  
                  
                  \end{array}
                  \end{array}
                  
                  Derivation
                  1. Split input into 2 regimes
                  2. if t < -1.09999999999999997e-45 or 4.79999999999999999e-78 < t

                    1. Initial program 93.0%

                      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
                    2. Add Preprocessing
                    3. Taylor expanded in t around inf

                      \[\leadsto \color{blue}{-1 \cdot t} \]
                    4. Step-by-step derivation
                      1. mul-1-negN/A

                        \[\leadsto \color{blue}{\mathsf{neg}\left(t\right)} \]
                      2. lower-neg.f6465.6

                        \[\leadsto \color{blue}{-t} \]
                    5. Applied rewrites65.6%

                      \[\leadsto \color{blue}{-t} \]

                    if -1.09999999999999997e-45 < t < 4.79999999999999999e-78

                    1. Initial program 69.7%

                      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
                    2. Add Preprocessing
                    3. Step-by-step derivation
                      1. lift-log.f64N/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\log \left(1 - y\right)}\right) - t \]
                      2. lift--.f64N/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \log \color{blue}{\left(1 - y\right)}\right) - t \]
                      3. flip3--N/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \log \color{blue}{\left(\frac{{1}^{3} - {y}^{3}}{1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)}\right)}\right) - t \]
                      4. log-divN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\log \left({1}^{3} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)}\right) - t \]
                      5. lower--.f64N/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\log \left({1}^{3} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)}\right) - t \]
                      6. metadata-evalN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{1} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      7. sub-negN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \color{blue}{\left(1 + \left(\mathsf{neg}\left({y}^{3}\right)\right)\right)} - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      8. cube-negN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(1 + \color{blue}{{\left(\mathsf{neg}\left(y\right)\right)}^{3}}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      9. metadata-evalN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{{1}^{3}} + {\left(\mathsf{neg}\left(y\right)\right)}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      10. metadata-evalN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{1} + {\left(\mathsf{neg}\left(y\right)\right)}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      11. lower-log1p.f64N/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\color{blue}{\mathsf{log1p}\left({\left(\mathsf{neg}\left(y\right)\right)}^{3}\right)} - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      12. cube-negN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left({y}^{3}\right)}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      13. lower-neg.f64N/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left({y}^{3}\right)}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      14. cube-multN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(\color{blue}{y \cdot \left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      15. lower-*.f64N/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(\color{blue}{y \cdot \left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      16. lower-*.f64N/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \color{blue}{\left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      17. metadata-evalN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \log \left(\color{blue}{1} + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                      18. lower-log1p.f64N/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \color{blue}{\mathsf{log1p}\left(y \cdot y + 1 \cdot y\right)}\right)\right) - t \]
                      19. *-lft-identityN/A

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \mathsf{log1p}\left(y \cdot y + \color{blue}{y}\right)\right)\right) - t \]
                      20. lower-fma.f64 — 99.6%

                        \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(-y \cdot \left(y \cdot y\right)\right) - \mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(y, y, y\right)}\right)\right)\right) - t \]
                    4. Applied rewrites — 99.6% accuracy

                      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\mathsf{log1p}\left(-y \cdot \left(y \cdot y\right)\right) - \mathsf{log1p}\left(\mathsf{fma}\left(y, y, y\right)\right)\right)}\right) - t \]
                    5. Taylor expanded in y around 0

                      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
                    6. Step-by-step derivation
                      1. associate--l+N/A

                        \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right) + \left(x \cdot \log y - t\right)} \]
                      2. mul-1-negN/A

                        \[\leadsto \color{blue}{\left(\mathsf{neg}\left(y \cdot z\right)\right)} + \left(x \cdot \log y - t\right) \]
                      3. *-commutativeN/A

                        \[\leadsto \left(\mathsf{neg}\left(\color{blue}{z \cdot y}\right)\right) + \left(x \cdot \log y - t\right) \]
                      4. distribute-rgt-neg-inN/A

                        \[\leadsto \color{blue}{z \cdot \left(\mathsf{neg}\left(y\right)\right)} + \left(x \cdot \log y - t\right) \]
                      5. lower-fma.f64N/A

                        \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{neg}\left(y\right), x \cdot \log y - t\right)} \]
                      6. lower-neg.f64N/A

                        \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{neg}\left(y\right)}, x \cdot \log y - t\right) \]
                      7. sub-negN/A

                        \[\leadsto \mathsf{fma}\left(z, \mathsf{neg}\left(y\right), \color{blue}{x \cdot \log y + \left(\mathsf{neg}\left(t\right)\right)}\right) \]
                      8. lower-fma.f64N/A

                        \[\leadsto \mathsf{fma}\left(z, \mathsf{neg}\left(y\right), \color{blue}{\mathsf{fma}\left(x, \log y, \mathsf{neg}\left(t\right)\right)}\right) \]
                      9. lower-log.f64N/A

                        \[\leadsto \mathsf{fma}\left(z, \mathsf{neg}\left(y\right), \mathsf{fma}\left(x, \color{blue}{\log y}, \mathsf{neg}\left(t\right)\right)\right) \]
                      10. lower-neg.f64 — 97.6%

                        \[\leadsto \mathsf{fma}\left(z, -y, \mathsf{fma}\left(x, \log y, \color{blue}{-t}\right)\right) \]
                    7. Applied rewrites — 97.6% accuracy

                      \[\leadsto \color{blue}{\mathsf{fma}\left(z, -y, \mathsf{fma}\left(x, \log y, -t\right)\right)} \]
                    8. Taylor expanded in z around inf

                      \[\leadsto -1 \cdot \color{blue}{\left(y \cdot z\right)} \]
                    9. Step-by-step derivation
                      1. Applied rewrites — 31.4% accuracy

                        \[\leadsto z \cdot \color{blue}{\left(-y\right)} \]
                    10. Recombined 2 regimes into one program.
                    11. Add Preprocessing

                    Alternative 14: 57.1% accurate, 11.0× speedup?

                    \[\begin{array}{l} \\ z \cdot \left(y \cdot \mathsf{fma}\left(y, -0.5, -1\right)\right) - t \end{array} \]
                    (FPCore (x y z t) :precision binary64 (- (* z (* y (fma y -0.5 -1.0))) t))
                    double code(double x, double y, double z, double t) {
                    	return (z * (y * fma(y, -0.5, -1.0))) - t;
                    }
                    
                    function code(x, y, z, t)
                    	return Float64(Float64(z * Float64(y * fma(y, -0.5, -1.0))) - t)
                    end
                    
                    code[x_, y_, z_, t_] := N[(N[(z * N[(y * N[(y * -0.5 + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
                    
                    \begin{array}{l}
                    
                    \\
                    z \cdot \left(y \cdot \mathsf{fma}\left(y, -0.5, -1\right)\right) - t
                    \end{array}
                    
                    Derivation
                    1. Initial program 84.1%

                      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
                    2. Add Preprocessing
                    3. Taylor expanded in x around 0

                      \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
                    4. Step-by-step derivation
                      1. lower-*.f64N/A

                        \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
                      2. sub-negN/A

                        \[\leadsto z \cdot \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} - t \]
                      3. lower-log1p.f64N/A

                        \[\leadsto z \cdot \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} - t \]
                      4. lower-neg.f64 — 62.2%

                        \[\leadsto z \cdot \mathsf{log1p}\left(\color{blue}{-y}\right) - t \]
                    5. Applied rewrites — 62.2% accuracy

                      \[\leadsto \color{blue}{z \cdot \mathsf{log1p}\left(-y\right)} - t \]
                    6. Taylor expanded in y around 0

                      \[\leadsto z \cdot \left(y \cdot \color{blue}{\left(\frac{-1}{2} \cdot y - 1\right)}\right) - t \]
                    7. Step-by-step derivation
                      1. Applied rewrites — 61.4% accuracy

                        \[\leadsto z \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(y, -0.5, -1\right)}\right) - t \]
                      2. Add Preprocessing

                      Alternative 15: 56.8% accurate, 24.4× speedup?

                      \[\begin{array}{l} \\ -\mathsf{fma}\left(z, y, t\right) \end{array} \]
                      (FPCore (x y z t) :precision binary64 (- (fma z y t)))
                      double code(double x, double y, double z, double t) {
                      	return -fma(z, y, t);
                      }
                      
                      function code(x, y, z, t)
                      	return Float64(-fma(z, y, t))
                      end
                      
                      code[x_, y_, z_, t_] := (-N[(z * y + t), $MachinePrecision])
                      
                      \begin{array}{l}
                      
                      \\
                      -\mathsf{fma}\left(z, y, t\right)
                      \end{array}
                      
                      Derivation
                      1. Initial program 84.1%

                        \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
                      2. Add Preprocessing
                      3. Step-by-step derivation
                        1. lift-log.f64N/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\log \left(1 - y\right)}\right) - t \]
                        2. lift--.f64N/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \log \color{blue}{\left(1 - y\right)}\right) - t \]
                        3. flip3--N/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \log \color{blue}{\left(\frac{{1}^{3} - {y}^{3}}{1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)}\right)}\right) - t \]
                        4. log-divN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\log \left({1}^{3} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)}\right) - t \]
                        5. lower--.f64N/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\log \left({1}^{3} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)}\right) - t \]
                        6. metadata-evalN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{1} - {y}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        7. sub-negN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \color{blue}{\left(1 + \left(\mathsf{neg}\left({y}^{3}\right)\right)\right)} - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        8. cube-negN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(1 + \color{blue}{{\left(\mathsf{neg}\left(y\right)\right)}^{3}}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        9. metadata-evalN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{{1}^{3}} + {\left(\mathsf{neg}\left(y\right)\right)}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        10. metadata-evalN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\log \left(\color{blue}{1} + {\left(\mathsf{neg}\left(y\right)\right)}^{3}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        11. lower-log1p.f64N/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\color{blue}{\mathsf{log1p}\left({\left(\mathsf{neg}\left(y\right)\right)}^{3}\right)} - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        12. cube-negN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left({y}^{3}\right)}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        13. lower-neg.f64N/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left({y}^{3}\right)}\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        14. cube-multN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(\color{blue}{y \cdot \left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        15. lower-*.f64N/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(\color{blue}{y \cdot \left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        16. lower-*.f64N/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \color{blue}{\left(y \cdot y\right)}\right)\right) - \log \left(1 \cdot 1 + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        17. metadata-evalN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \log \left(\color{blue}{1} + \left(y \cdot y + 1 \cdot y\right)\right)\right)\right) - t \]
                        18. lower-log1p.f64N/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \color{blue}{\mathsf{log1p}\left(y \cdot y + 1 \cdot y\right)}\right)\right) - t \]
                        19. *-lft-identityN/A

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(\mathsf{neg}\left(y \cdot \left(y \cdot y\right)\right)\right) - \mathsf{log1p}\left(y \cdot y + \color{blue}{y}\right)\right)\right) - t \]
                        20. lower-fma.f64 — 99.7%

                          \[\leadsto \left(x \cdot \log y + z \cdot \left(\mathsf{log1p}\left(-y \cdot \left(y \cdot y\right)\right) - \mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(y, y, y\right)}\right)\right)\right) - t \]
                      4. Applied rewrites — 99.7% accuracy

                        \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(\mathsf{log1p}\left(-y \cdot \left(y \cdot y\right)\right) - \mathsf{log1p}\left(\mathsf{fma}\left(y, y, y\right)\right)\right)}\right) - t \]
                      5. Taylor expanded in y around 0

                        \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
                      6. Step-by-step derivation
                        1. associate--l+N/A

                          \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right) + \left(x \cdot \log y - t\right)} \]
                        2. mul-1-negN/A

                          \[\leadsto \color{blue}{\left(\mathsf{neg}\left(y \cdot z\right)\right)} + \left(x \cdot \log y - t\right) \]
                        3. *-commutativeN/A

                          \[\leadsto \left(\mathsf{neg}\left(\color{blue}{z \cdot y}\right)\right) + \left(x \cdot \log y - t\right) \]
                        4. distribute-rgt-neg-inN/A

                          \[\leadsto \color{blue}{z \cdot \left(\mathsf{neg}\left(y\right)\right)} + \left(x \cdot \log y - t\right) \]
                        5. lower-fma.f64N/A

                          \[\leadsto \color{blue}{\mathsf{fma}\left(z, \mathsf{neg}\left(y\right), x \cdot \log y - t\right)} \]
                        6. lower-neg.f64N/A

                          \[\leadsto \mathsf{fma}\left(z, \color{blue}{\mathsf{neg}\left(y\right)}, x \cdot \log y - t\right) \]
                        7. sub-negN/A

                          \[\leadsto \mathsf{fma}\left(z, \mathsf{neg}\left(y\right), \color{blue}{x \cdot \log y + \left(\mathsf{neg}\left(t\right)\right)}\right) \]
                        8. lower-fma.f64N/A

                          \[\leadsto \mathsf{fma}\left(z, \mathsf{neg}\left(y\right), \color{blue}{\mathsf{fma}\left(x, \log y, \mathsf{neg}\left(t\right)\right)}\right) \]
                        9. lower-log.f64N/A

                          \[\leadsto \mathsf{fma}\left(z, \mathsf{neg}\left(y\right), \mathsf{fma}\left(x, \color{blue}{\log y}, \mathsf{neg}\left(t\right)\right)\right) \]
                        10. lower-neg.f64 — 98.6%

                          \[\leadsto \mathsf{fma}\left(z, -y, \mathsf{fma}\left(x, \log y, \color{blue}{-t}\right)\right) \]
                      7. Applied rewrites — 98.6% accuracy

                        \[\leadsto \color{blue}{\mathsf{fma}\left(z, -y, \mathsf{fma}\left(x, \log y, -t\right)\right)} \]
                      8. Taylor expanded in x around 0

                        \[\leadsto -1 \cdot \left(y \cdot z\right) - \color{blue}{t} \]
                      9. Step-by-step derivation
                        1. Applied rewrites — 61.1% accuracy

                          \[\leadsto -\mathsf{fma}\left(z, y, t\right) \]
                        2. Add Preprocessing

                        Alternative 16: 42.4% accurate, 73.3× speedup?

                        \[\begin{array}{l} \\ -t \end{array} \]
                        (FPCore (x y z t) :precision binary64 (- t))
                        double code(double x, double y, double z, double t) {
                        	return -t;
                        }
                        
                        real(8) function code(x, y, z, t)
                            real(8), intent (in) :: x
                            real(8), intent (in) :: y
                            real(8), intent (in) :: z
                            real(8), intent (in) :: t
                            code = -t
                        end function
                        
                        public static double code(double x, double y, double z, double t) {
                        	return -t;
                        }
                        
                        def code(x, y, z, t):
                        	return -t
                        
                        function code(x, y, z, t)
                        	return Float64(-t)
                        end
                        
                        function tmp = code(x, y, z, t)
                        	tmp = -t;
                        end
                        
                        code[x_, y_, z_, t_] := (-t)
                        
                        \begin{array}{l}
                        
                        \\
                        -t
                        \end{array}
                        
                        Derivation
                        1. Initial program 84.1%

                          \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
                        2. Add Preprocessing
                        3. Taylor expanded in t around inf

                          \[\leadsto \color{blue}{-1 \cdot t} \]
                        4. Step-by-step derivation
                          1. mul-1-negN/A

                            \[\leadsto \color{blue}{\mathsf{neg}\left(t\right)} \]
                          2. lower-neg.f64 — 45.8%

                            \[\leadsto \color{blue}{-t} \]
                        5. Applied rewrites — 45.8% accuracy

                          \[\leadsto \color{blue}{-t} \]
                        6. Add Preprocessing

                        Alternative 17: 2.3% accurate, 220.0× speedup?

                        \[\begin{array}{l} \\ t \end{array} \]
                        (FPCore (x y z t) :precision binary64 t)
                        double code(double x, double y, double z, double t) {
                        	return t;
                        }
                        
                        real(8) function code(x, y, z, t)
                            real(8), intent (in) :: x
                            real(8), intent (in) :: y
                            real(8), intent (in) :: z
                            real(8), intent (in) :: t
                            code = t
                        end function
                        
                        public static double code(double x, double y, double z, double t) {
                        	return t;
                        }
                        
                        def code(x, y, z, t):
                        	return t
                        
                        function code(x, y, z, t)
                        	return t
                        end
                        
                        function tmp = code(x, y, z, t)
                        	tmp = t;
                        end
                        
                        code[x_, y_, z_, t_] := t
                        
                        \begin{array}{l}
                        
                        \\
                        t
                        \end{array}
                        
                        Derivation
                        1. Initial program 84.1%

                          \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
                        2. Add Preprocessing
                        3. Taylor expanded in t around inf

                          \[\leadsto \color{blue}{-1 \cdot t} \]
                        4. Step-by-step derivation
                          1. mul-1-negN/A

                            \[\leadsto \color{blue}{\mathsf{neg}\left(t\right)} \]
                          2. lower-neg.f64 — 45.8%

                            \[\leadsto \color{blue}{-t} \]
                        5. Applied rewrites — 45.8% accuracy

                          \[\leadsto \color{blue}{-t} \]
                        6. Step-by-step derivation
                          1. Applied rewrites — 14.3% accuracy

                            \[\leadsto \frac{0 - t \cdot \left(t \cdot t\right)}{\color{blue}{0 + \mathsf{fma}\left(t, t, 0 \cdot t\right)}} \]
                          2. Applied rewrites — 2.0% accuracy

                            \[\leadsto \color{blue}{t} \]
                          3. Add Preprocessing

                          Developer Target 1: 99.5% accurate, 1.3× speedup?

                          \[\begin{array}{l} \\ \left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right) \end{array} \]
                          (FPCore (x y z t)
                           :precision binary64
                           (-
                            (*
                             (- z)
                             (+
                              (+ (* 0.5 (* y y)) y)
                              (* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
                            (- t (* x (log y)))))
                          double code(double x, double y, double z, double t) {
                          	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
                          }
                          
                          real(8) function code(x, y, z, t)
                              real(8), intent (in) :: x
                              real(8), intent (in) :: y
                              real(8), intent (in) :: z
                              real(8), intent (in) :: t
                              code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
                          end function
                          
                          public static double code(double x, double y, double z, double t) {
                          	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * Math.log(y)));
                          }
                          
                          def code(x, y, z, t):
                          	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * math.log(y)))
                          
                          function code(x, y, z, t)
                          	return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y))))
                          end
                          
                          function tmp = code(x, y, z, t)
                          	tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
                          end
                          
                          code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
                          
                          \begin{array}{l}
                          
                          \\
                          \left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
                          \end{array}
                          

                          Reproduce

                          ?
                          herbie shell --seed 2024233 
                          (FPCore (x y z t)
                            :name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
                            :precision binary64
                          
                            :alt
                            (! :herbie-platform default (- (* (- z) (+ (+ (* 1/2 (* y y)) y) (* (/ 1/3 (* 1 (* 1 1))) (* y (* y y))))) (- t (* x (log y)))))
                          
                            (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))