Statistics.Distribution.Beta:$cdensity from math-functions-0.1.5.2

Percentage Accurate: 89.4% → 99.4%
Time: 13.0s
Alternatives: 17
Speedup: 1.9×

Specification

?
\[\begin{array}{l} \\ \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
! Beta log-density kernel: (x-1)*log(y) + (z-1)*log(1-y) - t, in real(8).
! Real-valued logs require 0 < y < 1; no guard is applied here.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
// Beta log-density kernel: (x-1)*ln(y) + (z-1)*ln(1-y) - t, in double
// precision.  Math.log returns NaN for negative arguments, so results
// are only meaningful for 0 < y < 1.
public static double code(double x, double y, double z, double t) {
	return (((x - 1.0) * Math.log(y)) + ((z - 1.0) * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	# Beta log-density kernel: (x-1)*log(y) + (z-1)*log(1-y) - t.
	# Same operation order and rounding as the reference rendering;
	# only the source layout differs.  Requires 0 < y < 1.
	ln_y = math.log(y)
	ln_one_minus_y = math.log(1.0 - y)
	return ((x - 1.0) * ln_y) + ((z - 1.0) * ln_one_minus_y) - t
# Beta log-density kernel: (x-1)*log(y) + (z-1)*log(1-y) - t.
# Every intermediate is explicitly wrapped in Float64(...) to pin each
# operation to binary64 rounding; do not simplify the wrapping away.
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t)
end
% Beta log-density kernel: (x-1)*log(y) + (z-1)*log(1-y) - t.
% log() here is the natural logarithm; finite only for 0 < y < 1.
function tmp = code(x, y, z, t)
	tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 17 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 89.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
/* Initial program (repeated listing from the Specification section):
 * (x-1)*log(y) + (z-1)*log(1-y) - t in binary64. */
double code(double x, double y, double z, double t) {
	return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
! Initial program (repeated listing from the Specification section):
! (x-1)*log(y) + (z-1)*log(1-y) - t in real(8).
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
// Initial program (repeated listing from the Specification section):
// (x-1)*ln(y) + (z-1)*ln(1-y) - t in double precision.
public static double code(double x, double y, double z, double t) {
	return (((x - 1.0) * Math.log(y)) + ((z - 1.0) * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	# Initial program (repeated listing): (x-1)*log(y) + (z-1)*log(1-y) - t.
	return (((x - 1.0) * math.log(y)) + ((z - 1.0) * math.log((1.0 - y)))) - t
# Initial program (repeated listing); Float64(...) wrapping pins every
# intermediate to binary64 rounding — do not simplify it away.
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t)
end
% Initial program (repeated listing): (x-1)*log(y) + (z-1)*log(1-y) - t.
function tmp = code(x, y, z, t)
	tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}

Alternative 1: 99.4% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \mathsf{log1p}\left(-y\right)\\ t_2 := \mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot t_1\right)\\ \mathsf{fma}\left(\mathsf{fma}\left(t_1, z - 1, \log y \cdot \left(x - 1\right)\right), t_2 \cdot {t_2}^{-1}, -t\right) \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (log1p (- y))) (t_2 (fma (log y) (- x 1.0) (* (- 1.0 z) t_1))))
   (fma
    (fma t_1 (- z 1.0) (* (log y) (- x 1.0)))
    (* t_2 (pow t_2 -1.0))
    (- t))))
/* Alternative 1 (Herbie-generated): log1p(-y) replaces log(1 - y) to avoid
 * cancellation for small y, and fma fuses the products for extra accuracy.
 * NOTE(review): t_2 * pow(t_2, -1.0) is mathematically 1; Herbie emitted it
 * as part of the rewrite — kept as generated to match the report. */
double code(double x, double y, double z, double t) {
	double t_1 = log1p(-y);
	double t_2 = fma(log(y), (x - 1.0), ((1.0 - z) * t_1));
	return fma(fma(t_1, (z - 1.0), (log(y) * (x - 1.0))), (t_2 * pow(t_2, -1.0)), -t);
}
# Alternative 1 (Herbie-generated): log1p(-y) for accuracy near y ≈ 0,
# fma for fused products.  The factor t_2 * t_2^-1 is mathematically 1
# but is kept as generated to match the report.
function code(x, y, z, t)
	t_1 = log1p(Float64(-y))
	t_2 = fma(log(y), Float64(x - 1.0), Float64(Float64(1.0 - z) * t_1))
	return fma(fma(t_1, Float64(z - 1.0), Float64(log(y) * Float64(x - 1.0))), Float64(t_2 * (t_2 ^ -1.0)), Float64(-t))
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[Log[1 + (-y)], $MachinePrecision]}, Block[{t$95$2 = N[(N[Log[y], $MachinePrecision] * N[(x - 1.0), $MachinePrecision] + N[(N[(1.0 - z), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision]}, N[(N[(t$95$1 * N[(z - 1.0), $MachinePrecision] + N[(N[Log[y], $MachinePrecision] * N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(t$95$2 * N[Power[t$95$2, -1.0], $MachinePrecision]), $MachinePrecision] + (-t)), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \mathsf{log1p}\left(-y\right)\\
t_2 := \mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot t_1\right)\\
\mathsf{fma}\left(\mathsf{fma}\left(t_1, z - 1, \log y \cdot \left(x - 1\right)\right), t_2 \cdot {t_2}^{-1}, -t\right)
\end{array}
\end{array}
Derivation
  1. Initial program 90.7%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Applied rewrites99.7%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{log1p}\left(-y\right), z - 1, \log y \cdot \left(x - 1\right)\right), \mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot \mathsf{log1p}\left(-y\right)\right) \cdot {\left(\mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot \mathsf{log1p}\left(-y\right)\right)\right)}^{-1}, -t\right)} \]
  4. Add Preprocessing

Alternative 2: 85.6% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\\ \mathbf{if}\;t\_1 \leq -5000 \lor \neg \left(t\_1 \leq 660\right):\\ \;\;\;\;\log y \cdot x - t\\ \mathbf{else}:\\ \;\;\;\;\left(y - \log y\right) - t\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y))))))
   (if (or (<= t_1 -5000.0) (not (<= t_1 660.0)))
     (- (* (log y) x) t)
     (- (- y (log y)) t))))
double code(double x, double y, double z, double t) {
	double t_1 = ((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)));
	double tmp;
	if ((t_1 <= -5000.0) || !(t_1 <= 660.0)) {
		tmp = (log(y) * x) - t;
	} else {
		tmp = (y - log(y)) - t;
	}
	return tmp;
}
! Alternative 2 (Herbie-generated): two-regime approximation.  t_1 is the
! exact expression; extreme values select the x*log(y) surrogate, the
! middle regime uses (y - log(y)).  The .not. form of the upper test
! routes a NaN t_1 into the first branch.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: tmp
    t_1 = ((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))
    if ((t_1 <= (-5000.0d0)) .or. (.not. (t_1 <= 660.0d0))) then
        tmp = (log(y) * x) - t
    else
        tmp = (y - log(y)) - t
    end if
    code = tmp
end function
// Alternative 2 (Herbie-generated): two-regime approximation.  t_1 is the
// exact expression; extreme values use the x*log(y) surrogate, the middle
// regime uses (y - log(y)).  The !(t_1 <= 660.0) form routes NaN into the
// first branch.
public static double code(double x, double y, double z, double t) {
	double t_1 = ((x - 1.0) * Math.log(y)) + ((z - 1.0) * Math.log((1.0 - y)));
	double tmp;
	if ((t_1 <= -5000.0) || !(t_1 <= 660.0)) {
		tmp = (Math.log(y) * x) - t;
	} else {
		tmp = (y - Math.log(y)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	# Alternative 2 (Herbie-generated): evaluate the exact expression once
	# as a pivot, then pick a cheaper surrogate by regime.  The original's
	# `not (t_1 <= 660.0)` test routes a NaN pivot into the extreme branch;
	# the chained comparison below preserves that (NaN fails it).
	ln_y = math.log(y)
	pivot = ((x - 1.0) * ln_y) + ((z - 1.0) * math.log(1.0 - y))
	if -5000.0 < pivot <= 660.0:
		return (y - ln_y) - t
	return (ln_y * x) - t
# Alternative 2 (Herbie-generated): two-regime approximation keyed on the
# exact expression t_1; the !(t_1 <= 660.0) form routes NaN into the
# first branch.  Float64(...) wrapping pins each step to binary64.
function code(x, y, z, t)
	t_1 = Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y))))
	tmp = 0.0
	if ((t_1 <= -5000.0) || !(t_1 <= 660.0))
		tmp = Float64(Float64(log(y) * x) - t);
	else
		tmp = Float64(Float64(y - log(y)) - t);
	end
	return tmp
end
% Alternative 2 (Herbie-generated): two-regime approximation keyed on the
% exact expression t_1; the ~(t_1 <= 660.0) form routes NaN into the
% first branch.
function tmp_2 = code(x, y, z, t)
	t_1 = ((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)));
	tmp = 0.0;
	if ((t_1 <= -5000.0) || ~((t_1 <= 660.0)))
		tmp = (log(y) * x) - t;
	else
		tmp = (y - log(y)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$1, -5000.0], N[Not[LessEqual[t$95$1, 660.0]], $MachinePrecision]], N[(N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision] - t), $MachinePrecision], N[(N[(y - N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\\
\mathbf{if}\;t\_1 \leq -5000 \lor \neg \left(t\_1 \leq 660\right):\\
\;\;\;\;\log y \cdot x - t\\

\mathbf{else}:\\
\;\;\;\;\left(y - \log y\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (+.f64 (*.f64 (-.f64 x #s(literal 1 binary64)) (log.f64 y)) (*.f64 (-.f64 z #s(literal 1 binary64)) (log.f64 (-.f64 #s(literal 1 binary64) y)))) < -5e3 or 660 < (+.f64 (*.f64 (-.f64 x #s(literal 1 binary64)) (log.f64 y)) (*.f64 (-.f64 z #s(literal 1 binary64)) (log.f64 (-.f64 #s(literal 1 binary64) y))))

    1. Initial program 93.3%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \log y} - t \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \color{blue}{\log y \cdot x} - t \]
      2. lower-*.f64N/A

        \[\leadsto \color{blue}{\log y \cdot x} - t \]
      3. lower-log.f6491.2

        \[\leadsto \color{blue}{\log y} \cdot x - t \]
    5. Applied rewrites91.2%

      \[\leadsto \color{blue}{\log y \cdot x} - t \]

    if -5e3 < (+.f64 (*.f64 (-.f64 x #s(literal 1 binary64)) (log.f64 y)) (*.f64 (-.f64 z #s(literal 1 binary64)) (log.f64 (-.f64 #s(literal 1 binary64) y)))) < 660

    1. Initial program 87.5%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
      2. associate-*r*N/A

        \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
      3. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
      4. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
      5. neg-sub0N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
      6. sub-negN/A

        \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
      7. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
      8. +-commutativeN/A

        \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
      9. associate--r+N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
      10. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
      11. lower--.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
      12. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
      13. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
      14. lower--.f64N/A

        \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
      15. lower-log.f6499.3

        \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
    5. Applied rewrites99.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
    6. Taylor expanded in z around 0

      \[\leadsto \left(y + \color{blue}{\log y \cdot \left(x - 1\right)}\right) - t \]
    7. Step-by-step derivation
      1. Applied rewrites86.9%

        \[\leadsto \mathsf{fma}\left(\log y, \color{blue}{x - 1}, y\right) - t \]
      2. Taylor expanded in x around 0

        \[\leadsto \left(y + -1 \cdot \color{blue}{\log y}\right) - t \]
      3. Step-by-step derivation
        1. Applied rewrites84.9%

          \[\leadsto \left(y - \log y\right) - t \]
      4. Recombined 2 regimes into one program.
      5. Final simplification88.5%

        \[\leadsto \begin{array}{l} \mathbf{if}\;\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right) \leq -5000 \lor \neg \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right) \leq 660\right):\\ \;\;\;\;\log y \cdot x - t\\ \mathbf{else}:\\ \;\;\;\;\left(y - \log y\right) - t\\ \end{array} \]
      6. Add Preprocessing

      Alternative 3: 99.6% accurate, 1.5× speedup?

      \[\begin{array}{l} \\ \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25, y, -0.3333333333333333\right), y, -0.5\right), y, -1\right) \cdot y\right)\right) - t \end{array} \]
      (FPCore (x y z t)
       :precision binary64
       (-
        (+
         (* (- x 1.0) (log y))
         (*
          (- z 1.0)
          (* (fma (fma (fma -0.25 y -0.3333333333333333) y -0.5) y -1.0) y)))
        t))
      /* Alternative 3 (Herbie-generated): log(1 - y) replaced by its degree-4
       * Maclaurin truncation -(y + y^2/2 + y^3/3 + y^4/4), evaluated in
       * fma/Horner form — accurate only for small |y| (Taylor expansion in y
       * around 0, per the derivation). */
      double code(double x, double y, double z, double t) {
      	return (((x - 1.0) * log(y)) + ((z - 1.0) * (fma(fma(fma(-0.25, y, -0.3333333333333333), y, -0.5), y, -1.0) * y))) - t;
      }
      
      # Alternative 3 (Herbie-generated): log(1 - y) replaced by its degree-4
      # Maclaurin truncation -(y + y^2/2 + y^3/3 + y^4/4) in fma/Horner form;
      # accurate only for small |y|.
      function code(x, y, z, t)
      	return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * Float64(fma(fma(fma(-0.25, y, -0.3333333333333333), y, -0.5), y, -1.0) * y))) - t)
      end
      
      code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[(N[(N[(N[(-0.25 * y + -0.3333333333333333), $MachinePrecision] * y + -0.5), $MachinePrecision] * y + -1.0), $MachinePrecision] * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25, y, -0.3333333333333333\right), y, -0.5\right), y, -1\right) \cdot y\right)\right) - t
      \end{array}
      
      Derivation
      1. Initial program 90.7%

        \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      2. Add Preprocessing
      3. Taylor expanded in y around 0

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) - 1\right)\right)}\right) - t \]
      4. Step-by-step derivation
        1. *-commutativeN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(\left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) - 1\right) \cdot y\right)}\right) - t \]
        2. lower-*.f64N/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(\left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) - 1\right) \cdot y\right)}\right) - t \]
        3. sub-negN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)} \cdot y\right)\right) - t \]
        4. *-commutativeN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\left(\color{blue}{\left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) \cdot y} + \left(\mathsf{neg}\left(1\right)\right)\right) \cdot y\right)\right) - t \]
        5. metadata-evalN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\left(\left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) \cdot y + \color{blue}{-1}\right) \cdot y\right)\right) - t \]
        6. lower-fma.f64N/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\mathsf{fma}\left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}, y, -1\right)} \cdot y\right)\right) - t \]
        7. sub-negN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\color{blue}{y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y, -1\right) \cdot y\right)\right) - t \]
        8. *-commutativeN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) \cdot y} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right), y, -1\right) \cdot y\right)\right) - t \]
        9. metadata-evalN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) \cdot y + \color{blue}{\frac{-1}{2}}, y, -1\right) \cdot y\right)\right) - t \]
        10. lower-fma.f64N/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(\frac{-1}{4} \cdot y - \frac{1}{3}, y, \frac{-1}{2}\right)}, y, -1\right) \cdot y\right)\right) - t \]
        11. sub-negN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{\frac{-1}{4} \cdot y + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right)}, y, \frac{-1}{2}\right), y, -1\right) \cdot y\right)\right) - t \]
        12. metadata-evalN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{-1}{4} \cdot y + \color{blue}{\frac{-1}{3}}, y, \frac{-1}{2}\right), y, -1\right) \cdot y\right)\right) - t \]
        13. lower-fma.f6499.6

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-0.25, y, -0.3333333333333333\right)}, y, -0.5\right), y, -1\right) \cdot y\right)\right) - t \]
      5. Applied rewrites99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25, y, -0.3333333333333333\right), y, -0.5\right), y, -1\right) \cdot y\right)}\right) - t \]
      6. Add Preprocessing

      Alternative 4: 99.6% accurate, 1.6× speedup?

      \[\begin{array}{l} \\ \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right), y, -1\right) \cdot y\right)\right) - t \end{array} \]
      (FPCore (x y z t)
       :precision binary64
       (-
        (+
         (* (- x 1.0) (log y))
         (* (- z 1.0) (* (fma (fma -0.3333333333333333 y -0.5) y -1.0) y)))
        t))
      /* Alternative 4 (Herbie-generated): like Alternative 3 but with the
       * degree-3 Maclaurin truncation -(y + y^2/2 + y^3/3) of log(1 - y),
       * in fma/Horner form — cheaper, accurate only for small |y|. */
      double code(double x, double y, double z, double t) {
      	return (((x - 1.0) * log(y)) + ((z - 1.0) * (fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * y))) - t;
      }
      
      # Alternative 4 (Herbie-generated): degree-3 Maclaurin truncation
      # -(y + y^2/2 + y^3/3) of log(1 - y) in fma/Horner form; accurate
      # only for small |y|.
      function code(x, y, z, t)
      	return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * Float64(fma(fma(-0.3333333333333333, y, -0.5), y, -1.0) * y))) - t)
      end
      
      code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[(N[(N[(-0.3333333333333333 * y + -0.5), $MachinePrecision] * y + -1.0), $MachinePrecision] * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right), y, -1\right) \cdot y\right)\right) - t
      \end{array}
      
      Derivation
      1. Initial program 90.7%

        \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      2. Add Preprocessing
      3. Taylor expanded in y around 0

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)\right)}\right) - t \]
      4. Step-by-step derivation
        1. *-commutativeN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right) \cdot y\right)}\right) - t \]
        2. lower-*.f64N/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right) \cdot y\right)}\right) - t \]
        3. sub-negN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)} \cdot y\right)\right) - t \]
        4. *-commutativeN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\left(\color{blue}{\left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) \cdot y} + \left(\mathsf{neg}\left(1\right)\right)\right) \cdot y\right)\right) - t \]
        5. metadata-evalN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\left(\left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) \cdot y + \color{blue}{-1}\right) \cdot y\right)\right) - t \]
        6. lower-fma.f64N/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\mathsf{fma}\left(\frac{-1}{3} \cdot y - \frac{1}{2}, y, -1\right)} \cdot y\right)\right) - t \]
        7. sub-negN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\frac{-1}{3} \cdot y + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y, -1\right) \cdot y\right)\right) - t \]
        8. metadata-evalN/A

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\frac{-1}{3} \cdot y + \color{blue}{\frac{-1}{2}}, y, -1\right) \cdot y\right)\right) - t \]
        9. lower-fma.f6499.6

          \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right)}, y, -1\right) \cdot y\right)\right) - t \]
      5. Applied rewrites99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, y, -0.5\right), y, -1\right) \cdot y\right)}\right) - t \]
      6. Add Preprocessing

      Alternative 5: 76.9% accurate, 1.7× speedup?

      \[\begin{array}{l} \\ \begin{array}{l} t_1 := \log y \cdot x\\ \mathbf{if}\;x - 1 \leq -1 \cdot 10^{+94}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x - 1 \leq -1.00000002:\\ \;\;\;\;\left(1 - z\right) \cdot y - t\\ \mathbf{elif}\;x - 1 \leq 10^{+21}:\\ \;\;\;\;\left(y - \log y\right) - t\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
      (FPCore (x y z t)
       :precision binary64
       (let* ((t_1 (* (log y) x)))
         (if (<= (- x 1.0) -1e+94)
           t_1
           (if (<= (- x 1.0) -1.00000002)
             (- (* (- 1.0 z) y) t)
             (if (<= (- x 1.0) 1e+21) (- (- y (log y)) t) t_1)))))
      /* Alternative 5 (Herbie-generated): three-regime approximation keyed on
       * x - 1.  Extreme |x| (<= -1e94 or > 1e21) uses x*log(y) (note: no "- t"
       * in that surrogate, per the derivation); the middle regimes use
       * (1-z)*y - t and (y - log(y)) - t respectively. */
      double code(double x, double y, double z, double t) {
      	double t_1 = log(y) * x;
      	double tmp;
      	if ((x - 1.0) <= -1e+94) {
      		tmp = t_1;
      	} else if ((x - 1.0) <= -1.00000002) {
      		tmp = ((1.0 - z) * y) - t;
      	} else if ((x - 1.0) <= 1e+21) {
      		tmp = (y - log(y)) - t;
      	} else {
      		tmp = t_1;
      	}
      	return tmp;
      }
      
      ! Alternative 5 (Herbie-generated): three-regime approximation keyed on
      ! x - 1.  Extreme |x| uses x*log(y) (no "- t" in that surrogate, per the
      ! derivation); middle regimes use (1-z)*y - t and (y - log(y)) - t.
      real(8) function code(x, y, z, t)
          real(8), intent (in) :: x
          real(8), intent (in) :: y
          real(8), intent (in) :: z
          real(8), intent (in) :: t
          real(8) :: t_1
          real(8) :: tmp
          t_1 = log(y) * x
          if ((x - 1.0d0) <= (-1d+94)) then
              tmp = t_1
          else if ((x - 1.0d0) <= (-1.00000002d0)) then
              tmp = ((1.0d0 - z) * y) - t
          else if ((x - 1.0d0) <= 1d+21) then
              tmp = (y - log(y)) - t
          else
              tmp = t_1
          end if
          code = tmp
      end function
      
      // Alternative 5 (Herbie-generated): three-regime approximation keyed on
      // x - 1.  Extreme |x| uses x*log(y) (no "- t" in that surrogate, per the
      // derivation); middle regimes use (1-z)*y - t and (y - log(y)) - t.
      public static double code(double x, double y, double z, double t) {
      	double t_1 = Math.log(y) * x;
      	double tmp;
      	if ((x - 1.0) <= -1e+94) {
      		tmp = t_1;
      	} else if ((x - 1.0) <= -1.00000002) {
      		tmp = ((1.0 - z) * y) - t;
      	} else if ((x - 1.0) <= 1e+21) {
      		tmp = (y - Math.log(y)) - t;
      	} else {
      		tmp = t_1;
      	}
      	return tmp;
      }
      
      def code(x, y, z, t):
      	# Alternative 5 (Herbie-generated): three-regime approximation keyed
      	# on x - 1.  Extreme |x| regimes share the x*log(y) surrogate, which
      	# deliberately omits "- t" (per the derivation); a NaN x falls through
      	# every comparison to that same surrogate, as in the original.
      	shifted = x - 1.0
      	ln_y = math.log(y)
      	if shifted <= -1e+94:
      		return ln_y * x
      	if shifted <= -1.00000002:
      		return ((1.0 - z) * y) - t
      	if shifted <= 1e+21:
      		return (y - ln_y) - t
      	return ln_y * x
      
      # Alternative 5 (Herbie-generated): three-regime approximation keyed on
      # x - 1.  Extreme |x| uses x*log(y) (no "- t" in that surrogate, per the
      # derivation); middle regimes use (1-z)*y - t and (y - log(y)) - t.
      function code(x, y, z, t)
      	t_1 = Float64(log(y) * x)
      	tmp = 0.0
      	if (Float64(x - 1.0) <= -1e+94)
      		tmp = t_1;
      	elseif (Float64(x - 1.0) <= -1.00000002)
      		tmp = Float64(Float64(Float64(1.0 - z) * y) - t);
      	elseif (Float64(x - 1.0) <= 1e+21)
      		tmp = Float64(Float64(y - log(y)) - t);
      	else
      		tmp = t_1;
      	end
      	return tmp
      end
      
      % Alternative 5 (Herbie-generated): three-regime approximation keyed on
      % x - 1.  Extreme |x| uses x*log(y) (no "- t" in that surrogate, per the
      % derivation); middle regimes use (1-z)*y - t and (y - log(y)) - t.
      function tmp_2 = code(x, y, z, t)
      	t_1 = log(y) * x;
      	tmp = 0.0;
      	if ((x - 1.0) <= -1e+94)
      		tmp = t_1;
      	elseif ((x - 1.0) <= -1.00000002)
      		tmp = ((1.0 - z) * y) - t;
      	elseif ((x - 1.0) <= 1e+21)
      		tmp = (y - log(y)) - t;
      	else
      		tmp = t_1;
      	end
      	tmp_2 = tmp;
      end
      
      code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision]}, If[LessEqual[N[(x - 1.0), $MachinePrecision], -1e+94], t$95$1, If[LessEqual[N[(x - 1.0), $MachinePrecision], -1.00000002], N[(N[(N[(1.0 - z), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[N[(x - 1.0), $MachinePrecision], 1e+21], N[(N[(y - N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]]
      
      \begin{array}{l}
      
      \\
      \begin{array}{l}
      t_1 := \log y \cdot x\\
      \mathbf{if}\;x - 1 \leq -1 \cdot 10^{+94}:\\
      \;\;\;\;t\_1\\
      
      \mathbf{elif}\;x - 1 \leq -1.00000002:\\
      \;\;\;\;\left(1 - z\right) \cdot y - t\\
      
      \mathbf{elif}\;x - 1 \leq 10^{+21}:\\
      \;\;\;\;\left(y - \log y\right) - t\\
      
      \mathbf{else}:\\
      \;\;\;\;t\_1\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 3 regimes
      2. if (-.f64 x #s(literal 1 binary64)) < -1e94 or 1e21 < (-.f64 x #s(literal 1 binary64))

        1. Initial program 97.0%

          \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
        2. Add Preprocessing
        3. Applied rewrites99.6%

          \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{log1p}\left(-y\right), z - 1, \log y \cdot \left(x - 1\right)\right), \mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot \mathsf{log1p}\left(-y\right)\right) \cdot {\left(\mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot \mathsf{log1p}\left(-y\right)\right)\right)}^{-1}, -t\right)} \]
        4. Taylor expanded in x around inf

          \[\leadsto \color{blue}{x \cdot \log y} \]
        5. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \color{blue}{\log y \cdot x} \]
          2. lower-*.f64N/A

            \[\leadsto \color{blue}{\log y \cdot x} \]
          3. lower-log.f6486.6

            \[\leadsto \color{blue}{\log y} \cdot x \]
        6. Applied rewrites86.6%

          \[\leadsto \color{blue}{\log y \cdot x} \]

        if -1e94 < (-.f64 x #s(literal 1 binary64)) < -1.0000000200000001

        1. Initial program 74.4%

          \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
        2. Add Preprocessing
        3. Taylor expanded in y around 0

          \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
        4. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
          2. associate-*r*N/A

            \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
          3. lower-fma.f64N/A

            \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
          4. mul-1-negN/A

            \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
          5. neg-sub0N/A

            \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
          6. sub-negN/A

            \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
          7. metadata-evalN/A

            \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
          8. +-commutativeN/A

            \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
          9. associate--r+N/A

            \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
          10. metadata-evalN/A

            \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
          11. lower--.f64N/A

            \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
          12. *-commutativeN/A

            \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
          13. lower-*.f64N/A

            \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
          14. lower--.f64N/A

            \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
          15. lower-log.f6499.8

            \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
        5. Applied rewrites99.8%

          \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
        6. Taylor expanded in y around inf

          \[\leadsto y \cdot \color{blue}{\left(1 - z\right)} - t \]
        7. Step-by-step derivation
          1. Applied rewrites66.8%

            \[\leadsto \left(1 - z\right) \cdot \color{blue}{y} - t \]

          if -1.0000000200000001 < (-.f64 x #s(literal 1 binary64)) < 1e21

          1. Initial program 89.7%

            \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
          2. Add Preprocessing
          3. Taylor expanded in y around 0

            \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
          4. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
            2. associate-*r*N/A

              \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
            3. lower-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
            4. mul-1-negN/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
            5. neg-sub0N/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
            6. sub-negN/A

              \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
            7. metadata-evalN/A

              \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
            8. +-commutativeN/A

              \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
            9. associate--r+N/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
            10. metadata-evalN/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
            11. lower--.f64N/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
            12. *-commutativeN/A

              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
            13. lower-*.f64N/A

              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
            14. lower--.f64N/A

              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
            15. lower-log.f6499.0

              \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
          5. Applied rewrites99.0%

            \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
          6. Taylor expanded in z around 0

            \[\leadsto \left(y + \color{blue}{\log y \cdot \left(x - 1\right)}\right) - t \]
          7. Step-by-step derivation
            1. Applied rewrites88.7%

              \[\leadsto \mathsf{fma}\left(\log y, \color{blue}{x - 1}, y\right) - t \]
            2. Taylor expanded in x around 0

              \[\leadsto \left(y + -1 \cdot \color{blue}{\log y}\right) - t \]
            3. Step-by-step derivation
              1. Applied rewrites85.3%

                \[\leadsto \left(y - \log y\right) - t \]
            4. Recombined 3 regimes into one program.
            5. Final simplification83.5%

              \[\leadsto \begin{array}{l} \mathbf{if}\;x - 1 \leq -1 \cdot 10^{+94}:\\ \;\;\;\;\log y \cdot x\\ \mathbf{elif}\;x - 1 \leq -1.00000002:\\ \;\;\;\;\left(1 - z\right) \cdot y - t\\ \mathbf{elif}\;x - 1 \leq 10^{+21}:\\ \;\;\;\;\left(y - \log y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\log y \cdot x\\ \end{array} \]
            6. Add Preprocessing

            Alternative 6: 76.7% accurate, 1.7× speedup?

            \[\begin{array}{l} \\ \begin{array}{l} t_1 := \log y \cdot x\\ \mathbf{if}\;x - 1 \leq -1 \cdot 10^{+94}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x - 1 \leq -1.00000002:\\ \;\;\;\;\left(1 - z\right) \cdot y - t\\ \mathbf{elif}\;x - 1 \leq 10^{+21}:\\ \;\;\;\;-\left(\log y + t\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
            ; Alternative 6: regime split on x - 1; both extreme-|x| regimes
            ; share the log(y) * x approximation bound to t_1.
            (FPCore (x y z t)
             :precision binary64
             (let* ((t_1 (* (log y) x)))
               (if (<= (- x 1.0) -1e+94)
                 t_1
                 (if (<= (- x 1.0) -1.00000002)
                   (- (* (- 1.0 z) y) t)
                   (if (<= (- x 1.0) 1e+21) (- (+ (log y) t)) t_1)))))
            double code(double x, double y, double z, double t) {
            	double t_1 = log(y) * x;
            	double tmp;
            	if ((x - 1.0) <= -1e+94) {
            		tmp = t_1;
            	} else if ((x - 1.0) <= -1.00000002) {
            		tmp = ((1.0 - z) * y) - t;
            	} else if ((x - 1.0) <= 1e+21) {
            		tmp = -(log(y) + t);
            	} else {
            		tmp = t_1;
            	}
            	return tmp;
            }
            
            real(8) function code(x, y, z, t)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                real(8), intent (in) :: z
                real(8), intent (in) :: t
                real(8) :: extreme
                real(8) :: shifted
                ! Both extreme-|x| regimes reduce to log(y) * x.
                extreme = log(y) * x
                shifted = x - 1.0d0
                if ((shifted <= (-1d+94)) .or. (shifted > 1d+21)) then
                    code = extreme
                else if (shifted <= (-1.00000002d0)) then
                    code = ((1.0d0 - z) * y) - t
                else
                    code = -(log(y) + t)
                end if
            end function
            
            public static double code(double x, double y, double z, double t) {
            	// Regime-split evaluation; both extreme-|x| regimes collapse
            	// to log(y) * x and share one early return.
            	final double extreme = Math.log(y) * x;
            	final double shifted = x - 1.0;
            	if (shifted <= -1e+94 || shifted > 1e+21) {
            		return extreme;
            	}
            	if (shifted <= -1.00000002) {
            		return ((1.0 - z) * y) - t;
            	}
            	return -(Math.log(y) + t);
            }
            
            def code(x, y, z, t):
            	# Regime-split evaluation of the beta log-density core term.
            	# math.log(y) is evaluated unconditionally (as in the original)
            	# so domain errors for y <= 0 are raised on every path.
            	t_1 = math.log(y) * x
            	shifted = x - 1.0
            	if shifted <= -1e+94 or shifted > 1e+21:
            		return t_1
            	if shifted <= -1.00000002:
            		return ((1.0 - z) * y) - t
            	return -(math.log(y) + t)
            
            function code(x, y, z, t)
            	# Regime-split evaluation; both extreme-|x| regimes reduce to
            	# log(y) * x.
            	extreme = Float64(log(y) * x)
            	shifted = Float64(x - 1.0)
            	if (shifted <= -1e+94) || (shifted > 1e+21)
            		return extreme
            	elseif shifted <= -1.00000002
            		return Float64(Float64(Float64(1.0 - z) * y) - t)
            	else
            		return Float64(-Float64(log(y) + t))
            	end
            end
            
            function tmp_2 = code(x, y, z, t)
            	% Regime-split evaluation; both extreme-|x| regimes reduce to
            	% log(y) * x.
            	extreme = log(y) * x;
            	shifted = x - 1.0;
            	if ((shifted <= -1e+94) || (shifted > 1e+21))
            		tmp_2 = extreme;
            	elseif (shifted <= -1.00000002)
            		tmp_2 = ((1.0 - z) * y) - t;
            	else
            		tmp_2 = -(log(y) + t);
            	end
            end
            
            (* Alternative 6: regime split on x - 1; t$95$1 holds log(y) * x for both extreme-|x| regimes. *)
            code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision]}, If[LessEqual[N[(x - 1.0), $MachinePrecision], -1e+94], t$95$1, If[LessEqual[N[(x - 1.0), $MachinePrecision], -1.00000002], N[(N[(N[(1.0 - z), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[N[(x - 1.0), $MachinePrecision], 1e+21], (-N[(N[Log[y], $MachinePrecision] + t), $MachinePrecision]), t$95$1]]]]
            
            \begin{array}{l}
            
            \\
            \begin{array}{l}
            t_1 := \log y \cdot x\\
            \mathbf{if}\;x - 1 \leq -1 \cdot 10^{+94}:\\
            \;\;\;\;t\_1\\
            
            \mathbf{elif}\;x - 1 \leq -1.00000002:\\
            \;\;\;\;\left(1 - z\right) \cdot y - t\\
            
            \mathbf{elif}\;x - 1 \leq 10^{+21}:\\
            \;\;\;\;-\left(\log y + t\right)\\
            
            \mathbf{else}:\\
            \;\;\;\;t\_1\\
            
            
            \end{array}
            \end{array}
            
            Derivation
            1. Split input into 3 regimes
            2. if (-.f64 x #s(literal 1 binary64)) < -1e94 or 1e21 < (-.f64 x #s(literal 1 binary64))

              1. Initial program 97.0%

                \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
              2. Add Preprocessing
              3. Applied rewrites99.6%

                \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{log1p}\left(-y\right), z - 1, \log y \cdot \left(x - 1\right)\right), \mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot \mathsf{log1p}\left(-y\right)\right) \cdot {\left(\mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot \mathsf{log1p}\left(-y\right)\right)\right)}^{-1}, -t\right)} \]
              4. Taylor expanded in x around inf

                \[\leadsto \color{blue}{x \cdot \log y} \]
              5. Step-by-step derivation
                1. *-commutativeN/A

                  \[\leadsto \color{blue}{\log y \cdot x} \]
                2. lower-*.f64N/A

                  \[\leadsto \color{blue}{\log y \cdot x} \]
                3. lower-log.f6486.6

                  \[\leadsto \color{blue}{\log y} \cdot x \]
              6. Applied rewrites86.6%

                \[\leadsto \color{blue}{\log y \cdot x} \]

              if -1e94 < (-.f64 x #s(literal 1 binary64)) < -1.0000000200000001

              1. Initial program 74.4%

                \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
              2. Add Preprocessing
              3. Taylor expanded in y around 0

                \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
              4. Step-by-step derivation
                1. *-commutativeN/A

                  \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                2. associate-*r*N/A

                  \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
                3. lower-fma.f64N/A

                  \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
                4. mul-1-negN/A

                  \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                5. neg-sub0N/A

                  \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                6. sub-negN/A

                  \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                7. metadata-evalN/A

                  \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
                8. +-commutativeN/A

                  \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                9. associate--r+N/A

                  \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                10. metadata-evalN/A

                  \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
                11. lower--.f64N/A

                  \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                12. *-commutativeN/A

                  \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                13. lower-*.f64N/A

                  \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                14. lower--.f64N/A

                  \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
                15. lower-log.f6499.8

                  \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
              5. Applied rewrites99.8%

                \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
              6. Taylor expanded in y around inf

                \[\leadsto y \cdot \color{blue}{\left(1 - z\right)} - t \]
              7. Step-by-step derivation
                1. Applied rewrites66.8%

                  \[\leadsto \left(1 - z\right) \cdot \color{blue}{y} - t \]

                if -1.0000000200000001 < (-.f64 x #s(literal 1 binary64)) < 1e21

                1. Initial program 89.7%

                  \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                2. Add Preprocessing
                3. Taylor expanded in y around 0

                  \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) - t} \]
                4. Step-by-step derivation
                  1. sub-negN/A

                    \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) + \left(\mathsf{neg}\left(t\right)\right)} \]
                  2. *-commutativeN/A

                    \[\leadsto \color{blue}{\left(x - 1\right) \cdot \log y} + \left(\mathsf{neg}\left(t\right)\right) \]
                  3. lower-fma.f64N/A

                    \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \mathsf{neg}\left(t\right)\right)} \]
                  4. lower--.f64N/A

                    \[\leadsto \mathsf{fma}\left(\color{blue}{x - 1}, \log y, \mathsf{neg}\left(t\right)\right) \]
                  5. lower-log.f64N/A

                    \[\leadsto \mathsf{fma}\left(x - 1, \color{blue}{\log y}, \mathsf{neg}\left(t\right)\right) \]
                  6. lower-neg.f6488.4

                    \[\leadsto \mathsf{fma}\left(x - 1, \log y, \color{blue}{-t}\right) \]
                5. Applied rewrites88.4%

                  \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, -t\right)} \]
                6. Taylor expanded in x around 0

                  \[\leadsto -1 \cdot \log y - \color{blue}{t} \]
                7. Step-by-step derivation
                  1. Applied rewrites85.0%

                    \[\leadsto -\left(\log y + t\right) \]
                8. Recombined 3 regimes into one program.
                9. Final simplification83.4%

                  \[\leadsto \begin{array}{l} \mathbf{if}\;x - 1 \leq -1 \cdot 10^{+94}:\\ \;\;\;\;\log y \cdot x\\ \mathbf{elif}\;x - 1 \leq -1.00000002:\\ \;\;\;\;\left(1 - z\right) \cdot y - t\\ \mathbf{elif}\;x - 1 \leq 10^{+21}:\\ \;\;\;\;-\left(\log y + t\right)\\ \mathbf{else}:\\ \;\;\;\;\log y \cdot x\\ \end{array} \]
                10. Add Preprocessing

                Alternative 7: 95.3% accurate, 1.7× speedup?

                \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x - 1 \leq -4 \cdot 10^{+19}:\\ \;\;\;\;\log y \cdot x - t\\ \mathbf{elif}\;x - 1 \leq -1:\\ \;\;\;\;\mathsf{fma}\left(-z, y, -\log y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x - 1, \log y, -t\right)\\ \end{array} \end{array} \]
                ; Alternative 7: three regimes split on x - 1, with fused
                ; multiply-adds in the two non-extreme regimes.
                (FPCore (x y z t)
                 :precision binary64
                 (if (<= (- x 1.0) -4e+19)
                   (- (* (log y) x) t)
                   (if (<= (- x 1.0) -1.0)
                     (- (fma (- z) y (- (log y))) t)
                     (fma (- x 1.0) (log y) (- t)))))
                double code(double x, double y, double z, double t) {
                	double tmp;
                	if ((x - 1.0) <= -4e+19) {
                		tmp = (log(y) * x) - t;
                	} else if ((x - 1.0) <= -1.0) {
                		tmp = fma(-z, y, -log(y)) - t;
                	} else {
                		tmp = fma((x - 1.0), log(y), -t);
                	}
                	return tmp;
                }
                
                function code(x, y, z, t)
                	# Three regimes split on x - 1; fma is used in the two
                	# non-extreme regimes.
                	shifted = Float64(x - 1.0)
                	if shifted <= -4e+19
                		return Float64(Float64(log(y) * x) - t)
                	elseif shifted <= -1.0
                		return Float64(fma(Float64(-z), y, Float64(-log(y))) - t)
                	else
                		return fma(shifted, log(y), Float64(-t))
                	end
                end
                
                (* Alternative 7: three regimes split on x - 1. *)
                code[x_, y_, z_, t_] := If[LessEqual[N[(x - 1.0), $MachinePrecision], -4e+19], N[(N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[N[(x - 1.0), $MachinePrecision], -1.0], N[(N[((-z) * y + (-N[Log[y], $MachinePrecision])), $MachinePrecision] - t), $MachinePrecision], N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision] + (-t)), $MachinePrecision]]]
                
                \begin{array}{l}
                
                \\
                \begin{array}{l}
                \mathbf{if}\;x - 1 \leq -4 \cdot 10^{+19}:\\
                \;\;\;\;\log y \cdot x - t\\
                
                \mathbf{elif}\;x - 1 \leq -1:\\
                \;\;\;\;\mathsf{fma}\left(-z, y, -\log y\right) - t\\
                
                \mathbf{else}:\\
                \;\;\;\;\mathsf{fma}\left(x - 1, \log y, -t\right)\\
                
                
                \end{array}
                \end{array}
                
                Derivation
                1. Split input into 3 regimes
                2. if (-.f64 x #s(literal 1 binary64)) < -4e19

                  1. Initial program 94.5%

                    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                  2. Add Preprocessing
                  3. Taylor expanded in x around inf

                    \[\leadsto \color{blue}{x \cdot \log y} - t \]
                  4. Step-by-step derivation
                    1. *-commutativeN/A

                      \[\leadsto \color{blue}{\log y \cdot x} - t \]
                    2. lower-*.f64N/A

                      \[\leadsto \color{blue}{\log y \cdot x} - t \]
                    3. lower-log.f6494.1

                      \[\leadsto \color{blue}{\log y} \cdot x - t \]
                  5. Applied rewrites94.1%

                    \[\leadsto \color{blue}{\log y \cdot x} - t \]

                  if -4e19 < (-.f64 x #s(literal 1 binary64)) < -1

                  1. Initial program 86.0%

                    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                  2. Add Preprocessing
                  3. Taylor expanded in y around 0

                    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
                  4. Step-by-step derivation
                    1. *-commutativeN/A

                      \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                    2. associate-*r*N/A

                      \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
                    3. lower-fma.f64N/A

                      \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
                    4. mul-1-negN/A

                      \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                    5. neg-sub0N/A

                      \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                    6. sub-negN/A

                      \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                    7. metadata-evalN/A

                      \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
                    8. +-commutativeN/A

                      \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                    9. associate--r+N/A

                      \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                    10. metadata-evalN/A

                      \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
                    11. lower--.f64N/A

                      \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                    12. *-commutativeN/A

                      \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                    13. lower-*.f64N/A

                      \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                    14. lower--.f64N/A

                      \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
                    15. lower-log.f6499.1

                      \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
                  5. Applied rewrites99.1%

                    \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
                  6. Taylor expanded in z around inf

                    \[\leadsto \mathsf{fma}\left(-1 \cdot z, y, \left(x - 1\right) \cdot \log y\right) - t \]
                  7. Step-by-step derivation
                    1. Applied rewrites98.8%

                      \[\leadsto \mathsf{fma}\left(-z, y, \left(x - 1\right) \cdot \log y\right) - t \]
                    2. Taylor expanded in x around 0

                      \[\leadsto \mathsf{fma}\left(-z, y, -1 \cdot \log y\right) - t \]
                    3. Step-by-step derivation
                      1. Applied rewrites96.8%

                        \[\leadsto \mathsf{fma}\left(-z, y, -\log y\right) - t \]

                      if -1 < (-.f64 x #s(literal 1 binary64))

                      1. Initial program 95.7%

                        \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                      2. Add Preprocessing
                      3. Taylor expanded in y around 0

                        \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) - t} \]
                      4. Step-by-step derivation
                        1. sub-negN/A

                          \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) + \left(\mathsf{neg}\left(t\right)\right)} \]
                        2. *-commutativeN/A

                          \[\leadsto \color{blue}{\left(x - 1\right) \cdot \log y} + \left(\mathsf{neg}\left(t\right)\right) \]
                        3. lower-fma.f64N/A

                          \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \mathsf{neg}\left(t\right)\right)} \]
                        4. lower--.f64N/A

                          \[\leadsto \mathsf{fma}\left(\color{blue}{x - 1}, \log y, \mathsf{neg}\left(t\right)\right) \]
                        5. lower-log.f64N/A

                          \[\leadsto \mathsf{fma}\left(x - 1, \color{blue}{\log y}, \mathsf{neg}\left(t\right)\right) \]
                        6. lower-neg.f6495.5

                          \[\leadsto \mathsf{fma}\left(x - 1, \log y, \color{blue}{-t}\right) \]
                      5. Applied rewrites95.5%

                        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, -t\right)} \]
                    4. Recombined 3 regimes into one program.
                    5. Add Preprocessing

                    Alternative 8: 99.5% accurate, 1.7× speedup?

                    \[\begin{array}{l} \\ \mathsf{fma}\left(\left(z - 1\right) \cdot y, \mathsf{fma}\left(-0.5, y, -1\right), \left(x - 1\right) \cdot \log y\right) - t \end{array} \]
                    ; Alternative 8: single-expression form; the quadratic
                    ; Taylor term in y is folded into nested fmas.
                    (FPCore (x y z t)
                     :precision binary64
                     (- (fma (* (- z 1.0) y) (fma -0.5 y -1.0) (* (- x 1.0) (log y))) t))
                    double code(double x, double y, double z, double t) {
                    	return fma(((z - 1.0) * y), fma(-0.5, y, -1.0), ((x - 1.0) * log(y))) - t;
                    }
                    
                    function code(x, y, z, t)
                    	# fma((z-1)*y, fma(-0.5, y, -1), (x-1)*log(y)) - t
                    	scaled = Float64(Float64(z - 1.0) * y)
                    	quad = fma(-0.5, y, -1.0)
                    	logpart = Float64(Float64(x - 1.0) * log(y))
                    	return Float64(fma(scaled, quad, logpart) - t)
                    end
                    
                    (* Alternative 8: single-expression form with the quadratic y term. *)
                    code[x_, y_, z_, t_] := N[(N[(N[(N[(z - 1.0), $MachinePrecision] * y), $MachinePrecision] * N[(-0.5 * y + -1.0), $MachinePrecision] + N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
                    
                    \begin{array}{l}
                    
                    \\
                    \mathsf{fma}\left(\left(z - 1\right) \cdot y, \mathsf{fma}\left(-0.5, y, -1\right), \left(x - 1\right) \cdot \log y\right) - t
                    \end{array}
                    
                    Derivation
                    1. Initial program 90.7%

                      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                    2. Add Preprocessing
                    3. Taylor expanded in y around 0

                      \[\leadsto \color{blue}{\left(y \cdot \left(-1 \cdot \left(z - 1\right) + \frac{-1}{2} \cdot \left(y \cdot \left(z - 1\right)\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
                    4. Step-by-step derivation
                      1. associate-*r*N/A

                        \[\leadsto \left(y \cdot \left(-1 \cdot \left(z - 1\right) + \color{blue}{\left(\frac{-1}{2} \cdot y\right) \cdot \left(z - 1\right)}\right) + \log y \cdot \left(x - 1\right)\right) - t \]
                      2. distribute-rgt-outN/A

                        \[\leadsto \left(y \cdot \color{blue}{\left(\left(z - 1\right) \cdot \left(-1 + \frac{-1}{2} \cdot y\right)\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                      3. +-commutativeN/A

                        \[\leadsto \left(y \cdot \left(\left(z - 1\right) \cdot \color{blue}{\left(\frac{-1}{2} \cdot y + -1\right)}\right) + \log y \cdot \left(x - 1\right)\right) - t \]
                      4. metadata-evalN/A

                        \[\leadsto \left(y \cdot \left(\left(z - 1\right) \cdot \left(\frac{-1}{2} \cdot y + \color{blue}{\left(\mathsf{neg}\left(1\right)\right)}\right)\right) + \log y \cdot \left(x - 1\right)\right) - t \]
                      5. sub-negN/A

                        \[\leadsto \left(y \cdot \left(\left(z - 1\right) \cdot \color{blue}{\left(\frac{-1}{2} \cdot y - 1\right)}\right) + \log y \cdot \left(x - 1\right)\right) - t \]
                      6. associate-*r*N/A

                        \[\leadsto \left(\color{blue}{\left(y \cdot \left(z - 1\right)\right) \cdot \left(\frac{-1}{2} \cdot y - 1\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                      7. lower-fma.f64N/A

                        \[\leadsto \color{blue}{\mathsf{fma}\left(y \cdot \left(z - 1\right), \frac{-1}{2} \cdot y - 1, \log y \cdot \left(x - 1\right)\right)} - t \]
                      8. *-commutativeN/A

                        \[\leadsto \mathsf{fma}\left(\color{blue}{\left(z - 1\right) \cdot y}, \frac{-1}{2} \cdot y - 1, \log y \cdot \left(x - 1\right)\right) - t \]
                      9. lower-*.f64N/A

                        \[\leadsto \mathsf{fma}\left(\color{blue}{\left(z - 1\right) \cdot y}, \frac{-1}{2} \cdot y - 1, \log y \cdot \left(x - 1\right)\right) - t \]
                      10. lower--.f64N/A

                        \[\leadsto \mathsf{fma}\left(\color{blue}{\left(z - 1\right)} \cdot y, \frac{-1}{2} \cdot y - 1, \log y \cdot \left(x - 1\right)\right) - t \]
                      11. sub-negN/A

                        \[\leadsto \mathsf{fma}\left(\left(z - 1\right) \cdot y, \color{blue}{\frac{-1}{2} \cdot y + \left(\mathsf{neg}\left(1\right)\right)}, \log y \cdot \left(x - 1\right)\right) - t \]
                      12. metadata-evalN/A

                        \[\leadsto \mathsf{fma}\left(\left(z - 1\right) \cdot y, \frac{-1}{2} \cdot y + \color{blue}{-1}, \log y \cdot \left(x - 1\right)\right) - t \]
                      13. lower-fma.f64N/A

                        \[\leadsto \mathsf{fma}\left(\left(z - 1\right) \cdot y, \color{blue}{\mathsf{fma}\left(\frac{-1}{2}, y, -1\right)}, \log y \cdot \left(x - 1\right)\right) - t \]
                      14. *-commutativeN/A

                        \[\leadsto \mathsf{fma}\left(\left(z - 1\right) \cdot y, \mathsf{fma}\left(\frac{-1}{2}, y, -1\right), \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                      15. lower-*.f64N/A

                        \[\leadsto \mathsf{fma}\left(\left(z - 1\right) \cdot y, \mathsf{fma}\left(\frac{-1}{2}, y, -1\right), \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                      16. lower--.f64N/A

                        \[\leadsto \mathsf{fma}\left(\left(z - 1\right) \cdot y, \mathsf{fma}\left(\frac{-1}{2}, y, -1\right), \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
                      17. lower-log.f6499.6

                        \[\leadsto \mathsf{fma}\left(\left(z - 1\right) \cdot y, \mathsf{fma}\left(-0.5, y, -1\right), \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
                    5. Applied rewrites99.6%

                      \[\leadsto \color{blue}{\mathsf{fma}\left(\left(z - 1\right) \cdot y, \mathsf{fma}\left(-0.5, y, -1\right), \left(x - 1\right) \cdot \log y\right)} - t \]
                    6. Add Preprocessing

                    Alternative 9: 95.4% accurate, 1.8× speedup?

                    \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -2.2 \cdot 10^{+19}:\\ \;\;\;\;\log y \cdot x - t\\ \mathbf{elif}\;x \leq 5 \cdot 10^{-27}:\\ \;\;\;\;\left(-\mathsf{fma}\left(z - 1, y, \log y\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x - 1, \log y, -t\right)\\ \end{array} \end{array} \]
                    ; FPCore spec of Alternative 9: three-regime rewrite selected by x
                    ; (asymptotic x*log(y), small-x fma form, general fma form).
                    (FPCore (x y z t)
                     :precision binary64
                     (if (<= x -2.2e+19)
                       (- (* (log y) x) t)
                       (if (<= x 5e-27)
                         (- (- (fma (- z 1.0) y (log y))) t)
                         (fma (- x 1.0) (log y) (- t)))))
                    double code(double x, double y, double z, double t) {
                    	double tmp;
                    	if (x <= -2.2e+19) {
                    		tmp = (log(y) * x) - t;
                    	} else if (x <= 5e-27) {
                    		tmp = -fma((z - 1.0), y, log(y)) - t;
                    	} else {
                    		tmp = fma((x - 1.0), log(y), -t);
                    	}
                    	return tmp;
                    }
                    
                    # Three-regime rewrite of ((x-1)*log(y) + (z-1)*log(1-y)) - t,
                    # selecting a series approximation by the magnitude of x.
                    function code(x, y, z, t)
                    	tmp = 0.0
                    	if (x <= -2.2e+19)
                    		# x -> -inf: the x*log(y) term dominates.
                    		tmp = Float64(Float64(log(y) * x) - t);
                    	elseif (x <= 5e-27)
                    		# x ~ 0: drop x from (x-1) and fuse (z-1)*y + log(y).
                    		tmp = Float64(Float64(-fma(Float64(z - 1.0), y, log(y))) - t);
                    	else
                    		# General case: one fused multiply-add.
                    		tmp = fma(Float64(x - 1.0), log(y), Float64(-t));
                    	end
                    	return tmp
                    end
                    
                    (* Mathematica export of Alternative 9: three-regime piecewise rewrite keyed on x. *)
                    code[x_, y_, z_, t_] := If[LessEqual[x, -2.2e+19], N[(N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[x, 5e-27], N[((-N[(N[(z - 1.0), $MachinePrecision] * y + N[Log[y], $MachinePrecision]), $MachinePrecision]) - t), $MachinePrecision], N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision] + (-t)), $MachinePrecision]]]
                    
                    \begin{array}{l}
                    
                    \\
                    \begin{array}{l}
                    \mathbf{if}\;x \leq -2.2 \cdot 10^{+19}:\\
                    \;\;\;\;\log y \cdot x - t\\
                    
                    \mathbf{elif}\;x \leq 5 \cdot 10^{-27}:\\
                    \;\;\;\;\left(-\mathsf{fma}\left(z - 1, y, \log y\right)\right) - t\\
                    
                    \mathbf{else}:\\
                    \;\;\;\;\mathsf{fma}\left(x - 1, \log y, -t\right)\\
                    
                    
                    \end{array}
                    \end{array}
                    
                    Derivation
                    1. Split input into 3 regimes
                    2. if x < -2.2e19

                      1. Initial program 94.5%

                        \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                      2. Add Preprocessing
                      3. Taylor expanded in x around inf

                        \[\leadsto \color{blue}{x \cdot \log y} - t \]
                      4. Step-by-step derivation
                        1. *-commutativeN/A

                          \[\leadsto \color{blue}{\log y \cdot x} - t \]
                        2. lower-*.f64N/A

                          \[\leadsto \color{blue}{\log y \cdot x} - t \]
                        3. lower-log.f6494.1

                          \[\leadsto \color{blue}{\log y} \cdot x - t \]
                      5. Applied rewrites94.1%

                        \[\leadsto \color{blue}{\log y \cdot x} - t \]

                      if -2.2e19 < x < 5.0000000000000002e-27

                      1. Initial program 85.6%

                        \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                      2. Add Preprocessing
                      3. Taylor expanded in x around 0

                        \[\leadsto \color{blue}{\left(-1 \cdot \log y + \log \left(1 - y\right) \cdot \left(z - 1\right)\right)} - t \]
                      4. Step-by-step derivation
                        1. +-commutativeN/A

                          \[\leadsto \color{blue}{\left(\log \left(1 - y\right) \cdot \left(z - 1\right) + -1 \cdot \log y\right)} - t \]
                        2. *-commutativeN/A

                          \[\leadsto \left(\color{blue}{\left(z - 1\right) \cdot \log \left(1 - y\right)} + -1 \cdot \log y\right) - t \]
                        3. lower-fma.f64N/A

                          \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \log \left(1 - y\right), -1 \cdot \log y\right)} - t \]
                        4. lower--.f64N/A

                          \[\leadsto \mathsf{fma}\left(\color{blue}{z - 1}, \log \left(1 - y\right), -1 \cdot \log y\right) - t \]
                        5. sub-negN/A

                          \[\leadsto \mathsf{fma}\left(z - 1, \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)}, -1 \cdot \log y\right) - t \]
                        6. lower-log1p.f64N/A

                          \[\leadsto \mathsf{fma}\left(z - 1, \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)}, -1 \cdot \log y\right) - t \]
                        7. lower-neg.f64N/A

                          \[\leadsto \mathsf{fma}\left(z - 1, \mathsf{log1p}\left(\color{blue}{-y}\right), -1 \cdot \log y\right) - t \]
                        8. mul-1-negN/A

                          \[\leadsto \mathsf{fma}\left(z - 1, \mathsf{log1p}\left(-y\right), \color{blue}{\mathsf{neg}\left(\log y\right)}\right) - t \]
                        9. lower-neg.f64N/A

                          \[\leadsto \mathsf{fma}\left(z - 1, \mathsf{log1p}\left(-y\right), \color{blue}{-\log y}\right) - t \]
                        10. lower-log.f6497.9

                          \[\leadsto \mathsf{fma}\left(z - 1, \mathsf{log1p}\left(-y\right), -\color{blue}{\log y}\right) - t \]
                      5. Applied rewrites97.9%

                        \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \mathsf{log1p}\left(-y\right), -\log y\right)} - t \]
                      6. Taylor expanded in y around 0

                        \[\leadsto \left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) - \color{blue}{\log y}\right) - t \]
                      7. Step-by-step derivation
                        1. Applied rewrites96.9%

                          \[\leadsto \left(-\mathsf{fma}\left(z - 1, y, \log y\right)\right) - t \]

                        if 5.0000000000000002e-27 < x

                        1. Initial program 95.9%

                          \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                        2. Add Preprocessing
                        3. Taylor expanded in y around 0

                          \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) - t} \]
                        4. Step-by-step derivation
                          1. sub-negN/A

                            \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) + \left(\mathsf{neg}\left(t\right)\right)} \]
                          2. *-commutativeN/A

                            \[\leadsto \color{blue}{\left(x - 1\right) \cdot \log y} + \left(\mathsf{neg}\left(t\right)\right) \]
                          3. lower-fma.f64N/A

                            \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \mathsf{neg}\left(t\right)\right)} \]
                          4. lower--.f64N/A

                            \[\leadsto \mathsf{fma}\left(\color{blue}{x - 1}, \log y, \mathsf{neg}\left(t\right)\right) \]
                          5. lower-log.f64N/A

                            \[\leadsto \mathsf{fma}\left(x - 1, \color{blue}{\log y}, \mathsf{neg}\left(t\right)\right) \]
                          6. lower-neg.f6495.8

                            \[\leadsto \mathsf{fma}\left(x - 1, \log y, \color{blue}{-t}\right) \]
                        5. Applied rewrites95.8%

                          \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, -t\right)} \]
                      8. Recombined 3 regimes into one program.
                      9. Add Preprocessing

                      Alternative 10: 67.0% accurate, 1.8× speedup?

                      \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x - 1 \leq -1 \cdot 10^{+94} \lor \neg \left(x - 1 \leq 5 \cdot 10^{+38}\right):\\ \;\;\;\;\log y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\left(1 - z\right) \cdot y - t\\ \end{array} \end{array} \]
                      ; FPCore spec of Alternative 10: dominant-term x*log(y) when |x-1|
                      ; is extreme, otherwise the linearised (1-z)*y - t approximation.
                      (FPCore (x y z t)
                       :precision binary64
                       (if (or (<= (- x 1.0) -1e+94) (not (<= (- x 1.0) 5e+38)))
                         (* (log y) x)
                         (- (* (- 1.0 z) y) t)))
                      double code(double x, double y, double z, double t) {
                      	double tmp;
                      	if (((x - 1.0) <= -1e+94) || !((x - 1.0) <= 5e+38)) {
                      		tmp = log(y) * x;
                      	} else {
                      		tmp = ((1.0 - z) * y) - t;
                      	}
                      	return tmp;
                      }
                      
                      ! Two-regime rewrite of ((x-1)*log(y) + (z-1)*log(1-y)) - t:
                      ! dominant-term log(y)*x for extreme x-1, otherwise (1-z)*y - t.
                      real(8) function code(x, y, z, t)
                          real(8), intent (in) :: x
                          real(8), intent (in) :: y
                          real(8), intent (in) :: z
                          real(8), intent (in) :: t
                          real(8) :: tmp
                          ! .not. (<= 5d+38) rather than (> 5d+38) keeps NaN in the first branch.
                          if (((x - 1.0d0) <= (-1d+94)) .or. (.not. ((x - 1.0d0) <= 5d+38))) then
                              tmp = log(y) * x
                          else
                              tmp = ((1.0d0 - z) * y) - t
                          end if
                          code = tmp
                      end function
                      
                      /** Two-regime rewrite: x*log(y) when |x-1| is extreme, else (1-z)*y - t. */
                      public static double code(double x, double y, double z, double t) {
                      	double shifted = x - 1.0;
                      	// !(… <= 5e+38) rather than (… > 5e+38) routes NaN into this branch.
                      	if (shifted <= -1e+94 || !(shifted <= 5e+38)) {
                      		return Math.log(y) * x;
                      	}
                      	return (1.0 - z) * y - t;
                      }
                      
                      def code(x, y, z, t):
                      	"""Two-regime rewrite: x*log(y) for extreme x-1, else (1-z)*y - t."""
                      	shifted = x - 1.0
                      	# `not (<= 5e38)` (rather than `> 5e38`) keeps NaN in the first regime.
                      	if shifted <= -1e+94 or not (shifted <= 5e+38):
                      		return math.log(y) * x
                      	return (1.0 - z) * y - t
                      
                      # Two-regime rewrite of ((x-1)*log(y) + (z-1)*log(1-y)) - t:
                      # dominant-term log(y)*x for extreme x-1, otherwise (1-z)*y - t.
                      function code(x, y, z, t)
                      	tmp = 0.0
                      	# !( <= 5e38) rather than (> 5e38) routes NaN into the first branch.
                      	if ((Float64(x - 1.0) <= -1e+94) || !(Float64(x - 1.0) <= 5e+38))
                      		tmp = Float64(log(y) * x);
                      	else
                      		tmp = Float64(Float64(Float64(1.0 - z) * y) - t);
                      	end
                      	return tmp
                      end
                      
                      % Two-regime rewrite of ((x-1)*log(y) + (z-1)*log(1-y)) - t:
                      % dominant-term log(y)*x for extreme x-1, otherwise (1-z)*y - t.
                      function tmp_2 = code(x, y, z, t)
                      	tmp = 0.0;
                      	% ~(<= 5e38) rather than (> 5e38) keeps NaN in the first branch.
                      	if (((x - 1.0) <= -1e+94) || ~(((x - 1.0) <= 5e+38)))
                      		tmp = log(y) * x;
                      	else
                      		tmp = ((1.0 - z) * y) - t;
                      	end
                      	tmp_2 = tmp;
                      end
                      
                      (* Mathematica export of Alternative 10: two-regime rewrite keyed on x - 1. *)
                      code[x_, y_, z_, t_] := If[Or[LessEqual[N[(x - 1.0), $MachinePrecision], -1e+94], N[Not[LessEqual[N[(x - 1.0), $MachinePrecision], 5e+38]], $MachinePrecision]], N[(N[Log[y], $MachinePrecision] * x), $MachinePrecision], N[(N[(N[(1.0 - z), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision]]
                      
                      \begin{array}{l}
                      
                      \\
                      \begin{array}{l}
                      \mathbf{if}\;x - 1 \leq -1 \cdot 10^{+94} \lor \neg \left(x - 1 \leq 5 \cdot 10^{+38}\right):\\
                      \;\;\;\;\log y \cdot x\\
                      
                      \mathbf{else}:\\
                      \;\;\;\;\left(1 - z\right) \cdot y - t\\
                      
                      
                      \end{array}
                      \end{array}
                      
                      Derivation
                      1. Split input into 2 regimes
                      2. if (-.f64 x #s(literal 1 binary64)) < -1e94 or 4.9999999999999997e38 < (-.f64 x #s(literal 1 binary64))

                        1. Initial program 97.9%

                          \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                        2. Add Preprocessing
                        3. Applied rewrites99.6%

                          \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{log1p}\left(-y\right), z - 1, \log y \cdot \left(x - 1\right)\right), \mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot \mathsf{log1p}\left(-y\right)\right) \cdot {\left(\mathsf{fma}\left(\log y, x - 1, \left(1 - z\right) \cdot \mathsf{log1p}\left(-y\right)\right)\right)}^{-1}, -t\right)} \]
                        4. Taylor expanded in x around inf

                          \[\leadsto \color{blue}{x \cdot \log y} \]
                        5. Step-by-step derivation
                          1. *-commutativeN/A

                            \[\leadsto \color{blue}{\log y \cdot x} \]
                          2. lower-*.f64N/A

                            \[\leadsto \color{blue}{\log y \cdot x} \]
                          3. lower-log.f6487.4

                            \[\leadsto \color{blue}{\log y} \cdot x \]
                        6. Applied rewrites87.4%

                          \[\leadsto \color{blue}{\log y \cdot x} \]

                        if -1e94 < (-.f64 x #s(literal 1 binary64)) < 4.9999999999999997e38

                        1. Initial program 86.0%

                          \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                        2. Add Preprocessing
                        3. Taylor expanded in y around 0

                          \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
                        4. Step-by-step derivation
                          1. *-commutativeN/A

                            \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                          2. associate-*r*N/A

                            \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
                          3. lower-fma.f64N/A

                            \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
                          4. mul-1-negN/A

                            \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                          5. neg-sub0N/A

                            \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                          6. sub-negN/A

                            \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                          7. metadata-evalN/A

                            \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
                          8. +-commutativeN/A

                            \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                          9. associate--r+N/A

                            \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                          10. metadata-evalN/A

                            \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
                          11. lower--.f64N/A

                            \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                          12. *-commutativeN/A

                            \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                          13. lower-*.f64N/A

                            \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                          14. lower--.f64N/A

                            \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
                          15. lower-log.f6499.2

                            \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
                        5. Applied rewrites99.2%

                          \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
                        6. Taylor expanded in y around inf

                          \[\leadsto y \cdot \color{blue}{\left(1 - z\right)} - t \]
                        7. Step-by-step derivation
                          1. Applied rewrites55.7%

                            \[\leadsto \left(1 - z\right) \cdot \color{blue}{y} - t \]
                        8. Recombined 2 regimes into one program.
                        9. Final simplification68.3%

                          \[\leadsto \begin{array}{l} \mathbf{if}\;x - 1 \leq -1 \cdot 10^{+94} \lor \neg \left(x - 1 \leq 5 \cdot 10^{+38}\right):\\ \;\;\;\;\log y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\left(1 - z\right) \cdot y - t\\ \end{array} \]
                        10. Add Preprocessing

                        Alternative 11: 89.0% accurate, 1.9× speedup?

                        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z - 1 \leq 10^{+207}:\\ \;\;\;\;\mathsf{fma}\left(\log y, x - 1, y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(-y\right) \cdot z - t\\ \end{array} \end{array} \]
                        ; FPCore spec of Alternative 11: fma(log(y), x-1, y) - t for moderate z,
                        ; z*log1p(-y) - t asymptote when z - 1 exceeds 1e207.
                        (FPCore (x y z t)
                         :precision binary64
                         (if (<= (- z 1.0) 1e+207)
                           (- (fma (log y) (- x 1.0) y) t)
                           (- (* (log1p (- y)) z) t)))
                        double code(double x, double y, double z, double t) {
                        	double tmp;
                        	if ((z - 1.0) <= 1e+207) {
                        		tmp = fma(log(y), (x - 1.0), y) - t;
                        	} else {
                        		tmp = (log1p(-y) * z) - t;
                        	}
                        	return tmp;
                        }
                        
                        # Regime split on z for ((x-1)*log(y) + (z-1)*log(1-y)) - t:
                        # fused fma form for moderate z, z*log1p(-y) asymptote for huge z.
                        function code(x, y, z, t)
                        	tmp = 0.0
                        	if (Float64(z - 1.0) <= 1e+207)
                        		# fma fuses log(y)*(x-1) + y into one rounding.
                        		tmp = Float64(fma(log(y), Float64(x - 1.0), y) - t);
                        	else
                        		# log1p(-y) is the accurate form of log(1 - y) near y = 0.
                        		tmp = Float64(Float64(log1p(Float64(-y)) * z) - t);
                        	end
                        	return tmp
                        end
                        
                        (* Mathematica export of Alternative 11: two-regime rewrite keyed on z - 1. *)
                        code[x_, y_, z_, t_] := If[LessEqual[N[(z - 1.0), $MachinePrecision], 1e+207], N[(N[(N[Log[y], $MachinePrecision] * N[(x - 1.0), $MachinePrecision] + y), $MachinePrecision] - t), $MachinePrecision], N[(N[(N[Log[1 + (-y)], $MachinePrecision] * z), $MachinePrecision] - t), $MachinePrecision]]
                        
                        \begin{array}{l}
                        
                        \\
                        \begin{array}{l}
                        \mathbf{if}\;z - 1 \leq 10^{+207}:\\
                        \;\;\;\;\mathsf{fma}\left(\log y, x - 1, y\right) - t\\
                        
                        \mathbf{else}:\\
                        \;\;\;\;\mathsf{log1p}\left(-y\right) \cdot z - t\\
                        
                        
                        \end{array}
                        \end{array}
                        
                        Derivation
                        1. Split input into 2 regimes
                        2. if (-.f64 z #s(literal 1 binary64)) < 1e207

                          1. Initial program 94.9%

                            \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                          2. Add Preprocessing
                          3. Taylor expanded in y around 0

                            \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
                          4. Step-by-step derivation
                            1. *-commutativeN/A

                              \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                            2. associate-*r*N/A

                              \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
                            3. lower-fma.f64N/A

                              \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
                            4. mul-1-negN/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            5. neg-sub0N/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            6. sub-negN/A

                              \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            7. metadata-evalN/A

                              \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
                            8. +-commutativeN/A

                              \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            9. associate--r+N/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            10. metadata-evalN/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            11. lower--.f64N/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            12. *-commutativeN/A

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                            13. lower-*.f64N/A

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                            14. lower--.f64N/A

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
                            15. lower-log.f6499.5

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
                          5. Applied rewrites99.5%

                            \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
                          6. Taylor expanded in z around 0

                            \[\leadsto \left(y + \color{blue}{\log y \cdot \left(x - 1\right)}\right) - t \]
                          7. Step-by-step derivation
                            1. Applied rewrites94.5%

                              \[\leadsto \mathsf{fma}\left(\log y, \color{blue}{x - 1}, y\right) - t \]

                            if 1e207 < (-.f64 z #s(literal 1 binary64))

                            1. Initial program 35.8%

                              \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                            2. Add Preprocessing
                            3. Taylor expanded in z around inf

                              \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
                            4. Step-by-step derivation
                              1. *-commutativeN/A

                                \[\leadsto \color{blue}{\log \left(1 - y\right) \cdot z} - t \]
                              2. lower-*.f64N/A

                                \[\leadsto \color{blue}{\log \left(1 - y\right) \cdot z} - t \]
                              3. sub-negN/A

                                \[\leadsto \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} \cdot z - t \]
                              4. lower-log1p.f64N/A

                                \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} \cdot z - t \]
                              5. lower-neg.f6479.1

                                \[\leadsto \mathsf{log1p}\left(\color{blue}{-y}\right) \cdot z - t \]
                            5. Applied rewrites79.1%

                              \[\leadsto \color{blue}{\mathsf{log1p}\left(-y\right) \cdot z} - t \]
                          8. Recombined 2 regimes into one program.
                          9. Add Preprocessing

                          Alternative 12: 88.9% accurate, 1.9× speedup?

                          \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z - 1 \leq 10^{+207}:\\ \;\;\;\;\mathsf{fma}\left(x - 1, \log y, -t\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(-y\right) \cdot z - t\\ \end{array} \end{array} \]
                          ; FPCore spec of Alternative 12: fma(x-1, log(y), -t) for moderate z,
                          ; z*log1p(-y) - t asymptote when z - 1 exceeds 1e207.
                          (FPCore (x y z t)
                           :precision binary64
                           (if (<= (- z 1.0) 1e+207)
                             (fma (- x 1.0) (log y) (- t))
                             (- (* (log1p (- y)) z) t)))
                          double code(double x, double y, double z, double t) {
                          	double tmp;
                          	if ((z - 1.0) <= 1e+207) {
                          		tmp = fma((x - 1.0), log(y), -t);
                          	} else {
                          		tmp = (log1p(-y) * z) - t;
                          	}
                          	return tmp;
                          }
                          
                          # Regime split on z for ((x-1)*log(y) + (z-1)*log(1-y)) - t:
                          # single fma for moderate z, z*log1p(-y) asymptote for huge z.
                          function code(x, y, z, t)
                          	tmp = 0.0
                          	if (Float64(z - 1.0) <= 1e+207)
                          		# (x-1)*log(y) - t in one fused rounding.
                          		tmp = fma(Float64(x - 1.0), log(y), Float64(-t));
                          	else
                          		# log1p(-y) is the accurate form of log(1 - y) near y = 0.
                          		tmp = Float64(Float64(log1p(Float64(-y)) * z) - t);
                          	end
                          	return tmp
                          end
                          
                          (* Mathematica export of Alternative 12: two-regime rewrite keyed on z - 1. *)
                          code[x_, y_, z_, t_] := If[LessEqual[N[(z - 1.0), $MachinePrecision], 1e+207], N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision] + (-t)), $MachinePrecision], N[(N[(N[Log[1 + (-y)], $MachinePrecision] * z), $MachinePrecision] - t), $MachinePrecision]]
                          
                          \begin{array}{l}
                          
                          \\
                          \begin{array}{l}
                          \mathbf{if}\;z - 1 \leq 10^{+207}:\\
                          \;\;\;\;\mathsf{fma}\left(x - 1, \log y, -t\right)\\
                          
                          \mathbf{else}:\\
                          \;\;\;\;\mathsf{log1p}\left(-y\right) \cdot z - t\\
                          
                          
                          \end{array}
                          \end{array}
                          
                          Derivation
                          1. Split input into 2 regimes
                          2. if (-.f64 z #s(literal 1 binary64)) < 1e207

                            1. Initial program 94.9%

                              \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                            2. Add Preprocessing
                            3. Taylor expanded in y around 0

                              \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) - t} \]
                            4. Step-by-step derivation
                              1. sub-negN/A

                                \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) + \left(\mathsf{neg}\left(t\right)\right)} \]
                              2. *-commutativeN/A

                                \[\leadsto \color{blue}{\left(x - 1\right) \cdot \log y} + \left(\mathsf{neg}\left(t\right)\right) \]
                              3. lower-fma.f64N/A

                                \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \mathsf{neg}\left(t\right)\right)} \]
                              4. lower--.f64N/A

                                \[\leadsto \mathsf{fma}\left(\color{blue}{x - 1}, \log y, \mathsf{neg}\left(t\right)\right) \]
                              5. lower-log.f64N/A

                                \[\leadsto \mathsf{fma}\left(x - 1, \color{blue}{\log y}, \mathsf{neg}\left(t\right)\right) \]
                              6. lower-neg.f6494.4

                                \[\leadsto \mathsf{fma}\left(x - 1, \log y, \color{blue}{-t}\right) \]
                            5. Applied rewrites94.4%

                              \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, -t\right)} \]

                            if 1e207 < (-.f64 z #s(literal 1 binary64))

                            1. Initial program 35.8%

                              \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                            2. Add Preprocessing
                            3. Taylor expanded in z around inf

                              \[\leadsto \color{blue}{z \cdot \log \left(1 - y\right)} - t \]
                            4. Step-by-step derivation
                              1. *-commutativeN/A

                                \[\leadsto \color{blue}{\log \left(1 - y\right) \cdot z} - t \]
                              2. lower-*.f64N/A

                                \[\leadsto \color{blue}{\log \left(1 - y\right) \cdot z} - t \]
                              3. sub-negN/A

                                \[\leadsto \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)} \cdot z - t \]
                              4. lower-log1p.f64N/A

                                \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(y\right)\right)} \cdot z - t \]
                               5. lower-neg.f64 (79.1%)

                                \[\leadsto \mathsf{log1p}\left(\color{blue}{-y}\right) \cdot z - t \]
                             5. Applied rewrites (79.1%)

                              \[\leadsto \color{blue}{\mathsf{log1p}\left(-y\right) \cdot z} - t \]
                          3. Recombined 2 regimes into one program.
                          4. Add Preprocessing

                          Alternative 13: 99.1% accurate, 1.9× speedup?

                          \[\begin{array}{l} \\ \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right) - t \end{array} \]
                          (FPCore (x y z t)
                           :precision binary64
                           (- (fma (- 1.0 z) y (* (- x 1.0) (log y))) t))
                          double code(double x, double y, double z, double t) {
                          	return fma((1.0 - z), y, ((x - 1.0) * log(y))) - t;
                          }
                          
                          function code(x, y, z, t)
                          	return Float64(fma(Float64(1.0 - z), y, Float64(Float64(x - 1.0) * log(y))) - t)
                          end
                          
                           (* fma(1 - z, y, (x - 1)*log(y)) - t, written as an unfused a*b + c in Mathematica. *)
                           code[x_, y_, z_, t_] := N[(N[(N[(1.0 - z), $MachinePrecision] * y + N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
                          
                          \begin{array}{l}
                          
                          \\
                          \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right) - t
                          \end{array}
                          
                          Derivation
                          1. Initial program 90.7%

                            \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                          2. Add Preprocessing
                          3. Taylor expanded in y around 0

                            \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
                          4. Step-by-step derivation
                            1. *-commutativeN/A

                              \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                            2. associate-*r*N/A

                              \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
                            3. lower-fma.f64N/A

                              \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
                            4. mul-1-negN/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            5. neg-sub0N/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            6. sub-negN/A

                              \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            7. metadata-evalN/A

                              \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
                            8. +-commutativeN/A

                              \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            9. associate--r+N/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            10. metadata-evalN/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            11. lower--.f64N/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            12. *-commutativeN/A

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                            13. lower-*.f64N/A

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                            14. lower--.f64N/A

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
                             15. lower-log.f64 (99.4%)

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
                           5. Applied rewrites (99.4%)

                            \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
                          6. Add Preprocessing

                          Alternative 14: 98.9% accurate, 1.9× speedup?

                          \[\begin{array}{l} \\ \mathsf{fma}\left(-z, y, \left(x - 1\right) \cdot \log y\right) - t \end{array} \]
                          (FPCore (x y z t)
                           :precision binary64
                           (- (fma (- z) y (* (- x 1.0) (log y))) t))
                          double code(double x, double y, double z, double t) {
                          	return fma(-z, y, ((x - 1.0) * log(y))) - t;
                          }
                          
                          function code(x, y, z, t)
                          	return Float64(fma(Float64(-z), y, Float64(Float64(x - 1.0) * log(y))) - t)
                          end
                          
                           (* fma(-z, y, (x - 1)*log(y)) - t, written as an unfused a*b + c in Mathematica. *)
                           code[x_, y_, z_, t_] := N[(N[((-z) * y + N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
                          
                          \begin{array}{l}
                          
                          \\
                          \mathsf{fma}\left(-z, y, \left(x - 1\right) \cdot \log y\right) - t
                          \end{array}
                          
                          Derivation
                          1. Initial program 90.7%

                            \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                          2. Add Preprocessing
                          3. Taylor expanded in y around 0

                            \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
                          4. Step-by-step derivation
                            1. *-commutativeN/A

                              \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                            2. associate-*r*N/A

                              \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
                            3. lower-fma.f64N/A

                              \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
                            4. mul-1-negN/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            5. neg-sub0N/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            6. sub-negN/A

                              \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            7. metadata-evalN/A

                              \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
                            8. +-commutativeN/A

                              \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            9. associate--r+N/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            10. metadata-evalN/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            11. lower--.f64N/A

                              \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                            12. *-commutativeN/A

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                            13. lower-*.f64N/A

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                            14. lower--.f64N/A

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
                             15. lower-log.f64 (99.4%)

                              \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
                           5. Applied rewrites (99.4%)

                            \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
                          6. Taylor expanded in z around inf

                            \[\leadsto \mathsf{fma}\left(-1 \cdot z, y, \left(x - 1\right) \cdot \log y\right) - t \]
                          7. Step-by-step derivation
                             1. Applied rewrites (99.3%)

                              \[\leadsto \mathsf{fma}\left(-z, y, \left(x - 1\right) \cdot \log y\right) - t \]
                            2. Add Preprocessing

                            Alternative 15: 46.2% accurate, 18.8× speedup?

                            \[\begin{array}{l} \\ \left(1 - z\right) \cdot y - t \end{array} \]
                            (FPCore (x y z t) :precision binary64 (- (* (- 1.0 z) y) t))
                            double code(double x, double y, double z, double t) {
                            	return ((1.0 - z) * y) - t;
                            }
                            
                             real(8) function code(x, y, z, t)
                                 ! Truncated approximation of the beta log-density kernel:
                                 ! (1 - z) * y - t, with the log terms dropped; x is unused here.
                                 real(8), intent (in) :: x
                                 real(8), intent (in) :: y
                                 real(8), intent (in) :: z
                                 real(8), intent (in) :: t
                                 code = ((1.0d0 - z) * y) - t
                             end function
                            
                            public static double code(double x, double y, double z, double t) {
                            	return ((1.0 - z) * y) - t;
                            }
                            
                            def code(x, y, z, t):
                            	return ((1.0 - z) * y) - t
                            
                            function code(x, y, z, t)
                            	return Float64(Float64(Float64(1.0 - z) * y) - t)
                            end
                            
                             function tmp = code(x, y, z, t)
                             	% Truncated approximation: the log terms are dropped; x is unused here.
                             	tmp = ((1.0 - z) * y) - t;
                             end
                            
                             (* Truncated approximation: (1 - z) * y - t; the log terms are dropped and x is unused. *)
                             code[x_, y_, z_, t_] := N[(N[(N[(1.0 - z), $MachinePrecision] * y), $MachinePrecision] - t), $MachinePrecision]
                            
                            \begin{array}{l}
                            
                            \\
                            \left(1 - z\right) \cdot y - t
                            \end{array}
                            
                            Derivation
                            1. Initial program 90.7%

                              \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                            2. Add Preprocessing
                            3. Taylor expanded in y around 0

                              \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
                            4. Step-by-step derivation
                              1. *-commutativeN/A

                                \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                              2. associate-*r*N/A

                                \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
                              3. lower-fma.f64N/A

                                \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
                              4. mul-1-negN/A

                                \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                              5. neg-sub0N/A

                                \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                              6. sub-negN/A

                                \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                              7. metadata-evalN/A

                                \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
                              8. +-commutativeN/A

                                \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                              9. associate--r+N/A

                                \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                              10. metadata-evalN/A

                                \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
                              11. lower--.f64N/A

                                \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                              12. *-commutativeN/A

                                \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                              13. lower-*.f64N/A

                                \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                              14. lower--.f64N/A

                                \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
                               15. lower-log.f64 (99.4%)

                                \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
                             5. Applied rewrites (99.4%)

                              \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
                            6. Taylor expanded in y around inf

                              \[\leadsto y \cdot \color{blue}{\left(1 - z\right)} - t \]
                            7. Step-by-step derivation
                               1. Applied rewrites (39.3%)

                                \[\leadsto \left(1 - z\right) \cdot \color{blue}{y} - t \]
                              2. Add Preprocessing

                              Alternative 16: 46.0% accurate, 20.5× speedup?

                              \[\begin{array}{l} \\ \left(-y\right) \cdot z - t \end{array} \]
                              (FPCore (x y z t) :precision binary64 (- (* (- y) z) t))
                              double code(double x, double y, double z, double t) {
                              	return (-y * z) - t;
                              }
                              
                               real(8) function code(x, y, z, t)
                                   ! Large-z truncation of the beta log-density kernel:
                                   ! only the -y*z term survives; x is unused here.
                                   real(8), intent (in) :: x
                                   real(8), intent (in) :: y
                                   real(8), intent (in) :: z
                                   real(8), intent (in) :: t
                                   code = (-y * z) - t
                               end function
                              
                              public static double code(double x, double y, double z, double t) {
                              	return (-y * z) - t;
                              }
                              
                              def code(x, y, z, t):
                              	return (-y * z) - t
                              
                              function code(x, y, z, t)
                              	return Float64(Float64(Float64(-y) * z) - t)
                              end
                              
                               function tmp = code(x, y, z, t)
                               	% Large-z truncation: only the -y*z term survives; x is unused here.
                               	tmp = (-y * z) - t;
                               end
                              
                               (* Large-z truncation: (-y) * z - t; x is unused here. *)
                               code[x_, y_, z_, t_] := N[(N[((-y) * z), $MachinePrecision] - t), $MachinePrecision]
                              
                              \begin{array}{l}
                              
                              \\
                              \left(-y\right) \cdot z - t
                              \end{array}
                              
                              Derivation
                              1. Initial program 90.7%

                                \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                              2. Add Preprocessing
                              3. Taylor expanded in y around 0

                                \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
                              4. Step-by-step derivation
                                1. *-commutativeN/A

                                  \[\leadsto \left(-1 \cdot \color{blue}{\left(\left(z - 1\right) \cdot y\right)} + \log y \cdot \left(x - 1\right)\right) - t \]
                                2. associate-*r*N/A

                                  \[\leadsto \left(\color{blue}{\left(-1 \cdot \left(z - 1\right)\right) \cdot y} + \log y \cdot \left(x - 1\right)\right) - t \]
                                3. lower-fma.f64N/A

                                  \[\leadsto \color{blue}{\mathsf{fma}\left(-1 \cdot \left(z - 1\right), y, \log y \cdot \left(x - 1\right)\right)} - t \]
                                4. mul-1-negN/A

                                  \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{neg}\left(\left(z - 1\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                                5. neg-sub0N/A

                                  \[\leadsto \mathsf{fma}\left(\color{blue}{0 - \left(z - 1\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                                6. sub-negN/A

                                  \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(z + \left(\mathsf{neg}\left(1\right)\right)\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                                7. metadata-evalN/A

                                  \[\leadsto \mathsf{fma}\left(0 - \left(z + \color{blue}{-1}\right), y, \log y \cdot \left(x - 1\right)\right) - t \]
                                8. +-commutativeN/A

                                  \[\leadsto \mathsf{fma}\left(0 - \color{blue}{\left(-1 + z\right)}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                                9. associate--r+N/A

                                  \[\leadsto \mathsf{fma}\left(\color{blue}{\left(0 - -1\right) - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                                10. metadata-evalN/A

                                  \[\leadsto \mathsf{fma}\left(\color{blue}{1} - z, y, \log y \cdot \left(x - 1\right)\right) - t \]
                                11. lower--.f64N/A

                                  \[\leadsto \mathsf{fma}\left(\color{blue}{1 - z}, y, \log y \cdot \left(x - 1\right)\right) - t \]
                                12. *-commutativeN/A

                                  \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                                13. lower-*.f64N/A

                                  \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right) \cdot \log y}\right) - t \]
                                14. lower--.f64N/A

                                  \[\leadsto \mathsf{fma}\left(1 - z, y, \color{blue}{\left(x - 1\right)} \cdot \log y\right) - t \]
                                 15. lower-log.f64 (99.4%)

                                  \[\leadsto \mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \color{blue}{\log y}\right) - t \]
                               5. Applied rewrites (99.4%)

                                \[\leadsto \color{blue}{\mathsf{fma}\left(1 - z, y, \left(x - 1\right) \cdot \log y\right)} - t \]
                              6. Taylor expanded in z around inf

                                \[\leadsto -1 \cdot \color{blue}{\left(y \cdot z\right)} - t \]
                              7. Step-by-step derivation
                                 1. Applied rewrites (39.1%)

                                  \[\leadsto \left(-y\right) \cdot \color{blue}{z} - t \]
                                2. Add Preprocessing

                                Alternative 17: 35.8% accurate, 75.3× speedup?

                                \[\begin{array}{l} \\ -t \end{array} \]
                                (FPCore (x y z t) :precision binary64 (- t))
                                double code(double x, double y, double z, double t) {
                                	return -t;
                                }
                                
                                 real(8) function code(x, y, z, t)
                                     ! Large-t truncation of the beta log-density kernel:
                                     ! every other term is dropped; x, y, and z are unused.
                                     real(8), intent (in) :: x
                                     real(8), intent (in) :: y
                                     real(8), intent (in) :: z
                                     real(8), intent (in) :: t
                                     code = -t
                                 end function
                                
                                 public static double code(double x, double y, double z, double t) {
                                 	// Large-t truncation: every other term is dropped; x, y, z are unused.
                                 	return -t;
                                 }
                                
                                 def code(x, y, z, t):
                                 	# Large-t truncation: every other term is dropped; x, y, z are unused.
                                 	return -t
                                
                                 function code(x, y, z, t)
                                 	# Large-t truncation: every other term is dropped; x, y, z are unused.
                                 	return Float64(-t)
                                 end
                                
                                 function tmp = code(x, y, z, t)
                                 	% Large-t truncation: every other term is dropped; x, y, z are unused.
                                 	tmp = -t;
                                 end
                                
                                 (* Large-t truncation: every other term is dropped; x, y, z are unused. *)
                                 code[x_, y_, z_, t_] := (-t)
                                
                                \begin{array}{l}
                                
                                \\
                                -t
                                \end{array}
                                
                                Derivation
                                1. Initial program 90.7%

                                  \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
                                2. Add Preprocessing
                                3. Taylor expanded in t around inf

                                  \[\leadsto \color{blue}{-1 \cdot t} \]
                                4. Step-by-step derivation
                                  1. mul-1-negN/A

                                    \[\leadsto \color{blue}{\mathsf{neg}\left(t\right)} \]
                                   2. lower-neg.f64 (30.4%)

                                    \[\leadsto \color{blue}{-t} \]
                                 5. Applied rewrites (30.4%)

                                  \[\leadsto \color{blue}{-t} \]
                                6. Add Preprocessing

                                Reproduce

                                ?
                                herbie shell --seed 2024325 
                                (FPCore (x y z t)
                                  :name "Statistics.Distribution.Beta:$cdensity from math-functions-0.1.5.2"
                                  :precision binary64
                                  (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))