Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B

Percentage Accurate: 85.4% → 99.8%
Time: 18.9s
Alternatives: 19
Speedup: 1.9×

Specification

?
\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
;; Specification: x*log(y) + z*log(1 - y) - t, evaluated in binary64.
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Computes x*log(y) + z*log(1 - y) - t in double precision.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
// Computes x*log(y) + z*log(1 - y) - t in binary64.
public static double code(double x, double y, double z, double t) {
	return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	"""Specification as written: x*log(y) + z*log(1 - y) - t."""
	first_term = x * math.log(y)
	second_term = z * math.log(1.0 - y)
	return (first_term + second_term) - t
# Computes x*log(y) + z*log(1 - y) - t, rounding every intermediate to Float64.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t)
end
% Computes x*log(y) + z*log(1 - y) - t.
function tmp = code(x, y, z, t)
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
(* Computes x*log(y) + z*log(1 - y) - t, rounding intermediates to $MachinePrecision. *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs Input Value

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line shows an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 19 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 85.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \end{array} \]
;; Initial program: x*log(y) + z*log(1 - y) - t in binary64 (same as the specification).
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
/* Initial program: x*log(y) + z*log(1 - y) - t. */
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Initial program: x*log(y) + z*log(1 - y) - t in double precision.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
// Initial program: x*log(y) + z*log(1 - y) - t in binary64.
public static double code(double x, double y, double z, double t) {
	return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	"""Initial program: x*log(y) + z*log(1 - y) - t."""
	return ((x * math.log(y)) + (z * math.log((1.0 - y)))) - t
# Initial program: x*log(y) + z*log(1 - y) - t, rounding every intermediate to Float64.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t)
end
% Initial program: x*log(y) + z*log(1 - y) - t.
function tmp = code(x, y, z, t)
	tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t;
end
(* Initial program: x*log(y) + z*log(1 - y) - t at $MachinePrecision. *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}

Alternative 1: 99.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \mathsf{log1p}\left(0 - y\right)\right) - t \end{array} \]
;; Alternative 1: log1p(-y) computes log(1 - y) without rounding 1 - y,
;; avoiding cancellation when |y| is small.
(FPCore (x y z t)
 :precision binary64
 (- (+ (* x (log y)) (* z (log1p (- 0.0 y)))) t))
/* Alternative 1: log1p(0.0 - y) evaluates log(1 - y) without explicitly
 * forming 1 - y, avoiding cancellation for small |y|. */
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * log1p((0.0 - y)))) - t;
}
// Alternative 1: Math.log1p(0.0 - y) computes log(1 - y) accurately for small |y|.
public static double code(double x, double y, double z, double t) {
	return ((x * Math.log(y)) + (z * Math.log1p((0.0 - y)))) - t;
}
def code(x, y, z, t):
	"""Alternative 1: math.log1p(0.0 - y) computes log(1 - y) accurately for small |y|."""
	return ((x * math.log(y)) + (z * math.log1p((0.0 - y)))) - t
# Alternative 1: log1p(0.0 - y) computes log(1 - y) accurately for small |y|.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * log1p(Float64(0.0 - y)))) - t)
end
(* Alternative 1. NOTE(review): Mathematica has no log1p here, so this spells it as Log[1 + (0.0 - y)], which does not carry the log1p accuracy benefit. *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[1 + N[(0.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \mathsf{log1p}\left(0 - y\right)\right) - t
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. sub-negN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \log \left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)\right)\right), t\right) \]
    2. accelerator-lowering-log1p.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\left(\mathsf{neg}\left(y\right)\right)\right)\right)\right), t\right) \]
    3. neg-sub0N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\left(0 - y\right)\right)\right)\right), t\right) \]
    4. --lowering--.f6499.8%

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\mathsf{\_.f64}\left(0, y\right)\right)\right)\right), t\right) \]
  4. Applied egg-rr99.8%

    \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\mathsf{log1p}\left(0 - y\right)}\right) - t \]
  5. Add Preprocessing

Alternative 2: 99.6% accurate, 1.5× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), z \cdot -0.5\right), 0 - z\right)\right) - t \end{array} \]
;; Alternative 2: z*log(1 - y) replaced by its degree-4 Taylor polynomial in y
;; about 0 (coefficients -1, -1/2, -1/3, -1/4), evaluated with fused multiply-adds.
(FPCore (x y z t)
 :precision binary64
 (-
  (+
   (* x (log y))
   (*
    y
    (fma
     y
     (fma z (* y (fma y -0.25 -0.3333333333333333)) (* z -0.5))
     (- 0.0 z))))
  t))
/* Alternative 2: replaces z*log(1 - y) by its degree-4 Taylor polynomial in y
 * about 0 (coefficients -1, -1/2, -1/3, -1/4), evaluated with fma. */
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (y * fma(y, fma(z, (y * fma(y, -0.25, -0.3333333333333333)), (z * -0.5)), (0.0 - z)))) - t;
}
# Alternative 2: degree-4 Taylor polynomial of z*log(1 - y) in y about 0, via fma.
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(y * fma(y, fma(z, Float64(y * fma(y, -0.25, -0.3333333333333333)), Float64(z * -0.5)), Float64(0.0 - z)))) - t)
end
(* Alternative 2: degree-4 Taylor polynomial of z*log(1 - y) in y about 0, fma spelled as multiply-add. *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(y * N[(y * N[(z * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision]), $MachinePrecision] + N[(z * -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), z \cdot -0.5\right), 0 - z\right)\right) - t
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \color{blue}{\left(y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + y \cdot \left(\frac{-1}{3} \cdot z + \frac{-1}{4} \cdot \left(y \cdot z\right)\right)\right)\right)\right)}\right), t\right) \]
  4. Step-by-step derivation
    1. *-lowering-*.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(y, \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + y \cdot \left(\frac{-1}{3} \cdot z + \frac{-1}{4} \cdot \left(y \cdot z\right)\right)\right)\right)\right)\right), t\right) \]
    2. +-commutativeN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(y, \left(y \cdot \left(\frac{-1}{2} \cdot z + y \cdot \left(\frac{-1}{3} \cdot z + \frac{-1}{4} \cdot \left(y \cdot z\right)\right)\right) + -1 \cdot z\right)\right)\right), t\right) \]
    3. accelerator-lowering-fma.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \left(\frac{-1}{2} \cdot z + y \cdot \left(\frac{-1}{3} \cdot z + \frac{-1}{4} \cdot \left(y \cdot z\right)\right)\right), \left(-1 \cdot z\right)\right)\right)\right), t\right) \]
  5. Simplified99.7%

    \[\leadsto \left(x \cdot \log y + \color{blue}{y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), z \cdot -0.5\right), 0 - z\right)}\right) - t \]
  6. Add Preprocessing

Alternative 3: 99.6% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right)\right) - t \end{array} \]
;; Alternative 3: same degree-4 Taylor polynomial as Alternative 2, but factored
;; with z outside and the polynomial in y evaluated in Horner form via fma.
(FPCore (x y z t)
 :precision binary64
 (-
  (+
   (* x (log y))
   (* z (* y (fma y (fma y (fma y -0.25 -0.3333333333333333) -0.5) -1.0))))
  t))
/* Alternative 3: z * (degree-4 Horner polynomial of log(1 - y)/1 in y about 0),
 * coefficients -1, -1/2, -1/3, -1/4, evaluated with fma. */
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * (y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0)))) - t;
}
# Alternative 3: z times a degree-4 Horner polynomial in y (Taylor series of log(1 - y)).
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(y * fma(y, fma(y, fma(y, -0.25, -0.3333333333333333), -0.5), -1.0)))) - t)
end
(* Alternative 3: z times a degree-4 Horner polynomial in y (Taylor series of log(1 - y)). *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(y * N[(y * N[(y * N[(y * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right)\right) - t
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \color{blue}{\left(y \cdot \left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) - 1\right)\right)}\right)\right), t\right) \]
  4. Step-by-step derivation
    1. *-lowering-*.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) - 1\right)\right)\right)\right), t\right) \]
    2. sub-negN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)\right)\right)\right), t\right) \]
    3. metadata-evalN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \left(y \cdot \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right) + -1\right)\right)\right)\right), t\right) \]
    4. accelerator-lowering-fma.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) - \frac{1}{2}\right), -1\right)\right)\right)\right), t\right) \]
    5. sub-negN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), -1\right)\right)\right)\right), t\right) \]
    6. metadata-evalN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \left(y \cdot \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right) + \frac{-1}{2}\right), -1\right)\right)\right)\right), t\right) \]
    7. accelerator-lowering-fma.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(y, \left(\frac{-1}{4} \cdot y - \frac{1}{3}\right), \frac{-1}{2}\right), -1\right)\right)\right)\right), t\right) \]
    8. sub-negN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(y, \left(\frac{-1}{4} \cdot y + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right)\right), \frac{-1}{2}\right), -1\right)\right)\right)\right), t\right) \]
    9. *-commutativeN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(y, \left(y \cdot \frac{-1}{4} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right)\right), \frac{-1}{2}\right), -1\right)\right)\right)\right), t\right) \]
    10. metadata-evalN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(y, \left(y \cdot \frac{-1}{4} + \frac{-1}{3}\right), \frac{-1}{2}\right), -1\right)\right)\right)\right), t\right) \]
    11. accelerator-lowering-fma.f6499.7%

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{4}, \frac{-1}{3}\right), \frac{-1}{2}\right), -1\right)\right)\right)\right), t\right) \]
  5. Simplified99.7%

    \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.25, -0.3333333333333333\right), -0.5\right), -1\right)\right)}\right) - t \]
  6. Add Preprocessing

Alternative 4: 99.5% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right) \end{array} \]
;; Alternative 4: degree-3 Taylor expansion of z*log(1 - y) in y about 0,
;; with the whole expression fused into nested fma calls (including the -t term).
(FPCore (x y z t)
 :precision binary64
 (fma
  y
  (fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z))
  (fma x (log y) (- 0.0 t))))
/* Alternative 4: degree-3 Taylor polynomial of z*log(1 - y) in y about 0;
 * the whole expression, including x*log(y) - t, is fused into fma calls. */
double code(double x, double y, double z, double t) {
	return fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), fma(x, log(y), (0.0 - t)));
}
# Alternative 4: degree-3 Taylor polynomial of z*log(1 - y), fully fused with fma.
function code(x, y, z, t)
	return fma(y, fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z)), fma(x, log(y), Float64(0.0 - t)))
end
(* Alternative 4: degree-3 Taylor polynomial of z*log(1 - y), fma spelled as multiply-add. *)
code[x_, y_, z_, t_] := N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
  4. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)} \]
  5. Add Preprocessing

Alternative 5: 99.5% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \left(x \cdot \log y + z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right)\right)\right) - t \end{array} \]
;; Alternative 5: degree-3 Taylor polynomial of log(1 - y) in y about 0
;; (coefficients -1, -1/2, -1/3), evaluated in Horner form via fma.
(FPCore (x y z t)
 :precision binary64
 (-
  (+ (* x (log y)) (* z (* y (fma y (fma y -0.3333333333333333 -0.5) -1.0))))
  t))
/* Alternative 5: z times the degree-3 Horner polynomial of log(1 - y) in y
 * about 0 (coefficients -1, -1/2, -1/3), evaluated with fma. */
double code(double x, double y, double z, double t) {
	return ((x * log(y)) + (z * (y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)))) - t;
}
# Alternative 5: z times a degree-3 Horner polynomial in y (Taylor series of log(1 - y)).
function code(x, y, z, t)
	return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(y * fma(y, fma(y, -0.3333333333333333, -0.5), -1.0)))) - t)
end
(* Alternative 5: z times a degree-3 Horner polynomial in y (Taylor series of log(1 - y)). *)
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(y * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot \log y + z \cdot \left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right)\right)\right) - t
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \color{blue}{\left(y \cdot \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)\right)}\right)\right), t\right) \]
  4. Step-by-step derivation
    1. *-lowering-*.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) - 1\right)\right)\right)\right), t\right) \]
    2. sub-negN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)\right)\right)\right), t\right) \]
    3. metadata-evalN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \left(y \cdot \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right) + -1\right)\right)\right)\right), t\right) \]
    4. accelerator-lowering-fma.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \left(\frac{-1}{3} \cdot y - \frac{1}{2}\right), -1\right)\right)\right)\right), t\right) \]
    5. sub-negN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \left(\frac{-1}{3} \cdot y + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), -1\right)\right)\right)\right), t\right) \]
    6. *-commutativeN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \left(y \cdot \frac{-1}{3} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), -1\right)\right)\right)\right), t\right) \]
    7. metadata-evalN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right), -1\right)\right)\right)\right), t\right) \]
    8. accelerator-lowering-fma.f6499.6%

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right), -1\right)\right)\right)\right), t\right) \]
  5. Simplified99.6%

    \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\left(y \cdot \mathsf{fma}\left(y, \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), -1\right)\right)}\right) - t \]
  6. Add Preprocessing

Alternative 6: 99.4% accurate, 1.7× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(y \cdot z, \mathsf{fma}\left(y, -0.5, -1\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right) \end{array} \]
;; Alternative 6: degree-2 Taylor truncation of z*log(1 - y), i.e.
;; y*z*(-1 - y/2), fully fused into fma calls together with x*log(y) - t.
(FPCore (x y z t)
 :precision binary64
 (fma (* y z) (fma y -0.5 -1.0) (fma x (log y) (- 0.0 t))))
/* Alternative 6: degree-2 Taylor truncation of z*log(1 - y), y*z*(-1 - y/2),
 * fused with the rest of the expression via fma. */
double code(double x, double y, double z, double t) {
	return fma((y * z), fma(y, -0.5, -1.0), fma(x, log(y), (0.0 - t)));
}
# Alternative 6: degree-2 Taylor truncation of z*log(1 - y), fully fused with fma.
function code(x, y, z, t)
	return fma(Float64(y * z), fma(y, -0.5, -1.0), fma(x, log(y), Float64(0.0 - t)))
end
(* Alternative 6: degree-2 Taylor truncation of z*log(1 - y), fma spelled as multiply-add. *)
code[x_, y_, z_, t_] := N[(N[(y * z), $MachinePrecision] * N[(y * -0.5 + -1.0), $MachinePrecision] + N[(x * N[Log[y], $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(y \cdot z, \mathsf{fma}\left(y, -0.5, -1\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right)\right) - t} \]
  4. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \left(y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) + x \cdot \log y\right) - t \]
    2. associate--l+N/A

      \[\leadsto y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) + \color{blue}{\left(x \cdot \log y - t\right)} \]
    3. associate-*r*N/A

      \[\leadsto y \cdot \left(-1 \cdot z + \left(\frac{-1}{2} \cdot y\right) \cdot z\right) + \left(x \cdot \log y - t\right) \]
    4. distribute-rgt-outN/A

      \[\leadsto y \cdot \left(z \cdot \left(-1 + \frac{-1}{2} \cdot y\right)\right) + \left(x \cdot \color{blue}{\log y} - t\right) \]
    5. +-commutativeN/A

      \[\leadsto y \cdot \left(z \cdot \left(\frac{-1}{2} \cdot y + -1\right)\right) + \left(x \cdot \log y - t\right) \]
    6. metadata-evalN/A

      \[\leadsto y \cdot \left(z \cdot \left(\frac{-1}{2} \cdot y + \left(\mathsf{neg}\left(1\right)\right)\right)\right) + \left(x \cdot \log y - t\right) \]
    7. sub-negN/A

      \[\leadsto y \cdot \left(z \cdot \left(\frac{-1}{2} \cdot y - 1\right)\right) + \left(x \cdot \log y - t\right) \]
    8. associate-*r*N/A

      \[\leadsto \left(y \cdot z\right) \cdot \left(\frac{-1}{2} \cdot y - 1\right) + \left(\color{blue}{x \cdot \log y} - t\right) \]
    9. accelerator-lowering-fma.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\left(y \cdot z\right), \color{blue}{\left(\frac{-1}{2} \cdot y - 1\right)}, \left(x \cdot \log y - t\right)\right) \]
    10. *-commutativeN/A

      \[\leadsto \mathsf{fma.f64}\left(\left(z \cdot y\right), \left(\color{blue}{\frac{-1}{2} \cdot y} - 1\right), \left(x \cdot \log y - t\right)\right) \]
    11. *-lowering-*.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(z, y\right), \left(\color{blue}{\frac{-1}{2} \cdot y} - 1\right), \left(x \cdot \log y - t\right)\right) \]
    12. sub-negN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(z, y\right), \left(\frac{-1}{2} \cdot y + \color{blue}{\left(\mathsf{neg}\left(1\right)\right)}\right), \left(x \cdot \log y - t\right)\right) \]
    13. *-commutativeN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(z, y\right), \left(y \cdot \frac{-1}{2} + \left(\mathsf{neg}\left(\color{blue}{1}\right)\right)\right), \left(x \cdot \log y - t\right)\right) \]
    14. metadata-evalN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(z, y\right), \left(y \cdot \frac{-1}{2} + -1\right), \left(x \cdot \log y - t\right)\right) \]
    15. accelerator-lowering-fma.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(z, y\right), \mathsf{fma.f64}\left(y, \color{blue}{\frac{-1}{2}}, -1\right), \left(x \cdot \log y - t\right)\right) \]
    16. sub-negN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(z, y\right), \mathsf{fma.f64}\left(y, \frac{-1}{2}, \color{blue}{-1}\right), \left(x \cdot \log y + \left(\mathsf{neg}\left(t\right)\right)\right)\right) \]
    17. remove-double-negN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(z, y\right), \mathsf{fma.f64}\left(y, \frac{-1}{2}, -1\right), \left(x \cdot \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right) + \left(\mathsf{neg}\left(t\right)\right)\right)\right) \]
    18. mul-1-negN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(z, y\right), \mathsf{fma.f64}\left(y, \frac{-1}{2}, -1\right), \left(x \cdot \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right) + \left(\mathsf{neg}\left(t\right)\right)\right)\right) \]
    19. distribute-rgt-neg-inN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(z, y\right), \mathsf{fma.f64}\left(y, \frac{-1}{2}, -1\right), \left(\left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right) + \left(\mathsf{neg}\left(t\right)\right)\right)\right) \]
  5. Simplified99.4%

    \[\leadsto \color{blue}{\mathsf{fma}\left(z \cdot y, \mathsf{fma}\left(y, -0.5, -1\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)} \]
  6. Final simplification99.4%

    \[\leadsto \mathsf{fma}\left(y \cdot z, \mathsf{fma}\left(y, -0.5, -1\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right) \]
  7. Add Preprocessing

Alternative 7: 86.6% accurate, 1.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y - y \cdot z\\ \mathbf{if}\;z \leq -1.45 \cdot 10^{+218}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;z \leq 1.3 \cdot 10^{+150}:\\ \;\;\;\;\mathsf{fma}\left(x, \log y, 0 - t\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
;; Alternative 7: regime split on z. For |z| very large, t is negligible and
;; log(1 - y) is approximated by -y, giving x*log(y) - y*z; for moderate z the
;; z term is dropped entirely, giving fma(x, log(y), -t).
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* x (log y)) (* y z))))
   (if (<= z -1.45e+218)
     t_1
     (if (<= z 1.3e+150) (fma x (log y) (- 0.0 t)) t_1))))
/* Alternative 7: regime split on z. When |z| is huge (z <= -1.45e218 or
 * z > 1.3e150), t is dropped and log(1 - y) approximated by -y, giving
 * x*log(y) - y*z; otherwise the z term is dropped, giving fma(x, log(y), -t). */
double code(double x, double y, double z, double t) {
	double t_1 = (x * log(y)) - (y * z);
	double tmp;
	if (z <= -1.45e+218) {
		tmp = t_1;
	} else if (z <= 1.3e+150) {
		tmp = fma(x, log(y), (0.0 - t));
	} else {
		tmp = t_1;
	}
	return tmp;
}
# Alternative 7: regime split on z — x*log(y) - y*z for extreme z, otherwise
# fma(x, log(y), -t) with the z term dropped.
function code(x, y, z, t)
	t_1 = Float64(Float64(x * log(y)) - Float64(y * z))
	tmp = 0.0
	if (z <= -1.45e+218)
		tmp = t_1;
	elseif (z <= 1.3e+150)
		tmp = fma(x, log(y), Float64(0.0 - t));
	else
		tmp = t_1;
	end
	return tmp
end
(* Alternative 7: regime split on z — x*log(y) - y*z for extreme z, otherwise x*log(y) - t. *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(y * z), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -1.45e+218], t$95$1, If[LessEqual[z, 1.3e+150], N[(x * N[Log[y], $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y - y \cdot z\\
\mathbf{if}\;z \leq -1.45 \cdot 10^{+218}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;z \leq 1.3 \cdot 10^{+150}:\\
\;\;\;\;\mathsf{fma}\left(x, \log y, 0 - t\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -1.45e218 or 1.30000000000000003e150 < z

    1. Initial program 42.9%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \left(x \cdot \log y + -1 \cdot \left(y \cdot z\right)\right) - t \]
      2. mul-1-negN/A

        \[\leadsto \left(x \cdot \log y + \left(\mathsf{neg}\left(y \cdot z\right)\right)\right) - t \]
      3. unsub-negN/A

        \[\leadsto \left(x \cdot \log y - y \cdot z\right) - t \]
      4. remove-double-negN/A

        \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right) - y \cdot z\right) - t \]
      5. mul-1-negN/A

        \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right) - y \cdot z\right) - t \]
      6. distribute-rgt-neg-inN/A

        \[\leadsto \left(\left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right) - y \cdot z\right) - t \]
      7. neg-mul-1N/A

        \[\leadsto \left(-1 \cdot \left(x \cdot \left(-1 \cdot \log y\right)\right) - y \cdot z\right) - t \]
      8. mul-1-negN/A

        \[\leadsto \left(-1 \cdot \left(x \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right) - y \cdot z\right) - t \]
      9. log-recN/A

        \[\leadsto \left(-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - y \cdot z\right) - t \]
      10. associate--l-N/A

        \[\leadsto -1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \color{blue}{\left(y \cdot z + t\right)} \]
      11. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\left(-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right)\right), \color{blue}{\left(y \cdot z + t\right)}\right) \]
    5. Simplified97.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, 0\right) - \mathsf{fma}\left(z, y, t\right)} \]
    6. Taylor expanded in t around 0

      \[\leadsto \color{blue}{x \cdot \log y - y \cdot z} \]
    7. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\left(x \cdot \log y\right), \color{blue}{\left(y \cdot z\right)}\right) \]
      2. *-lowering-*.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \log y\right), \left(\color{blue}{y} \cdot z\right)\right) \]
      3. log-lowering-log.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \left(y \cdot z\right)\right) \]
      4. *-commutativeN/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \left(z \cdot \color{blue}{y}\right)\right) \]
      5. *-lowering-*.f6488.7%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \color{blue}{y}\right)\right) \]
    8. Simplified88.7%

      \[\leadsto \color{blue}{x \cdot \log y - z \cdot y} \]

    if -1.45e218 < z < 1.30000000000000003e150

    1. Initial program 94.4%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{x \cdot \log y - t} \]
    4. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto x \cdot \log y + \color{blue}{\left(\mathsf{neg}\left(t\right)\right)} \]
      2. remove-double-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      3. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      4. distribute-rgt-neg-inN/A

        \[\leadsto \left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right) + \left(\mathsf{neg}\left(\color{blue}{t}\right)\right) \]
      5. distribute-rgt-neg-inN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right) + \left(\mathsf{neg}\left(\color{blue}{t}\right)\right) \]
      6. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \left(-1 \cdot \log y\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      7. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      8. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \log \left(\frac{1}{y}\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      9. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{fma.f64}\left(x, \color{blue}{\left(-1 \cdot \log \left(\frac{1}{y}\right)\right)}, \left(\mathsf{neg}\left(t\right)\right)\right) \]
      10. log-recN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \left(-1 \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      11. mul-1-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \left(-1 \cdot \left(-1 \cdot \color{blue}{\log y}\right)\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      12. mul-1-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      13. mul-1-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      14. remove-double-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \log y, \left(\mathsf{neg}\left(t\right)\right)\right) \]
      15. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      16. neg-sub0N/A

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \left(0 - t\right)\right) \]
      17. --lowering--.f6493.6%

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    5. Simplified93.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, 0 - t\right)} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      2. neg-lowering-neg.f6493.6%

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \mathsf{neg.f64}\left(t\right)\right) \]
    7. Applied egg-rr93.6%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{-t}\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification92.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -1.45 \cdot 10^{+218}:\\ \;\;\;\;x \cdot \log y - y \cdot z\\ \mathbf{elif}\;z \leq 1.3 \cdot 10^{+150}:\\ \;\;\;\;\mathsf{fma}\left(x, \log y, 0 - t\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y - y \cdot z\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 89.9% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.95 \cdot 10^{-146}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;x \leq 7 \cdot 10^{-101}:\\ \;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x, \log y, 0 - t\right)\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (if (<= x -1.95e-146)
   (- (* x (log y)) t)
   (if (<= x 7e-101)
     (fma y (fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z)) (- 0.0 t))
     (fma x (log y) (- 0.0 t)))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (x <= -1.95e-146) {
		tmp = (x * log(y)) - t;
	} else if (x <= 7e-101) {
		tmp = fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), (0.0 - t));
	} else {
		tmp = fma(x, log(y), (0.0 - t));
	}
	return tmp;
}
# Herbie Alternative 8 for (x*log(y) + z*log(1-y)) - t, split on x:
# large-negative x drops the z*log1p(-y) term, tiny |x| uses a cubic
# Taylor series in y, and the default regime uses an fma of x*log(y) - t.
function code(x, y, z, t)
	if x <= -1.95e-146
		return Float64(Float64(x * log(y)) - t)
	elseif x <= 7e-101
		# Series factor: z*(y*(-y/3 - 1/2)) - z, then scaled by y.
		poly = fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z))
		return fma(y, poly, Float64(0.0 - t))
	end
	return fma(x, log(y), Float64(0.0 - t))
end
code[x_, y_, z_, t_] := If[LessEqual[x, -1.95e-146], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[x, 7e-101], N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision], N[(x * N[Log[y], $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.95 \cdot 10^{-146}:\\
\;\;\;\;x \cdot \log y - t\\

\mathbf{elif}\;x \leq 7 \cdot 10^{-101}:\\
\;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, \log y, 0 - t\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.95000000000000001e-146

    1. Initial program 90.9%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \log \left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)\right)\right), t\right) \]
      2. accelerator-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\left(\mathsf{neg}\left(y\right)\right)\right)\right)\right), t\right) \]
      3. neg-sub0N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\left(0 - y\right)\right)\right)\right), t\right) \]
      4. --lowering--.f6499.8%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\mathsf{\_.f64}\left(0, y\right)\right)\right)\right), t\right) \]
    4. Applied egg-rr99.8%

      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\mathsf{log1p}\left(0 - y\right)}\right) - t \]
    5. Taylor expanded in x around inf

      \[\leadsto \mathsf{\_.f64}\left(\color{blue}{\left(x \cdot \log y\right)}, t\right) \]
    6. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \log y\right), t\right) \]
      2. log-lowering-log.f6489.9%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), t\right) \]
    7. Simplified89.9%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]

    if -1.95000000000000001e-146 < x < 6.99999999999999989e-101

    1. Initial program 68.9%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
    4. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)} \]
    5. Taylor expanded in x around 0

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{\_.f64}\left(0, z\right)\right), \color{blue}{\left(-1 \cdot t\right)}\right) \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      2. neg-sub0N/A

        \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(0 - t\right)\right) \]
      3. --lowering--.f6493.7%

        \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    7. Simplified93.7%

      \[\leadsto \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \color{blue}{0 - t}\right) \]

    if 6.99999999999999989e-101 < x

    1. Initial program 93.5%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{x \cdot \log y - t} \]
    4. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto x \cdot \log y + \color{blue}{\left(\mathsf{neg}\left(t\right)\right)} \]
      2. remove-double-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      3. mul-1-negN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      4. distribute-rgt-neg-inN/A

        \[\leadsto \left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right) + \left(\mathsf{neg}\left(\color{blue}{t}\right)\right) \]
      5. distribute-rgt-neg-inN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right) + \left(\mathsf{neg}\left(\color{blue}{t}\right)\right) \]
      6. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \left(-1 \cdot \log y\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      7. mul-1-negN/A

        \[\leadsto x \cdot \left(-1 \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      8. log-recN/A

        \[\leadsto x \cdot \left(-1 \cdot \log \left(\frac{1}{y}\right)\right) + \left(\mathsf{neg}\left(t\right)\right) \]
      9. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{fma.f64}\left(x, \color{blue}{\left(-1 \cdot \log \left(\frac{1}{y}\right)\right)}, \left(\mathsf{neg}\left(t\right)\right)\right) \]
      10. log-recN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \left(-1 \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      11. mul-1-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \left(-1 \cdot \left(-1 \cdot \color{blue}{\log y}\right)\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      12. mul-1-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      13. mul-1-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      14. remove-double-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \log y, \left(\mathsf{neg}\left(t\right)\right)\right) \]
      15. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      16. neg-sub0N/A

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \left(0 - t\right)\right) \]
      17. --lowering--.f6492.1%

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    5. Simplified92.1%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, 0 - t\right)} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      2. neg-lowering-neg.f6492.1%

        \[\leadsto \mathsf{fma.f64}\left(x, \mathsf{log.f64}\left(y\right), \mathsf{neg.f64}\left(t\right)\right) \]
    7. Applied egg-rr92.1%

      \[\leadsto \mathsf{fma}\left(x, \log y, \color{blue}{-t}\right) \]
  3. Recombined 3 regimes into one program.
  4. Final simplification91.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.95 \cdot 10^{-146}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;x \leq 7 \cdot 10^{-101}:\\ \;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x, \log y, 0 - t\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 9: 89.9% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y - t\\ \mathbf{if}\;x \leq -1.2 \cdot 10^{-146}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 5.4 \cdot 10^{-101}:\\ \;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* x (log y)) t)))
   (if (<= x -1.2e-146)
     t_1
     (if (<= x 5.4e-101)
       (fma
        y
        (fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z))
        (- 0.0 t))
       t_1))))
double code(double x, double y, double z, double t) {
	double t_1 = (x * log(y)) - t;
	double tmp;
	if (x <= -1.2e-146) {
		tmp = t_1;
	} else if (x <= 5.4e-101) {
		tmp = fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), (0.0 - t));
	} else {
		tmp = t_1;
	}
	return tmp;
}
# Herbie Alternative 9 for (x*log(y) + z*log(1-y)) - t. The two outer
# x-regimes share the fallback x*log(y) - t; the narrow middle band uses
# a cubic Taylor series in y. t_1 is evaluated eagerly, exactly as the
# original let-binding does (so log's domain behavior is unchanged).
function code(x, y, z, t)
	t_1 = Float64(Float64(x * log(y)) - t)
	if -1.2e-146 < x <= 5.4e-101
		poly = fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z))
		return fma(y, poly, Float64(0.0 - t))
	end
	return t_1
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -1.2e-146], t$95$1, If[LessEqual[x, 5.4e-101], N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -1.2 \cdot 10^{-146}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 5.4 \cdot 10^{-101}:\\
\;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -1.2000000000000001e-146 or 5.4000000000000003e-101 < x

    1. Initial program 92.1%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \log \left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)\right)\right), t\right) \]
      2. accelerator-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\left(\mathsf{neg}\left(y\right)\right)\right)\right)\right), t\right) \]
      3. neg-sub0N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\left(0 - y\right)\right)\right)\right), t\right) \]
      4. --lowering--.f6499.8%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\mathsf{\_.f64}\left(0, y\right)\right)\right)\right), t\right) \]
    4. Applied egg-rr99.8%

      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\mathsf{log1p}\left(0 - y\right)}\right) - t \]
    5. Taylor expanded in x around inf

      \[\leadsto \mathsf{\_.f64}\left(\color{blue}{\left(x \cdot \log y\right)}, t\right) \]
    6. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \log y\right), t\right) \]
      2. log-lowering-log.f6490.9%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), t\right) \]
    7. Simplified90.9%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]

    if -1.2000000000000001e-146 < x < 5.4000000000000003e-101

    1. Initial program 68.9%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
    4. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)} \]
    5. Taylor expanded in x around 0

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{\_.f64}\left(0, z\right)\right), \color{blue}{\left(-1 \cdot t\right)}\right) \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      2. neg-sub0N/A

        \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(0 - t\right)\right) \]
      3. --lowering--.f6493.7%

        \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    7. Simplified93.7%

      \[\leadsto \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \color{blue}{0 - t}\right) \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 10: 78.8% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y\\ \mathbf{if}\;x \leq -3.9 \cdot 10^{+49}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 5 \cdot 10^{+53}:\\ \;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (* x (log y))))
   (if (<= x -3.9e+49)
     t_1
     (if (<= x 5e+53)
       (fma
        y
        (fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z))
        (- 0.0 t))
       t_1))))
double code(double x, double y, double z, double t) {
	double t_1 = x * log(y);
	double tmp;
	if (x <= -3.9e+49) {
		tmp = t_1;
	} else if (x <= 5e+53) {
		tmp = fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), (0.0 - t));
	} else {
		tmp = t_1;
	}
	return tmp;
}
# Herbie Alternative 10 for (x*log(y) + z*log(1-y)) - t. For huge |x| the
# result collapses to x*log(y); within -3.9e49 < x <= 5e53 a cubic Taylor
# series in y is used. t_1 is evaluated eagerly, matching the original
# let-binding (so log's domain behavior is unchanged).
function code(x, y, z, t)
	t_1 = Float64(x * log(y))
	if -3.9e+49 < x <= 5e+53
		poly = fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z))
		return fma(y, poly, Float64(0.0 - t))
	end
	return t_1
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -3.9e+49], t$95$1, If[LessEqual[x, 5e+53], N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y\\
\mathbf{if}\;x \leq -3.9 \cdot 10^{+49}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 5 \cdot 10^{+53}:\\
\;\;\;\;\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -3.9000000000000001e49 or 5.0000000000000004e53 < x

    1. Initial program 96.7%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \log \left(1 + \left(\mathsf{neg}\left(y\right)\right)\right)\right)\right), t\right) \]
      2. accelerator-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\left(\mathsf{neg}\left(y\right)\right)\right)\right)\right), t\right) \]
      3. neg-sub0N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\left(0 - y\right)\right)\right)\right), t\right) \]
      4. --lowering--.f6499.8%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{+.f64}\left(\mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right), \mathsf{*.f64}\left(z, \mathsf{log1p.f64}\left(\mathsf{\_.f64}\left(0, y\right)\right)\right)\right), t\right) \]
    4. Applied egg-rr99.8%

      \[\leadsto \left(x \cdot \log y + z \cdot \color{blue}{\mathsf{log1p}\left(0 - y\right)}\right) - t \]
    5. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \log y} \]
    6. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\log y}\right) \]
      2. log-lowering-log.f6482.3%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{log.f64}\left(y\right)\right) \]
    7. Simplified82.3%

      \[\leadsto \color{blue}{x \cdot \log y} \]

    if -3.9000000000000001e49 < x < 5.0000000000000004e53

    1. Initial program 76.3%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
    4. Simplified99.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)} \]
    5. Taylor expanded in x around 0

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{\_.f64}\left(0, z\right)\right), \color{blue}{\left(-1 \cdot t\right)}\right) \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
      2. neg-sub0N/A

        \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(0 - t\right)\right) \]
      3. --lowering--.f6483.7%

        \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    7. Simplified83.7%

      \[\leadsto \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \color{blue}{0 - t}\right) \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 11: 99.0% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(x, \log y, 0\right) - \mathsf{fma}\left(z, y, t\right) \end{array} \]
(FPCore (x y z t) :precision binary64 (- (fma x (log y) 0.0) (fma z y t)))
double code(double x, double y, double z, double t) {
	return fma(x, log(y), 0.0) - fma(z, y, t);
}
# Herbie Alternative 11 for (x*log(y) + z*log(1-y)) - t: log(1-y) is
# approximated by -y and each half is fused with fma before subtracting.
function code(x, y, z, t)
	log_term = fma(x, log(y), 0.0)
	linear_term = fma(z, y, t)
	return Float64(log_term - linear_term)
end
code[x_, y_, z_, t_] := N[(N[(x * N[Log[y], $MachinePrecision] + 0.0), $MachinePrecision] - N[(z * y + t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(x, \log y, 0\right) - \mathsf{fma}\left(z, y, t\right)
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
  4. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \left(x \cdot \log y + -1 \cdot \left(y \cdot z\right)\right) - t \]
    2. mul-1-negN/A

      \[\leadsto \left(x \cdot \log y + \left(\mathsf{neg}\left(y \cdot z\right)\right)\right) - t \]
    3. unsub-negN/A

      \[\leadsto \left(x \cdot \log y - y \cdot z\right) - t \]
    4. remove-double-negN/A

      \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right) - y \cdot z\right) - t \]
    5. mul-1-negN/A

      \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right) - y \cdot z\right) - t \]
    6. distribute-rgt-neg-inN/A

      \[\leadsto \left(\left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right) - y \cdot z\right) - t \]
    7. neg-mul-1N/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \left(-1 \cdot \log y\right)\right) - y \cdot z\right) - t \]
    8. mul-1-negN/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right) - y \cdot z\right) - t \]
    9. log-recN/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - y \cdot z\right) - t \]
    10. associate--l-N/A

      \[\leadsto -1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \color{blue}{\left(y \cdot z + t\right)} \]
    11. --lowering--.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\left(-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right)\right), \color{blue}{\left(y \cdot z + t\right)}\right) \]
  5. Simplified99.0%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, 0\right) - \mathsf{fma}\left(z, y, t\right)} \]
  6. Add Preprocessing

Alternative 12: 57.1% accurate, 6.9× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), y \cdot z, 0 - \mathsf{fma}\left(y, z, t\right)\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (fma (* y (fma y -0.3333333333333333 -0.5)) (* y z) (- 0.0 (fma y z t))))
double code(double x, double y, double z, double t) {
	return fma((y * fma(y, -0.3333333333333333, -0.5)), (y * z), (0.0 - fma(y, z, t)));
}
# Herbie alternative 12: degree-3 Taylor polynomial in y; x is unused.
# The Float64(...) wrappers pin each intermediate to binary64 rounding,
# so the expression must stay exactly as generated.
function code(x, y, z, t)
	return fma(Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(y * z), Float64(0.0 - fma(y, z, t)))
end
(* Herbie alternative 12: Taylor polynomial in y; x is unused. N[..., $MachinePrecision] models binary64 rounding at each step. *)
code[x_, y_, z_, t_] := N[(N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] * N[(y * z), $MachinePrecision] + N[(0.0 - N[(y * z + t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), y \cdot z, 0 - \mathsf{fma}\left(y, z, t\right)\right)
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
  4. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)} \]
  5. Taylor expanded in x around 0

    \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{\_.f64}\left(0, z\right)\right), \color{blue}{\left(-1 \cdot t\right)}\right) \]
  6. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
    2. neg-sub0N/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(0 - t\right)\right) \]
    3. --lowering--.f6459.0%

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{-.f64}\left(0, z\right)}\right), \mathsf{-.f64}\left(0, t\right)\right) \]
  7. Simplified59.0%

    \[\leadsto \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \color{blue}{0 - t}\right) \]
  8. Step-by-step derivation
    1. distribute-rgt-inN/A

      \[\leadsto \left(\left(z \cdot \left(y \cdot \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right)\right) \cdot y + \left(0 - z\right) \cdot y\right) + \left(\color{blue}{0} - t\right) \]
    2. sub0-negN/A

      \[\leadsto \left(\left(z \cdot \left(y \cdot \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right)\right) \cdot y + \left(\mathsf{neg}\left(z\right)\right) \cdot y\right) + \left(0 - t\right) \]
    3. distribute-lft-neg-inN/A

      \[\leadsto \left(\left(z \cdot \left(y \cdot \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right)\right) \cdot y + \left(\mathsf{neg}\left(z \cdot y\right)\right)\right) + \left(0 - t\right) \]
    4. associate-+l+N/A

      \[\leadsto \left(z \cdot \left(y \cdot \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right)\right) \cdot y + \color{blue}{\left(\left(\mathsf{neg}\left(z \cdot y\right)\right) + \left(0 - t\right)\right)} \]
    5. *-commutativeN/A

      \[\leadsto \left(\left(y \cdot \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right) \cdot z\right) \cdot y + \left(\left(\mathsf{neg}\left(\color{blue}{z \cdot y}\right)\right) + \left(0 - t\right)\right) \]
    6. associate-*l*N/A

      \[\leadsto \left(y \cdot \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right) \cdot \left(z \cdot y\right) + \left(\color{blue}{\left(\mathsf{neg}\left(z \cdot y\right)\right)} + \left(0 - t\right)\right) \]
    7. neg-sub0N/A

      \[\leadsto \left(y \cdot \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right) \cdot \left(z \cdot y\right) + \left(\left(\mathsf{neg}\left(z \cdot y\right)\right) + \left(\mathsf{neg}\left(t\right)\right)\right) \]
    8. distribute-neg-inN/A

      \[\leadsto \left(y \cdot \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right) \cdot \left(z \cdot y\right) + \left(\mathsf{neg}\left(\left(z \cdot y + t\right)\right)\right) \]
    9. accelerator-lowering-fma.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\left(y \cdot \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right), \color{blue}{\left(z \cdot y\right)}, \left(\mathsf{neg}\left(\left(z \cdot y + t\right)\right)\right)\right) \]
    10. *-lowering-*.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, \left(y \cdot \frac{-1}{3} + \frac{-1}{2}\right)\right), \left(\color{blue}{z} \cdot y\right), \left(\mathsf{neg}\left(\left(z \cdot y + t\right)\right)\right)\right) \]
    11. accelerator-lowering-fma.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \left(z \cdot y\right), \left(\mathsf{neg}\left(\left(z \cdot y + t\right)\right)\right)\right) \]
    12. *-commutativeN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \left(y \cdot \color{blue}{z}\right), \left(\mathsf{neg}\left(\left(z \cdot y + t\right)\right)\right)\right) \]
    13. *-lowering-*.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{*.f64}\left(y, \color{blue}{z}\right), \left(\mathsf{neg}\left(\left(z \cdot y + t\right)\right)\right)\right) \]
    14. neg-sub0N/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{*.f64}\left(y, z\right), \left(0 - \left(z \cdot y + t\right)\right)\right) \]
    15. --lowering--.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{*.f64}\left(y, z\right), \mathsf{\_.f64}\left(0, \left(z \cdot y + t\right)\right)\right) \]
    16. *-commutativeN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{*.f64}\left(y, z\right), \mathsf{\_.f64}\left(0, \left(y \cdot z + t\right)\right)\right) \]
    17. accelerator-lowering-fma.f6459.0%

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{*.f64}\left(y, z\right), \mathsf{\_.f64}\left(0, \mathsf{fma.f64}\left(y, z, t\right)\right)\right) \]
  9. Applied egg-rr59.0%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), y \cdot z, 0 - \mathsf{fma}\left(y, z, t\right)\right)} \]
  10. Add Preprocessing

Alternative 13: 57.1% accurate, 7.3× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (fma y (fma z (* y (fma y -0.3333333333333333 -0.5)) (- 0.0 z)) (- 0.0 t)))
double code(double x, double y, double z, double t) {
	return fma(y, fma(z, (y * fma(y, -0.3333333333333333, -0.5)), (0.0 - z)), (0.0 - t));
}
# Herbie alternative 13: nested Horner form of the y-series; x is unused.
# Float64(...) wrappers pin intermediates to binary64 rounding — keep as generated.
function code(x, y, z, t)
	return fma(y, fma(z, Float64(y * fma(y, -0.3333333333333333, -0.5)), Float64(0.0 - z)), Float64(0.0 - t))
end
(* Herbie alternative 13: nested Horner form of the y-series; x is unused. *)
code[x_, y_, z_, t_] := N[(y * N[(z * N[(y * N[(y * -0.3333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(0.0 - z), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), 0 - t\right)
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
  4. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)} \]
  5. Taylor expanded in x around 0

    \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{\_.f64}\left(0, z\right)\right), \color{blue}{\left(-1 \cdot t\right)}\right) \]
  6. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
    2. neg-sub0N/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(0 - t\right)\right) \]
    3. --lowering--.f6459.0%

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
  7. Simplified59.0%

    \[\leadsto \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \color{blue}{0 - t}\right) \]
  8. Add Preprocessing

Alternative 14: 44.6% accurate, 10.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := y \cdot \left(0 - z\right)\\ \mathbf{if}\;z \leq -8 \cdot 10^{+217}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;z \leq 3.95 \cdot 10^{+155}:\\ \;\;\;\;0 - t\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (* y (- 0.0 z))))
   (if (<= z -8e+217) t_1 (if (<= z 3.95e+155) (- 0.0 t) t_1))))
/* Herbie alternative 14: regime split on z approximating
 * (x*log(y) + z*log(1-y)) - t; x is unused.
 * Outer regimes return y * (-z); the middle regime returns -t.
 * Comparison order is kept so a NaN z falls through to the outer value,
 * exactly as in the generated if/elif/else chain. */
double code(double x, double y, double z, double t) {
	double outer = y * (0.0 - z);
	if (z <= -8e+217) {
		return outer;
	}
	if (z <= 3.95e+155) {
		return 0.0 - t;
	}
	return outer;
}
! Herbie alternative 14: regime split on z approximating
! (x*log(y) + z*log(1-y)) - t; x is unused.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: tmp
    ! Shared value for the two outer regimes: y * (-z).
    t_1 = y * (0.0d0 - z)
    if (z <= (-8d+217)) then
        tmp = t_1
    else if (z <= 3.95d+155) then
        ! Middle regime: only the -t term survives.
        tmp = 0.0d0 - t
    else
        tmp = t_1
    end if
    code = tmp
end function
/**
 * Herbie alternative 14: regime split on z approximating
 * (x*log(y) + z*log(1-y)) - t; x is unused.
 * Outer regimes use y * (-z); the middle regime uses -t.
 */
public static double code(double x, double y, double z, double t) {
	double t_1 = y * (0.0 - z);
	double tmp;
	if (z <= -8e+217) {
		tmp = t_1;
	} else if (z <= 3.95e+155) {
		// Middle regime: only the -t term survives.
		tmp = 0.0 - t;
	} else {
		tmp = t_1;
	}
	return tmp;
}
def code(x, y, z, t):
	"""Herbie alternative 14: regime split on z; x is unused.

	Outer regimes return y * (-z); the middle regime returns -t.
	Comparison order is preserved so a NaN z yields the outer value,
	matching the generated if/elif/else chain.
	"""
	outer = y * (0.0 - z)
	if z <= -8e+217:
		return outer
	if z <= 3.95e+155:
		return 0.0 - t
	return outer
# Herbie alternative 14: regime split on z; x is unused.
# Outer regimes return y * (-z); the middle regime returns -t.
function code(x, y, z, t)
	t_1 = Float64(y * Float64(0.0 - z))
	tmp = 0.0
	if (z <= -8e+217)
		tmp = t_1;
	elseif (z <= 3.95e+155)
		# Middle regime: only the -t term survives.
		tmp = Float64(0.0 - t);
	else
		tmp = t_1;
	end
	return tmp
end
% Herbie alternative 14: regime split on z; x is unused.
% Outer regimes return y * (-z); the middle regime returns -t.
function tmp_2 = code(x, y, z, t)
	t_1 = y * (0.0 - z);
	tmp = 0.0;
	if (z <= -8e+217)
		tmp = t_1;
	elseif (z <= 3.95e+155)
		% Middle regime: only the -t term survives.
		tmp = 0.0 - t;
	else
		tmp = t_1;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 14: regime split on z; x is unused. Outer regimes use y*(-z); middle regime uses -t. *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(y * N[(0.0 - z), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -8e+217], t$95$1, If[LessEqual[z, 3.95e+155], N[(0.0 - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := y \cdot \left(0 - z\right)\\
\mathbf{if}\;z \leq -8 \cdot 10^{+217}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;z \leq 3.95 \cdot 10^{+155}:\\
\;\;\;\;0 - t\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -7.99999999999999968e217 or 3.94999999999999992e155 < z

    1. Initial program 42.5%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \left(x \cdot \log y + -1 \cdot \left(y \cdot z\right)\right) - t \]
      2. mul-1-negN/A

        \[\leadsto \left(x \cdot \log y + \left(\mathsf{neg}\left(y \cdot z\right)\right)\right) - t \]
      3. unsub-negN/A

        \[\leadsto \left(x \cdot \log y - y \cdot z\right) - t \]
      4. remove-double-negN/A

        \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right) - y \cdot z\right) - t \]
      5. mul-1-negN/A

        \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right) - y \cdot z\right) - t \]
      6. distribute-rgt-neg-inN/A

        \[\leadsto \left(\left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right) - y \cdot z\right) - t \]
      7. neg-mul-1N/A

        \[\leadsto \left(-1 \cdot \left(x \cdot \left(-1 \cdot \log y\right)\right) - y \cdot z\right) - t \]
      8. mul-1-negN/A

        \[\leadsto \left(-1 \cdot \left(x \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right) - y \cdot z\right) - t \]
      9. log-recN/A

        \[\leadsto \left(-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - y \cdot z\right) - t \]
      10. associate--l-N/A

        \[\leadsto -1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \color{blue}{\left(y \cdot z + t\right)} \]
      11. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\left(-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right)\right), \color{blue}{\left(y \cdot z + t\right)}\right) \]
    5. Simplified97.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, 0\right) - \mathsf{fma}\left(z, y, t\right)} \]
    6. Taylor expanded in y around inf

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right)} \]
    7. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{neg}\left(y \cdot z\right) \]
      2. *-commutativeN/A

        \[\leadsto \mathsf{neg}\left(z \cdot y\right) \]
      3. distribute-rgt-neg-inN/A

        \[\leadsto z \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
      4. mul-1-negN/A

        \[\leadsto z \cdot \left(-1 \cdot \color{blue}{y}\right) \]
      5. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(z, \color{blue}{\left(-1 \cdot y\right)}\right) \]
      6. mul-1-negN/A

        \[\leadsto \mathsf{*.f64}\left(z, \left(\mathsf{neg}\left(y\right)\right)\right) \]
      7. neg-sub0N/A

        \[\leadsto \mathsf{*.f64}\left(z, \left(0 - \color{blue}{y}\right)\right) \]
      8. --lowering--.f6461.7%

        \[\leadsto \mathsf{*.f64}\left(z, \mathsf{\_.f64}\left(0, \color{blue}{y}\right)\right) \]
    8. Simplified61.7%

      \[\leadsto \color{blue}{z \cdot \left(0 - y\right)} \]

    if -7.99999999999999968e217 < z < 3.94999999999999992e155

    1. Initial program 94.0%

      \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in t around inf

      \[\leadsto \color{blue}{-1 \cdot t} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{neg}\left(t\right) \]
      2. neg-sub0N/A

        \[\leadsto 0 - \color{blue}{t} \]
      3. --lowering--.f6449.8%

        \[\leadsto \mathsf{\_.f64}\left(0, \color{blue}{t}\right) \]
    5. Simplified49.8%

      \[\leadsto \color{blue}{0 - t} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \mathsf{neg}\left(t\right) \]
      2. neg-lowering-neg.f6449.8%

        \[\leadsto \mathsf{neg.f64}\left(t\right) \]
    7. Applied egg-rr49.8%

      \[\leadsto \color{blue}{-t} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification52.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -8 \cdot 10^{+217}:\\ \;\;\;\;y \cdot \left(0 - z\right)\\ \mathbf{elif}\;z \leq 3.95 \cdot 10^{+155}:\\ \;\;\;\;0 - t\\ \mathbf{else}:\\ \;\;\;\;y \cdot \left(0 - z\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 15: 57.0% accurate, 10.5× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(y, z \cdot \mathsf{fma}\left(y, -0.5, -1\right), 0 - t\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (fma y (* z (fma y -0.5 -1.0)) (- 0.0 t)))
double code(double x, double y, double z, double t) {
	return fma(y, (z * fma(y, -0.5, -1.0)), (0.0 - t));
}
# Herbie alternative 15: quadratic truncation fma(y, z*(-y/2 - 1), -t); x is unused.
function code(x, y, z, t)
	return fma(y, Float64(z * fma(y, -0.5, -1.0)), Float64(0.0 - t))
end
(* Herbie alternative 15: quadratic truncation y*(z*(-y/2 - 1)) - t; x is unused. *)
code[x_, y_, z_, t_] := N[(y * N[(z * N[(y * -0.5 + -1.0), $MachinePrecision]), $MachinePrecision] + N[(0.0 - t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(y, z \cdot \mathsf{fma}\left(y, -0.5, -1\right), 0 - t\right)
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
  4. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)} \]
  5. Taylor expanded in x around 0

    \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{\_.f64}\left(0, z\right)\right), \color{blue}{\left(-1 \cdot t\right)}\right) \]
  6. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
    2. neg-sub0N/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(0 - t\right)\right) \]
    3. --lowering--.f6459.0%

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
  7. Simplified59.0%

    \[\leadsto \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \color{blue}{0 - t}\right) \]
  8. Taylor expanded in y around 0

    \[\leadsto \mathsf{fma.f64}\left(y, \color{blue}{\left(\frac{-1}{2} \cdot \left(y \cdot z\right) - z\right)}, \mathsf{\_.f64}\left(0, t\right)\right) \]
  9. Step-by-step derivation
    1. sub-negN/A

      \[\leadsto \mathsf{fma.f64}\left(y, \left(\frac{-1}{2} \cdot \left(y \cdot z\right) + \color{blue}{\left(\mathsf{neg}\left(z\right)\right)}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    2. associate-*r*N/A

      \[\leadsto \mathsf{fma.f64}\left(y, \left(\left(\frac{-1}{2} \cdot y\right) \cdot z + \left(\mathsf{neg}\left(\color{blue}{z}\right)\right)\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    3. mul-1-negN/A

      \[\leadsto \mathsf{fma.f64}\left(y, \left(\left(\frac{-1}{2} \cdot y\right) \cdot z + -1 \cdot \color{blue}{z}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    4. distribute-rgt-outN/A

      \[\leadsto \mathsf{fma.f64}\left(y, \left(z \cdot \color{blue}{\left(\frac{-1}{2} \cdot y + -1\right)}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    5. *-lowering-*.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{*.f64}\left(z, \color{blue}{\left(\frac{-1}{2} \cdot y + -1\right)}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    6. *-commutativeN/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{*.f64}\left(z, \left(y \cdot \frac{-1}{2} + -1\right)\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
    7. accelerator-lowering-fma.f6458.8%

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{*.f64}\left(z, \mathsf{fma.f64}\left(y, \color{blue}{\frac{-1}{2}}, -1\right)\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
  10. Simplified58.8%

    \[\leadsto \mathsf{fma}\left(y, \color{blue}{z \cdot \mathsf{fma}\left(y, -0.5, -1\right)}, 0 - t\right) \]
  11. Add Preprocessing

Alternative 16: 57.0% accurate, 10.5× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(y \cdot z, \mathsf{fma}\left(y, -0.5, -1\right), 0\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (fma (* y z) (fma y -0.5 -1.0) 0.0) t))
double code(double x, double y, double z, double t) {
	return fma((y * z), fma(y, -0.5, -1.0), 0.0) - t;
}
# Herbie alternative 16: fma(y*z, -y/2 - 1, 0) - t; x is unused.
function code(x, y, z, t)
	return Float64(fma(Float64(y * z), fma(y, -0.5, -1.0), 0.0) - t)
end
(* Herbie alternative 16: (y*z)*(-y/2 - 1) - t; x is unused. *)
code[x_, y_, z_, t_] := N[(N[(N[(y * z), $MachinePrecision] * N[(y * -0.5 + -1.0), $MachinePrecision] + 0.0), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(y \cdot z, \mathsf{fma}\left(y, -0.5, -1\right), 0\right) - t
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(x \cdot \log y + y \cdot \left(-1 \cdot z + y \cdot \left(\frac{-1}{2} \cdot z + \frac{-1}{3} \cdot \left(y \cdot z\right)\right)\right)\right) - t} \]
  4. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \mathsf{fma}\left(x, \log y, 0 - t\right)\right)} \]
  5. Taylor expanded in x around 0

    \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \mathsf{\_.f64}\left(0, z\right)\right), \color{blue}{\left(-1 \cdot t\right)}\right) \]
  6. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(\mathsf{neg}\left(t\right)\right)\right) \]
    2. neg-sub0N/A

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \left(0 - t\right)\right) \]
    3. --lowering--.f6459.0%

      \[\leadsto \mathsf{fma.f64}\left(y, \mathsf{fma.f64}\left(z, \mathsf{*.f64}\left(y, \mathsf{fma.f64}\left(y, \frac{-1}{3}, \frac{-1}{2}\right)\right), \color{blue}{\mathsf{\_.f64}\left(0, z\right)}\right), \mathsf{\_.f64}\left(0, t\right)\right) \]
  7. Simplified59.0%

    \[\leadsto \mathsf{fma}\left(y, \mathsf{fma}\left(z, y \cdot \mathsf{fma}\left(y, -0.3333333333333333, -0.5\right), 0 - z\right), \color{blue}{0 - t}\right) \]
  8. Taylor expanded in y around 0

    \[\leadsto \color{blue}{y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) - t} \]
  9. Step-by-step derivation
    1. sub-negN/A

      \[\leadsto y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) + \color{blue}{\left(\mathsf{neg}\left(t\right)\right)} \]
    2. neg-sub0N/A

      \[\leadsto y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) + \left(0 - \color{blue}{t}\right) \]
    3. associate-+r-N/A

      \[\leadsto \left(y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) + 0\right) - \color{blue}{t} \]
    4. --lowering--.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\left(y \cdot \left(-1 \cdot z + \frac{-1}{2} \cdot \left(y \cdot z\right)\right) + 0\right), \color{blue}{t}\right) \]
    5. distribute-lft-inN/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\left(y \cdot \left(-1 \cdot z\right) + y \cdot \left(\frac{-1}{2} \cdot \left(y \cdot z\right)\right)\right) + 0\right), t\right) \]
    6. mul-1-negN/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\left(y \cdot \left(\mathsf{neg}\left(z\right)\right) + y \cdot \left(\frac{-1}{2} \cdot \left(y \cdot z\right)\right)\right) + 0\right), t\right) \]
    7. distribute-rgt-neg-inN/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\left(\left(\mathsf{neg}\left(y \cdot z\right)\right) + y \cdot \left(\frac{-1}{2} \cdot \left(y \cdot z\right)\right)\right) + 0\right), t\right) \]
    8. mul-1-negN/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\left(-1 \cdot \left(y \cdot z\right) + y \cdot \left(\frac{-1}{2} \cdot \left(y \cdot z\right)\right)\right) + 0\right), t\right) \]
    9. associate-*r*N/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\left(-1 \cdot \left(y \cdot z\right) + \left(y \cdot \frac{-1}{2}\right) \cdot \left(y \cdot z\right)\right) + 0\right), t\right) \]
    10. *-commutativeN/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\left(-1 \cdot \left(y \cdot z\right) + \left(\frac{-1}{2} \cdot y\right) \cdot \left(y \cdot z\right)\right) + 0\right), t\right) \]
    11. distribute-rgt-outN/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\left(y \cdot z\right) \cdot \left(-1 + \frac{-1}{2} \cdot y\right) + 0\right), t\right) \]
    12. +-commutativeN/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\left(y \cdot z\right) \cdot \left(\frac{-1}{2} \cdot y + -1\right) + 0\right), t\right) \]
    13. accelerator-lowering-fma.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{fma.f64}\left(\left(y \cdot z\right), \left(\frac{-1}{2} \cdot y + -1\right), 0\right), t\right) \]
    14. *-lowering-*.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, z\right), \left(\frac{-1}{2} \cdot y + -1\right), 0\right), t\right) \]
    15. *-commutativeN/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, z\right), \left(y \cdot \frac{-1}{2} + -1\right), 0\right), t\right) \]
    16. accelerator-lowering-fma.f6458.8%

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{fma.f64}\left(\mathsf{*.f64}\left(y, z\right), \mathsf{fma.f64}\left(y, \frac{-1}{2}, -1\right), 0\right), t\right) \]
  10. Simplified58.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(y \cdot z, \mathsf{fma}\left(y, -0.5, -1\right), 0\right) - t} \]
  11. Add Preprocessing

Alternative 17: 56.7% accurate, 22.0× speedup?

\[\begin{array}{l} \\ 0 - \mathsf{fma}\left(z, y, t\right) \end{array} \]
(FPCore (x y z t) :precision binary64 (- 0.0 (fma z y t)))
double code(double x, double y, double z, double t) {
	return 0.0 - fma(z, y, t);
}
# Herbie alternative 17: first-order approximation -(z*y + t); x is unused.
function code(x, y, z, t)
	return Float64(0.0 - fma(z, y, t))
end
(* Herbie alternative 17: first-order approximation -(z*y + t); x is unused. *)
code[x_, y_, z_, t_] := N[(0.0 - N[(z * y + t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
0 - \mathsf{fma}\left(z, y, t\right)
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot z\right) + x \cdot \log y\right) - t} \]
  4. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \left(x \cdot \log y + -1 \cdot \left(y \cdot z\right)\right) - t \]
    2. mul-1-negN/A

      \[\leadsto \left(x \cdot \log y + \left(\mathsf{neg}\left(y \cdot z\right)\right)\right) - t \]
    3. unsub-negN/A

      \[\leadsto \left(x \cdot \log y - y \cdot z\right) - t \]
    4. remove-double-negN/A

      \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log y\right)\right)\right)\right) - y \cdot z\right) - t \]
    5. mul-1-negN/A

      \[\leadsto \left(x \cdot \left(\mathsf{neg}\left(-1 \cdot \log y\right)\right) - y \cdot z\right) - t \]
    6. distribute-rgt-neg-inN/A

      \[\leadsto \left(\left(\mathsf{neg}\left(x \cdot \left(-1 \cdot \log y\right)\right)\right) - y \cdot z\right) - t \]
    7. neg-mul-1N/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \left(-1 \cdot \log y\right)\right) - y \cdot z\right) - t \]
    8. mul-1-negN/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right) - y \cdot z\right) - t \]
    9. log-recN/A

      \[\leadsto \left(-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - y \cdot z\right) - t \]
    10. associate--l-N/A

      \[\leadsto -1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right) - \color{blue}{\left(y \cdot z + t\right)} \]
    11. --lowering--.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\left(-1 \cdot \left(x \cdot \log \left(\frac{1}{y}\right)\right)\right), \color{blue}{\left(y \cdot z + t\right)}\right) \]
  5. Simplified99.0%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log y, 0\right) - \mathsf{fma}\left(z, y, t\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{-1 \cdot \left(t + y \cdot z\right)} \]
  7. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \mathsf{neg}\left(\left(t + y \cdot z\right)\right) \]
    2. neg-sub0N/A

      \[\leadsto 0 - \color{blue}{\left(t + y \cdot z\right)} \]
    3. --lowering--.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(0, \color{blue}{\left(t + y \cdot z\right)}\right) \]
    4. +-commutativeN/A

      \[\leadsto \mathsf{\_.f64}\left(0, \left(y \cdot z + \color{blue}{t}\right)\right) \]
    5. *-commutativeN/A

      \[\leadsto \mathsf{\_.f64}\left(0, \left(z \cdot y + t\right)\right) \]
    6. accelerator-lowering-fma.f6458.4%

      \[\leadsto \mathsf{\_.f64}\left(0, \mathsf{fma.f64}\left(z, \color{blue}{y}, t\right)\right) \]
  8. Simplified58.4%

    \[\leadsto \color{blue}{0 - \mathsf{fma}\left(z, y, t\right)} \]
  9. Add Preprocessing

Alternative 18: 42.3% accurate, 55.0× speedup?

\[\begin{array}{l} \\ 0 - t \end{array} \]
(FPCore (x y z t) :precision binary64 (- 0.0 t))
/* Herbie alternative 18: constant-order approximation -t; x, y, z are unused.
 * Written as 0.0 - t (not -t) to match the FPCore's rounding at t == 0. */
double code(double x, double y, double z, double t) {
	double negated = 0.0 - t;
	return negated;
}
! Herbie alternative 18: constant-order approximation -t; x, y, z are unused.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = 0.0d0 - t
end function
/** Herbie alternative 18: constant-order approximation -t; x, y, z are unused. */
public static double code(double x, double y, double z, double t) {
	return 0.0 - t;
}
def code(x, y, z, t):
	"""Herbie alternative 18: constant-order approximation -t (x, y, z unused).

	Kept as ``0.0 - t`` rather than ``-t`` to match the FPCore exactly
	(they differ in sign for t == 0.0).
	"""
	negated = 0.0 - t
	return negated
# Herbie alternative 18: constant-order approximation -t; x, y, z are unused.
function code(x, y, z, t)
	return Float64(0.0 - t)
end
% Herbie alternative 18: constant-order approximation -t; x, y, z are unused.
function tmp = code(x, y, z, t)
	tmp = 0.0 - t;
end
(* Herbie alternative 18: constant-order approximation -t; x, y, z are unused. *)
code[x_, y_, z_, t_] := N[(0.0 - t), $MachinePrecision]
\begin{array}{l}

\\
0 - t
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in t around inf

    \[\leadsto \color{blue}{-1 \cdot t} \]
  4. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \mathsf{neg}\left(t\right) \]
    2. neg-sub0N/A

      \[\leadsto 0 - \color{blue}{t} \]
    3. --lowering--.f6441.9%

      \[\leadsto \mathsf{\_.f64}\left(0, \color{blue}{t}\right) \]
  5. Simplified41.9%

    \[\leadsto \color{blue}{0 - t} \]
  6. Step-by-step derivation
    1. sub0-negN/A

      \[\leadsto \mathsf{neg}\left(t\right) \]
    2. neg-lowering-neg.f6441.9%

      \[\leadsto \mathsf{neg.f64}\left(t\right) \]
  7. Applied egg-rr41.9%

    \[\leadsto \color{blue}{-t} \]
  8. Final simplification41.9%

    \[\leadsto 0 - t \]
  9. Add Preprocessing

Alternative 19: 2.3% accurate, 220.0× speedup?

\[\begin{array}{l} \\ t \end{array} \]
(FPCore (x y z t) :precision binary64 t)
double code(double x, double y, double z, double t) {
	return t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = t
end function
public static double code(double x, double y, double z, double t) {
	return t;
}
def code(x, y, z, t):
	return t
function code(x, y, z, t)
	return t
end
function tmp = code(x, y, z, t)
	tmp = t;
end
code[x_, y_, z_, t_] := t
\begin{array}{l}

\\
t
\end{array}
Derivation
  1. Initial program 83.8%

    \[\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in t around inf

    \[\leadsto \color{blue}{-1 \cdot t} \]
  4. Step-by-step derivation
    1. mul-1-neg N/A

      \[\leadsto \mathsf{neg}\left(t\right) \]
    2. neg-sub0 N/A

      \[\leadsto 0 - \color{blue}{t} \]
    3. --lowering--.f64 41.9%

      \[\leadsto \mathsf{\_.f64}\left(0, \color{blue}{t}\right) \]
  5. Simplified 41.9%

    \[\leadsto \color{blue}{0 - t} \]
  6. Step-by-step derivation
    1. flip3-- N/A

      \[\leadsto \frac{{0}^{3} - {t}^{3}}{\color{blue}{0 \cdot 0 + \left(t \cdot t + 0 \cdot t\right)}} \]
    2. metadata-eval N/A

      \[\leadsto \frac{0 - {t}^{3}}{\color{blue}{0} \cdot 0 + \left(t \cdot t + 0 \cdot t\right)} \]
    3. sub0-neg N/A

      \[\leadsto \frac{\mathsf{neg}\left({t}^{3}\right)}{\color{blue}{0 \cdot 0} + \left(t \cdot t + 0 \cdot t\right)} \]
    4. cube-neg N/A

      \[\leadsto \frac{{\left(\mathsf{neg}\left(t\right)\right)}^{3}}{\color{blue}{0 \cdot 0} + \left(t \cdot t + 0 \cdot t\right)} \]
    5. sub0-neg N/A

      \[\leadsto \frac{{\left(0 - t\right)}^{3}}{\color{blue}{0} \cdot 0 + \left(t \cdot t + 0 \cdot t\right)} \]
    6. sqr-pow N/A

      \[\leadsto \frac{{\left(0 - t\right)}^{\left(\frac{3}{2}\right)} \cdot {\left(0 - t\right)}^{\left(\frac{3}{2}\right)}}{\color{blue}{0 \cdot 0} + \left(t \cdot t + 0 \cdot t\right)} \]
    7. unpow-prod-down N/A

      \[\leadsto \frac{{\left(\left(0 - t\right) \cdot \left(0 - t\right)\right)}^{\left(\frac{3}{2}\right)}}{\color{blue}{0 \cdot 0} + \left(t \cdot t + 0 \cdot t\right)} \]
    8. sub0-neg N/A

      \[\leadsto \frac{{\left(\left(\mathsf{neg}\left(t\right)\right) \cdot \left(0 - t\right)\right)}^{\left(\frac{3}{2}\right)}}{0 \cdot 0 + \left(t \cdot t + 0 \cdot t\right)} \]
    9. sub0-neg N/A

      \[\leadsto \frac{{\left(\left(\mathsf{neg}\left(t\right)\right) \cdot \left(\mathsf{neg}\left(t\right)\right)\right)}^{\left(\frac{3}{2}\right)}}{0 \cdot 0 + \left(t \cdot t + 0 \cdot t\right)} \]
    10. sqr-neg N/A

      \[\leadsto \frac{{\left(t \cdot t\right)}^{\left(\frac{3}{2}\right)}}{\color{blue}{0} \cdot 0 + \left(t \cdot t + 0 \cdot t\right)} \]
    11. unpow-prod-down N/A

      \[\leadsto \frac{{t}^{\left(\frac{3}{2}\right)} \cdot {t}^{\left(\frac{3}{2}\right)}}{\color{blue}{0 \cdot 0} + \left(t \cdot t + 0 \cdot t\right)} \]
    12. sqr-pow N/A

      \[\leadsto \frac{{t}^{3}}{\color{blue}{0 \cdot 0} + \left(t \cdot t + 0 \cdot t\right)} \]
    13. metadata-eval N/A

      \[\leadsto \frac{{t}^{3}}{0 + \left(\color{blue}{t \cdot t} + 0 \cdot t\right)} \]
    14. +-lft-identity N/A

      \[\leadsto \frac{{t}^{3}}{t \cdot t + \color{blue}{0 \cdot t}} \]
    15. distribute-rgt-out N/A

      \[\leadsto \frac{{t}^{3}}{t \cdot \color{blue}{\left(t + 0\right)}} \]
    16. +-commutative N/A

      \[\leadsto \frac{{t}^{3}}{t \cdot \left(0 + \color{blue}{t}\right)} \]
    17. +-lft-identity N/A

      \[\leadsto \frac{{t}^{3}}{t \cdot t} \]
    18. pow2 N/A

      \[\leadsto \frac{{t}^{3}}{{t}^{\color{blue}{2}}} \]
    19. pow-div N/A

      \[\leadsto {t}^{\color{blue}{\left(3 - 2\right)}} \]
    20. metadata-eval N/A

      \[\leadsto {t}^{1} \]
    21. unpow1 2.3%

      \[\leadsto t \]
  7. Applied egg-rr 2.3%

    \[\leadsto \color{blue}{t} \]
  8. Add Preprocessing

Developer Target 1: 99.5% accurate, 1.3× speedup?

\[\begin{array}{l} \\ \left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (-
  (*
   (- z)
   (+
    (+ (* 0.5 (* y y)) y)
    (* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
  (- t (* x (log y)))))
double code(double x, double y, double z, double t) {
	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
end function
public static double code(double x, double y, double z, double t) {
	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * Math.log(y)));
}
def code(x, y, z, t):
	return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * math.log(y)))
function code(x, y, z, t)
	return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y))))
end
function tmp = code(x, y, z, t)
	tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
end
code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
\end{array}

Reproduce

?
herbie shell --seed 2024193 
(FPCore (x y z t)
  :name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
  :precision binary64

  :alt
  (! :herbie-platform default (- (* (- z) (+ (+ (* 1/2 (* y y)) y) (* (/ 1/3 (* 1 (* 1 1))) (* y (* y y))))) (- t (* x (log y)))))

  (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))