Statistics.Distribution.Beta:$cdensity from math-functions-0.1.5.2

Percentage Accurate: 89.0% → 99.8%
Time: 14.2s
Alternatives: 16
Speedup: 1.9×

Specification

?
\[\begin{array}{l} \\ \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
! Beta-distribution log-density kernel from the specification:
! (x - 1)*log(y) + (z - 1)*log(1 - y) - t, evaluated naively in
! double precision (binary64). log(1 - y) cancels for small y.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
// Beta-distribution log-density kernel: (x-1)*ln(y) + (z-1)*ln(1-y) - t,
// evaluated naively in binary64 (the specification form of the program).
public static double code(double x, double y, double z, double t) {
	double logY = Math.log(y);
	double logOneMinusY = Math.log(1.0 - y);
	return ((x - 1.0) * logY + (z - 1.0) * logOneMinusY) - t;
}
def code(x, y, z, t):
	# Beta-distribution log-density kernel (specification form):
	# (x-1)*ln(y) + (z-1)*ln(1-y) - t, evaluated naively in binary64.
	log_y = math.log(y)
	log_one_minus_y = math.log(1.0 - y)
	return ((x - 1.0) * log_y + (z - 1.0) * log_one_minus_y) - t
# Beta-distribution log-density kernel (specification form):
# (x-1)*log(y) + (z-1)*log(1-y) - t. The Float64(...) wrappers force
# binary64 rounding after every intermediate operation.
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t)
end
% Beta-distribution log-density kernel (specification form):
% (x-1)*log(y) + (z-1)*log(1-y) - t, evaluated naively in double precision.
function tmp = code(x, y, z, t)
	tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
end
(* Beta-distribution log-density kernel (specification form):
   (x-1) Log[y] + (z-1) Log[1-y] - t, with every intermediate rounded
   to $MachinePrecision to mimic binary64 evaluation. *)
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 16 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 89.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
	return (((x - 1.0) * Math.log(y)) + ((z - 1.0) * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	return (((x - 1.0) * math.log(y)) + ((z - 1.0) * math.log((1.0 - y)))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t)
end
function tmp = code(x, y, z, t)
	tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}

Alternative 1: 99.8% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \log y \cdot \left(x + -1\right) - t\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (fma (+ z -1.0) (log1p (- y)) (- (* (log y) (+ x -1.0)) t)))
double code(double x, double y, double z, double t) {
	return fma((z + -1.0), log1p(-y), ((log(y) * (x + -1.0)) - t));
}
# Alternative 1: fma(z-1, log1p(-y), (x-1)*log(y) - t). log1p avoids the
# cancellation in log(1-y) for small y; fma fuses the final product-sum.
function code(x, y, z, t)
	return fma(Float64(z + -1.0), log1p(Float64(-y)), Float64(Float64(log(y) * Float64(x + -1.0)) - t))
end
code[x_, y_, z_, t_] := N[(N[(z + -1.0), $MachinePrecision] * N[Log[1 + (-y)], $MachinePrecision] + N[(N[(N[Log[y], $MachinePrecision] * N[(x + -1.0), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \log y \cdot \left(x + -1\right) - t\right)
\end{array}
Derivation
  1. Initial program 87.5%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. +-commutative87.5%

      \[\leadsto \color{blue}{\left(\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(x - 1\right) \cdot \log y\right)} - t \]
    2. associate--l+87.5%

      \[\leadsto \color{blue}{\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(\left(x - 1\right) \cdot \log y - t\right)} \]
    3. fma-def87.5%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \log \left(1 - y\right), \left(x - 1\right) \cdot \log y - t\right)} \]
    4. sub-neg87.5%

      \[\leadsto \mathsf{fma}\left(z - 1, \log \color{blue}{\left(1 + \left(-y\right)\right)}, \left(x - 1\right) \cdot \log y - t\right) \]
    5. log1p-def99.8%

      \[\leadsto \mathsf{fma}\left(z - 1, \color{blue}{\mathsf{log1p}\left(-y\right)}, \left(x - 1\right) \cdot \log y - t\right) \]
  3. Simplified99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \mathsf{log1p}\left(-y\right), \left(x - 1\right) \cdot \log y - t\right)} \]
  4. Final simplification99.8%

    \[\leadsto \mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \log y \cdot \left(x + -1\right) - t\right) \]

Alternative 2: 99.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\log y \cdot \left(x + -1\right) + z \cdot \mathsf{log1p}\left(-y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (log y) (+ x -1.0)) (* z (log1p (- y)))) t))
/* Alternative 2: log(1-y) replaced by log1p(-y); the (z-1) factor is
   Taylor-expanded (in z around inf) to plain z, avoiding one subtraction. */
double code(double x, double y, double z, double t) {
	return ((log(y) * (x + -1.0)) + (z * log1p(-y))) - t;
}
// Alternative 2: log(1-y) replaced by Math.log1p(-y); the (z-1) factor is
// Taylor-expanded (in z around inf) to plain z, avoiding one subtraction.
public static double code(double x, double y, double z, double t) {
	return ((Math.log(y) * (x + -1.0)) + (z * Math.log1p(-y))) - t;
}
def code(x, y, z, t):
	# Alternative 2: log(1-y) replaced by log1p(-y); the (z-1) factor is
	# Taylor-expanded (in z around inf) to plain z.
	left = math.log(y) * (x + -1.0)
	right = z * math.log1p(-y)
	return (left + right) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(log(y) * Float64(x + -1.0)) + Float64(z * log1p(Float64(-y)))) - t)
end
code[x_, y_, z_, t_] := N[(N[(N[(N[Log[y], $MachinePrecision] * N[(x + -1.0), $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[1 + (-y)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\log y \cdot \left(x + -1\right) + z \cdot \mathsf{log1p}\left(-y\right)\right) - t
\end{array}
Derivation
  1. Initial program 87.5%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in z around inf 86.9%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \color{blue}{z \cdot \log \left(1 - y\right)}\right) - t \]
  3. Step-by-step derivation
    1. *-commutative86.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \color{blue}{\log \left(1 - y\right) \cdot z}\right) - t \]
    2. sub-neg86.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \log \color{blue}{\left(1 + \left(-y\right)\right)} \cdot z\right) - t \]
    3. mul-1-neg86.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \log \left(1 + \color{blue}{-1 \cdot y}\right) \cdot z\right) - t \]
    4. log1p-def99.2%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \color{blue}{\mathsf{log1p}\left(-1 \cdot y\right)} \cdot z\right) - t \]
    5. mul-1-neg99.2%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \mathsf{log1p}\left(\color{blue}{-y}\right) \cdot z\right) - t \]
  4. Simplified99.2%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \color{blue}{\mathsf{log1p}\left(-y\right) \cdot z}\right) - t \]
  5. Final simplification99.2%

    \[\leadsto \left(\log y \cdot \left(x + -1\right) + z \cdot \mathsf{log1p}\left(-y\right)\right) - t \]

Alternative 3: 99.5% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \left(\log y \cdot \left(x + -1\right) + \left(y \cdot \left(y \cdot -0.5\right) - y\right) \cdot \left(z + -1\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (log y) (+ x -1.0)) (* (- (* y (* y -0.5)) y) (+ z -1.0))) t))
/* Alternative 3: log(1-y) replaced by its quadratic Taylor surrogate
   y*(y*-0.5) - y  (i.e. -y - y^2/2), trading a log call for two multiplies. */
double code(double x, double y, double z, double t) {
	return ((log(y) * (x + -1.0)) + (((y * (y * -0.5)) - y) * (z + -1.0))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((log(y) * (x + (-1.0d0))) + (((y * (y * (-0.5d0))) - y) * (z + (-1.0d0)))) - t
end function
public static double code(double x, double y, double z, double t) {
	return ((Math.log(y) * (x + -1.0)) + (((y * (y * -0.5)) - y) * (z + -1.0))) - t;
}
def code(x, y, z, t):
	# Alternative 3: log(1-y) replaced by its quadratic Taylor surrogate
	# y*(y*-0.5) - y, i.e. -y - y**2/2 (accurate for small y).
	surrogate = y * (y * -0.5) - y
	return (math.log(y) * (x + -1.0) + surrogate * (z + -1.0)) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(log(y) * Float64(x + -1.0)) + Float64(Float64(Float64(y * Float64(y * -0.5)) - y) * Float64(z + -1.0))) - t)
end
function tmp = code(x, y, z, t)
	tmp = ((log(y) * (x + -1.0)) + (((y * (y * -0.5)) - y) * (z + -1.0))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[Log[y], $MachinePrecision] * N[(x + -1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(y * N[(y * -0.5), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision] * N[(z + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\log y \cdot \left(x + -1\right) + \left(y \cdot \left(y \cdot -0.5\right) - y\right) \cdot \left(z + -1\right)\right) - t
\end{array}
Derivation
  1. Initial program 87.5%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 98.9%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
  3. Step-by-step derivation
    1. mul-1-neg98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
    2. unsub-neg98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
    3. *-commutative98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
    4. unpow298.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
    5. associate-*l*98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
  4. Simplified98.9%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
  5. Final simplification98.9%

    \[\leadsto \left(\log y \cdot \left(x + -1\right) + \left(y \cdot \left(y \cdot -0.5\right) - y\right) \cdot \left(z + -1\right)\right) - t \]

Alternative 4: 99.2% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \left(\log y \cdot \left(x + -1\right) - y \cdot \left(z + -1\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (- (* (log y) (+ x -1.0)) (* y (+ z -1.0))) t))
/* Alternative 4: log(1-y) replaced by its first-order Taylor term -y,
   so the second product becomes -(y*(z-1)); cheapest rewrite in the report. */
double code(double x, double y, double z, double t) {
	return ((log(y) * (x + -1.0)) - (y * (z + -1.0))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((log(y) * (x + (-1.0d0))) - (y * (z + (-1.0d0)))) - t
end function
public static double code(double x, double y, double z, double t) {
	return ((Math.log(y) * (x + -1.0)) - (y * (z + -1.0))) - t;
}
def code(x, y, z, t):
	# Alternative 4: log(1-y) replaced by its first-order Taylor term -y.
	log_term = math.log(y) * (x + -1.0)
	linear_term = y * (z + -1.0)
	return (log_term - linear_term) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(log(y) * Float64(x + -1.0)) - Float64(y * Float64(z + -1.0))) - t)
end
function tmp = code(x, y, z, t)
	tmp = ((log(y) * (x + -1.0)) - (y * (z + -1.0))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[Log[y], $MachinePrecision] * N[(x + -1.0), $MachinePrecision]), $MachinePrecision] - N[(y * N[(z + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z + -1\right)\right) - t
\end{array}
Derivation
  1. Initial program 87.5%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 98.3%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(\left(z - 1\right) \cdot y\right) + \left(x - 1\right) \cdot \log y\right)} - t \]
  3. Step-by-step derivation
    1. +-commutative98.3%

      \[\leadsto \color{blue}{\left(\left(x - 1\right) \cdot \log y + -1 \cdot \left(\left(z - 1\right) \cdot y\right)\right)} - t \]
    2. sub-neg98.3%

      \[\leadsto \left(\color{blue}{\left(x + \left(-1\right)\right)} \cdot \log y + -1 \cdot \left(\left(z - 1\right) \cdot y\right)\right) - t \]
    3. metadata-eval98.3%

      \[\leadsto \left(\left(x + \color{blue}{-1}\right) \cdot \log y + -1 \cdot \left(\left(z - 1\right) \cdot y\right)\right) - t \]
    4. mul-1-neg98.3%

      \[\leadsto \left(\left(x + -1\right) \cdot \log y + \color{blue}{\left(-\left(z - 1\right) \cdot y\right)}\right) - t \]
    5. unsub-neg98.3%

      \[\leadsto \color{blue}{\left(\left(x + -1\right) \cdot \log y - \left(z - 1\right) \cdot y\right)} - t \]
    6. *-commutative98.3%

      \[\leadsto \left(\color{blue}{\log y \cdot \left(x + -1\right)} - \left(z - 1\right) \cdot y\right) - t \]
    7. +-commutative98.3%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - \left(z - 1\right) \cdot y\right) - t \]
    8. *-commutative98.3%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \color{blue}{y \cdot \left(z - 1\right)}\right) - t \]
    9. sub-neg98.3%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
    10. metadata-eval98.3%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
    11. +-commutative98.3%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
  4. Simplified98.3%

    \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
  5. Final simplification98.3%

    \[\leadsto \left(\log y \cdot \left(x + -1\right) - y \cdot \left(z + -1\right)\right) - t \]

Alternative 5: 83.5% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y - t\\ \mathbf{if}\;x \leq -2.8 \cdot 10^{+27}:\\ \;\;\;\;t_1\\ \mathbf{elif}\;x \leq 3 \cdot 10^{-191}:\\ \;\;\;\;\left(-\log y\right) - t\\ \mathbf{elif}\;x \leq 5 \cdot 10^{+27}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;t_1\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* x (log y)) t)))
   (if (<= x -2.8e+27)
     t_1
     (if (<= x 3e-191)
       (- (- (log y)) t)
       (if (<= x 5e+27) (- (* (+ z -1.0) (- (* -0.5 (* y y)) y)) t) t_1)))))
/* Alternative 5: regime-split evaluation of the Beta log-density kernel.
   t_1 = x*log(y) - t is the large-|x| Taylor form; tiny positive x uses
   -log(y) - t, and the middle regime uses the quadratic surrogate
   -y - y^2/2 for log(1-y). Note t_1 is computed unconditionally. */
double code(double x, double y, double z, double t) {
	double t_1 = (x * log(y)) - t;
	double tmp;
	if (x <= -2.8e+27) {
		tmp = t_1;  /* large negative x: x*log(y) dominates */
	} else if (x <= 3e-191) {
		tmp = -log(y) - t;  /* x near 0: (x-1)*log(y) ~ -log(y) */
	} else if (x <= 5e+27) {
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;  /* log(1-y) ~ -y - y^2/2 */
	} else {
		tmp = t_1;  /* large positive x */
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: tmp
    t_1 = (x * log(y)) - t
    if (x <= (-2.8d+27)) then
        tmp = t_1
    else if (x <= 3d-191) then
        tmp = -log(y) - t
    else if (x <= 5d+27) then
        tmp = ((z + (-1.0d0)) * (((-0.5d0) * (y * y)) - y)) - t
    else
        tmp = t_1
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double t_1 = (x * Math.log(y)) - t;
	double tmp;
	if (x <= -2.8e+27) {
		tmp = t_1;
	} else if (x <= 3e-191) {
		tmp = -Math.log(y) - t;
	} else if (x <= 5e+27) {
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	} else {
		tmp = t_1;
	}
	return tmp;
}
def code(x, y, z, t):
	# Alternative 5: regime split on x. The large-|x| Taylor form
	# x*log(y) - t is computed unconditionally (as in the FPCore let*);
	# the inner regimes use -log(y) - t and the quadratic surrogate
	# -y - y**2/2 for log(1-y).
	fallback = (x * math.log(y)) - t
	if x <= -2.8e+27:
		return fallback
	if x <= 3e-191:
		return -math.log(y) - t
	if x <= 5e+27:
		return ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t
	return fallback
function code(x, y, z, t)
	t_1 = Float64(Float64(x * log(y)) - t)
	tmp = 0.0
	if (x <= -2.8e+27)
		tmp = t_1;
	elseif (x <= 3e-191)
		tmp = Float64(Float64(-log(y)) - t);
	elseif (x <= 5e+27)
		tmp = Float64(Float64(Float64(z + -1.0) * Float64(Float64(-0.5 * Float64(y * y)) - y)) - t);
	else
		tmp = t_1;
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	t_1 = (x * log(y)) - t;
	tmp = 0.0;
	if (x <= -2.8e+27)
		tmp = t_1;
	elseif (x <= 3e-191)
		tmp = -log(y) - t;
	elseif (x <= 5e+27)
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	else
		tmp = t_1;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -2.8e+27], t$95$1, If[LessEqual[x, 3e-191], N[((-N[Log[y], $MachinePrecision]) - t), $MachinePrecision], If[LessEqual[x, 5e+27], N[(N[(N[(z + -1.0), $MachinePrecision] * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -2.8 \cdot 10^{+27}:\\
\;\;\;\;t_1\\

\mathbf{elif}\;x \leq 3 \cdot 10^{-191}:\\
\;\;\;\;\left(-\log y\right) - t\\

\mathbf{elif}\;x \leq 5 \cdot 10^{+27}:\\
\;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\

\mathbf{else}:\\
\;\;\;\;t_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -2.7999999999999999e27 or 4.99999999999999979e27 < x

    1. Initial program 94.1%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.7%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)}\right) - t \]
    3. Taylor expanded in x around inf 93.5%

      \[\leadsto \color{blue}{\log y \cdot x} - t \]

    if -2.7999999999999999e27 < x < 3.0000000000000001e-191

    1. Initial program 85.3%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in z around 0 82.8%

      \[\leadsto \color{blue}{\left(\left(x - 1\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right)} - t \]
    3. Step-by-step derivation
      1. sub-neg82.8%

        \[\leadsto \left(\color{blue}{\left(x + \left(-1\right)\right)} \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      2. metadata-eval82.8%

        \[\leadsto \left(\left(x + \color{blue}{-1}\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      3. mul-1-neg82.8%

        \[\leadsto \left(\left(x + -1\right) \cdot \log y + \color{blue}{\left(-\log \left(1 - y\right)\right)}\right) - t \]
      4. unsub-neg82.8%

        \[\leadsto \color{blue}{\left(\left(x + -1\right) \cdot \log y - \log \left(1 - y\right)\right)} - t \]
      5. *-commutative82.8%

        \[\leadsto \left(\color{blue}{\log y \cdot \left(x + -1\right)} - \log \left(1 - y\right)\right) - t \]
      6. +-commutative82.8%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - \log \left(1 - y\right)\right) - t \]
      7. sub-neg82.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      8. mul-1-neg82.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \left(1 + \color{blue}{-1 \cdot y}\right)\right) - t \]
      9. log1p-def82.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \color{blue}{\mathsf{log1p}\left(-1 \cdot y\right)}\right) - t \]
      10. mul-1-neg82.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(\color{blue}{-y}\right)\right) - t \]
    4. Simplified82.8%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(-y\right)\right)} - t \]
    5. Taylor expanded in x around 0 82.5%

      \[\leadsto \left(\color{blue}{-1 \cdot \log y} - \mathsf{log1p}\left(-y\right)\right) - t \]
    6. Step-by-step derivation
      1. mul-1-neg82.5%

        \[\leadsto \left(\color{blue}{\left(-\log y\right)} - \mathsf{log1p}\left(-y\right)\right) - t \]
    7. Simplified82.5%

      \[\leadsto \left(\color{blue}{\left(-\log y\right)} - \mathsf{log1p}\left(-y\right)\right) - t \]
    8. Taylor expanded in y around 0 81.5%

      \[\leadsto \color{blue}{-1 \cdot \log y} - t \]
    9. Step-by-step derivation
      1. neg-mul-181.5%

        \[\leadsto \color{blue}{\left(-\log y\right)} - t \]
    10. Simplified81.5%

      \[\leadsto \color{blue}{\left(-\log y\right)} - t \]

    if 3.0000000000000001e-191 < x < 4.99999999999999979e27

    1. Initial program 74.6%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.6%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
    3. Step-by-step derivation
      1. mul-1-neg99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
      2. unsub-neg99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
      3. *-commutative99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
      4. unpow299.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
      5. associate-*l*99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
    4. Simplified99.6%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
    5. Taylor expanded in y around inf 83.9%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(\left(z - 1\right) \cdot y\right) + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right)} - t \]
    6. Step-by-step derivation
      1. mul-1-neg83.9%

        \[\leadsto \left(\color{blue}{\left(-\left(z - 1\right) \cdot y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      2. distribute-rgt-neg-in83.9%

        \[\leadsto \left(\color{blue}{\left(z - 1\right) \cdot \left(-y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      3. *-commutative83.9%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(\left(z - 1\right) \cdot {y}^{2}\right) \cdot -0.5}\right) - t \]
      4. associate-*l*83.9%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(z - 1\right) \cdot \left({y}^{2} \cdot -0.5\right)}\right) - t \]
      5. unpow283.9%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5\right)\right) - t \]
      6. associate-*r*83.9%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right)\right)}\right) - t \]
      7. distribute-lft-in84.0%

        \[\leadsto \color{blue}{\left(z - 1\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right)} - t \]
      8. sub-neg84.0%

        \[\leadsto \color{blue}{\left(z + \left(-1\right)\right)} \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      9. metadata-eval84.0%

        \[\leadsto \left(z + \color{blue}{-1}\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      10. associate-*r*84.0%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{\left(y \cdot y\right) \cdot -0.5}\right) - t \]
      11. unpow284.0%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{{y}^{2}} \cdot -0.5\right) - t \]
      12. *-commutative84.0%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{-0.5 \cdot {y}^{2}}\right) - t \]
      13. +-commutative84.0%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-y\right)\right)} - t \]
      14. sub-neg84.0%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} - t \]
      15. unpow284.0%

        \[\leadsto \left(z + -1\right) \cdot \left(-0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y\right) - t \]
    7. Simplified84.0%

      \[\leadsto \color{blue}{\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)} - t \]
  3. Recombined 3 regimes into one program.
  4. Final simplification87.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.8 \cdot 10^{+27}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;x \leq 3 \cdot 10^{-191}:\\ \;\;\;\;\left(-\log y\right) - t\\ \mathbf{elif}\;x \leq 5 \cdot 10^{+27}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y - t\\ \end{array} \]

Alternative 6: 83.6% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y - t\\ \mathbf{if}\;x \leq -2.8 \cdot 10^{+27}:\\ \;\;\;\;t_1\\ \mathbf{elif}\;x \leq 7.4 \cdot 10^{-191}:\\ \;\;\;\;\left(y - \log y\right) - t\\ \mathbf{elif}\;x \leq 7.5 \cdot 10^{+27}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;t_1\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* x (log y)) t)))
   (if (<= x -2.8e+27)
     t_1
     (if (<= x 7.4e-191)
       (- (- y (log y)) t)
       (if (<= x 7.5e+27) (- (* (+ z -1.0) (- (* -0.5 (* y y)) y)) t) t_1)))))
/* Alternative 6: same regime split as Alternative 5, but the tiny-x
   regime keeps the +y term, (y - log(y)) - t, and the cutoffs differ
   (7.4e-191 / 7.5e+27). t_1 is computed unconditionally. */
double code(double x, double y, double z, double t) {
	double t_1 = (x * log(y)) - t;
	double tmp;
	if (x <= -2.8e+27) {
		tmp = t_1;  /* large negative x: x*log(y) dominates */
	} else if (x <= 7.4e-191) {
		tmp = (y - log(y)) - t;  /* x near 0 */
	} else if (x <= 7.5e+27) {
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;  /* log(1-y) ~ -y - y^2/2 */
	} else {
		tmp = t_1;  /* large positive x */
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: tmp
    t_1 = (x * log(y)) - t
    if (x <= (-2.8d+27)) then
        tmp = t_1
    else if (x <= 7.4d-191) then
        tmp = (y - log(y)) - t
    else if (x <= 7.5d+27) then
        tmp = ((z + (-1.0d0)) * (((-0.5d0) * (y * y)) - y)) - t
    else
        tmp = t_1
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double t_1 = (x * Math.log(y)) - t;
	double tmp;
	if (x <= -2.8e+27) {
		tmp = t_1;
	} else if (x <= 7.4e-191) {
		tmp = (y - Math.log(y)) - t;
	} else if (x <= 7.5e+27) {
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	} else {
		tmp = t_1;
	}
	return tmp;
}
def code(x, y, z, t):
	# Alternative 6: like Alternative 5 but the tiny-x regime keeps the
	# +y term, (y - log(y)) - t, and the regime cutoffs differ
	# (7.4e-191 / 7.5e+27). The fallback is computed unconditionally.
	fallback = (x * math.log(y)) - t
	if x <= -2.8e+27:
		return fallback
	if x <= 7.4e-191:
		return (y - math.log(y)) - t
	if x <= 7.5e+27:
		return ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t
	return fallback
function code(x, y, z, t)
	t_1 = Float64(Float64(x * log(y)) - t)
	tmp = 0.0
	if (x <= -2.8e+27)
		tmp = t_1;
	elseif (x <= 7.4e-191)
		tmp = Float64(Float64(y - log(y)) - t);
	elseif (x <= 7.5e+27)
		tmp = Float64(Float64(Float64(z + -1.0) * Float64(Float64(-0.5 * Float64(y * y)) - y)) - t);
	else
		tmp = t_1;
	end
	return tmp
end
% Regime-split (on x) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
function tmp_2 = code(x, y, z, t)
	% dominant-term form, used when |x| is very large
	t_1 = (x * log(y)) - t;
	tmp = 0.0;
	if (x <= -2.8e+27)
		tmp = t_1;
	elseif (x <= 7.4e-191)
		% x near zero: series form (y - log(y)) - t
		tmp = (y - log(y)) - t;
	elseif (x <= 7.5e+27)
		% moderate x: quadratic series in y replaces log(1 - y)
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	else
		tmp = t_1;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -2.8e+27], t$95$1, If[LessEqual[x, 7.4e-191], N[(N[(y - N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[x, 7.5e+27], N[(N[(N[(z + -1.0), $MachinePrecision] * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -2.8 \cdot 10^{+27}:\\
\;\;\;\;t_1\\

\mathbf{elif}\;x \leq 7.4 \cdot 10^{-191}:\\
\;\;\;\;\left(y - \log y\right) - t\\

\mathbf{elif}\;x \leq 7.5 \cdot 10^{+27}:\\
\;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\

\mathbf{else}:\\
\;\;\;\;t_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -2.7999999999999999e27 or 7.5000000000000002e27 < x

    1. Initial program 94.1%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.7%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)}\right) - t \]
    3. Taylor expanded in x around inf 93.5%

      \[\leadsto \color{blue}{\log y \cdot x} - t \]

    if -2.7999999999999999e27 < x < 7.3999999999999994e-191

    1. Initial program 85.3%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in z around 0 82.8%

      \[\leadsto \color{blue}{\left(\left(x - 1\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right)} - t \]
    3. Step-by-step derivation
      1. sub-neg82.8%

        \[\leadsto \left(\color{blue}{\left(x + \left(-1\right)\right)} \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      2. metadata-eval82.8%

        \[\leadsto \left(\left(x + \color{blue}{-1}\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      3. mul-1-neg82.8%

        \[\leadsto \left(\left(x + -1\right) \cdot \log y + \color{blue}{\left(-\log \left(1 - y\right)\right)}\right) - t \]
      4. unsub-neg82.8%

        \[\leadsto \color{blue}{\left(\left(x + -1\right) \cdot \log y - \log \left(1 - y\right)\right)} - t \]
      5. *-commutative82.8%

        \[\leadsto \left(\color{blue}{\log y \cdot \left(x + -1\right)} - \log \left(1 - y\right)\right) - t \]
      6. +-commutative82.8%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - \log \left(1 - y\right)\right) - t \]
      7. sub-neg82.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      8. mul-1-neg82.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \left(1 + \color{blue}{-1 \cdot y}\right)\right) - t \]
      9. log1p-def82.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \color{blue}{\mathsf{log1p}\left(-1 \cdot y\right)}\right) - t \]
      10. mul-1-neg82.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(\color{blue}{-y}\right)\right) - t \]
    4. Simplified82.8%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(-y\right)\right)} - t \]
    5. Taylor expanded in x around 0 82.5%

      \[\leadsto \left(\color{blue}{-1 \cdot \log y} - \mathsf{log1p}\left(-y\right)\right) - t \]
    6. Step-by-step derivation
      1. mul-1-neg82.5%

        \[\leadsto \left(\color{blue}{\left(-\log y\right)} - \mathsf{log1p}\left(-y\right)\right) - t \]
    7. Simplified82.5%

      \[\leadsto \left(\color{blue}{\left(-\log y\right)} - \mathsf{log1p}\left(-y\right)\right) - t \]
    8. Taylor expanded in y around 0 81.8%

      \[\leadsto \color{blue}{\left(y + -1 \cdot \log y\right)} - t \]
    9. Step-by-step derivation
      1. neg-mul-181.8%

        \[\leadsto \left(y + \color{blue}{\left(-\log y\right)}\right) - t \]
      2. unsub-neg81.8%

        \[\leadsto \color{blue}{\left(y - \log y\right)} - t \]
    10. Simplified81.8%

      \[\leadsto \color{blue}{\left(y - \log y\right)} - t \]

    if 7.3999999999999994e-191 < x < 7.5000000000000002e27

    1. Initial program 74.6%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.6%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
    3. Step-by-step derivation
      1. mul-1-neg99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
      2. unsub-neg99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
      3. *-commutative99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
      4. unpow299.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
      5. associate-*l*99.6%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
    4. Simplified99.6%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
    5. Taylor expanded in y around inf 83.9%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(\left(z - 1\right) \cdot y\right) + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right)} - t \]
    6. Step-by-step derivation
      1. mul-1-neg83.9%

        \[\leadsto \left(\color{blue}{\left(-\left(z - 1\right) \cdot y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      2. distribute-rgt-neg-in83.9%

        \[\leadsto \left(\color{blue}{\left(z - 1\right) \cdot \left(-y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      3. *-commutative83.9%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(\left(z - 1\right) \cdot {y}^{2}\right) \cdot -0.5}\right) - t \]
      4. associate-*l*83.9%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(z - 1\right) \cdot \left({y}^{2} \cdot -0.5\right)}\right) - t \]
      5. unpow283.9%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5\right)\right) - t \]
      6. associate-*r*83.9%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right)\right)}\right) - t \]
      7. distribute-lft-in84.0%

        \[\leadsto \color{blue}{\left(z - 1\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right)} - t \]
      8. sub-neg84.0%

        \[\leadsto \color{blue}{\left(z + \left(-1\right)\right)} \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      9. metadata-eval84.0%

        \[\leadsto \left(z + \color{blue}{-1}\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      10. associate-*r*84.0%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{\left(y \cdot y\right) \cdot -0.5}\right) - t \]
      11. unpow284.0%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{{y}^{2}} \cdot -0.5\right) - t \]
      12. *-commutative84.0%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{-0.5 \cdot {y}^{2}}\right) - t \]
      13. +-commutative84.0%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-y\right)\right)} - t \]
      14. sub-neg84.0%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} - t \]
      15. unpow284.0%

        \[\leadsto \left(z + -1\right) \cdot \left(-0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y\right) - t \]
    7. Simplified84.0%

      \[\leadsto \color{blue}{\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)} - t \]
  3. Recombined 3 regimes into one program.
  4. Final simplification — 87.4% accuracy

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.8 \cdot 10^{+27}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;x \leq 7.4 \cdot 10^{-191}:\\ \;\;\;\;\left(y - \log y\right) - t\\ \mathbf{elif}\;x \leq 7.5 \cdot 10^{+27}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y - t\\ \end{array} \]

Alternative 7: 87.3% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -1.06 \cdot 10^{+111}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\left(y + \log y \cdot \left(x + -1\right)\right) - t\\ \end{array} \end{array} \]
;; Alternative 7: two-regime split on z of ((x-1)*log(y) + (z-1)*log(1-y)) - t;
;; for huge negative z a quadratic series in y replaces log(1-y).
(FPCore (x y z t)
 :precision binary64
 (if (<= z -1.06e+111)
   (- (* (+ z -1.0) (- (* -0.5 (* y y)) y)) t)
   (- (+ y (* (log y) (+ x -1.0))) t)))
double code(double x, double y, double z, double t) {
	double tmp;
	if (z <= -1.06e+111) {
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	} else {
		tmp = (y + (log(y) * (x + -1.0))) - t;
	}
	return tmp;
}
! Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (z <= (-1.06d+111)) then
        ! huge negative z: quadratic series in y replaces log(1 - y)
        tmp = ((z + (-1.0d0)) * (((-0.5d0) * (y * y)) - y)) - t
    else
        ! otherwise: y + (x - 1) * log(y) approximation
        tmp = (y + (log(y) * (x + (-1.0d0)))) - t
    end if
    code = tmp
end function
// Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
public static double code(double x, double y, double z, double t) {
	double tmp;
	if (z <= -1.06e+111) {
		// huge negative z: quadratic series in y replaces log(1 - y)
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	} else {
		// otherwise: y + (x - 1) * log(y) approximation
		tmp = (y + (Math.log(y) * (x + -1.0))) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	"""Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t."""
	if z <= -1.06e+111:
		# huge negative z: quadratic series in y replaces log(1 - y)
		return ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t
	# otherwise log1p(-y) ~ -y, folded into y + (x - 1) * log(y)
	return (y + (math.log(y) * (x + -1.0))) - t
# Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
function code(x, y, z, t)
	tmp = 0.0
	if (z <= -1.06e+111)
		# huge negative z: quadratic series in y replaces log(1 - y)
		tmp = Float64(Float64(Float64(z + -1.0) * Float64(Float64(-0.5 * Float64(y * y)) - y)) - t);
	else
		# otherwise: y + (x - 1) * log(y) approximation
		tmp = Float64(Float64(y + Float64(log(y) * Float64(x + -1.0))) - t);
	end
	return tmp
end
% Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if (z <= -1.06e+111)
		% huge negative z: quadratic series in y replaces log(1 - y)
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	else
		% otherwise: y + (x - 1) * log(y) approximation
		tmp = (y + (log(y) * (x + -1.0))) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[LessEqual[z, -1.06e+111], N[(N[(N[(z + -1.0), $MachinePrecision] * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(y + N[(N[Log[y], $MachinePrecision] * N[(x + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq -1.06 \cdot 10^{+111}:\\
\;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\

\mathbf{else}:\\
\;\;\;\;\left(y + \log y \cdot \left(x + -1\right)\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -1.06e111

    1. Initial program 59.1%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 98.1%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
    3. Step-by-step derivation
      1. mul-1-neg98.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
      2. unsub-neg98.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
      3. *-commutative98.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
      4. unpow298.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
      5. associate-*l*98.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
    4. Simplified98.1%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
    5. Taylor expanded in y around inf 68.8%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(\left(z - 1\right) \cdot y\right) + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right)} - t \]
    6. Step-by-step derivation
      1. mul-1-neg68.8%

        \[\leadsto \left(\color{blue}{\left(-\left(z - 1\right) \cdot y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      2. distribute-rgt-neg-in68.8%

        \[\leadsto \left(\color{blue}{\left(z - 1\right) \cdot \left(-y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      3. *-commutative68.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(\left(z - 1\right) \cdot {y}^{2}\right) \cdot -0.5}\right) - t \]
      4. associate-*l*68.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(z - 1\right) \cdot \left({y}^{2} \cdot -0.5\right)}\right) - t \]
      5. unpow268.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5\right)\right) - t \]
      6. associate-*r*68.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right)\right)}\right) - t \]
      7. distribute-lft-in68.9%

        \[\leadsto \color{blue}{\left(z - 1\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right)} - t \]
      8. sub-neg68.9%

        \[\leadsto \color{blue}{\left(z + \left(-1\right)\right)} \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      9. metadata-eval68.9%

        \[\leadsto \left(z + \color{blue}{-1}\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      10. associate-*r*68.9%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{\left(y \cdot y\right) \cdot -0.5}\right) - t \]
      11. unpow268.9%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{{y}^{2}} \cdot -0.5\right) - t \]
      12. *-commutative68.9%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{-0.5 \cdot {y}^{2}}\right) - t \]
      13. +-commutative68.9%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-y\right)\right)} - t \]
      14. sub-neg68.9%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} - t \]
      15. unpow268.9%

        \[\leadsto \left(z + -1\right) \cdot \left(-0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y\right) - t \]
    7. Simplified68.9%

      \[\leadsto \color{blue}{\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)} - t \]

    if -1.06e111 < z

    1. Initial program 93.2%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in z around 0 92.4%

      \[\leadsto \color{blue}{\left(\left(x - 1\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right)} - t \]
    3. Step-by-step derivation
      1. sub-neg92.4%

        \[\leadsto \left(\color{blue}{\left(x + \left(-1\right)\right)} \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      2. metadata-eval92.4%

        \[\leadsto \left(\left(x + \color{blue}{-1}\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      3. mul-1-neg92.4%

        \[\leadsto \left(\left(x + -1\right) \cdot \log y + \color{blue}{\left(-\log \left(1 - y\right)\right)}\right) - t \]
      4. unsub-neg92.4%

        \[\leadsto \color{blue}{\left(\left(x + -1\right) \cdot \log y - \log \left(1 - y\right)\right)} - t \]
      5. *-commutative92.4%

        \[\leadsto \left(\color{blue}{\log y \cdot \left(x + -1\right)} - \log \left(1 - y\right)\right) - t \]
      6. +-commutative92.4%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - \log \left(1 - y\right)\right) - t \]
      7. sub-neg92.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      8. mul-1-neg92.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \left(1 + \color{blue}{-1 \cdot y}\right)\right) - t \]
      9. log1p-def92.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \color{blue}{\mathsf{log1p}\left(-1 \cdot y\right)}\right) - t \]
      10. mul-1-neg92.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(\color{blue}{-y}\right)\right) - t \]
    4. Simplified92.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(-y\right)\right)} - t \]
    5. Taylor expanded in y around 0 92.0%

      \[\leadsto \color{blue}{\left(y + \left(x - 1\right) \cdot \log y\right)} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification — 88.1% accuracy

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -1.06 \cdot 10^{+111}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\left(y + \log y \cdot \left(x + -1\right)\right) - t\\ \end{array} \]

Alternative 8: 87.1% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -1.06 \cdot 10^{+111}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\log y \cdot \left(x + -1\right) - t\\ \end{array} \end{array} \]
;; Alternative 8: like Alternative 7 but the moderate-z branch drops the
;; leading y term, keeping only (x-1)*log(y) - t.
(FPCore (x y z t)
 :precision binary64
 (if (<= z -1.06e+111)
   (- (* (+ z -1.0) (- (* -0.5 (* y y)) y)) t)
   (- (* (log y) (+ x -1.0)) t)))
double code(double x, double y, double z, double t) {
	double tmp;
	if (z <= -1.06e+111) {
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	} else {
		tmp = (log(y) * (x + -1.0)) - t;
	}
	return tmp;
}
! Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (z <= (-1.06d+111)) then
        ! huge negative z: quadratic series in y replaces log(1 - y)
        tmp = ((z + (-1.0d0)) * (((-0.5d0) * (y * y)) - y)) - t
    else
        ! otherwise: dominant (x - 1) * log(y) term only
        tmp = (log(y) * (x + (-1.0d0))) - t
    end if
    code = tmp
end function
// Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
public static double code(double x, double y, double z, double t) {
	double tmp;
	if (z <= -1.06e+111) {
		// huge negative z: quadratic series in y replaces log(1 - y)
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	} else {
		// otherwise: dominant (x - 1) * log(y) term only
		tmp = (Math.log(y) * (x + -1.0)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	"""Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t."""
	if z <= -1.06e+111:
		# huge negative z: quadratic series in y replaces log(1 - y)
		return ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t
	# otherwise: dominant (x - 1) * log(y) term only
	return (math.log(y) * (x + -1.0)) - t
# Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
function code(x, y, z, t)
	tmp = 0.0
	if (z <= -1.06e+111)
		# huge negative z: quadratic series in y replaces log(1 - y)
		tmp = Float64(Float64(Float64(z + -1.0) * Float64(Float64(-0.5 * Float64(y * y)) - y)) - t);
	else
		# otherwise: dominant (x - 1) * log(y) term only
		tmp = Float64(Float64(log(y) * Float64(x + -1.0)) - t);
	end
	return tmp
end
% Two-regime (split on z) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if (z <= -1.06e+111)
		% huge negative z: quadratic series in y replaces log(1 - y)
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	else
		% otherwise: dominant (x - 1) * log(y) term only
		tmp = (log(y) * (x + -1.0)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[LessEqual[z, -1.06e+111], N[(N[(N[(z + -1.0), $MachinePrecision] * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(N[Log[y], $MachinePrecision] * N[(x + -1.0), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq -1.06 \cdot 10^{+111}:\\
\;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\

\mathbf{else}:\\
\;\;\;\;\log y \cdot \left(x + -1\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -1.06e111

    1. Initial program 59.1%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 98.1%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
    3. Step-by-step derivation
      1. mul-1-neg98.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
      2. unsub-neg98.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
      3. *-commutative98.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
      4. unpow298.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
      5. associate-*l*98.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
    4. Simplified98.1%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
    5. Taylor expanded in y around inf 68.8%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(\left(z - 1\right) \cdot y\right) + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right)} - t \]
    6. Step-by-step derivation
      1. mul-1-neg68.8%

        \[\leadsto \left(\color{blue}{\left(-\left(z - 1\right) \cdot y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      2. distribute-rgt-neg-in68.8%

        \[\leadsto \left(\color{blue}{\left(z - 1\right) \cdot \left(-y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      3. *-commutative68.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(\left(z - 1\right) \cdot {y}^{2}\right) \cdot -0.5}\right) - t \]
      4. associate-*l*68.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(z - 1\right) \cdot \left({y}^{2} \cdot -0.5\right)}\right) - t \]
      5. unpow268.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5\right)\right) - t \]
      6. associate-*r*68.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right)\right)}\right) - t \]
      7. distribute-lft-in68.9%

        \[\leadsto \color{blue}{\left(z - 1\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right)} - t \]
      8. sub-neg68.9%

        \[\leadsto \color{blue}{\left(z + \left(-1\right)\right)} \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      9. metadata-eval68.9%

        \[\leadsto \left(z + \color{blue}{-1}\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      10. associate-*r*68.9%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{\left(y \cdot y\right) \cdot -0.5}\right) - t \]
      11. unpow268.9%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{{y}^{2}} \cdot -0.5\right) - t \]
      12. *-commutative68.9%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{-0.5 \cdot {y}^{2}}\right) - t \]
      13. +-commutative68.9%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-y\right)\right)} - t \]
      14. sub-neg68.9%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} - t \]
      15. unpow268.9%

        \[\leadsto \left(z + -1\right) \cdot \left(-0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y\right) - t \]
    7. Simplified68.9%

      \[\leadsto \color{blue}{\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)} - t \]

    if -1.06e111 < z

    1. Initial program 93.2%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 91.7%

      \[\leadsto \color{blue}{\left(x - 1\right) \cdot \log y} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification — 87.9% accuracy

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -1.06 \cdot 10^{+111}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\log y \cdot \left(x + -1\right) - t\\ \end{array} \]

Alternative 9: 60.1% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -2.7 \cdot 10^{+107} \lor \neg \left(z \leq 6.5 \cdot 10^{+48}\right):\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\left(-\log y\right) - t\\ \end{array} \end{array} \]
;; Alternative 9: split on |z|; large |z| uses the quadratic series in y,
;; moderate z keeps only -log(y) - t.
(FPCore (x y z t)
 :precision binary64
 (if (or (<= z -2.7e+107) (not (<= z 6.5e+48)))
   (- (* (+ z -1.0) (- (* -0.5 (* y y)) y)) t)
   (- (- (log y)) t)))
double code(double x, double y, double z, double t) {
	double tmp;
	if ((z <= -2.7e+107) || !(z <= 6.5e+48)) {
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	} else {
		tmp = -log(y) - t;
	}
	return tmp;
}
! Two-regime (split on |z|) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((z <= (-2.7d+107)) .or. (.not. (z <= 6.5d+48))) then
        ! |z| very large: quadratic series in y replaces log(1 - y)
        tmp = ((z + (-1.0d0)) * (((-0.5d0) * (y * y)) - y)) - t
    else
        ! moderate z: only the -log(y) term survives
        tmp = -log(y) - t
    end if
    code = tmp
end function
// Two-regime (split on |z|) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
public static double code(double x, double y, double z, double t) {
	double tmp;
	if ((z <= -2.7e+107) || !(z <= 6.5e+48)) {
		// |z| very large: quadratic series in y replaces log(1 - y)
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	} else {
		// moderate z: only the -log(y) term survives
		tmp = -Math.log(y) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	"""Two-regime (split on |z|) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t."""
	if (z <= -2.7e+107) or not (z <= 6.5e+48):
		# |z| very large: quadratic series in y replaces log(1 - y)
		return ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t
	# moderate z: only the -log(y) term survives
	return -math.log(y) - t
# Two-regime (split on |z|) approximation of ((x-1)*log(y) + (z-1)*log(1-y)) - t.
function code(x, y, z, t)
	tmp = 0.0
	if ((z <= -2.7e+107) || !(z <= 6.5e+48))
		# |z| very large: quadratic series in y replaces log(1 - y)
		tmp = Float64(Float64(Float64(z + -1.0) * Float64(Float64(-0.5 * Float64(y * y)) - y)) - t);
	else
		# moderate z: only the -log(y) term survives
		tmp = Float64(Float64(-log(y)) - t);
	end
	return tmp
end
% Herbie alternative 9 for ((x-1)*log(y) + (z-1)*log(1-y)) - t.
% Extreme-z branch: quadratic Taylor polynomial of log(1-y) in y;
% otherwise -log(y) - t.  x is unused in this alternative.
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if ((z <= -2.7e+107) || ~((z <= 6.5e+48)))
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	else
		tmp = -log(y) - t;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 9: branch on z; extreme-z uses the quadratic Taylor polynomial of Log[1-y], otherwise -Log[y] - t.  x is unused. *)
code[x_, y_, z_, t_] := If[Or[LessEqual[z, -2.7e+107], N[Not[LessEqual[z, 6.5e+48]], $MachinePrecision]], N[(N[(N[(z + -1.0), $MachinePrecision] * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[((-N[Log[y], $MachinePrecision]) - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq -2.7 \cdot 10^{+107} \lor \neg \left(z \leq 6.5 \cdot 10^{+48}\right):\\
\;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\

\mathbf{else}:\\
\;\;\;\;\left(-\log y\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -2.7000000000000001e107 or 6.49999999999999972e48 < z

    1. Initial program 67.7%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 98.2%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
    3. Step-by-step derivation
      1. mul-1-neg98.2%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
      2. unsub-neg98.2%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
      3. *-commutative98.2%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
      4. unpow298.2%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
      5. associate-*l*98.2%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
    4. Simplified98.2%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
    5. Taylor expanded in y around inf 59.8%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(\left(z - 1\right) \cdot y\right) + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right)} - t \]
    6. Step-by-step derivation
      1. mul-1-neg59.8%

        \[\leadsto \left(\color{blue}{\left(-\left(z - 1\right) \cdot y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      2. distribute-rgt-neg-in59.8%

        \[\leadsto \left(\color{blue}{\left(z - 1\right) \cdot \left(-y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      3. *-commutative59.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(\left(z - 1\right) \cdot {y}^{2}\right) \cdot -0.5}\right) - t \]
      4. associate-*l*59.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(z - 1\right) \cdot \left({y}^{2} \cdot -0.5\right)}\right) - t \]
      5. unpow259.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5\right)\right) - t \]
      6. associate-*r*59.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right)\right)}\right) - t \]
      7. distribute-lft-in59.8%

        \[\leadsto \color{blue}{\left(z - 1\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right)} - t \]
      8. sub-neg59.8%

        \[\leadsto \color{blue}{\left(z + \left(-1\right)\right)} \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      9. metadata-eval59.8%

        \[\leadsto \left(z + \color{blue}{-1}\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      10. associate-*r*59.8%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{\left(y \cdot y\right) \cdot -0.5}\right) - t \]
      11. unpow259.8%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{{y}^{2}} \cdot -0.5\right) - t \]
      12. *-commutative59.8%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{-0.5 \cdot {y}^{2}}\right) - t \]
      13. +-commutative59.8%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-y\right)\right)} - t \]
      14. sub-neg59.8%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} - t \]
      15. unpow259.8%

        \[\leadsto \left(z + -1\right) \cdot \left(-0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y\right) - t \]
    7. Simplified59.8%

      \[\leadsto \color{blue}{\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)} - t \]

    if -2.7000000000000001e107 < z < 6.49999999999999972e48

    1. Initial program 99.8%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in z around 0 99.8%

      \[\leadsto \color{blue}{\left(\left(x - 1\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right)} - t \]
    3. Step-by-step derivation
      1. sub-neg99.8%

        \[\leadsto \left(\color{blue}{\left(x + \left(-1\right)\right)} \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      2. metadata-eval99.8%

        \[\leadsto \left(\left(x + \color{blue}{-1}\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      3. mul-1-neg99.8%

        \[\leadsto \left(\left(x + -1\right) \cdot \log y + \color{blue}{\left(-\log \left(1 - y\right)\right)}\right) - t \]
      4. unsub-neg99.8%

        \[\leadsto \color{blue}{\left(\left(x + -1\right) \cdot \log y - \log \left(1 - y\right)\right)} - t \]
      5. *-commutative99.8%

        \[\leadsto \left(\color{blue}{\log y \cdot \left(x + -1\right)} - \log \left(1 - y\right)\right) - t \]
      6. +-commutative99.8%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - \log \left(1 - y\right)\right) - t \]
      7. sub-neg99.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      8. mul-1-neg99.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \left(1 + \color{blue}{-1 \cdot y}\right)\right) - t \]
      9. log1p-def99.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \color{blue}{\mathsf{log1p}\left(-1 \cdot y\right)}\right) - t \]
      10. mul-1-neg99.8%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(\color{blue}{-y}\right)\right) - t \]
    4. Simplified99.8%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(-y\right)\right)} - t \]
    5. Taylor expanded in x around 0 61.7%

      \[\leadsto \left(\color{blue}{-1 \cdot \log y} - \mathsf{log1p}\left(-y\right)\right) - t \]
    6. Step-by-step derivation
      1. mul-1-neg61.7%

        \[\leadsto \left(\color{blue}{\left(-\log y\right)} - \mathsf{log1p}\left(-y\right)\right) - t \]
    7. Simplified61.7%

      \[\leadsto \left(\color{blue}{\left(-\log y\right)} - \mathsf{log1p}\left(-y\right)\right) - t \]
    8. Taylor expanded in y around 0 60.8%

      \[\leadsto \color{blue}{-1 \cdot \log y} - t \]
    9. Step-by-step derivation
      1. neg-mul-160.8%

        \[\leadsto \color{blue}{\left(-\log y\right)} - t \]
    10. Simplified60.8%

      \[\leadsto \color{blue}{\left(-\log y\right)} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification60.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -2.7 \cdot 10^{+107} \lor \neg \left(z \leq 6.5 \cdot 10^{+48}\right):\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\left(-\log y\right) - t\\ \end{array} \]

Alternative 10: 53.6% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -8.5 \cdot 10^{-12}:\\ \;\;\;\;\left(z \cdot y\right) \cdot \left(-1 + y \cdot -0.5\right) - t\\ \mathbf{elif}\;t \leq 8 \cdot 10^{-122}:\\ \;\;\;\;-\log y\\ \mathbf{else}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \end{array} \end{array} \]
;; Herbie alternative 10 (53.6% accurate, 2.0x speedup) for
;; ((x-1)*log(y) + (z-1)*log(1-y)) - t, split into three regimes on t.
;; Tiny-|t| middle regime drops x, z, and t entirely.
(FPCore (x y z t)
 :precision binary64
 (if (<= t -8.5e-12)
   (- (* (* z y) (+ -1.0 (* y -0.5))) t)
   (if (<= t 8e-122) (- (log y)) (- (* (+ z -1.0) (- (* -0.5 (* y y)) y)) t))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (t <= -8.5e-12) {
		tmp = ((z * y) * (-1.0 + (y * -0.5))) - t;
	} else if (t <= 8e-122) {
		tmp = -log(y);
	} else {
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	}
	return tmp;
}
! Herbie alternative 10 for ((x-1)*log(y) + (z-1)*log(1-y)) - t,
! with three regimes on t; the tiny-|t| middle regime returns -log(y).
! x is unused on every path.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (t <= (-8.5d-12)) then
        tmp = ((z * y) * ((-1.0d0) + (y * (-0.5d0)))) - t
    else if (t <= 8d-122) then
        tmp = -log(y)
    else
        tmp = ((z + (-1.0d0)) * (((-0.5d0) * (y * y)) - y)) - t
    end if
    code = tmp
end function
/**
 * Herbie alternative 10 for ((x - 1)*ln(y) + (z - 1)*ln(1 - y)) - t,
 * three regimes on t; the tiny-|t| middle regime returns -ln(y).
 * NOTE(review): parameter x is unused on every path.
 * Sequential guard returns reproduce the original if/else-if/else,
 * including NaN t falling through to the final expression.
 */
public static double code(double x, double y, double z, double t) {
	if (t <= -8.5e-12) {
		return ((z * y) * (-1.0 + (y * -0.5))) - t;
	}
	if (t <= 8e-122) {
		return -Math.log(y);
	}
	return ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
}
def code(x, y, z, t):
	"""Herbie alternative 10 for ((x-1)*log(y) + (z-1)*log(1-y)) - t.

	Three regimes on t; the tiny-|t| middle regime drops x, z, and t
	and returns -log(y).  NOTE(review): x is unused on every path.
	Guard returns replicate the original if/elif/else ordering.
	"""
	if t <= -8.5e-12:
		return ((z * y) * (-1.0 + (y * -0.5))) - t
	if t <= 8e-122:
		return -math.log(y)
	return ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t
# Herbie alternative 10 for ((x-1)*log(y) + (z-1)*log(1-y)) - t,
# three regimes on t; tiny-|t| middle regime returns -log(y).
# x is unused on every path.
function code(x, y, z, t)
	tmp = 0.0
	if (t <= -8.5e-12)
		tmp = Float64(Float64(Float64(z * y) * Float64(-1.0 + Float64(y * -0.5))) - t);
	elseif (t <= 8e-122)
		tmp = Float64(-log(y));
	else
		tmp = Float64(Float64(Float64(z + -1.0) * Float64(Float64(-0.5 * Float64(y * y)) - y)) - t);
	end
	return tmp
end
% Herbie alternative 10 for ((x-1)*log(y) + (z-1)*log(1-y)) - t,
% three regimes on t; tiny-|t| middle regime returns -log(y).
% x is unused on every path.
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if (t <= -8.5e-12)
		tmp = ((z * y) * (-1.0 + (y * -0.5))) - t;
	elseif (t <= 8e-122)
		tmp = -log(y);
	else
		tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 10: three regimes on t; the tiny-|t| middle regime returns -Log[y].  x is unused. *)
code[x_, y_, z_, t_] := If[LessEqual[t, -8.5e-12], N[(N[(N[(z * y), $MachinePrecision] * N[(-1.0 + N[(y * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[t, 8e-122], (-N[Log[y], $MachinePrecision]), N[(N[(N[(z + -1.0), $MachinePrecision] * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -8.5 \cdot 10^{-12}:\\
\;\;\;\;\left(z \cdot y\right) \cdot \left(-1 + y \cdot -0.5\right) - t\\

\mathbf{elif}\;t \leq 8 \cdot 10^{-122}:\\
\;\;\;\;-\log y\\

\mathbf{else}:\\
\;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if t < -8.4999999999999997e-12

    1. Initial program 95.5%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
    3. Step-by-step derivation
      1. mul-1-neg99.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
      2. unsub-neg99.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
      3. *-commutative99.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
      4. unpow299.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
      5. associate-*l*99.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
    4. Simplified99.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
    5. Taylor expanded in z around inf 70.0%

      \[\leadsto \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right) \cdot z} - t \]
    6. Step-by-step derivation
      1. *-commutative70.0%

        \[\leadsto \color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} - y\right)} - t \]
      2. sub-neg70.0%

        \[\leadsto z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-y\right)\right)} - t \]
      3. +-commutative70.0%

        \[\leadsto z \cdot \color{blue}{\left(\left(-y\right) + -0.5 \cdot {y}^{2}\right)} - t \]
      4. *-commutative70.0%

        \[\leadsto z \cdot \left(\left(-y\right) + \color{blue}{{y}^{2} \cdot -0.5}\right) - t \]
      5. unpow270.0%

        \[\leadsto z \cdot \left(\left(-y\right) + \color{blue}{\left(y \cdot y\right)} \cdot -0.5\right) - t \]
      6. associate-*r*70.0%

        \[\leadsto z \cdot \left(\left(-y\right) + \color{blue}{y \cdot \left(y \cdot -0.5\right)}\right) - t \]
      7. distribute-lft-in70.0%

        \[\leadsto \color{blue}{\left(z \cdot \left(-y\right) + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right)} - t \]
      8. distribute-rgt-neg-in70.0%

        \[\leadsto \left(\color{blue}{\left(-z \cdot y\right)} + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right) - t \]
      9. *-commutative70.0%

        \[\leadsto \left(\left(-\color{blue}{y \cdot z}\right) + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right) - t \]
      10. mul-1-neg70.0%

        \[\leadsto \left(\color{blue}{-1 \cdot \left(y \cdot z\right)} + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right) - t \]
      11. *-commutative70.0%

        \[\leadsto \left(\color{blue}{\left(y \cdot z\right) \cdot -1} + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right) - t \]
      12. associate-*r*70.0%

        \[\leadsto \left(\left(y \cdot z\right) \cdot -1 + \color{blue}{\left(z \cdot y\right) \cdot \left(y \cdot -0.5\right)}\right) - t \]
      13. *-commutative70.0%

        \[\leadsto \left(\left(y \cdot z\right) \cdot -1 + \color{blue}{\left(y \cdot z\right)} \cdot \left(y \cdot -0.5\right)\right) - t \]
      14. distribute-lft-out70.0%

        \[\leadsto \color{blue}{\left(y \cdot z\right) \cdot \left(-1 + y \cdot -0.5\right)} - t \]
      15. *-commutative70.0%

        \[\leadsto \left(y \cdot z\right) \cdot \left(-1 + \color{blue}{-0.5 \cdot y}\right) - t \]
    7. Simplified70.0%

      \[\leadsto \color{blue}{\left(y \cdot z\right) \cdot \left(-1 + -0.5 \cdot y\right)} - t \]

    if -8.4999999999999997e-12 < t < 8.00000000000000047e-122

    1. Initial program 83.3%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in z around 0 80.3%

      \[\leadsto \color{blue}{\left(\left(x - 1\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right)} - t \]
    3. Step-by-step derivation
      1. sub-neg80.3%

        \[\leadsto \left(\color{blue}{\left(x + \left(-1\right)\right)} \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      2. metadata-eval80.3%

        \[\leadsto \left(\left(x + \color{blue}{-1}\right) \cdot \log y + -1 \cdot \log \left(1 - y\right)\right) - t \]
      3. mul-1-neg80.3%

        \[\leadsto \left(\left(x + -1\right) \cdot \log y + \color{blue}{\left(-\log \left(1 - y\right)\right)}\right) - t \]
      4. unsub-neg80.3%

        \[\leadsto \color{blue}{\left(\left(x + -1\right) \cdot \log y - \log \left(1 - y\right)\right)} - t \]
      5. *-commutative80.3%

        \[\leadsto \left(\color{blue}{\log y \cdot \left(x + -1\right)} - \log \left(1 - y\right)\right) - t \]
      6. +-commutative80.3%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - \log \left(1 - y\right)\right) - t \]
      7. sub-neg80.3%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      8. mul-1-neg80.3%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \log \left(1 + \color{blue}{-1 \cdot y}\right)\right) - t \]
      9. log1p-def80.3%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \color{blue}{\mathsf{log1p}\left(-1 \cdot y\right)}\right) - t \]
      10. mul-1-neg80.3%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(\color{blue}{-y}\right)\right) - t \]
    4. Simplified80.3%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - \mathsf{log1p}\left(-y\right)\right)} - t \]
    5. Taylor expanded in x around 0 38.0%

      \[\leadsto \left(\color{blue}{-1 \cdot \log y} - \mathsf{log1p}\left(-y\right)\right) - t \]
    6. Step-by-step derivation
      1. mul-1-neg38.0%

        \[\leadsto \left(\color{blue}{\left(-\log y\right)} - \mathsf{log1p}\left(-y\right)\right) - t \]
    7. Simplified38.0%

      \[\leadsto \left(\color{blue}{\left(-\log y\right)} - \mathsf{log1p}\left(-y\right)\right) - t \]
    8. Taylor expanded in y around 0 37.8%

      \[\leadsto \color{blue}{-1 \cdot \log y} - t \]
    9. Step-by-step derivation
      1. neg-mul-137.8%

        \[\leadsto \color{blue}{\left(-\log y\right)} - t \]
    10. Simplified37.8%

      \[\leadsto \color{blue}{\left(-\log y\right)} - t \]
    11. Taylor expanded in t around 0 37.8%

      \[\leadsto \color{blue}{-1 \cdot \log y} \]
    12. Step-by-step derivation
      1. neg-mul-137.8%

        \[\leadsto \color{blue}{-\log y} \]
    13. Simplified37.8%

      \[\leadsto \color{blue}{-\log y} \]

    if 8.00000000000000047e-122 < t

    1. Initial program 86.1%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 98.8%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
    3. Step-by-step derivation
      1. mul-1-neg98.8%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
      2. unsub-neg98.8%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
      3. *-commutative98.8%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
      4. unpow298.8%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
      5. associate-*l*98.8%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
    4. Simplified98.8%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
    5. Taylor expanded in y around inf 57.8%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(\left(z - 1\right) \cdot y\right) + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right)} - t \]
    6. Step-by-step derivation
      1. mul-1-neg57.8%

        \[\leadsto \left(\color{blue}{\left(-\left(z - 1\right) \cdot y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      2. distribute-rgt-neg-in57.8%

        \[\leadsto \left(\color{blue}{\left(z - 1\right) \cdot \left(-y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
      3. *-commutative57.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(\left(z - 1\right) \cdot {y}^{2}\right) \cdot -0.5}\right) - t \]
      4. associate-*l*57.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(z - 1\right) \cdot \left({y}^{2} \cdot -0.5\right)}\right) - t \]
      5. unpow257.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5\right)\right) - t \]
      6. associate-*r*57.8%

        \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right)\right)}\right) - t \]
      7. distribute-lft-in57.8%

        \[\leadsto \color{blue}{\left(z - 1\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right)} - t \]
      8. sub-neg57.8%

        \[\leadsto \color{blue}{\left(z + \left(-1\right)\right)} \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      9. metadata-eval57.8%

        \[\leadsto \left(z + \color{blue}{-1}\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
      10. associate-*r*57.8%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{\left(y \cdot y\right) \cdot -0.5}\right) - t \]
      11. unpow257.8%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{{y}^{2}} \cdot -0.5\right) - t \]
      12. *-commutative57.8%

        \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{-0.5 \cdot {y}^{2}}\right) - t \]
      13. +-commutative57.8%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-y\right)\right)} - t \]
      14. sub-neg57.8%

        \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} - t \]
      15. unpow257.8%

        \[\leadsto \left(z + -1\right) \cdot \left(-0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y\right) - t \]
    7. Simplified57.8%

      \[\leadsto \color{blue}{\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)} - t \]
  3. Recombined 3 regimes into one program.
  4. Final simplification53.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -8.5 \cdot 10^{-12}:\\ \;\;\;\;\left(z \cdot y\right) \cdot \left(-1 + y \cdot -0.5\right) - t\\ \mathbf{elif}\;t \leq 8 \cdot 10^{-122}:\\ \;\;\;\;-\log y\\ \mathbf{else}:\\ \;\;\;\;\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t\\ \end{array} \]

Alternative 11: 42.3% accurate, 14.2× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.0063 \lor \neg \left(t \leq 1.85 \cdot 10^{+59}\right):\\ \;\;\;\;\left(y \cdot y\right) \cdot \left(0.5 + z \cdot -0.5\right) - t\\ \mathbf{else}:\\ \;\;\;\;z \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)\\ \end{array} \end{array} \]
;; Herbie alternative 11 (42.3% accurate, 14.2x speedup) for
;; ((x-1)*log(y) + (z-1)*log(1-y)) - t; both branches are log-free
;; Taylor approximations, split on t.  x is unused here.
(FPCore (x y z t)
 :precision binary64
 (if (or (<= t -0.0063) (not (<= t 1.85e+59)))
   (- (* (* y y) (+ 0.5 (* z -0.5))) t)
   (* z (- (* -0.5 (* y y)) y))))
/* Herbie alternative 11 for ((x - 1)*log(y) + (z - 1)*log(1 - y)) - t.
 * Both branches are log-free Taylor approximations, split on t.
 * NOTE(review): parameter x is unused on both paths.
 * The negated comparison !(t <= 1.85e+59) is kept verbatim so NaN t
 * takes the first branch, exactly as in the original. */
double code(double x, double y, double z, double t) {
	if ((t <= -0.0063) || !(t <= 1.85e+59)) {
		return ((y * y) * (0.5 + (z * -0.5))) - t;
	}
	return z * ((-0.5 * (y * y)) - y);
}
! Herbie alternative 11 for ((x-1)*log(y) + (z-1)*log(1-y)) - t.
! Both branches are log-free Taylor approximations, split on t.
! x is unused on both paths.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((t <= (-0.0063d0)) .or. (.not. (t <= 1.85d+59))) then
        tmp = ((y * y) * (0.5d0 + (z * (-0.5d0)))) - t
    else
        tmp = z * (((-0.5d0) * (y * y)) - y)
    end if
    code = tmp
end function
/**
 * Herbie alternative 11 for ((x - 1)*ln(y) + (z - 1)*ln(1 - y)) - t.
 * Both branches are log-free Taylor approximations, split on t.
 * NOTE(review): parameter x is unused on both paths.
 * The negated comparison keeps NaN t on the first branch, as original.
 */
public static double code(double x, double y, double z, double t) {
	final boolean tailRegime = (t <= -0.0063) || !(t <= 1.85e+59);
	if (tailRegime) {
		return ((y * y) * (0.5 + (z * -0.5))) - t;
	}
	return z * ((-0.5 * (y * y)) - y);
}
def code(x, y, z, t):
	"""Herbie alternative 11 for ((x-1)*log(y) + (z-1)*log(1-y)) - t.

	Both branches are log-free Taylor approximations, split on t.
	NOTE(review): x is unused on both paths.  The `not t <= ...`
	form is kept so NaN t takes the first branch, as in the original.
	"""
	tail_regime = t <= -0.0063 or not t <= 1.85e+59
	if tail_regime:
		return ((y * y) * (0.5 + (z * -0.5))) - t
	return z * ((-0.5 * (y * y)) - y)
# Herbie alternative 11 for ((x-1)*log(y) + (z-1)*log(1-y)) - t.
# Both branches are log-free Taylor approximations, split on t.
# x is unused on both paths.
function code(x, y, z, t)
	tmp = 0.0
	if ((t <= -0.0063) || !(t <= 1.85e+59))
		tmp = Float64(Float64(Float64(y * y) * Float64(0.5 + Float64(z * -0.5))) - t);
	else
		tmp = Float64(z * Float64(Float64(-0.5 * Float64(y * y)) - y));
	end
	return tmp
end
% Herbie alternative 11 for ((x-1)*log(y) + (z-1)*log(1-y)) - t.
% Both branches are log-free Taylor approximations, split on t.
% x is unused on both paths.
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if ((t <= -0.0063) || ~((t <= 1.85e+59)))
		tmp = ((y * y) * (0.5 + (z * -0.5))) - t;
	else
		tmp = z * ((-0.5 * (y * y)) - y);
	end
	tmp_2 = tmp;
end
(* Herbie alternative 11: split on t; both branches are log-free Taylor approximations.  x is unused. *)
code[x_, y_, z_, t_] := If[Or[LessEqual[t, -0.0063], N[Not[LessEqual[t, 1.85e+59]], $MachinePrecision]], N[(N[(N[(y * y), $MachinePrecision] * N[(0.5 + N[(z * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(z * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.0063 \lor \neg \left(t \leq 1.85 \cdot 10^{+59}\right):\\
\;\;\;\;\left(y \cdot y\right) \cdot \left(0.5 + z \cdot -0.5\right) - t\\

\mathbf{else}:\\
\;\;\;\;z \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.0063 or 1.84999999999999999e59 < t

    1. Initial program 96.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 99.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
    3. Step-by-step derivation
      1. mul-1-neg99.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
      2. unsub-neg99.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
      3. *-commutative99.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
      4. unpow299.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
      5. associate-*l*99.9%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
    4. Simplified99.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
    5. Taylor expanded in y around inf 70.6%

      \[\leadsto \color{blue}{-0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)} - t \]
    6. Step-by-step derivation
      1. associate-*r*70.6%

        \[\leadsto \color{blue}{\left(-0.5 \cdot \left(z - 1\right)\right) \cdot {y}^{2}} - t \]
      2. *-commutative70.6%

        \[\leadsto \color{blue}{{y}^{2} \cdot \left(-0.5 \cdot \left(z - 1\right)\right)} - t \]
      3. unpow270.6%

        \[\leadsto \color{blue}{\left(y \cdot y\right)} \cdot \left(-0.5 \cdot \left(z - 1\right)\right) - t \]
      4. sub-neg70.6%

        \[\leadsto \left(y \cdot y\right) \cdot \left(-0.5 \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
      5. metadata-eval70.6%

        \[\leadsto \left(y \cdot y\right) \cdot \left(-0.5 \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
      6. +-commutative70.6%

        \[\leadsto \left(y \cdot y\right) \cdot \left(-0.5 \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
      7. distribute-rgt-in70.6%

        \[\leadsto \left(y \cdot y\right) \cdot \color{blue}{\left(-1 \cdot -0.5 + z \cdot -0.5\right)} - t \]
      8. metadata-eval70.6%

        \[\leadsto \left(y \cdot y\right) \cdot \left(\color{blue}{0.5} + z \cdot -0.5\right) - t \]
    7. Simplified70.6%

      \[\leadsto \color{blue}{\left(y \cdot y\right) \cdot \left(0.5 + z \cdot -0.5\right)} - t \]

    if -0.0063 < t < 1.84999999999999999e59

    1. Initial program 80.8%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 98.8%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)}\right) - t \]
    3. Taylor expanded in x around 0 54.4%

      \[\leadsto \left(\color{blue}{-1 \cdot \log y} + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)\right) - t \]
    4. Step-by-step derivation
      1. mul-1-neg54.4%

        \[\leadsto \left(\color{blue}{\left(-\log y\right)} + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)\right) - t \]
    5. Simplified54.4%

      \[\leadsto \left(\color{blue}{\left(-\log y\right)} + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)\right) - t \]
    6. Taylor expanded in z around inf 22.7%

      \[\leadsto \color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)} \]
    7. Taylor expanded in y around 0 22.1%

      \[\leadsto z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)} \]
    8. Step-by-step derivation
      1. neg-mul-122.1%

        \[\leadsto z \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right) \]
      2. sub-neg22.1%

        \[\leadsto z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} \]
      3. unpow222.1%

        \[\leadsto z \cdot \left(-0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y\right) \]
    9. Simplified22.1%

      \[\leadsto z \cdot \color{blue}{\left(-0.5 \cdot \left(y \cdot y\right) - y\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification42.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.0063 \lor \neg \left(t \leq 1.85 \cdot 10^{+59}\right):\\ \;\;\;\;\left(y \cdot y\right) \cdot \left(0.5 + z \cdot -0.5\right) - t\\ \mathbf{else}:\\ \;\;\;\;z \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)\\ \end{array} \]

Alternative 12: 42.2% accurate, 16.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.0033:\\ \;\;\;\;-t\\ \mathbf{elif}\;t \leq 1.85 \cdot 10^{+59}:\\ \;\;\;\;z \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \end{array} \]
;; Herbie alternative 12 (42.2% accurate, 16.4x speedup) for
;; ((x-1)*log(y) + (z-1)*log(1-y)) - t: both t-tails return -t;
;; the middle regime uses a log-free Taylor polynomial.  x is unused.
(FPCore (x y z t)
 :precision binary64
 (if (<= t -0.0033)
   (- t)
   (if (<= t 1.85e+59) (* z (- (* -0.5 (* y y)) y)) (- t))))
/* Herbie alternative 12 for ((x - 1)*log(y) + (z - 1)*log(1 - y)) - t.
 * Both t-tails return -t; the middle regime uses a log-free Taylor
 * polynomial.  NOTE(review): parameter x is unused on every path.
 * The range test t > -0.0033 && t <= 1.85e+59 is equivalent to the
 * original if/else-if/else: NaN t fails it and returns -t, as before. */
double code(double x, double y, double z, double t) {
	if (t > -0.0033 && t <= 1.85e+59) {
		return z * ((-0.5 * (y * y)) - y);
	}
	return -t;
}
! Herbie alternative 12 for ((x-1)*log(y) + (z-1)*log(1-y)) - t.
! Both t-tails return -t; the middle regime uses a log-free Taylor
! polynomial.  x is unused on every path.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (t <= (-0.0033d0)) then
        tmp = -t
    else if (t <= 1.85d+59) then
        tmp = z * (((-0.5d0) * (y * y)) - y)
    else
        tmp = -t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	// Regime split on t: quadratic model z*(-y - y*y/2) for moderate t,
	// otherwise the -t term dominates. x is unused in this alternative.
	boolean moderate = (t > -0.0033) && (t <= 1.85e+59);
	return moderate ? z * ((-0.5 * (y * y)) - y) : -t;
}
def code(x, y, z, t):
	"""Regime split on t: quadratic model z*(-y - y*y/2) for moderate t,
	otherwise the -t term dominates. x is unused in this alternative."""
	if -0.0033 < t <= 1.85e+59:
		return z * ((-0.5 * (y * y)) - y)
	return -t
# Regime split on t: quadratic model z*(-y - y^2/2) for moderate t;
# in both tails the -t term dominates. x is unused here.
function code(x, y, z, t)
	tmp = 0.0
	if (t <= -0.0033)
		tmp = Float64(-t);
	elseif (t <= 1.85e+59)
		tmp = Float64(z * Float64(Float64(-0.5 * Float64(y * y)) - y));
	else
		tmp = Float64(-t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	% Regime split on t: quadratic model z*(-y - y*y/2) for moderate t;
	% in both tails the -t term dominates. x is unused here.
	tmp = 0.0;
	if (t <= -0.0033)
		tmp = -t;
	elseif (t <= 1.85e+59)
		tmp = z * ((-0.5 * (y * y)) - y);
	else
		tmp = -t;
	end
	tmp_2 = tmp;
end
(* Regime split on t: quadratic model z*(-y - y*y/2) for moderate t; -t in the tails. x unused. *)
code[x_, y_, z_, t_] := If[LessEqual[t, -0.0033], (-t), If[LessEqual[t, 1.85e+59], N[(z * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision], (-t)]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.0033:\\
\;\;\;\;-t\\

\mathbf{elif}\;t \leq 1.85 \cdot 10^{+59}:\\
\;\;\;\;z \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)\\

\mathbf{else}:\\
\;\;\;\;-t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.0033 or 1.84999999999999999e59 < t

    1. Initial program 96.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. +-commutative96.4%

        \[\leadsto \color{blue}{\left(\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(x - 1\right) \cdot \log y\right)} - t \]
      2. associate--l+96.4%

        \[\leadsto \color{blue}{\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(\left(x - 1\right) \cdot \log y - t\right)} \]
      3. fma-def96.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \log \left(1 - y\right), \left(x - 1\right) \cdot \log y - t\right)} \]
      4. sub-neg96.4%

        \[\leadsto \mathsf{fma}\left(z - 1, \log \color{blue}{\left(1 + \left(-y\right)\right)}, \left(x - 1\right) \cdot \log y - t\right) \]
      5. log1p-def99.9%

        \[\leadsto \mathsf{fma}\left(z - 1, \color{blue}{\mathsf{log1p}\left(-y\right)}, \left(x - 1\right) \cdot \log y - t\right) \]
    3. Simplified99.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \mathsf{log1p}\left(-y\right), \left(x - 1\right) \cdot \log y - t\right)} \]
    4. Taylor expanded in t around inf 70.5%

      \[\leadsto \color{blue}{-1 \cdot t} \]
    5. Step-by-step derivation
      1. neg-mul-170.5%

        \[\leadsto \color{blue}{-t} \]
    6. Simplified70.5%

      \[\leadsto \color{blue}{-t} \]

    if -0.0033 < t < 1.84999999999999999e59

    1. Initial program 80.8%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 98.8%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)}\right) - t \]
    3. Taylor expanded in x around 0 54.4%

      \[\leadsto \left(\color{blue}{-1 \cdot \log y} + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)\right) - t \]
    4. Step-by-step derivation
      1. mul-1-neg54.4%

        \[\leadsto \left(\color{blue}{\left(-\log y\right)} + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)\right) - t \]
    5. Simplified54.4%

      \[\leadsto \left(\color{blue}{\left(-\log y\right)} + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)\right) - t \]
    6. Taylor expanded in z around inf 22.7%

      \[\leadsto \color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)} \]
    7. Taylor expanded in y around 0 22.1%

      \[\leadsto z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)} \]
    8. Step-by-step derivation
      1. neg-mul-122.1%

        \[\leadsto z \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right) \]
      2. sub-neg22.1%

        \[\leadsto z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} \]
      3. unpow222.1%

        \[\leadsto z \cdot \left(-0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y\right) \]
    9. Simplified22.1%

      \[\leadsto z \cdot \color{blue}{\left(-0.5 \cdot \left(y \cdot y\right) - y\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification42.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.0033:\\ \;\;\;\;-t\\ \mathbf{elif}\;t \leq 1.85 \cdot 10^{+59}:\\ \;\;\;\;z \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \]

Alternative 13: 46.5% accurate, 16.5× speedup?

\[\begin{array}{l} \\ \left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t \end{array} \]
; Alternative 13: quadratic Taylor model in y, (z - 1)*(-y - y*y/2) - t.
; x is unused.
(FPCore (x y z t)
 :precision binary64
 (- (* (+ z -1.0) (- (* -0.5 (* y y)) y)) t))
double code(double x, double y, double z, double t) {
	/* Quadratic Taylor model in y: (z - 1)*(-y - y*y/2) - t. x unused. */
	double series = (-0.5 * (y * y)) - y;
	return ((z + -1.0) * series) - t;
}
real(8) function code(x, y, z, t)
    ! Quadratic Taylor model in y: (z - 1)*(-y - y*y/2) - t. x is unused.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((z + (-1.0d0)) * (((-0.5d0) * (y * y)) - y)) - t
end function
public static double code(double x, double y, double z, double t) {
	// Quadratic Taylor model in y: (z - 1)*(-y - y*y/2) - t. x unused.
	double series = (-0.5 * (y * y)) - y;
	return ((z + -1.0) * series) - t;
}
def code(x, y, z, t):
	"""Quadratic Taylor model in y: (z - 1)*(-y - y*y/2) - t. x unused."""
	series = (-0.5 * (y * y)) - y
	return ((z + -1.0) * series) - t
# Quadratic Taylor model in y: (z - 1)*(-y - y^2/2) - t. x is unused.
function code(x, y, z, t)
	return Float64(Float64(Float64(z + -1.0) * Float64(Float64(-0.5 * Float64(y * y)) - y)) - t)
end
function tmp = code(x, y, z, t)
	% Quadratic Taylor model in y: (z - 1)*(-y - y*y/2) - t. x is unused.
	tmp = ((z + -1.0) * ((-0.5 * (y * y)) - y)) - t;
end
(* Quadratic Taylor model in y: (z - 1)*(-y - y*y/2) - t. x unused. *)
code[x_, y_, z_, t_] := N[(N[(N[(z + -1.0), $MachinePrecision] * N[(N[(-0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t
\end{array}
Derivation
  1. Initial program 87.5%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 98.9%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
  3. Step-by-step derivation
    1. mul-1-neg98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
    2. unsub-neg98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
    3. *-commutative98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
    4. unpow298.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
    5. associate-*l*98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
  4. Simplified98.9%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
  5. Taylor expanded in y around inf 46.1%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(\left(z - 1\right) \cdot y\right) + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right)} - t \]
  6. Step-by-step derivation
    1. mul-1-neg46.1%

      \[\leadsto \left(\color{blue}{\left(-\left(z - 1\right) \cdot y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
    2. distribute-rgt-neg-in46.1%

      \[\leadsto \left(\color{blue}{\left(z - 1\right) \cdot \left(-y\right)} + -0.5 \cdot \left(\left(z - 1\right) \cdot {y}^{2}\right)\right) - t \]
    3. *-commutative46.1%

      \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(\left(z - 1\right) \cdot {y}^{2}\right) \cdot -0.5}\right) - t \]
    4. associate-*l*46.1%

      \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \color{blue}{\left(z - 1\right) \cdot \left({y}^{2} \cdot -0.5\right)}\right) - t \]
    5. unpow246.1%

      \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5\right)\right) - t \]
    6. associate-*r*46.1%

      \[\leadsto \left(\left(z - 1\right) \cdot \left(-y\right) + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right)\right)}\right) - t \]
    7. distribute-lft-in46.1%

      \[\leadsto \color{blue}{\left(z - 1\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right)} - t \]
    8. sub-neg46.1%

      \[\leadsto \color{blue}{\left(z + \left(-1\right)\right)} \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
    9. metadata-eval46.1%

      \[\leadsto \left(z + \color{blue}{-1}\right) \cdot \left(\left(-y\right) + y \cdot \left(y \cdot -0.5\right)\right) - t \]
    10. associate-*r*46.1%

      \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{\left(y \cdot y\right) \cdot -0.5}\right) - t \]
    11. unpow246.1%

      \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{{y}^{2}} \cdot -0.5\right) - t \]
    12. *-commutative46.1%

      \[\leadsto \left(z + -1\right) \cdot \left(\left(-y\right) + \color{blue}{-0.5 \cdot {y}^{2}}\right) - t \]
    13. +-commutative46.1%

      \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-y\right)\right)} - t \]
    14. sub-neg46.1%

      \[\leadsto \left(z + -1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)} - t \]
    15. unpow246.1%

      \[\leadsto \left(z + -1\right) \cdot \left(-0.5 \cdot \color{blue}{\left(y \cdot y\right)} - y\right) - t \]
  7. Simplified46.1%

    \[\leadsto \color{blue}{\left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right)} - t \]
  8. Final simplification46.1%

    \[\leadsto \left(z + -1\right) \cdot \left(-0.5 \cdot \left(y \cdot y\right) - y\right) - t \]

Alternative 14: 46.3% accurate, 19.5× speedup?

\[\begin{array}{l} \\ \left(z \cdot y\right) \cdot \left(-1 + y \cdot -0.5\right) - t \end{array} \]
; Alternative 14: quadratic model factored through z*y: (z*y)*(-1 - y/2) - t.
(FPCore (x y z t) :precision binary64 (- (* (* z y) (+ -1.0 (* y -0.5))) t))
double code(double x, double y, double z, double t) {
	/* Quadratic model factored through z*y: (z*y)*(-1 - y/2) - t. x unused. */
	double zy = z * y;
	return (zy * (-1.0 + (y * -0.5))) - t;
}
real(8) function code(x, y, z, t)
    ! Quadratic model factored through z*y: (z*y)*(-1 - y/2) - t. x unused.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((z * y) * ((-1.0d0) + (y * (-0.5d0)))) - t
end function
public static double code(double x, double y, double z, double t) {
	// Quadratic model factored through z*y: (z*y)*(-1 - y/2) - t. x unused.
	double zy = z * y;
	return (zy * (-1.0 + (y * -0.5))) - t;
}
def code(x, y, z, t):
	"""Quadratic model factored through z*y: (z*y)*(-1 - y/2) - t. x unused."""
	zy = z * y
	return (zy * (-1.0 + (y * -0.5))) - t
# Quadratic model factored through z*y: (z*y)*(-1 - y/2) - t. x is unused.
function code(x, y, z, t)
	return Float64(Float64(Float64(z * y) * Float64(-1.0 + Float64(y * -0.5))) - t)
end
function tmp = code(x, y, z, t)
	% Quadratic model factored through z*y: (z*y)*(-1 - y/2) - t. x is unused.
	tmp = ((z * y) * (-1.0 + (y * -0.5))) - t;
end
(* Quadratic model factored through z*y: (z*y)*(-1 - y/2) - t. x unused. *)
code[x_, y_, z_, t_] := N[(N[(N[(z * y), $MachinePrecision] * N[(-1.0 + N[(y * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(z \cdot y\right) \cdot \left(-1 + y \cdot -0.5\right) - t
\end{array}
Derivation
  1. Initial program 87.5%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Taylor expanded in y around 0 98.9%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + -1 \cdot y\right)}\right) - t \]
  3. Step-by-step derivation
    1. mul-1-neg98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \color{blue}{\left(-y\right)}\right)\right) - t \]
    2. unsub-neg98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right)}\right) - t \]
    3. *-commutative98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{{y}^{2} \cdot -0.5} - y\right)\right) - t \]
    4. unpow298.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{\left(y \cdot y\right)} \cdot -0.5 - y\right)\right) - t \]
    5. associate-*l*98.9%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(\color{blue}{y \cdot \left(y \cdot -0.5\right)} - y\right)\right) - t \]
  4. Simplified98.9%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot -0.5\right) - y\right)}\right) - t \]
  5. Taylor expanded in z around inf 45.9%

    \[\leadsto \color{blue}{\left(-0.5 \cdot {y}^{2} - y\right) \cdot z} - t \]
  6. Step-by-step derivation
    1. *-commutative45.9%

      \[\leadsto \color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} - y\right)} - t \]
    2. sub-neg45.9%

      \[\leadsto z \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-y\right)\right)} - t \]
    3. +-commutative45.9%

      \[\leadsto z \cdot \color{blue}{\left(\left(-y\right) + -0.5 \cdot {y}^{2}\right)} - t \]
    4. *-commutative45.9%

      \[\leadsto z \cdot \left(\left(-y\right) + \color{blue}{{y}^{2} \cdot -0.5}\right) - t \]
    5. unpow245.9%

      \[\leadsto z \cdot \left(\left(-y\right) + \color{blue}{\left(y \cdot y\right)} \cdot -0.5\right) - t \]
    6. associate-*r*45.9%

      \[\leadsto z \cdot \left(\left(-y\right) + \color{blue}{y \cdot \left(y \cdot -0.5\right)}\right) - t \]
    7. distribute-lft-in45.9%

      \[\leadsto \color{blue}{\left(z \cdot \left(-y\right) + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right)} - t \]
    8. distribute-rgt-neg-in45.9%

      \[\leadsto \left(\color{blue}{\left(-z \cdot y\right)} + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right) - t \]
    9. *-commutative45.9%

      \[\leadsto \left(\left(-\color{blue}{y \cdot z}\right) + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right) - t \]
    10. mul-1-neg45.9%

      \[\leadsto \left(\color{blue}{-1 \cdot \left(y \cdot z\right)} + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right) - t \]
    11. *-commutative45.9%

      \[\leadsto \left(\color{blue}{\left(y \cdot z\right) \cdot -1} + z \cdot \left(y \cdot \left(y \cdot -0.5\right)\right)\right) - t \]
    12. associate-*r*45.9%

      \[\leadsto \left(\left(y \cdot z\right) \cdot -1 + \color{blue}{\left(z \cdot y\right) \cdot \left(y \cdot -0.5\right)}\right) - t \]
    13. *-commutative45.9%

      \[\leadsto \left(\left(y \cdot z\right) \cdot -1 + \color{blue}{\left(y \cdot z\right)} \cdot \left(y \cdot -0.5\right)\right) - t \]
    14. distribute-lft-out45.9%

      \[\leadsto \color{blue}{\left(y \cdot z\right) \cdot \left(-1 + y \cdot -0.5\right)} - t \]
    15. *-commutative45.9%

      \[\leadsto \left(y \cdot z\right) \cdot \left(-1 + \color{blue}{-0.5 \cdot y}\right) - t \]
  7. Simplified45.9%

    \[\leadsto \color{blue}{\left(y \cdot z\right) \cdot \left(-1 + -0.5 \cdot y\right)} - t \]
  8. Final simplification45.9%

    \[\leadsto \left(z \cdot y\right) \cdot \left(-1 + y \cdot -0.5\right) - t \]

Alternative 15: 43.1% accurate, 26.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.007:\\ \;\;\;\;-t\\ \mathbf{elif}\;t \leq 200000000:\\ \;\;\;\;z \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \end{array} \]
; Alternative 15: linear model z*(-y) for moderate t, -t in the tails.
; x is unused.
(FPCore (x y z t)
 :precision binary64
 (if (<= t -0.007) (- t) (if (<= t 200000000.0) (* z (- y)) (- t))))
double code(double x, double y, double z, double t) {
	/* Linear model z*(-y) for moderate t; in both tails the -t term
	   dominates. x is unused in this alternative. */
	if (t > -0.007 && t <= 200000000.0) {
		return z * -y;
	}
	return -t;
}
real(8) function code(x, y, z, t)
    ! Linear model z*(-y) for moderate t; in both tails the -t term
    ! dominates. x is unused here.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (t <= (-0.007d0)) then
        tmp = -t
    else if (t <= 200000000.0d0) then
        tmp = z * -y
    else
        tmp = -t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	// Linear model z*(-y) for moderate t, otherwise -t dominates.
	// x is unused in this alternative.
	boolean moderate = (t > -0.007) && (t <= 200000000.0);
	return moderate ? z * -y : -t;
}
def code(x, y, z, t):
	"""Linear model z*(-y) for moderate t, otherwise -t dominates.
	x is unused in this alternative."""
	if -0.007 < t <= 200000000.0:
		return z * -y
	return -t
# Linear model z*(-y) for moderate t; -t dominates in the tails. x unused.
function code(x, y, z, t)
	tmp = 0.0
	if (t <= -0.007)
		tmp = Float64(-t);
	elseif (t <= 200000000.0)
		tmp = Float64(z * Float64(-y));
	else
		tmp = Float64(-t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	% Linear model z*(-y) for moderate t; -t dominates in the tails.
	% x is unused here.
	tmp = 0.0;
	if (t <= -0.007)
		tmp = -t;
	elseif (t <= 200000000.0)
		tmp = z * -y;
	else
		tmp = -t;
	end
	tmp_2 = tmp;
end
(* Linear model z*(-y) for moderate t; -t dominates in the tails. x unused. *)
code[x_, y_, z_, t_] := If[LessEqual[t, -0.007], (-t), If[LessEqual[t, 200000000.0], N[(z * (-y)), $MachinePrecision], (-t)]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.007:\\
\;\;\;\;-t\\

\mathbf{elif}\;t \leq 200000000:\\
\;\;\;\;z \cdot \left(-y\right)\\

\mathbf{else}:\\
\;\;\;\;-t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.00700000000000000015 or 2e8 < t

    1. Initial program 94.5%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. +-commutative94.5%

        \[\leadsto \color{blue}{\left(\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(x - 1\right) \cdot \log y\right)} - t \]
      2. associate--l+94.5%

        \[\leadsto \color{blue}{\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(\left(x - 1\right) \cdot \log y - t\right)} \]
      3. fma-def94.5%

        \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \log \left(1 - y\right), \left(x - 1\right) \cdot \log y - t\right)} \]
      4. sub-neg94.5%

        \[\leadsto \mathsf{fma}\left(z - 1, \log \color{blue}{\left(1 + \left(-y\right)\right)}, \left(x - 1\right) \cdot \log y - t\right) \]
      5. log1p-def99.9%

        \[\leadsto \mathsf{fma}\left(z - 1, \color{blue}{\mathsf{log1p}\left(-y\right)}, \left(x - 1\right) \cdot \log y - t\right) \]
    3. Simplified99.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \mathsf{log1p}\left(-y\right), \left(x - 1\right) \cdot \log y - t\right)} \]
    4. Taylor expanded in t around inf 64.5%

      \[\leadsto \color{blue}{-1 \cdot t} \]
    5. Step-by-step derivation
      1. neg-mul-164.5%

        \[\leadsto \color{blue}{-t} \]
    6. Simplified64.5%

      \[\leadsto \color{blue}{-t} \]

    if -0.00700000000000000015 < t < 2e8

    1. Initial program 80.7%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Taylor expanded in y around 0 98.7%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)}\right) - t \]
    3. Taylor expanded in x around 0 54.8%

      \[\leadsto \left(\color{blue}{-1 \cdot \log y} + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)\right) - t \]
    4. Step-by-step derivation
      1. mul-1-neg54.8%

        \[\leadsto \left(\color{blue}{\left(-\log y\right)} + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)\right) - t \]
    5. Simplified54.8%

      \[\leadsto \left(\color{blue}{\left(-\log y\right)} + \left(z - 1\right) \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)\right) - t \]
    6. Taylor expanded in z around inf 22.2%

      \[\leadsto \color{blue}{z \cdot \left(-0.5 \cdot {y}^{2} + \left(-0.3333333333333333 \cdot {y}^{3} + -1 \cdot y\right)\right)} \]
    7. Taylor expanded in y around 0 20.9%

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right)} \]
    8. Step-by-step derivation
      1. associate-*r*20.9%

        \[\leadsto \color{blue}{\left(-1 \cdot y\right) \cdot z} \]
      2. neg-mul-120.9%

        \[\leadsto \color{blue}{\left(-y\right)} \cdot z \]
    9. Simplified20.9%

      \[\leadsto \color{blue}{\left(-y\right) \cdot z} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification42.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.007:\\ \;\;\;\;-t\\ \mathbf{elif}\;t \leq 200000000:\\ \;\;\;\;z \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \]

Alternative 16: 35.7% accurate, 107.5× speedup?

\[\begin{array}{l} \\ -t \end{array} \]
; Alternative 16: tail-dominant approximation — only the -t term survives.
(FPCore (x y z t) :precision binary64 (- t))
double code(double x, double y, double z, double t) {
	/* Tail-dominant approximation: only -t survives; x, y, z are unused. */
	return -t;
}
real(8) function code(x, y, z, t)
    ! Tail-dominant approximation: only -t survives; x, y, z are unused.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = -t
end function
public static double code(double x, double y, double z, double t) {
	// Tail-dominant approximation: only -t survives; x, y, z are unused.
	return -t;
}
def code(x, y, z, t):
	# Tail-dominant approximation: only -t survives; x, y, z are unused.
	return -t
# Tail-dominant approximation: only -t survives; x, y, z are unused.
function code(x, y, z, t)
	return Float64(-t)
end
function tmp = code(x, y, z, t)
	% Tail-dominant approximation: only -t survives; x, y, z are unused.
	tmp = -t;
end
(* Tail-dominant approximation: only -t survives; x, y, z are unused. *)
code[x_, y_, z_, t_] := (-t)
\begin{array}{l}

\\
-t
\end{array}
Derivation
  1. Initial program 87.5%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. +-commutative87.5%

      \[\leadsto \color{blue}{\left(\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(x - 1\right) \cdot \log y\right)} - t \]
    2. associate--l+87.5%

      \[\leadsto \color{blue}{\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(\left(x - 1\right) \cdot \log y - t\right)} \]
    3. fma-def87.5%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \log \left(1 - y\right), \left(x - 1\right) \cdot \log y - t\right)} \]
    4. sub-neg87.5%

      \[\leadsto \mathsf{fma}\left(z - 1, \log \color{blue}{\left(1 + \left(-y\right)\right)}, \left(x - 1\right) \cdot \log y - t\right) \]
    5. log1p-def99.8%

      \[\leadsto \mathsf{fma}\left(z - 1, \color{blue}{\mathsf{log1p}\left(-y\right)}, \left(x - 1\right) \cdot \log y - t\right) \]
  3. Simplified99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \mathsf{log1p}\left(-y\right), \left(x - 1\right) \cdot \log y - t\right)} \]
  4. Taylor expanded in t around inf 33.3%

    \[\leadsto \color{blue}{-1 \cdot t} \]
  5. Step-by-step derivation
    1. neg-mul-133.3%

      \[\leadsto \color{blue}{-t} \]
  6. Simplified33.3%

    \[\leadsto \color{blue}{-t} \]
  7. Final simplification33.3%

    \[\leadsto -t \]

Reproduce

?
herbie shell --seed 2023195 
; Original input program reproduced for `herbie shell` (seed 2023195).
(FPCore (x y z t)
  :name "Statistics.Distribution.Beta:$cdensity from math-functions-0.1.5.2"
  :precision binary64
  (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))