Statistics.Distribution.Beta:$cdensity from math-functions-0.1.5.2

Percentage Accurate: 88.8% → 99.8%
Time: 21.0s
Alternatives: 19
Speedup: 1.9×

Specification

\[\begin{array}{l} \\ \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
	return (((x - 1.0) * Math.log(y)) + ((z - 1.0) * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	return (((x - 1.0) * math.log(y)) + ((z - 1.0) * math.log((1.0 - y)))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t)
end
function tmp = code(x, y, z, t)
	tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}
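
As a quick sanity check, the specification can be exercised directly from C. The sketch below is illustrative only: it re-declares the double-precision listing above as a static helper named spec (our name, not part of the report), and the sample inputs are arbitrary values chosen so that y lies in (0, 1) and both logarithms are defined.

#include <math.h>
#include <stdio.h>

/* Double-precision specification from the listing above. */
static double spec(double x, double y, double z, double t) {
    return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}

int main(void) {
    /* Arbitrary sample point; y must stay inside (0, 1). */
    double x = 2.5, y = 1e-9, z = 3.0, t = 0.75;
    printf("spec(x, y, z, t) = %.17g\n", spec(x, y, z, t));
    return 0;
}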

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 19 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 88.8% accurate, 1.0× speedup

\[\begin{array}{l} \\ \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
	return (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (((x - 1.0d0) * log(y)) + ((z - 1.0d0) * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
	return (((x - 1.0) * Math.log(y)) + ((z - 1.0) * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
	return (((x - 1.0) * math.log(y)) + ((z - 1.0) * math.log((1.0 - y)))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(x - 1.0) * log(y)) + Float64(Float64(z - 1.0) * log(Float64(1.0 - y)))) - t)
end
function tmp = code(x, y, z, t)
	tmp = (((x - 1.0) * log(y)) + ((z - 1.0) * log((1.0 - y)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(x - 1.0), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z - 1.0), $MachinePrecision] * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t
\end{array}

Alternative 1: 99.8% accurate, 0.5× speedup

\[\begin{array}{l} \\ \mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \mathsf{fma}\left(-1 + x, \log y, -t\right)\right) \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (fma (+ z -1.0) (log1p (- y)) (fma (+ -1.0 x) (log y) (- t))))
double code(double x, double y, double z, double t) {
	return fma((z + -1.0), log1p(-y), fma((-1.0 + x), log(y), -t));
}
function code(x, y, z, t)
	return fma(Float64(z + -1.0), log1p(Float64(-y)), fma(Float64(-1.0 + x), log(y), Float64(-t)))
end
code[x_, y_, z_, t_] := N[(N[(z + -1.0), $MachinePrecision] * N[Log[1 + (-y)], $MachinePrecision] + N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision] + (-t)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \mathsf{fma}\left(-1 + x, \log y, -t\right)\right)
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. sub-neg86.8%

      \[\leadsto \color{blue}{\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) + \left(-t\right)} \]
    2. +-commutative86.8%

      \[\leadsto \color{blue}{\left(\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(x - 1\right) \cdot \log y\right)} + \left(-t\right) \]
    3. associate-+l+86.8%

      \[\leadsto \color{blue}{\left(z - 1\right) \cdot \log \left(1 - y\right) + \left(\left(x - 1\right) \cdot \log y + \left(-t\right)\right)} \]
    4. fma-define86.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z - 1, \log \left(1 - y\right), \left(x - 1\right) \cdot \log y + \left(-t\right)\right)} \]
    5. sub-neg86.8%

      \[\leadsto \mathsf{fma}\left(\color{blue}{z + \left(-1\right)}, \log \left(1 - y\right), \left(x - 1\right) \cdot \log y + \left(-t\right)\right) \]
    6. metadata-eval86.8%

      \[\leadsto \mathsf{fma}\left(z + \color{blue}{-1}, \log \left(1 - y\right), \left(x - 1\right) \cdot \log y + \left(-t\right)\right) \]
    7. sub-neg86.8%

      \[\leadsto \mathsf{fma}\left(z + -1, \log \color{blue}{\left(1 + \left(-y\right)\right)}, \left(x - 1\right) \cdot \log y + \left(-t\right)\right) \]
    8. log1p-define99.8%

      \[\leadsto \mathsf{fma}\left(z + -1, \color{blue}{\mathsf{log1p}\left(-y\right)}, \left(x - 1\right) \cdot \log y + \left(-t\right)\right) \]
    9. fma-define99.8%

      \[\leadsto \mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \color{blue}{\mathsf{fma}\left(x - 1, \log y, -t\right)}\right) \]
    10. sub-neg99.8%

      \[\leadsto \mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \mathsf{fma}\left(\color{blue}{x + \left(-1\right)}, \log y, -t\right)\right) \]
    11. metadata-eval99.8%

      \[\leadsto \mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \mathsf{fma}\left(x + \color{blue}{-1}, \log y, -t\right)\right) \]
  3. Simplified99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \mathsf{fma}\left(x + -1, \log y, -t\right)\right)} \]
  4. Add Preprocessing
  5. Final simplification99.8%

    \[\leadsto \mathsf{fma}\left(z + -1, \mathsf{log1p}\left(-y\right), \mathsf{fma}\left(-1 + x, \log y, -t\right)\right) \]
  6. Add Preprocessing
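
Most of the accuracy gain in this alternative comes from the log1p-define step: when y is tiny, 1 - y rounds to exactly 1 in binary64, so log(1 - y) returns 0 and all information about y is lost, whereas log1p(-y) keeps the leading -y term. A minimal C sketch of that effect; the value of y is illustrative only.

#include <math.h>
#include <stdio.h>

int main(void) {
    /* For |y| far below machine epsilon, 1.0 - y rounds to 1.0,
       so log(1.0 - y) collapses to 0, while log1p(-y) stays close to -y. */
    double y = 1e-20;
    printf("log(1 - y) = %.17g\n", log(1.0 - y));
    printf("log1p(-y)  = %.17g\n", log1p(-y));
    return 0;
}

The two fma calls contribute less here; each one just fuses a multiply and an add into a single rounding.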

Alternative 2: 99.8% accurate, 0.7× speedup

\[\begin{array}{l} \\ \mathsf{fma}\left(-1 + x, \log y, \mathsf{log1p}\left(-y\right) \cdot \left(z + -1\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (fma (+ -1.0 x) (log y) (* (log1p (- y)) (+ z -1.0))) t))
double code(double x, double y, double z, double t) {
	return fma((-1.0 + x), log(y), (log1p(-y) * (z + -1.0))) - t;
}
function code(x, y, z, t)
	return Float64(fma(Float64(-1.0 + x), log(y), Float64(log1p(Float64(-y)) * Float64(z + -1.0))) - t)
end
code[x_, y_, z_, t_] := N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision] + N[(N[Log[1 + (-y)], $MachinePrecision] * N[(z + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(-1 + x, \log y, \mathsf{log1p}\left(-y\right) \cdot \left(z + -1\right)\right) - t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. fma-define86.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right)} - t \]
    2. sub-neg86.8%

      \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-1\right)}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    3. metadata-eval86.8%

      \[\leadsto \mathsf{fma}\left(x + \color{blue}{-1}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    4. sub-neg86.8%

      \[\leadsto \mathsf{fma}\left(x + -1, \log y, \color{blue}{\left(z + \left(-1\right)\right)} \cdot \log \left(1 - y\right)\right) - t \]
    5. metadata-eval86.8%

      \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + \color{blue}{-1}\right) \cdot \log \left(1 - y\right)\right) - t \]
    6. sub-neg86.8%

      \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
    7. log1p-define99.8%

      \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \color{blue}{\mathsf{log1p}\left(-y\right)}\right) - t \]
  3. Simplified99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \mathsf{log1p}\left(-y\right)\right) - t} \]
  4. Add Preprocessing
  5. Final simplification99.8%

    \[\leadsto \mathsf{fma}\left(-1 + x, \log y, \mathsf{log1p}\left(-y\right) \cdot \left(z + -1\right)\right) - t \]
  6. Add Preprocessing
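
This alternative keeps the final subtraction of t separate but still leans on fma, which computes a·b + c with one rounding instead of two. A minimal C sketch of the difference; the operands are chosen purely for illustration so that the separately rounded product discards the low-order bits.

#include <math.h>
#include <stdio.h>

int main(void) {
    /* The exact product a*b is 1 - 2^-60, which rounds to 1.0 in binary64.
       The plain expression therefore returns 0, while fma sees the
       unrounded product and returns the exact answer -2^-60. */
    double a = 1.0 + 0x1p-30;
    double b = 1.0 - 0x1p-30;
    double c = -1.0;
    printf("a*b + c      = %.17g\n", a * b + c);
    printf("fma(a, b, c) = %.17g\n", fma(a, b, c));
    return 0;
}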

Alternative 3: 99.6% accurate, 1.7× speedup

\[\begin{array}{l} \\ \left(\left(-1 + x\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \left(-1 + y \cdot \left(y \cdot \left(y \cdot -0.25 - 0.3333333333333333\right) - 0.5\right)\right)\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (-
  (+
   (* (+ -1.0 x) (log y))
   (*
    (+ z -1.0)
    (* y (+ -1.0 (* y (- (* y (- (* y -0.25) 0.3333333333333333)) 0.5))))))
  t))
double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * log(y)) + ((z + -1.0) * (y * (-1.0 + (y * ((y * ((y * -0.25) - 0.3333333333333333)) - 0.5)))))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((((-1.0d0) + x) * log(y)) + ((z + (-1.0d0)) * (y * ((-1.0d0) + (y * ((y * ((y * (-0.25d0)) - 0.3333333333333333d0)) - 0.5d0)))))) - t
end function
public static double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * Math.log(y)) + ((z + -1.0) * (y * (-1.0 + (y * ((y * ((y * -0.25) - 0.3333333333333333)) - 0.5)))))) - t;
}
def code(x, y, z, t):
	return (((-1.0 + x) * math.log(y)) + ((z + -1.0) * (y * (-1.0 + (y * ((y * ((y * -0.25) - 0.3333333333333333)) - 0.5)))))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(-1.0 + x) * log(y)) + Float64(Float64(z + -1.0) * Float64(y * Float64(-1.0 + Float64(y * Float64(Float64(y * Float64(Float64(y * -0.25) - 0.3333333333333333)) - 0.5)))))) - t)
end
function tmp = code(x, y, z, t)
	tmp = (((-1.0 + x) * log(y)) + ((z + -1.0) * (y * (-1.0 + (y * ((y * ((y * -0.25) - 0.3333333333333333)) - 0.5)))))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z + -1.0), $MachinePrecision] * N[(y * N[(-1.0 + N[(y * N[(N[(y * N[(N[(y * -0.25), $MachinePrecision] - 0.3333333333333333), $MachinePrecision]), $MachinePrecision] - 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(-1 + x\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \left(-1 + y \cdot \left(y \cdot \left(y \cdot -0.25 - 0.3333333333333333\right) - 0.5\right)\right)\right)\right) - t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0 99.3%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(y \cdot \left(-0.25 \cdot y - 0.3333333333333333\right) - 0.5\right) - 1\right)\right)}\right) - t \]
  4. Final simplification99.3%

    \[\leadsto \left(\left(-1 + x\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \left(-1 + y \cdot \left(y \cdot \left(y \cdot -0.25 - 0.3333333333333333\right) - 0.5\right)\right)\right)\right) - t \]
  5. Add Preprocessing
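
Here the log(1 - y) factor is replaced by the degree-4 Maclaurin polynomial produced by the Taylor-expansion step, which avoids a logarithm call but is only accurate when |y| is small (the truncation error grows like y^5). A minimal C sketch comparing the Horner-form polynomial used above against log1p(-y); the test value of y and the helper name log1m_series are ours, for illustration only.

#include <math.h>
#include <stdio.h>

/* Degree-4 series for log(1 - y), in the Horner form from Alternative 3:
   log(1 - y) ~ y * (-1 + y * (y * (y * -0.25 - 1/3) - 0.5)) for small |y|. */
static double log1m_series(double y) {
    return y * (-1.0 + y * (y * (y * -0.25 - 0.3333333333333333) - 0.5));
}

int main(void) {
    double y = 1e-4;  /* small enough that the O(y^5) truncation error is negligible */
    printf("log1p(-y)       = %.17g\n", log1p(-y));
    printf("log1m_series(y) = %.17g\n", log1m_series(y));
    return 0;
}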

Alternative 4: 82.2% accurate, 1.7× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_1 := y \cdot \left(1 - z\right) - t\\ t_2 := x \cdot \log y - t\\ \mathbf{if}\;x \leq -2.8 \cdot 10^{+30}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;x \leq -1.75 \cdot 10^{-51}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 4.2 \cdot 10^{-227}:\\ \;\;\;\;\left(-t\right) - \log y\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{+24}:\\ \;\;\;\;t\_1\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* y (- 1.0 z)) t)) (t_2 (- (* x (log y)) t)))
   (if (<= x -2.8e+30)
     t_2
     (if (<= x -1.75e-51)
       t_1
       (if (<= x 4.2e-227) (- (- t) (log y)) (if (<= x 3.3e+24) t_1 t_2))))))
double code(double x, double y, double z, double t) {
	double t_1 = (y * (1.0 - z)) - t;
	double t_2 = (x * log(y)) - t;
	double tmp;
	if (x <= -2.8e+30) {
		tmp = t_2;
	} else if (x <= -1.75e-51) {
		tmp = t_1;
	} else if (x <= 4.2e-227) {
		tmp = -t - log(y);
	} else if (x <= 3.3e+24) {
		tmp = t_1;
	} else {
		tmp = t_2;
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: t_2
    real(8) :: tmp
    t_1 = (y * (1.0d0 - z)) - t
    t_2 = (x * log(y)) - t
    if (x <= (-2.8d+30)) then
        tmp = t_2
    else if (x <= (-1.75d-51)) then
        tmp = t_1
    else if (x <= 4.2d-227) then
        tmp = -t - log(y)
    else if (x <= 3.3d+24) then
        tmp = t_1
    else
        tmp = t_2
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double t_1 = (y * (1.0 - z)) - t;
	double t_2 = (x * Math.log(y)) - t;
	double tmp;
	if (x <= -2.8e+30) {
		tmp = t_2;
	} else if (x <= -1.75e-51) {
		tmp = t_1;
	} else if (x <= 4.2e-227) {
		tmp = -t - Math.log(y);
	} else if (x <= 3.3e+24) {
		tmp = t_1;
	} else {
		tmp = t_2;
	}
	return tmp;
}
def code(x, y, z, t):
	t_1 = (y * (1.0 - z)) - t
	t_2 = (x * math.log(y)) - t
	tmp = 0
	if x <= -2.8e+30:
		tmp = t_2
	elif x <= -1.75e-51:
		tmp = t_1
	elif x <= 4.2e-227:
		tmp = -t - math.log(y)
	elif x <= 3.3e+24:
		tmp = t_1
	else:
		tmp = t_2
	return tmp
function code(x, y, z, t)
	t_1 = Float64(Float64(y * Float64(1.0 - z)) - t)
	t_2 = Float64(Float64(x * log(y)) - t)
	tmp = 0.0
	if (x <= -2.8e+30)
		tmp = t_2;
	elseif (x <= -1.75e-51)
		tmp = t_1;
	elseif (x <= 4.2e-227)
		tmp = Float64(Float64(-t) - log(y));
	elseif (x <= 3.3e+24)
		tmp = t_1;
	else
		tmp = t_2;
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	t_1 = (y * (1.0 - z)) - t;
	t_2 = (x * log(y)) - t;
	tmp = 0.0;
	if (x <= -2.8e+30)
		tmp = t_2;
	elseif (x <= -1.75e-51)
		tmp = t_1;
	elseif (x <= 4.2e-227)
		tmp = -t - log(y);
	elseif (x <= 3.3e+24)
		tmp = t_1;
	else
		tmp = t_2;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(y * N[(1.0 - z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, Block[{t$95$2 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -2.8e+30], t$95$2, If[LessEqual[x, -1.75e-51], t$95$1, If[LessEqual[x, 4.2e-227], N[((-t) - N[Log[y], $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 3.3e+24], t$95$1, t$95$2]]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := y \cdot \left(1 - z\right) - t\\
t_2 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -2.8 \cdot 10^{+30}:\\
\;\;\;\;t\_2\\

\mathbf{elif}\;x \leq -1.75 \cdot 10^{-51}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 4.2 \cdot 10^{-227}:\\
\;\;\;\;\left(-t\right) - \log y\\

\mathbf{elif}\;x \leq 3.3 \cdot 10^{+24}:\\
\;\;\;\;t\_1\\

\mathbf{else}:\\
\;\;\;\;t\_2\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -2.79999999999999983e30 or 3.2999999999999999e24 < x

    1. Initial program 98.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 99.6%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(y \cdot \left(-0.25 \cdot y - 0.3333333333333333\right) - 0.5\right) - 1\right)\right)}\right) - t \]
    4. Taylor expanded in y around 0 99.2%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{-0.5 \cdot y} - 1\right)\right)\right) - t \]
    5. Step-by-step derivation
      1. *-commutative99.2%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{y \cdot -0.5} - 1\right)\right)\right) - t \]
    6. Simplified99.2%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{y \cdot -0.5} - 1\right)\right)\right) - t \]
    7. Taylor expanded in x around inf 97.2%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]
    8. Step-by-step derivation
      1. *-commutative97.2%

        \[\leadsto \color{blue}{\log y \cdot x} - t \]
    9. Simplified97.2%

      \[\leadsto \color{blue}{\log y \cdot x} - t \]

    if -2.79999999999999983e30 < x < -1.7499999999999999e-51 or 4.1999999999999999e-227 < x < 3.2999999999999999e24

    1. Initial program 71.8%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 99.4%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative99.4%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
      2. sub-neg99.4%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      3. metadata-eval99.4%

        \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      4. mul-1-neg99.4%

        \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
      5. unsub-neg99.4%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
      6. +-commutative99.4%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
      7. sub-neg99.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
      8. metadata-eval99.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
      9. +-commutative99.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
    5. Simplified99.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
    6. Taylor expanded in y around inf 99.4%

      \[\leadsto \color{blue}{y \cdot \left(\left(1 + -1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right) - z\right)} - t \]
    7. Step-by-step derivation
      1. associate--l+99.4%

        \[\leadsto y \cdot \color{blue}{\left(1 + \left(-1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y} - z\right)\right)} - t \]
      2. mul-1-neg99.4%

        \[\leadsto y \cdot \left(1 + \left(\color{blue}{\left(-\frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right)} - z\right)\right) - t \]
      3. log-rec99.4%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\color{blue}{\left(-\log y\right)} \cdot \left(x - 1\right)}{y}\right) - z\right)\right) - t \]
      4. sub-neg99.4%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \color{blue}{\left(x + \left(-1\right)\right)}}{y}\right) - z\right)\right) - t \]
      5. metadata-eval99.4%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \left(x + \color{blue}{-1}\right)}{y}\right) - z\right)\right) - t \]
      6. associate-/l*99.3%

        \[\leadsto y \cdot \left(1 + \left(\left(-\color{blue}{\left(-\log y\right) \cdot \frac{x + -1}{y}}\right) - z\right)\right) - t \]
      7. +-commutative99.3%

        \[\leadsto y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{\color{blue}{-1 + x}}{y}\right) - z\right)\right) - t \]
    8. Simplified99.3%

      \[\leadsto \color{blue}{y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{-1 + x}{y}\right) - z\right)\right)} - t \]
    9. Taylor expanded in y around inf 81.2%

      \[\leadsto \color{blue}{y \cdot \left(1 - z\right)} - t \]

    if -1.7499999999999999e-51 < x < 4.1999999999999999e-227

    1. Initial program 80.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 80.4%

      \[\leadsto \color{blue}{\left(-1 \cdot \log y + \log \left(1 - y\right) \cdot \left(z - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative80.4%

        \[\leadsto \color{blue}{\left(\log \left(1 - y\right) \cdot \left(z - 1\right) + -1 \cdot \log y\right)} - t \]
      2. mul-1-neg80.4%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \left(z - 1\right) + \color{blue}{\left(-\log y\right)}\right) - t \]
      3. unsub-neg80.4%

        \[\leadsto \color{blue}{\left(\log \left(1 - y\right) \cdot \left(z - 1\right) - \log y\right)} - t \]
      4. sub-neg80.4%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \color{blue}{\left(z + \left(-1\right)\right)} - \log y\right) - t \]
      5. metadata-eval80.4%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \left(z + \color{blue}{-1}\right) - \log y\right) - t \]
      6. +-commutative80.4%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \color{blue}{\left(-1 + z\right)} - \log y\right) - t \]
      7. sub-neg80.4%

        \[\leadsto \left(\log \color{blue}{\left(1 + \left(-y\right)\right)} \cdot \left(-1 + z\right) - \log y\right) - t \]
      8. log1p-define99.9%

        \[\leadsto \left(\color{blue}{\mathsf{log1p}\left(-y\right)} \cdot \left(-1 + z\right) - \log y\right) - t \]
    5. Simplified99.9%

      \[\leadsto \color{blue}{\left(\mathsf{log1p}\left(-y\right) \cdot \left(-1 + z\right) - \log y\right)} - t \]
    6. Taylor expanded in y around 0 76.3%

      \[\leadsto \color{blue}{-1 \cdot \left(t + \log y\right)} \]
    7. Step-by-step derivation
      1. mul-1-neg76.3%

        \[\leadsto \color{blue}{-\left(t + \log y\right)} \]
      2. distribute-neg-in76.3%

        \[\leadsto \color{blue}{\left(-t\right) + \left(-\log y\right)} \]
      3. unsub-neg76.3%

        \[\leadsto \color{blue}{\left(-t\right) - \log y} \]
    8. Simplified76.3%

      \[\leadsto \color{blue}{\left(-t\right) - \log y} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification87.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.8 \cdot 10^{+30}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;x \leq -1.75 \cdot 10^{-51}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \mathbf{elif}\;x \leq 4.2 \cdot 10^{-227}:\\ \;\;\;\;\left(-t\right) - \log y\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{+24}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y - t\\ \end{array} \]
  5. Add Preprocessing
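
This alternative trades accuracy for speed by branching on x and using a cheap approximation in each regime; in the wide middle regime the whole x-dependent term is dropped. The sketch below compares the original expression against that y * (1 - z) - t branch at one arbitrary sample point from the middle regime; the helper names orig and mid_regime are ours, not Herbie's.

#include <math.h>
#include <stdio.h>

/* Original expression. */
static double orig(double x, double y, double z, double t) {
    return ((x - 1.0) * log(y) + (z - 1.0) * log(1.0 - y)) - t;
}

/* Middle-regime approximation from Alternative 4: drops the (x - 1) * log(y) term. */
static double mid_regime(double x, double y, double z, double t) {
    (void) x;
    return y * (1.0 - z) - t;
}

int main(void) {
    /* Arbitrary point with 4.2e-227 < x <= 3.3e+24, i.e. the t_1 branch. */
    double x = 1.0, y = 1e-8, z = 4.0, t = 2.0;
    printf("original   = %.17g\n", orig(x, y, z, t));
    printf("mid regime = %.17g\n", mid_regime(x, y, z, t));
    return 0;
}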

Alternative 5: 82.3% accurate, 1.7× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_1 := y \cdot \left(1 - z\right) - t\\ t_2 := x \cdot \log y - t\\ \mathbf{if}\;x \leq -2.8 \cdot 10^{+30}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;x \leq -5.5 \cdot 10^{-52}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 4.1 \cdot 10^{-227}:\\ \;\;\;\;\left(y - \log y\right) - t\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{+24}:\\ \;\;\;\;t\_1\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (- (* y (- 1.0 z)) t)) (t_2 (- (* x (log y)) t)))
   (if (<= x -2.8e+30)
     t_2
     (if (<= x -5.5e-52)
       t_1
       (if (<= x 4.1e-227) (- (- y (log y)) t) (if (<= x 3.3e+24) t_1 t_2))))))
double code(double x, double y, double z, double t) {
	double t_1 = (y * (1.0 - z)) - t;
	double t_2 = (x * log(y)) - t;
	double tmp;
	if (x <= -2.8e+30) {
		tmp = t_2;
	} else if (x <= -5.5e-52) {
		tmp = t_1;
	} else if (x <= 4.1e-227) {
		tmp = (y - log(y)) - t;
	} else if (x <= 3.3e+24) {
		tmp = t_1;
	} else {
		tmp = t_2;
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: t_2
    real(8) :: tmp
    t_1 = (y * (1.0d0 - z)) - t
    t_2 = (x * log(y)) - t
    if (x <= (-2.8d+30)) then
        tmp = t_2
    else if (x <= (-5.5d-52)) then
        tmp = t_1
    else if (x <= 4.1d-227) then
        tmp = (y - log(y)) - t
    else if (x <= 3.3d+24) then
        tmp = t_1
    else
        tmp = t_2
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double t_1 = (y * (1.0 - z)) - t;
	double t_2 = (x * Math.log(y)) - t;
	double tmp;
	if (x <= -2.8e+30) {
		tmp = t_2;
	} else if (x <= -5.5e-52) {
		tmp = t_1;
	} else if (x <= 4.1e-227) {
		tmp = (y - Math.log(y)) - t;
	} else if (x <= 3.3e+24) {
		tmp = t_1;
	} else {
		tmp = t_2;
	}
	return tmp;
}
def code(x, y, z, t):
	t_1 = (y * (1.0 - z)) - t
	t_2 = (x * math.log(y)) - t
	tmp = 0
	if x <= -2.8e+30:
		tmp = t_2
	elif x <= -5.5e-52:
		tmp = t_1
	elif x <= 4.1e-227:
		tmp = (y - math.log(y)) - t
	elif x <= 3.3e+24:
		tmp = t_1
	else:
		tmp = t_2
	return tmp
function code(x, y, z, t)
	t_1 = Float64(Float64(y * Float64(1.0 - z)) - t)
	t_2 = Float64(Float64(x * log(y)) - t)
	tmp = 0.0
	if (x <= -2.8e+30)
		tmp = t_2;
	elseif (x <= -5.5e-52)
		tmp = t_1;
	elseif (x <= 4.1e-227)
		tmp = Float64(Float64(y - log(y)) - t);
	elseif (x <= 3.3e+24)
		tmp = t_1;
	else
		tmp = t_2;
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	t_1 = (y * (1.0 - z)) - t;
	t_2 = (x * log(y)) - t;
	tmp = 0.0;
	if (x <= -2.8e+30)
		tmp = t_2;
	elseif (x <= -5.5e-52)
		tmp = t_1;
	elseif (x <= 4.1e-227)
		tmp = (y - log(y)) - t;
	elseif (x <= 3.3e+24)
		tmp = t_1;
	else
		tmp = t_2;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(y * N[(1.0 - z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, Block[{t$95$2 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -2.8e+30], t$95$2, If[LessEqual[x, -5.5e-52], t$95$1, If[LessEqual[x, 4.1e-227], N[(N[(y - N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[x, 3.3e+24], t$95$1, t$95$2]]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := y \cdot \left(1 - z\right) - t\\
t_2 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -2.8 \cdot 10^{+30}:\\
\;\;\;\;t\_2\\

\mathbf{elif}\;x \leq -5.5 \cdot 10^{-52}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 4.1 \cdot 10^{-227}:\\
\;\;\;\;\left(y - \log y\right) - t\\

\mathbf{elif}\;x \leq 3.3 \cdot 10^{+24}:\\
\;\;\;\;t\_1\\

\mathbf{else}:\\
\;\;\;\;t\_2\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -2.79999999999999983e30 or 3.2999999999999999e24 < x

    1. Initial program 98.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 99.6%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(y \cdot \left(-0.25 \cdot y - 0.3333333333333333\right) - 0.5\right) - 1\right)\right)}\right) - t \]
    4. Taylor expanded in y around 0 99.2%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{-0.5 \cdot y} - 1\right)\right)\right) - t \]
    5. Step-by-step derivation
      1. *-commutative99.2%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{y \cdot -0.5} - 1\right)\right)\right) - t \]
    6. Simplified99.2%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{y \cdot -0.5} - 1\right)\right)\right) - t \]
    7. Taylor expanded in x around inf 97.2%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]
    8. Step-by-step derivation
      1. *-commutative97.2%

        \[\leadsto \color{blue}{\log y \cdot x} - t \]
    9. Simplified97.2%

      \[\leadsto \color{blue}{\log y \cdot x} - t \]

    if -2.79999999999999983e30 < x < -5.5e-52 or 4.10000000000000009e-227 < x < 3.2999999999999999e24

    1. Initial program 71.8%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 99.4%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative99.4%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
      2. sub-neg99.4%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      3. metadata-eval99.4%

        \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      4. mul-1-neg99.4%

        \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
      5. unsub-neg99.4%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
      6. +-commutative99.4%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
      7. sub-neg99.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
      8. metadata-eval99.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
      9. +-commutative99.4%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
    5. Simplified99.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
    6. Taylor expanded in y around inf 99.4%

      \[\leadsto \color{blue}{y \cdot \left(\left(1 + -1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right) - z\right)} - t \]
    7. Step-by-step derivation
      1. associate--l+99.4%

        \[\leadsto y \cdot \color{blue}{\left(1 + \left(-1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y} - z\right)\right)} - t \]
      2. mul-1-neg99.4%

        \[\leadsto y \cdot \left(1 + \left(\color{blue}{\left(-\frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right)} - z\right)\right) - t \]
      3. log-rec99.4%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\color{blue}{\left(-\log y\right)} \cdot \left(x - 1\right)}{y}\right) - z\right)\right) - t \]
      4. sub-neg99.4%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \color{blue}{\left(x + \left(-1\right)\right)}}{y}\right) - z\right)\right) - t \]
      5. metadata-eval99.4%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \left(x + \color{blue}{-1}\right)}{y}\right) - z\right)\right) - t \]
      6. associate-/l*99.3%

        \[\leadsto y \cdot \left(1 + \left(\left(-\color{blue}{\left(-\log y\right) \cdot \frac{x + -1}{y}}\right) - z\right)\right) - t \]
      7. +-commutative99.3%

        \[\leadsto y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{\color{blue}{-1 + x}}{y}\right) - z\right)\right) - t \]
    8. Simplified99.3%

      \[\leadsto \color{blue}{y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{-1 + x}{y}\right) - z\right)\right)} - t \]
    9. Taylor expanded in y around inf 81.2%

      \[\leadsto \color{blue}{y \cdot \left(1 - z\right)} - t \]

    if -5.5e-52 < x < 4.10000000000000009e-227

    1. Initial program 80.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 96.7%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative96.7%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
      2. sub-neg96.7%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      3. metadata-eval96.7%

        \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      4. mul-1-neg96.7%

        \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
      5. unsub-neg96.7%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
      6. +-commutative96.7%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
      7. sub-neg96.7%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
      8. metadata-eval96.7%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
      9. +-commutative96.7%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
    5. Simplified96.7%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
    6. Taylor expanded in z around 0 76.4%

      \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) - \left(t + -1 \cdot y\right)} \]
    7. Step-by-step derivation
      1. sub-neg76.4%

        \[\leadsto \log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} - \left(t + -1 \cdot y\right) \]
      2. metadata-eval76.4%

        \[\leadsto \log y \cdot \left(x + \color{blue}{-1}\right) - \left(t + -1 \cdot y\right) \]
      3. +-commutative76.4%

        \[\leadsto \log y \cdot \color{blue}{\left(-1 + x\right)} - \left(t + -1 \cdot y\right) \]
      4. neg-mul-176.4%

        \[\leadsto \log y \cdot \left(-1 + x\right) - \left(t + \color{blue}{\left(-y\right)}\right) \]
      5. unsub-neg76.4%

        \[\leadsto \log y \cdot \left(-1 + x\right) - \color{blue}{\left(t - y\right)} \]
    8. Simplified76.4%

      \[\leadsto \color{blue}{\log y \cdot \left(-1 + x\right) - \left(t - y\right)} \]
    9. Taylor expanded in x around 0 76.4%

      \[\leadsto \color{blue}{\left(y + -1 \cdot \log y\right) - t} \]
    10. Step-by-step derivation
      1. neg-mul-176.4%

        \[\leadsto \left(y + \color{blue}{\left(-\log y\right)}\right) - t \]
      2. unsub-neg76.4%

        \[\leadsto \color{blue}{\left(y - \log y\right)} - t \]
    11. Simplified76.4%

      \[\leadsto \color{blue}{\left(y - \log y\right) - t} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification87.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.8 \cdot 10^{+30}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;x \leq -5.5 \cdot 10^{-52}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \mathbf{elif}\;x \leq 4.1 \cdot 10^{-227}:\\ \;\;\;\;\left(y - \log y\right) - t\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{+24}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y - t\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 72.1% accurate, 1.7× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_1 := x \cdot \log y\\ t_2 := y \cdot \left(1 - z\right) - t\\ \mathbf{if}\;x \leq -1.06 \cdot 10^{+67}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq -4.8 \cdot 10^{-53}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;x \leq 4.2 \cdot 10^{-227}:\\ \;\;\;\;\left(-t\right) - \log y\\ \mathbf{elif}\;x \leq 6.6 \cdot 10^{+107}:\\ \;\;\;\;t\_2\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (let* ((t_1 (* x (log y))) (t_2 (- (* y (- 1.0 z)) t)))
   (if (<= x -1.06e+67)
     t_1
     (if (<= x -4.8e-53)
       t_2
       (if (<= x 4.2e-227) (- (- t) (log y)) (if (<= x 6.6e+107) t_2 t_1))))))
double code(double x, double y, double z, double t) {
	double t_1 = x * log(y);
	double t_2 = (y * (1.0 - z)) - t;
	double tmp;
	if (x <= -1.06e+67) {
		tmp = t_1;
	} else if (x <= -4.8e-53) {
		tmp = t_2;
	} else if (x <= 4.2e-227) {
		tmp = -t - log(y);
	} else if (x <= 6.6e+107) {
		tmp = t_2;
	} else {
		tmp = t_1;
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: t_2
    real(8) :: tmp
    t_1 = x * log(y)
    t_2 = (y * (1.0d0 - z)) - t
    if (x <= (-1.06d+67)) then
        tmp = t_1
    else if (x <= (-4.8d-53)) then
        tmp = t_2
    else if (x <= 4.2d-227) then
        tmp = -t - log(y)
    else if (x <= 6.6d+107) then
        tmp = t_2
    else
        tmp = t_1
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double t_1 = x * Math.log(y);
	double t_2 = (y * (1.0 - z)) - t;
	double tmp;
	if (x <= -1.06e+67) {
		tmp = t_1;
	} else if (x <= -4.8e-53) {
		tmp = t_2;
	} else if (x <= 4.2e-227) {
		tmp = -t - Math.log(y);
	} else if (x <= 6.6e+107) {
		tmp = t_2;
	} else {
		tmp = t_1;
	}
	return tmp;
}
def code(x, y, z, t):
	t_1 = x * math.log(y)
	t_2 = (y * (1.0 - z)) - t
	tmp = 0
	if x <= -1.06e+67:
		tmp = t_1
	elif x <= -4.8e-53:
		tmp = t_2
	elif x <= 4.2e-227:
		tmp = -t - math.log(y)
	elif x <= 6.6e+107:
		tmp = t_2
	else:
		tmp = t_1
	return tmp
function code(x, y, z, t)
	t_1 = Float64(x * log(y))
	t_2 = Float64(Float64(y * Float64(1.0 - z)) - t)
	tmp = 0.0
	if (x <= -1.06e+67)
		tmp = t_1;
	elseif (x <= -4.8e-53)
		tmp = t_2;
	elseif (x <= 4.2e-227)
		tmp = Float64(Float64(-t) - log(y));
	elseif (x <= 6.6e+107)
		tmp = t_2;
	else
		tmp = t_1;
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	t_1 = x * log(y);
	t_2 = (y * (1.0 - z)) - t;
	tmp = 0.0;
	if (x <= -1.06e+67)
		tmp = t_1;
	elseif (x <= -4.8e-53)
		tmp = t_2;
	elseif (x <= 4.2e-227)
		tmp = -t - log(y);
	elseif (x <= 6.6e+107)
		tmp = t_2;
	else
		tmp = t_1;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(y * N[(1.0 - z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -1.06e+67], t$95$1, If[LessEqual[x, -4.8e-53], t$95$2, If[LessEqual[x, 4.2e-227], N[((-t) - N[Log[y], $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 6.6e+107], t$95$2, t$95$1]]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := x \cdot \log y\\
t_2 := y \cdot \left(1 - z\right) - t\\
\mathbf{if}\;x \leq -1.06 \cdot 10^{+67}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq -4.8 \cdot 10^{-53}:\\
\;\;\;\;t\_2\\

\mathbf{elif}\;x \leq 4.2 \cdot 10^{-227}:\\
\;\;\;\;\left(-t\right) - \log y\\

\mathbf{elif}\;x \leq 6.6 \cdot 10^{+107}:\\
\;\;\;\;t\_2\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.0599999999999999e67 or 6.60000000000000064e107 < x

    1. Initial program 99.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. fma-define99.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right)} - t \]
      2. sub-neg99.4%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-1\right)}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      3. metadata-eval99.4%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-1}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      4. sub-neg99.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \color{blue}{\left(z + \left(-1\right)\right)} \cdot \log \left(1 - y\right)\right) - t \]
      5. metadata-eval99.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + \color{blue}{-1}\right) \cdot \log \left(1 - y\right)\right) - t \]
      6. sub-neg99.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      7. log1p-define99.5%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \color{blue}{\mathsf{log1p}\left(-y\right)}\right) - t \]
    3. Simplified99.5%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \mathsf{log1p}\left(-y\right)\right) - t} \]
    4. Add Preprocessing
    5. Taylor expanded in t around inf 71.2%

      \[\leadsto \color{blue}{t \cdot \left(\left(\frac{\log y \cdot \left(x - 1\right)}{t} + \frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t}\right) - 1\right)} \]
    6. Step-by-step derivation
      1. associate--l+71.2%

        \[\leadsto t \cdot \color{blue}{\left(\frac{\log y \cdot \left(x - 1\right)}{t} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right)} \]
      2. sub-neg71.2%

        \[\leadsto t \cdot \left(\frac{\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)}}{t} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right) \]
      3. metadata-eval71.2%

        \[\leadsto t \cdot \left(\frac{\log y \cdot \left(x + \color{blue}{-1}\right)}{t} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right) \]
      4. associate-/l*71.2%

        \[\leadsto t \cdot \left(\color{blue}{\log y \cdot \frac{x + -1}{t}} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right) \]
      5. +-commutative71.2%

        \[\leadsto t \cdot \left(\log y \cdot \frac{\color{blue}{-1 + x}}{t} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right) \]
      6. associate-/l*69.7%

        \[\leadsto t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\color{blue}{\log \left(1 - y\right) \cdot \frac{z - 1}{t}} - 1\right)\right) \]
      7. sub-neg69.7%

        \[\leadsto t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\log \left(1 - y\right) \cdot \frac{\color{blue}{z + \left(-1\right)}}{t} - 1\right)\right) \]
      8. metadata-eval69.7%

        \[\leadsto t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\log \left(1 - y\right) \cdot \frac{z + \color{blue}{-1}}{t} - 1\right)\right) \]
      9. +-commutative69.7%

        \[\leadsto t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\log \left(1 - y\right) \cdot \frac{\color{blue}{-1 + z}}{t} - 1\right)\right) \]
    7. Simplified69.7%

      \[\leadsto \color{blue}{t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\log \left(1 - y\right) \cdot \frac{-1 + z}{t} - 1\right)\right)} \]
    8. Taylor expanded in x around inf 75.0%

      \[\leadsto \color{blue}{x \cdot \log y} \]
    9. Step-by-step derivation
      1. *-commutative75.0%

        \[\leadsto \color{blue}{\log y \cdot x} \]
    10. Simplified75.0%

      \[\leadsto \color{blue}{\log y \cdot x} \]

    if -1.0599999999999999e67 < x < -4.80000000000000015e-53 or 4.1999999999999999e-227 < x < 6.60000000000000064e107

    1. Initial program 77.7%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 98.3%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative98.3%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
      2. sub-neg98.3%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      3. metadata-eval98.3%

        \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      4. mul-1-neg98.3%

        \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
      5. unsub-neg98.3%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
      6. +-commutative98.3%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
      7. sub-neg98.3%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
      8. metadata-eval98.3%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
      9. +-commutative98.3%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
    5. Simplified98.3%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
    6. Taylor expanded in y around inf 95.1%

      \[\leadsto \color{blue}{y \cdot \left(\left(1 + -1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right) - z\right)} - t \]
    7. Step-by-step derivation
      1. associate--l+95.1%

        \[\leadsto y \cdot \color{blue}{\left(1 + \left(-1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y} - z\right)\right)} - t \]
      2. mul-1-neg95.1%

        \[\leadsto y \cdot \left(1 + \left(\color{blue}{\left(-\frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right)} - z\right)\right) - t \]
      3. log-rec95.1%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\color{blue}{\left(-\log y\right)} \cdot \left(x - 1\right)}{y}\right) - z\right)\right) - t \]
      4. sub-neg95.1%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \color{blue}{\left(x + \left(-1\right)\right)}}{y}\right) - z\right)\right) - t \]
      5. metadata-eval95.1%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \left(x + \color{blue}{-1}\right)}{y}\right) - z\right)\right) - t \]
      6. associate-/l*95.0%

        \[\leadsto y \cdot \left(1 + \left(\left(-\color{blue}{\left(-\log y\right) \cdot \frac{x + -1}{y}}\right) - z\right)\right) - t \]
      7. +-commutative95.0%

        \[\leadsto y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{\color{blue}{-1 + x}}{y}\right) - z\right)\right) - t \]
    8. Simplified95.0%

      \[\leadsto \color{blue}{y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{-1 + x}{y}\right) - z\right)\right)} - t \]
    9. Taylor expanded in y around inf 77.5%

      \[\leadsto \color{blue}{y \cdot \left(1 - z\right)} - t \]

    if -4.80000000000000015e-53 < x < 4.1999999999999999e-227

    1. Initial program 80.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 80.4%

      \[\leadsto \color{blue}{\left(-1 \cdot \log y + \log \left(1 - y\right) \cdot \left(z - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative80.4%

        \[\leadsto \color{blue}{\left(\log \left(1 - y\right) \cdot \left(z - 1\right) + -1 \cdot \log y\right)} - t \]
      2. mul-1-neg80.4%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \left(z - 1\right) + \color{blue}{\left(-\log y\right)}\right) - t \]
      3. unsub-neg80.4%

        \[\leadsto \color{blue}{\left(\log \left(1 - y\right) \cdot \left(z - 1\right) - \log y\right)} - t \]
      4. sub-neg80.4%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \color{blue}{\left(z + \left(-1\right)\right)} - \log y\right) - t \]
      5. metadata-eval80.4%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \left(z + \color{blue}{-1}\right) - \log y\right) - t \]
      6. +-commutative80.4%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \color{blue}{\left(-1 + z\right)} - \log y\right) - t \]
      7. sub-neg80.4%

        \[\leadsto \left(\log \color{blue}{\left(1 + \left(-y\right)\right)} \cdot \left(-1 + z\right) - \log y\right) - t \]
      8. log1p-define99.9%

        \[\leadsto \left(\color{blue}{\mathsf{log1p}\left(-y\right)} \cdot \left(-1 + z\right) - \log y\right) - t \]
    5. Simplified99.9%

      \[\leadsto \color{blue}{\left(\mathsf{log1p}\left(-y\right) \cdot \left(-1 + z\right) - \log y\right)} - t \]
    6. Taylor expanded in y around 0 76.3%

      \[\leadsto \color{blue}{-1 \cdot \left(t + \log y\right)} \]
    7. Step-by-step derivation
      1. mul-1-neg76.3%

        \[\leadsto \color{blue}{-\left(t + \log y\right)} \]
      2. distribute-neg-in76.3%

        \[\leadsto \color{blue}{\left(-t\right) + \left(-\log y\right)} \]
      3. unsub-neg76.3%

        \[\leadsto \color{blue}{\left(-t\right) - \log y} \]
    8. Simplified76.3%

      \[\leadsto \color{blue}{\left(-t\right) - \log y} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification76.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.06 \cdot 10^{+67}:\\ \;\;\;\;x \cdot \log y\\ \mathbf{elif}\;x \leq -4.8 \cdot 10^{-53}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \mathbf{elif}\;x \leq 4.2 \cdot 10^{-227}:\\ \;\;\;\;\left(-t\right) - \log y\\ \mathbf{elif}\;x \leq 6.6 \cdot 10^{+107}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log y\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 94.6% accurate, 1.7× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;-1 + x \leq -4 \cdot 10^{+44}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;-1 + x \leq -0.5:\\ \;\;\;\;\left(y \cdot \left(1 - z\right) - \log y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\left(-1 + x\right) \cdot \log y - t\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (if (<= (+ -1.0 x) -4e+44)
   (- (* x (log y)) t)
   (if (<= (+ -1.0 x) -0.5)
     (- (- (* y (- 1.0 z)) (log y)) t)
     (- (* (+ -1.0 x) (log y)) t))))
double code(double x, double y, double z, double t) {
	double tmp;
	if ((-1.0 + x) <= -4e+44) {
		tmp = (x * log(y)) - t;
	} else if ((-1.0 + x) <= -0.5) {
		tmp = ((y * (1.0 - z)) - log(y)) - t;
	} else {
		tmp = ((-1.0 + x) * log(y)) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (((-1.0d0) + x) <= (-4d+44)) then
        tmp = (x * log(y)) - t
    else if (((-1.0d0) + x) <= (-0.5d0)) then
        tmp = ((y * (1.0d0 - z)) - log(y)) - t
    else
        tmp = (((-1.0d0) + x) * log(y)) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double tmp;
	if ((-1.0 + x) <= -4e+44) {
		tmp = (x * Math.log(y)) - t;
	} else if ((-1.0 + x) <= -0.5) {
		tmp = ((y * (1.0 - z)) - Math.log(y)) - t;
	} else {
		tmp = ((-1.0 + x) * Math.log(y)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	tmp = 0
	if (-1.0 + x) <= -4e+44:
		tmp = (x * math.log(y)) - t
	elif (-1.0 + x) <= -0.5:
		tmp = ((y * (1.0 - z)) - math.log(y)) - t
	else:
		tmp = ((-1.0 + x) * math.log(y)) - t
	return tmp
function code(x, y, z, t)
	tmp = 0.0
	if (Float64(-1.0 + x) <= -4e+44)
		tmp = Float64(Float64(x * log(y)) - t);
	elseif (Float64(-1.0 + x) <= -0.5)
		tmp = Float64(Float64(Float64(y * Float64(1.0 - z)) - log(y)) - t);
	else
		tmp = Float64(Float64(Float64(-1.0 + x) * log(y)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if ((-1.0 + x) <= -4e+44)
		tmp = (x * log(y)) - t;
	elseif ((-1.0 + x) <= -0.5)
		tmp = ((y * (1.0 - z)) - log(y)) - t;
	else
		tmp = ((-1.0 + x) * log(y)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[LessEqual[N[(-1.0 + x), $MachinePrecision], -4e+44], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[N[(-1.0 + x), $MachinePrecision], -0.5], N[(N[(N[(y * N[(1.0 - z), $MachinePrecision]), $MachinePrecision] - N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;-1 + x \leq -4 \cdot 10^{+44}:\\
\;\;\;\;x \cdot \log y - t\\

\mathbf{elif}\;-1 + x \leq -0.5:\\
\;\;\;\;\left(y \cdot \left(1 - z\right) - \log y\right) - t\\

\mathbf{else}:\\
\;\;\;\;\left(-1 + x\right) \cdot \log y - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x - 1 < -4.0000000000000004e44

    1. Initial program 98.9%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 99.5%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(y \cdot \left(-0.25 \cdot y - 0.3333333333333333\right) - 0.5\right) - 1\right)\right)}\right) - t \]
    4. Taylor expanded in y around 0 99.1%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{-0.5 \cdot y} - 1\right)\right)\right) - t \]
    5. Step-by-step derivation
      1. *-commutative99.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{y \cdot -0.5} - 1\right)\right)\right) - t \]
    6. Simplified99.1%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{y \cdot -0.5} - 1\right)\right)\right) - t \]
    7. Taylor expanded in x around inf 97.6%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]
    8. Step-by-step derivation
      1. *-commutative97.6%

        \[\leadsto \color{blue}{\log y \cdot x} - t \]
    9. Simplified97.6%

      \[\leadsto \color{blue}{\log y \cdot x} - t \]

    if -4.0000000000000004e44 < x - 1 < -0.5

    1. Initial program 75.3%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 73.0%

      \[\leadsto \color{blue}{\left(-1 \cdot \log y + \log \left(1 - y\right) \cdot \left(z - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative73.0%

        \[\leadsto \color{blue}{\left(\log \left(1 - y\right) \cdot \left(z - 1\right) + -1 \cdot \log y\right)} - t \]
      2. mul-1-neg73.0%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \left(z - 1\right) + \color{blue}{\left(-\log y\right)}\right) - t \]
      3. unsub-neg73.0%

        \[\leadsto \color{blue}{\left(\log \left(1 - y\right) \cdot \left(z - 1\right) - \log y\right)} - t \]
      4. sub-neg73.0%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \color{blue}{\left(z + \left(-1\right)\right)} - \log y\right) - t \]
      5. metadata-eval73.0%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \left(z + \color{blue}{-1}\right) - \log y\right) - t \]
      6. +-commutative73.0%

        \[\leadsto \left(\log \left(1 - y\right) \cdot \color{blue}{\left(-1 + z\right)} - \log y\right) - t \]
      7. sub-neg73.0%

        \[\leadsto \left(\log \color{blue}{\left(1 + \left(-y\right)\right)} \cdot \left(-1 + z\right) - \log y\right) - t \]
      8. log1p-define97.6%

        \[\leadsto \left(\color{blue}{\mathsf{log1p}\left(-y\right)} \cdot \left(-1 + z\right) - \log y\right) - t \]
    5. Simplified97.6%

      \[\leadsto \color{blue}{\left(\mathsf{log1p}\left(-y\right) \cdot \left(-1 + z\right) - \log y\right)} - t \]
    6. Taylor expanded in y around 0 95.7%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) - \log y\right)} - t \]
    7. Step-by-step derivation
      1. sub-neg95.7%

        \[\leadsto \left(-1 \cdot \left(y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - \log y\right) - t \]
      2. metadata-eval95.7%

        \[\leadsto \left(-1 \cdot \left(y \cdot \left(z + \color{blue}{-1}\right)\right) - \log y\right) - t \]
      3. neg-mul-195.7%

        \[\leadsto \left(\color{blue}{\left(-y \cdot \left(z + -1\right)\right)} - \log y\right) - t \]
      4. distribute-rgt-neg-in95.7%

        \[\leadsto \left(\color{blue}{y \cdot \left(-\left(z + -1\right)\right)} - \log y\right) - t \]
      5. +-commutative95.7%

        \[\leadsto \left(y \cdot \left(-\color{blue}{\left(-1 + z\right)}\right) - \log y\right) - t \]
      6. distribute-neg-in95.7%

        \[\leadsto \left(y \cdot \color{blue}{\left(\left(--1\right) + \left(-z\right)\right)} - \log y\right) - t \]
      7. metadata-eval95.7%

        \[\leadsto \left(y \cdot \left(\color{blue}{1} + \left(-z\right)\right) - \log y\right) - t \]
      8. sub-neg95.7%

        \[\leadsto \left(y \cdot \color{blue}{\left(1 - z\right)} - \log y\right) - t \]
    8. Simplified95.7%

      \[\leadsto \color{blue}{\left(y \cdot \left(1 - z\right) - \log y\right)} - t \]

    if -0.5 < x - 1

    1. Initial program 98.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. fma-define98.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right)} - t \]
      2. sub-neg98.4%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-1\right)}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      3. metadata-eval98.4%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-1}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      4. sub-neg98.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \color{blue}{\left(z + \left(-1\right)\right)} \cdot \log \left(1 - y\right)\right) - t \]
      5. metadata-eval98.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + \color{blue}{-1}\right) \cdot \log \left(1 - y\right)\right) - t \]
      6. sub-neg98.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      7. log1p-define99.7%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \color{blue}{\mathsf{log1p}\left(-y\right)}\right) - t \]
    3. Simplified99.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \mathsf{log1p}\left(-y\right)\right) - t} \]
    4. Add Preprocessing
    5. Taylor expanded in y around 0 97.0%

      \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) - t} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification96.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;-1 + x \leq -4 \cdot 10^{+44}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;-1 + x \leq -0.5:\\ \;\;\;\;\left(y \cdot \left(1 - z\right) - \log y\right) - t\\ \mathbf{else}:\\ \;\;\;\;\left(-1 + x\right) \cdot \log y - t\\ \end{array} \]
  5. Add Preprocessing
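
In the middle regime the log1p-define step raises accuracy from 73.0% to 97.6%: it evaluates log(1 - y) as log1p(-y), so 1 - y is never rounded before the logarithm. A minimal sketch of that effect, assuming only Python's standard math module (the same calls as in the Python listings above):

import math

y = 1e-18
print(math.log(1.0 - y))   # 1.0 - y rounds to 1.0, so this prints 0.0
print(math.log1p(-y))      # close to the true log(1 - y), about -1e-18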

Alternative 8: 99.6% accurate, 1.7× speedup?

\[\begin{array}{l} \\ \left(\left(-1 + x\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \left(-1 + y \cdot \left(y \cdot -0.3333333333333333 - 0.5\right)\right)\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (-
  (+
   (* (+ -1.0 x) (log y))
   (* (+ z -1.0) (* y (+ -1.0 (* y (- (* y -0.3333333333333333) 0.5))))))
  t))
double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * log(y)) + ((z + -1.0) * (y * (-1.0 + (y * ((y * -0.3333333333333333) - 0.5)))))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((((-1.0d0) + x) * log(y)) + ((z + (-1.0d0)) * (y * ((-1.0d0) + (y * ((y * (-0.3333333333333333d0)) - 0.5d0)))))) - t
end function
public static double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * Math.log(y)) + ((z + -1.0) * (y * (-1.0 + (y * ((y * -0.3333333333333333) - 0.5)))))) - t;
}
def code(x, y, z, t):
	return (((-1.0 + x) * math.log(y)) + ((z + -1.0) * (y * (-1.0 + (y * ((y * -0.3333333333333333) - 0.5)))))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(-1.0 + x) * log(y)) + Float64(Float64(z + -1.0) * Float64(y * Float64(-1.0 + Float64(y * Float64(Float64(y * -0.3333333333333333) - 0.5)))))) - t)
end
function tmp = code(x, y, z, t)
	tmp = (((-1.0 + x) * log(y)) + ((z + -1.0) * (y * (-1.0 + (y * ((y * -0.3333333333333333) - 0.5)))))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z + -1.0), $MachinePrecision] * N[(y * N[(-1.0 + N[(y * N[(N[(y * -0.3333333333333333), $MachinePrecision] - 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(-1 + x\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \left(-1 + y \cdot \left(y \cdot -0.3333333333333333 - 0.5\right)\right)\right)\right) - t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0 99.1%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(-0.3333333333333333 \cdot y - 0.5\right) - 1\right)\right)}\right) - t \]
  4. Final simplification99.1%

    \[\leadsto \left(\left(-1 + x\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \left(-1 + y \cdot \left(y \cdot -0.3333333333333333 - 0.5\right)\right)\right)\right) - t \]
  5. Add Preprocessing
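
Step 3 replaces log(1 - y) with the first three terms of its Taylor series, -y - y^2/2 - y^3/3, written in Horner form in the listings above. A small check of that truncation against an accurate log(1 - y), assuming Python's math.log1p as the reference:

import math

y = 1e-3
series = y * (-1.0 + y * (y * -0.3333333333333333 - 0.5))   # Horner form from the listing
reference = math.log1p(-y)                                   # accurate log(1 - y)
print(series, reference, abs(series - reference))            # gap is about y**4/4, roughly 2.5e-13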

Alternative 9: 99.4% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \left(\left(-1 + x\right) \cdot \log y + y \cdot \left(-0.5 \cdot \left(y \cdot \left(z + -1\right)\right) + \left(1 - z\right)\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (-
  (+ (* (+ -1.0 x) (log y)) (* y (+ (* -0.5 (* y (+ z -1.0))) (- 1.0 z))))
  t))
double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * log(y)) + (y * ((-0.5 * (y * (z + -1.0))) + (1.0 - z)))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((((-1.0d0) + x) * log(y)) + (y * (((-0.5d0) * (y * (z + (-1.0d0)))) + (1.0d0 - z)))) - t
end function
public static double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * Math.log(y)) + (y * ((-0.5 * (y * (z + -1.0))) + (1.0 - z)))) - t;
}
def code(x, y, z, t):
	return (((-1.0 + x) * math.log(y)) + (y * ((-0.5 * (y * (z + -1.0))) + (1.0 - z)))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(-1.0 + x) * log(y)) + Float64(y * Float64(Float64(-0.5 * Float64(y * Float64(z + -1.0))) + Float64(1.0 - z)))) - t)
end
function tmp = code(x, y, z, t)
	tmp = (((-1.0 + x) * log(y)) + (y * ((-0.5 * (y * (z + -1.0))) + (1.0 - z)))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(y * N[(N[(-0.5 * N[(y * N[(z + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 - z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(-1 + x\right) \cdot \log y + y \cdot \left(-0.5 \cdot \left(y \cdot \left(z + -1\right)\right) + \left(1 - z\right)\right)\right) - t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0 98.8%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \color{blue}{y \cdot \left(-1 \cdot \left(z - 1\right) + -0.5 \cdot \left(y \cdot \left(z - 1\right)\right)\right)}\right) - t \]
  4. Final simplification98.8%

    \[\leadsto \left(\left(-1 + x\right) \cdot \log y + y \cdot \left(-0.5 \cdot \left(y \cdot \left(z + -1\right)\right) + \left(1 - z\right)\right)\right) - t \]
  5. Add Preprocessing
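
For reference, the polynomial in this alternative keeps two terms of the Taylor series of log(1 - y) around y = 0, so the replaced subterm satisfies

\[\left(z - 1\right) \cdot \log \left(1 - y\right) \approx \left(z - 1\right) \cdot \left(-y - 0.5 \cdot y^{2}\right) = y \cdot \left(-0.5 \cdot \left(y \cdot \left(z + -1\right)\right) + \left(1 - z\right)\right) \]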

Alternative 10: 99.4% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \left(\left(-1 + x\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \left(-1 + y \cdot -0.5\right)\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (+ -1.0 x) (log y)) (* (+ z -1.0) (* y (+ -1.0 (* y -0.5))))) t))
double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * log(y)) + ((z + -1.0) * (y * (-1.0 + (y * -0.5))))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((((-1.0d0) + x) * log(y)) + ((z + (-1.0d0)) * (y * ((-1.0d0) + (y * (-0.5d0)))))) - t
end function
public static double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * Math.log(y)) + ((z + -1.0) * (y * (-1.0 + (y * -0.5))))) - t;
}
def code(x, y, z, t):
	return (((-1.0 + x) * math.log(y)) + ((z + -1.0) * (y * (-1.0 + (y * -0.5))))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(-1.0 + x) * log(y)) + Float64(Float64(z + -1.0) * Float64(y * Float64(-1.0 + Float64(y * -0.5))))) - t)
end
function tmp = code(x, y, z, t)
	tmp = (((-1.0 + x) * log(y)) + ((z + -1.0) * (y * (-1.0 + (y * -0.5))))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(z + -1.0), $MachinePrecision] * N[(y * N[(-1.0 + N[(y * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(-1 + x\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \left(-1 + y \cdot -0.5\right)\right)\right) - t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0 98.8%

    \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(-0.5 \cdot y - 1\right)\right)}\right) - t \]
  4. Final simplification98.8%

    \[\leadsto \left(\left(-1 + x\right) \cdot \log y + \left(z + -1\right) \cdot \left(y \cdot \left(-1 + y \cdot -0.5\right)\right)\right) - t \]
  5. Add Preprocessing

Alternative 11: 94.7% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.7 \cdot 10^{+39}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;x \leq 0.0152:\\ \;\;\;\;\left(-t\right) - \left(\log y + z \cdot y\right)\\ \mathbf{else}:\\ \;\;\;\;\left(-1 + x\right) \cdot \log y - t\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (if (<= x -1.7e+39)
   (- (* x (log y)) t)
   (if (<= x 0.0152)
     (- (- t) (+ (log y) (* z y)))
     (- (* (+ -1.0 x) (log y)) t))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (x <= -1.7e+39) {
		tmp = (x * log(y)) - t;
	} else if (x <= 0.0152) {
		tmp = -t - (log(y) + (z * y));
	} else {
		tmp = ((-1.0 + x) * log(y)) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (x <= (-1.7d+39)) then
        tmp = (x * log(y)) - t
    else if (x <= 0.0152d0) then
        tmp = -t - (log(y) + (z * y))
    else
        tmp = (((-1.0d0) + x) * log(y)) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double tmp;
	if (x <= -1.7e+39) {
		tmp = (x * Math.log(y)) - t;
	} else if (x <= 0.0152) {
		tmp = -t - (Math.log(y) + (z * y));
	} else {
		tmp = ((-1.0 + x) * Math.log(y)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	tmp = 0
	if x <= -1.7e+39:
		tmp = (x * math.log(y)) - t
	elif x <= 0.0152:
		tmp = -t - (math.log(y) + (z * y))
	else:
		tmp = ((-1.0 + x) * math.log(y)) - t
	return tmp
function code(x, y, z, t)
	tmp = 0.0
	if (x <= -1.7e+39)
		tmp = Float64(Float64(x * log(y)) - t);
	elseif (x <= 0.0152)
		tmp = Float64(Float64(-t) - Float64(log(y) + Float64(z * y)));
	else
		tmp = Float64(Float64(Float64(-1.0 + x) * log(y)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if (x <= -1.7e+39)
		tmp = (x * log(y)) - t;
	elseif (x <= 0.0152)
		tmp = -t - (log(y) + (z * y));
	else
		tmp = ((-1.0 + x) * log(y)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[LessEqual[x, -1.7e+39], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], If[LessEqual[x, 0.0152], N[((-t) - N[(N[Log[y], $MachinePrecision] + N[(z * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.7 \cdot 10^{+39}:\\
\;\;\;\;x \cdot \log y - t\\

\mathbf{elif}\;x \leq 0.0152:\\
\;\;\;\;\left(-t\right) - \left(\log y + z \cdot y\right)\\

\mathbf{else}:\\
\;\;\;\;\left(-1 + x\right) \cdot \log y - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.6999999999999999e39

    1. Initial program 98.9%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 99.5%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \color{blue}{\left(y \cdot \left(y \cdot \left(y \cdot \left(-0.25 \cdot y - 0.3333333333333333\right) - 0.5\right) - 1\right)\right)}\right) - t \]
    4. Taylor expanded in y around 0 99.1%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{-0.5 \cdot y} - 1\right)\right)\right) - t \]
    5. Step-by-step derivation
      1. *-commutative99.1%

        \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{y \cdot -0.5} - 1\right)\right)\right) - t \]
    6. Simplified99.1%

      \[\leadsto \left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \left(y \cdot \left(\color{blue}{y \cdot -0.5} - 1\right)\right)\right) - t \]
    7. Taylor expanded in x around inf 97.6%

      \[\leadsto \color{blue}{x \cdot \log y} - t \]
    8. Step-by-step derivation
      1. *-commutative97.6%

        \[\leadsto \color{blue}{\log y \cdot x} - t \]
    9. Simplified97.6%

      \[\leadsto \color{blue}{\log y \cdot x} - t \]

    if -1.6999999999999999e39 < x < 0.0152

    1. Initial program 75.3%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 98.0%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative98.0%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
      2. sub-neg98.0%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      3. metadata-eval98.0%

        \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      4. mul-1-neg98.0%

        \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
      5. unsub-neg98.0%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
      6. +-commutative98.0%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
      7. sub-neg98.0%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
      8. metadata-eval98.0%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
      9. +-commutative98.0%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
    5. Simplified98.0%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
    6. Taylor expanded in z around inf 97.9%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \color{blue}{y \cdot z}\right) - t \]
    7. Taylor expanded in x around 0 95.7%

      \[\leadsto \color{blue}{\left(-1 \cdot \log y - y \cdot z\right)} - t \]
    8. Step-by-step derivation
      1. neg-mul-195.7%

        \[\leadsto \left(\color{blue}{\left(-\log y\right)} - y \cdot z\right) - t \]
    9. Simplified95.7%

      \[\leadsto \color{blue}{\left(\left(-\log y\right) - y \cdot z\right)} - t \]

    if 0.0152 < x

    1. Initial program 98.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. fma-define98.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right)} - t \]
      2. sub-neg98.4%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-1\right)}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      3. metadata-eval98.4%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-1}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      4. sub-neg98.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \color{blue}{\left(z + \left(-1\right)\right)} \cdot \log \left(1 - y\right)\right) - t \]
      5. metadata-eval98.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + \color{blue}{-1}\right) \cdot \log \left(1 - y\right)\right) - t \]
      6. sub-neg98.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      7. log1p-define99.7%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \color{blue}{\mathsf{log1p}\left(-y\right)}\right) - t \]
    3. Simplified99.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \mathsf{log1p}\left(-y\right)\right) - t} \]
    4. Add Preprocessing
    5. Taylor expanded in y around 0 97.0%

      \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) - t} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification96.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.7 \cdot 10^{+39}:\\ \;\;\;\;x \cdot \log y - t\\ \mathbf{elif}\;x \leq 0.0152:\\ \;\;\;\;\left(-t\right) - \left(\log y + z \cdot y\right)\\ \mathbf{else}:\\ \;\;\;\;\left(-1 + x\right) \cdot \log y - t\\ \end{array} \]
  5. Add Preprocessing
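
The accuracy percentages attached to each step compare binary64 results with a higher-precision evaluation over sampled inputs. A rough way to reproduce that kind of comparison for a single input, assuming only the standard decimal module (an illustration, not Herbie's actual error metric):

import math
from decimal import Decimal, getcontext

def original_f64(x, y, z, t):
    # the initial program, evaluated in binary64
    return ((x - 1.0) * math.log(y) + (z - 1.0) * math.log(1.0 - y)) - t

def reference(x, y, z, t, digits=50):
    # the same expression in 50-digit decimal arithmetic, rounded back to a double
    getcontext().prec = digits
    x, y, z, t = (Decimal(v) for v in (x, y, z, t))
    return float((x - 1) * y.ln() + (z - 1) * (1 - y).ln() - t)

x, y, z, t = 2.5, 1e-10, 1e18, 0.125
print(original_f64(x, y, z, t), reference(x, y, z, t))
# Disagreement beyond a unit or two in the last place reflects rounding error
# in the binary64 evaluation, here mostly from computing 1.0 - y before the log.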

Alternative 12: 89.3% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -4.3 \cdot 10^{+217}:\\ \;\;\;\;z \cdot \left(\left(-y\right) - \frac{t}{z}\right)\\ \mathbf{elif}\;z \leq 2.25 \cdot 10^{+181}:\\ \;\;\;\;\left(-1 + x\right) \cdot \log y - t\\ \mathbf{else}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (if (<= z -4.3e+217)
   (* z (- (- y) (/ t z)))
   (if (<= z 2.25e+181) (- (* (+ -1.0 x) (log y)) t) (- (* y (- 1.0 z)) t))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (z <= -4.3e+217) {
		tmp = z * (-y - (t / z));
	} else if (z <= 2.25e+181) {
		tmp = ((-1.0 + x) * log(y)) - t;
	} else {
		tmp = (y * (1.0 - z)) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (z <= (-4.3d+217)) then
        tmp = z * (-y - (t / z))
    else if (z <= 2.25d+181) then
        tmp = (((-1.0d0) + x) * log(y)) - t
    else
        tmp = (y * (1.0d0 - z)) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double tmp;
	if (z <= -4.3e+217) {
		tmp = z * (-y - (t / z));
	} else if (z <= 2.25e+181) {
		tmp = ((-1.0 + x) * Math.log(y)) - t;
	} else {
		tmp = (y * (1.0 - z)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	tmp = 0
	if z <= -4.3e+217:
		tmp = z * (-y - (t / z))
	elif z <= 2.25e+181:
		tmp = ((-1.0 + x) * math.log(y)) - t
	else:
		tmp = (y * (1.0 - z)) - t
	return tmp
function code(x, y, z, t)
	tmp = 0.0
	if (z <= -4.3e+217)
		tmp = Float64(z * Float64(Float64(-y) - Float64(t / z)));
	elseif (z <= 2.25e+181)
		tmp = Float64(Float64(Float64(-1.0 + x) * log(y)) - t);
	else
		tmp = Float64(Float64(y * Float64(1.0 - z)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if (z <= -4.3e+217)
		tmp = z * (-y - (t / z));
	elseif (z <= 2.25e+181)
		tmp = ((-1.0 + x) * log(y)) - t;
	else
		tmp = (y * (1.0 - z)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[LessEqual[z, -4.3e+217], N[(z * N[((-y) - N[(t / z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[z, 2.25e+181], N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(y * N[(1.0 - z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq -4.3 \cdot 10^{+217}:\\
\;\;\;\;z \cdot \left(\left(-y\right) - \frac{t}{z}\right)\\

\mathbf{elif}\;z \leq 2.25 \cdot 10^{+181}:\\
\;\;\;\;\left(-1 + x\right) \cdot \log y - t\\

\mathbf{else}:\\
\;\;\;\;y \cdot \left(1 - z\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if z < -4.3000000000000001e217

    1. Initial program 37.5%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 95.1%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative95.1%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
      2. sub-neg95.1%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      3. metadata-eval95.1%

        \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      4. mul-1-neg95.1%

        \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
      5. unsub-neg95.1%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
      6. +-commutative95.1%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
      7. sub-neg95.1%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
      8. metadata-eval95.1%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
      9. +-commutative95.1%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
    5. Simplified95.1%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
    6. Taylor expanded in z around inf 88.1%

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right)} - t \]
    7. Step-by-step derivation
      1. associate-*r*88.1%

        \[\leadsto \color{blue}{\left(-1 \cdot y\right) \cdot z} - t \]
      2. neg-mul-188.1%

        \[\leadsto \color{blue}{\left(-y\right)} \cdot z - t \]
    8. Simplified88.1%

      \[\leadsto \color{blue}{\left(-y\right) \cdot z} - t \]
    9. Taylor expanded in z around inf 88.2%

      \[\leadsto \color{blue}{z \cdot \left(-1 \cdot y + -1 \cdot \frac{t}{z}\right)} \]
    10. Step-by-step derivation
      1. *-commutative88.2%

        \[\leadsto \color{blue}{\left(-1 \cdot y + -1 \cdot \frac{t}{z}\right) \cdot z} \]
      2. distribute-lft-out88.2%

        \[\leadsto \color{blue}{\left(-1 \cdot \left(y + \frac{t}{z}\right)\right)} \cdot z \]
      3. neg-mul-188.2%

        \[\leadsto \color{blue}{\left(-\left(y + \frac{t}{z}\right)\right)} \cdot z \]
      4. distribute-lft-neg-in88.2%

        \[\leadsto \color{blue}{-\left(y + \frac{t}{z}\right) \cdot z} \]
      5. distribute-rgt-neg-in88.2%

        \[\leadsto \color{blue}{\left(y + \frac{t}{z}\right) \cdot \left(-z\right)} \]
    11. Simplified88.2%

      \[\leadsto \color{blue}{\left(y + \frac{t}{z}\right) \cdot \left(-z\right)} \]

    if -4.3000000000000001e217 < z < 2.25e181

    1. Initial program 94.3%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. fma-define94.3%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right)} - t \]
      2. sub-neg94.3%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-1\right)}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      3. metadata-eval94.3%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-1}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      4. sub-neg94.3%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \color{blue}{\left(z + \left(-1\right)\right)} \cdot \log \left(1 - y\right)\right) - t \]
      5. metadata-eval94.3%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + \color{blue}{-1}\right) \cdot \log \left(1 - y\right)\right) - t \]
      6. sub-neg94.3%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      7. log1p-define99.8%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \color{blue}{\mathsf{log1p}\left(-y\right)}\right) - t \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \mathsf{log1p}\left(-y\right)\right) - t} \]
    4. Add Preprocessing
    5. Taylor expanded in y around 0 92.7%

      \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) - t} \]

    if 2.25e181 < z

    1. Initial program 56.7%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 97.9%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative97.9%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
      2. sub-neg97.9%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      3. metadata-eval97.9%

        \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      4. mul-1-neg97.9%

        \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
      5. unsub-neg97.9%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
      6. +-commutative97.9%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
      7. sub-neg97.9%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
      8. metadata-eval97.9%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
      9. +-commutative97.9%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
    5. Simplified97.9%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
    6. Taylor expanded in y around inf 85.4%

      \[\leadsto \color{blue}{y \cdot \left(\left(1 + -1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right) - z\right)} - t \]
    7. Step-by-step derivation
      1. associate--l+85.4%

        \[\leadsto y \cdot \color{blue}{\left(1 + \left(-1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y} - z\right)\right)} - t \]
      2. mul-1-neg85.4%

        \[\leadsto y \cdot \left(1 + \left(\color{blue}{\left(-\frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right)} - z\right)\right) - t \]
      3. log-rec85.4%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\color{blue}{\left(-\log y\right)} \cdot \left(x - 1\right)}{y}\right) - z\right)\right) - t \]
      4. sub-neg85.4%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \color{blue}{\left(x + \left(-1\right)\right)}}{y}\right) - z\right)\right) - t \]
      5. metadata-eval85.4%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \left(x + \color{blue}{-1}\right)}{y}\right) - z\right)\right) - t \]
      6. associate-/l*85.3%

        \[\leadsto y \cdot \left(1 + \left(\left(-\color{blue}{\left(-\log y\right) \cdot \frac{x + -1}{y}}\right) - z\right)\right) - t \]
      7. +-commutative85.3%

        \[\leadsto y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{\color{blue}{-1 + x}}{y}\right) - z\right)\right) - t \]
    8. Simplified85.3%

      \[\leadsto \color{blue}{y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{-1 + x}{y}\right) - z\right)\right)} - t \]
    9. Taylor expanded in y around inf 80.8%

      \[\leadsto \color{blue}{y \cdot \left(1 - z\right)} - t \]
  3. Recombined 3 regimes into one program.
  4. Final simplification91.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -4.3 \cdot 10^{+217}:\\ \;\;\;\;z \cdot \left(\left(-y\right) - \frac{t}{z}\right)\\ \mathbf{elif}\;z \leq 2.25 \cdot 10^{+181}:\\ \;\;\;\;\left(-1 + x\right) \cdot \log y - t\\ \mathbf{else}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \end{array} \]
  5. Add Preprocessing

Alternative 13: 67.1% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.4 \cdot 10^{+67} \lor \neg \left(x \leq 2.7 \cdot 10^{+107}\right):\\ \;\;\;\;x \cdot \log y\\ \mathbf{else}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \end{array} \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (if (or (<= x -1.4e+67) (not (<= x 2.7e+107)))
   (* x (log y))
   (- (* y (- 1.0 z)) t)))
double code(double x, double y, double z, double t) {
	double tmp;
	if ((x <= -1.4e+67) || !(x <= 2.7e+107)) {
		tmp = x * log(y);
	} else {
		tmp = (y * (1.0 - z)) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((x <= (-1.4d+67)) .or. (.not. (x <= 2.7d+107))) then
        tmp = x * log(y)
    else
        tmp = (y * (1.0d0 - z)) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
	double tmp;
	if ((x <= -1.4e+67) || !(x <= 2.7e+107)) {
		tmp = x * Math.log(y);
	} else {
		tmp = (y * (1.0 - z)) - t;
	}
	return tmp;
}
def code(x, y, z, t):
	tmp = 0
	if (x <= -1.4e+67) or not (x <= 2.7e+107):
		tmp = x * math.log(y)
	else:
		tmp = (y * (1.0 - z)) - t
	return tmp
function code(x, y, z, t)
	tmp = 0.0
	if ((x <= -1.4e+67) || !(x <= 2.7e+107))
		tmp = Float64(x * log(y));
	else
		tmp = Float64(Float64(y * Float64(1.0 - z)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if ((x <= -1.4e+67) || ~((x <= 2.7e+107)))
		tmp = x * log(y);
	else
		tmp = (y * (1.0 - z)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[Or[LessEqual[x, -1.4e+67], N[Not[LessEqual[x, 2.7e+107]], $MachinePrecision]], N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision], N[(N[(y * N[(1.0 - z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.4 \cdot 10^{+67} \lor \neg \left(x \leq 2.7 \cdot 10^{+107}\right):\\
\;\;\;\;x \cdot \log y\\

\mathbf{else}:\\
\;\;\;\;y \cdot \left(1 - z\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -1.3999999999999999e67 or 2.7000000000000001e107 < x

    1. Initial program 99.4%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Step-by-step derivation
      1. fma-define99.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right)} - t \]
      2. sub-neg99.4%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-1\right)}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      3. metadata-eval99.4%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-1}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
      4. sub-neg99.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \color{blue}{\left(z + \left(-1\right)\right)} \cdot \log \left(1 - y\right)\right) - t \]
      5. metadata-eval99.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + \color{blue}{-1}\right) \cdot \log \left(1 - y\right)\right) - t \]
      6. sub-neg99.4%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
      7. log1p-define99.5%

        \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \color{blue}{\mathsf{log1p}\left(-y\right)}\right) - t \]
    3. Simplified99.5%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \mathsf{log1p}\left(-y\right)\right) - t} \]
    4. Add Preprocessing
    5. Taylor expanded in t around inf 71.2%

      \[\leadsto \color{blue}{t \cdot \left(\left(\frac{\log y \cdot \left(x - 1\right)}{t} + \frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t}\right) - 1\right)} \]
    6. Step-by-step derivation
      1. associate--l+71.2%

        \[\leadsto t \cdot \color{blue}{\left(\frac{\log y \cdot \left(x - 1\right)}{t} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right)} \]
      2. sub-neg71.2%

        \[\leadsto t \cdot \left(\frac{\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)}}{t} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right) \]
      3. metadata-eval71.2%

        \[\leadsto t \cdot \left(\frac{\log y \cdot \left(x + \color{blue}{-1}\right)}{t} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right) \]
      4. associate-/l*71.2%

        \[\leadsto t \cdot \left(\color{blue}{\log y \cdot \frac{x + -1}{t}} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right) \]
      5. +-commutative71.2%

        \[\leadsto t \cdot \left(\log y \cdot \frac{\color{blue}{-1 + x}}{t} + \left(\frac{\log \left(1 - y\right) \cdot \left(z - 1\right)}{t} - 1\right)\right) \]
      6. associate-/l*69.7%

        \[\leadsto t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\color{blue}{\log \left(1 - y\right) \cdot \frac{z - 1}{t}} - 1\right)\right) \]
      7. sub-neg69.7%

        \[\leadsto t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\log \left(1 - y\right) \cdot \frac{\color{blue}{z + \left(-1\right)}}{t} - 1\right)\right) \]
      8. metadata-eval69.7%

        \[\leadsto t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\log \left(1 - y\right) \cdot \frac{z + \color{blue}{-1}}{t} - 1\right)\right) \]
      9. +-commutative69.7%

        \[\leadsto t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\log \left(1 - y\right) \cdot \frac{\color{blue}{-1 + z}}{t} - 1\right)\right) \]
    7. Simplified69.7%

      \[\leadsto \color{blue}{t \cdot \left(\log y \cdot \frac{-1 + x}{t} + \left(\log \left(1 - y\right) \cdot \frac{-1 + z}{t} - 1\right)\right)} \]
    8. Taylor expanded in x around inf 75.0%

      \[\leadsto \color{blue}{x \cdot \log y} \]
    9. Step-by-step derivation
      1. *-commutative75.0%

        \[\leadsto \color{blue}{\log y \cdot x} \]
    10. Simplified75.0%

      \[\leadsto \color{blue}{\log y \cdot x} \]

    if -1.3999999999999999e67 < x < 2.7000000000000001e107

    1. Initial program 78.8%

      \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0 97.7%

      \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
    4. Step-by-step derivation
      1. +-commutative97.7%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
      2. sub-neg97.7%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      3. metadata-eval97.7%

        \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
      4. mul-1-neg97.7%

        \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
      5. unsub-neg97.7%

        \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
      6. +-commutative97.7%

        \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
      7. sub-neg97.7%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
      8. metadata-eval97.7%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
      9. +-commutative97.7%

        \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
    5. Simplified97.7%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
    6. Taylor expanded in y around inf 95.1%

      \[\leadsto \color{blue}{y \cdot \left(\left(1 + -1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right) - z\right)} - t \]
    7. Step-by-step derivation
      1. associate--l+95.1%

        \[\leadsto y \cdot \color{blue}{\left(1 + \left(-1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y} - z\right)\right)} - t \]
      2. mul-1-neg95.1%

        \[\leadsto y \cdot \left(1 + \left(\color{blue}{\left(-\frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right)} - z\right)\right) - t \]
      3. log-rec95.1%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\color{blue}{\left(-\log y\right)} \cdot \left(x - 1\right)}{y}\right) - z\right)\right) - t \]
      4. sub-neg95.1%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \color{blue}{\left(x + \left(-1\right)\right)}}{y}\right) - z\right)\right) - t \]
      5. metadata-eval95.1%

        \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \left(x + \color{blue}{-1}\right)}{y}\right) - z\right)\right) - t \]
      6. associate-/l*95.0%

        \[\leadsto y \cdot \left(1 + \left(\left(-\color{blue}{\left(-\log y\right) \cdot \frac{x + -1}{y}}\right) - z\right)\right) - t \]
      7. +-commutative95.0%

        \[\leadsto y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{\color{blue}{-1 + x}}{y}\right) - z\right)\right) - t \]
    8. Simplified95.0%

      \[\leadsto \color{blue}{y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{-1 + x}{y}\right) - z\right)\right)} - t \]
    9. Taylor expanded in y around inf 70.1%

      \[\leadsto \color{blue}{y \cdot \left(1 - z\right)} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification72.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.4 \cdot 10^{+67} \lor \neg \left(x \leq 2.7 \cdot 10^{+107}\right):\\ \;\;\;\;x \cdot \log y\\ \mathbf{else}:\\ \;\;\;\;y \cdot \left(1 - z\right) - t\\ \end{array} \]
  5. Add Preprocessing

Alternative 14: 99.1% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \left(\left(-1 + x\right) \cdot \log y + y \cdot \left(1 - z\right)\right) - t \end{array} \]
(FPCore (x y z t)
 :precision binary64
 (- (+ (* (+ -1.0 x) (log y)) (* y (- 1.0 z))) t))
double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * log(y)) + (y * (1.0 - z))) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((((-1.0d0) + x) * log(y)) + (y * (1.0d0 - z))) - t
end function
public static double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * Math.log(y)) + (y * (1.0 - z))) - t;
}
def code(x, y, z, t):
	return (((-1.0 + x) * math.log(y)) + (y * (1.0 - z))) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(-1.0 + x) * log(y)) + Float64(y * Float64(1.0 - z))) - t)
end
function tmp = code(x, y, z, t)
	tmp = (((-1.0 + x) * log(y)) + (y * (1.0 - z))) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(y * N[(1.0 - z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(-1 + x\right) \cdot \log y + y \cdot \left(1 - z\right)\right) - t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0 98.4%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
  4. Step-by-step derivation
    1. +-commutative98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
    2. sub-neg98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    3. metadata-eval98.4%

      \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    4. mul-1-neg98.4%

      \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
    5. unsub-neg98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
    6. +-commutative98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
    7. sub-neg98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
    8. metadata-eval98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
    9. +-commutative98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
  5. Simplified98.4%

    \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
  6. Final simplification98.4%

    \[\leadsto \left(\left(-1 + x\right) \cdot \log y + y \cdot \left(1 - z\right)\right) - t \]
  7. Add Preprocessing
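
Step 3 keeps only the first-order term of log(1 - y), so y * (1 - z) stands in for (z - 1) * log(1 - y) with a remainder of magnitude about y**2 * |z - 1| / 2. A quick look at the dropped part of the series, using math.log1p as a stand-in for an accurate log(1 - y):

import math

for y in (1e-12, 1e-6, 1e-2):
    remainder = math.log1p(-y) - (-y)    # what the first-order truncation drops, about -y**2/2
    print(y, remainder)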

Alternative 15: 98.9% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \left(\left(-1 + x\right) \cdot \log y - z \cdot y\right) - t \end{array} \]
(FPCore (x y z t) :precision binary64 (- (- (* (+ -1.0 x) (log y)) (* z y)) t))
double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * log(y)) - (z * y)) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = ((((-1.0d0) + x) * log(y)) - (z * y)) - t
end function
public static double code(double x, double y, double z, double t) {
	return (((-1.0 + x) * Math.log(y)) - (z * y)) - t;
}
def code(x, y, z, t):
	return (((-1.0 + x) * math.log(y)) - (z * y)) - t
function code(x, y, z, t)
	return Float64(Float64(Float64(Float64(-1.0 + x) * log(y)) - Float64(z * y)) - t)
end
function tmp = code(x, y, z, t)
	tmp = (((-1.0 + x) * log(y)) - (z * y)) - t;
end
code[x_, y_, z_, t_] := N[(N[(N[(N[(-1.0 + x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(z * y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(-1 + x\right) \cdot \log y - z \cdot y\right) - t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0 98.4%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
  4. Step-by-step derivation
    1. +-commutative98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
    2. sub-neg98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    3. metadata-eval98.4%

      \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    4. mul-1-neg98.4%

      \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
    5. unsub-neg98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
    6. +-commutative98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
    7. sub-neg98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
    8. metadata-eval98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
    9. +-commutative98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
  5. Simplified98.4%

    \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
  6. Taylor expanded in z around inf 98.3%

    \[\leadsto \left(\log y \cdot \left(-1 + x\right) - \color{blue}{y \cdot z}\right) - t \]
  7. Final simplification98.3%

    \[\leadsto \left(\left(-1 + x\right) \cdot \log y - z \cdot y\right) - t \]
  8. Add Preprocessing
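
Step 6 drops the -1 from the (z - 1) factor (a Taylor expansion in z around infinity). For large |z| the subtraction would not change the binary64 value in any case; a small check of that absorption:

z = 1.0e20
y = 0.3
print(z - 1.0 == z)             # True: 1.0 is far below half an ulp of 1e20 (ulp is about 1.6e4)
print(y * (z - 1.0) == y * z)   # True: both forms round to the same double here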

Alternative 16: 46.9% accurate, 30.7× speedup?

\[\begin{array}{l} \\ y \cdot \left(1 - z\right) - t \end{array} \]
(FPCore (x y z t) :precision binary64 (- (* y (- 1.0 z)) t))
double code(double x, double y, double z, double t) {
	return (y * (1.0 - z)) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (y * (1.0d0 - z)) - t
end function
public static double code(double x, double y, double z, double t) {
	return (y * (1.0 - z)) - t;
}
def code(x, y, z, t):
	return (y * (1.0 - z)) - t
function code(x, y, z, t)
	return Float64(Float64(y * Float64(1.0 - z)) - t)
end
function tmp = code(x, y, z, t)
	tmp = (y * (1.0 - z)) - t;
end
code[x_, y_, z_, t_] := N[(N[(y * N[(1.0 - z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
y \cdot \left(1 - z\right) - t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0 98.4%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
  4. Step-by-step derivation
    1. +-commutative98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
    2. sub-neg98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    3. metadata-eval98.4%

      \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    4. mul-1-neg98.4%

      \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
    5. unsub-neg98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
    6. +-commutative98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
    7. sub-neg98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
    8. metadata-eval98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
    9. +-commutative98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
  5. Simplified98.4%

    \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
  6. Taylor expanded in y around inf 73.1%

    \[\leadsto \color{blue}{y \cdot \left(\left(1 + -1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right) - z\right)} - t \]
  7. Step-by-step derivation
    1. associate--l+73.1%

      \[\leadsto y \cdot \color{blue}{\left(1 + \left(-1 \cdot \frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y} - z\right)\right)} - t \]
    2. mul-1-neg73.1%

      \[\leadsto y \cdot \left(1 + \left(\color{blue}{\left(-\frac{\log \left(\frac{1}{y}\right) \cdot \left(x - 1\right)}{y}\right)} - z\right)\right) - t \]
    3. log-rec73.1%

      \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\color{blue}{\left(-\log y\right)} \cdot \left(x - 1\right)}{y}\right) - z\right)\right) - t \]
    4. sub-neg73.1%

      \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \color{blue}{\left(x + \left(-1\right)\right)}}{y}\right) - z\right)\right) - t \]
    5. metadata-eval73.1%

      \[\leadsto y \cdot \left(1 + \left(\left(-\frac{\left(-\log y\right) \cdot \left(x + \color{blue}{-1}\right)}{y}\right) - z\right)\right) - t \]
    6. associate-/l*73.0%

      \[\leadsto y \cdot \left(1 + \left(\left(-\color{blue}{\left(-\log y\right) \cdot \frac{x + -1}{y}}\right) - z\right)\right) - t \]
    7. +-commutative73.0%

      \[\leadsto y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{\color{blue}{-1 + x}}{y}\right) - z\right)\right) - t \]
  8. Simplified73.0%

    \[\leadsto \color{blue}{y \cdot \left(1 + \left(\left(-\left(-\log y\right) \cdot \frac{-1 + x}{y}\right) - z\right)\right)} - t \]
  9. Taylor expanded in y around inf 52.9%

    \[\leadsto \color{blue}{y \cdot \left(1 - z\right)} - t \]
  10. Final simplification52.9%

    \[\leadsto y \cdot \left(1 - z\right) - t \]
  11. Add Preprocessing

Alternative 17: 46.7% accurate, 35.8× speedup?

\[\begin{array}{l} \\ y \cdot \left(-z\right) - t \end{array} \]
(FPCore (x y z t) :precision binary64 (- (* y (- z)) t))
double code(double x, double y, double z, double t) {
	return (y * -z) - t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = (y * -z) - t
end function
public static double code(double x, double y, double z, double t) {
	return (y * -z) - t;
}
def code(x, y, z, t):
	return (y * -z) - t
function code(x, y, z, t)
	return Float64(Float64(y * Float64(-z)) - t)
end
function tmp = code(x, y, z, t)
	tmp = (y * -z) - t;
end
code[x_, y_, z_, t_] := N[(N[(y * (-z)), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
y \cdot \left(-z\right) - t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0 98.4%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
  4. Step-by-step derivation
    1. +-commutative98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
    2. sub-neg98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    3. metadata-eval98.4%

      \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    4. mul-1-neg98.4%

      \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
    5. unsub-neg98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
    6. +-commutative98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
    7. sub-neg98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
    8. metadata-eval98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
    9. +-commutative98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
  5. Simplified98.4%

    \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
  6. Taylor expanded in z around inf 52.7%

    \[\leadsto \color{blue}{-1 \cdot \left(y \cdot z\right)} - t \]
  7. Step-by-step derivation
    1. associate-*r* 52.7%

      \[\leadsto \color{blue}{\left(-1 \cdot y\right) \cdot z} - t \]
    2. neg-mul-1 52.7%

      \[\leadsto \color{blue}{\left(-y\right)} \cdot z - t \]
  8. Simplified 52.7%

    \[\leadsto \color{blue}{\left(-y\right) \cdot z} - t \]
  9. Final simplification 52.7%

    \[\leadsto y \cdot \left(-z\right) - t \]
  10. Add Preprocessing
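
For context, the two Taylor steps in this derivation amount to standard leading-order approximations:

    \[ \log \left(1 - y\right) = -y - \frac{y^2}{2} - \cdots \approx -y \quad \left(\left|y\right| \ll 1\right), \qquad y \cdot \left(z - 1\right) \approx y \cdot z \quad \left(\left|z\right| \gg 1\right), \]

so the original program collapses to y * (-z) - t once log(1 - y) is replaced by -y and the constant offsets are dropped.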

Alternative 18: 35.8% accurate, 107.5× speedup?

\[\begin{array}{l} \\ -t \end{array} \]
(FPCore (x y z t) :precision binary64 (- t))
double code(double x, double y, double z, double t) {
	return -t;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = -t
end function
public static double code(double x, double y, double z, double t) {
	return -t;
}
def code(x, y, z, t):
	return -t
function code(x, y, z, t)
	return Float64(-t)
end
function tmp = code(x, y, z, t)
	tmp = -t;
end
code[x_, y_, z_, t_] := (-t)
\begin{array}{l}

\\
-t
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Step-by-step derivation
    1. fma-define 86.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x - 1, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right)} - t \]
    2. sub-neg 86.8%

      \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-1\right)}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    3. metadata-eval 86.8%

      \[\leadsto \mathsf{fma}\left(x + \color{blue}{-1}, \log y, \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
    4. sub-neg 86.8%

      \[\leadsto \mathsf{fma}\left(x + -1, \log y, \color{blue}{\left(z + \left(-1\right)\right)} \cdot \log \left(1 - y\right)\right) - t \]
    5. metadata-eval 86.8%

      \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + \color{blue}{-1}\right) \cdot \log \left(1 - y\right)\right) - t \]
    6. sub-neg 86.8%

      \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \log \color{blue}{\left(1 + \left(-y\right)\right)}\right) - t \]
    7. log1p-define 99.8%

      \[\leadsto \mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \color{blue}{\mathsf{log1p}\left(-y\right)}\right) - t \]
  3. Simplified 99.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x + -1, \log y, \left(z + -1\right) \cdot \mathsf{log1p}\left(-y\right)\right) - t} \]
  4. Add Preprocessing
  5. Taylor expanded in t around inf 39.2%

    \[\leadsto \color{blue}{-1 \cdot t} \]
  6. Step-by-step derivation
    1. mul-1-neg 39.2%

      \[\leadsto \color{blue}{-t} \]
  7. Simplified 39.2%

    \[\leadsto \color{blue}{-t} \]
  8. Final simplification 39.2%

    \[\leadsto -t \]
  9. Add Preprocessing
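
The jump from 86.8% to 99.8% at the log1p-define step happens because evaluating 1 - y in binary64 rounds away the low bits of a small y before the logarithm is taken, while log1p(-y) computes log(1 - y) without forming 1 - y explicitly. A minimal C sketch (illustrative only, using the standard <math.h> functions) of the difference for a tiny y:

#include <math.h>
#include <stdio.h>

int main(void) {
	double y = 1e-17;                            /* small enough that 1.0 - y rounds to exactly 1.0 */
	printf("log(1 - y) = %g\n", log(1.0 - y));   /* prints 0: the information in y is already gone */
	printf("log1p(-y)  = %g\n", log1p(-y));      /* prints -1e-17: the small value survives */
	return 0;
}

The later Taylor expansion in t then discards everything except -t, trading most of that accuracy back for speed.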

Alternative 19: 2.8% accurate, 215.0× speedup?

\[\begin{array}{l} \\ y \end{array} \]
(FPCore (x y z t) :precision binary64 y)
double code(double x, double y, double z, double t) {
	return y;
}
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = y
end function
public static double code(double x, double y, double z, double t) {
	return y;
}
def code(x, y, z, t):
	return y
function code(x, y, z, t)
	return y
end
function tmp = code(x, y, z, t)
	tmp = y;
end
code[x_, y_, z_, t_] := y
\begin{array}{l}

\\
y
\end{array}
Derivation
  1. Initial program 86.8%

    \[\left(\left(x - 1\right) \cdot \log y + \left(z - 1\right) \cdot \log \left(1 - y\right)\right) - t \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0 98.4%

    \[\leadsto \color{blue}{\left(-1 \cdot \left(y \cdot \left(z - 1\right)\right) + \log y \cdot \left(x - 1\right)\right)} - t \]
  4. Step-by-step derivation
    1. +-commutative 98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x - 1\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right)} - t \]
    2. sub-neg 98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    3. metadata-eval 98.4%

      \[\leadsto \left(\log y \cdot \left(x + \color{blue}{-1}\right) + -1 \cdot \left(y \cdot \left(z - 1\right)\right)\right) - t \]
    4. mul-1-neg 98.4%

      \[\leadsto \left(\log y \cdot \left(x + -1\right) + \color{blue}{\left(-y \cdot \left(z - 1\right)\right)}\right) - t \]
    5. unsub-neg 98.4%

      \[\leadsto \color{blue}{\left(\log y \cdot \left(x + -1\right) - y \cdot \left(z - 1\right)\right)} - t \]
    6. +-commutative 98.4%

      \[\leadsto \left(\log y \cdot \color{blue}{\left(-1 + x\right)} - y \cdot \left(z - 1\right)\right) - t \]
    7. sub-neg 98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(z + \left(-1\right)\right)}\right) - t \]
    8. metadata-eval 98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \left(z + \color{blue}{-1}\right)\right) - t \]
    9. +-commutative 98.4%

      \[\leadsto \left(\log y \cdot \left(-1 + x\right) - y \cdot \color{blue}{\left(-1 + z\right)}\right) - t \]
  5. Simplified 98.4%

    \[\leadsto \color{blue}{\left(\log y \cdot \left(-1 + x\right) - y \cdot \left(-1 + z\right)\right)} - t \]
  6. Taylor expanded in z around 0 84.7%

    \[\leadsto \color{blue}{\log y \cdot \left(x - 1\right) - \left(t + -1 \cdot y\right)} \]
  7. Step-by-step derivation
    1. sub-neg 84.7%

      \[\leadsto \log y \cdot \color{blue}{\left(x + \left(-1\right)\right)} - \left(t + -1 \cdot y\right) \]
    2. metadata-eval 84.7%

      \[\leadsto \log y \cdot \left(x + \color{blue}{-1}\right) - \left(t + -1 \cdot y\right) \]
    3. +-commutative 84.7%

      \[\leadsto \log y \cdot \color{blue}{\left(-1 + x\right)} - \left(t + -1 \cdot y\right) \]
    4. neg-mul-1 84.7%

      \[\leadsto \log y \cdot \left(-1 + x\right) - \left(t + \color{blue}{\left(-y\right)}\right) \]
    5. unsub-neg 84.7%

      \[\leadsto \log y \cdot \left(-1 + x\right) - \color{blue}{\left(t - y\right)} \]
  8. Simplified 84.7%

    \[\leadsto \color{blue}{\log y \cdot \left(-1 + x\right) - \left(t - y\right)} \]
  9. Taylor expanded in y around inf 2.8%

    \[\leadsto \color{blue}{y} \]
  10. Final simplification 2.8%

    \[\leadsto y \]
  11. Add Preprocessing

Reproduce

herbie shell --seed 2024082 
(FPCore (x y z t)
  :name "Statistics.Distribution.Beta:$cdensity from math-functions-0.1.5.2"
  :precision binary64
  (- (+ (* (- x 1.0) (log y)) (* (- z 1.0) (log (- 1.0 y)))) t))