Numeric.SpecFunctions:logGammaL from math-functions-0.1.5.2

Percentage Accurate: 99.6% → 99.6%
Time: 19.4s
Alternatives: 14
Speedup: 1.0×

Specification

\[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
double code(double x, double y, double z, double t, double a) {
	return ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = ((log((x + y)) + log(z)) - t) + ((a - 0.5d0) * log(t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return ((Math.log((x + y)) + Math.log(z)) - t) + ((a - 0.5) * Math.log(t));
}
def code(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))
function code(x, y, z, t, a)
	return Float64(Float64(Float64(log(Float64(x + y)) + log(z)) - t) + Float64(Float64(a - 0.5) * log(t)))
end
function tmp = code(x, y, z, t, a)
	tmp = ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t
\end{array}
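
For reference, a minimal driver for the C export above (a sketch only; the inputs are arbitrary illustrative values, not Herbie's sampled points; compile with -lm):

#include <math.h>
#include <stdio.h>

/* The C export from the specification, reproduced so the example is self-contained. */
double code(double x, double y, double z, double t, double a) {
	return ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
}

int main(void) {
	/* Arbitrary illustrative inputs. */
	printf("%.17g\n", code(1.5, 2.5, 3.0, 4.0, 0.75));
	return 0;
}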

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable (named in the plot title); the vertical axis shows accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion; the two can be toggled with the buttons below the plot. The line shows the average, while the dots show individual samples.

Accuracy vs Speed

Herbie found 14 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.6% accurate, 1.0× speedup

\[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
double code(double x, double y, double z, double t, double a) {
	return ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = ((log((x + y)) + log(z)) - t) + ((a - 0.5d0) * log(t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return ((Math.log((x + y)) + Math.log(z)) - t) + ((a - 0.5) * Math.log(t));
}
def code(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))
function code(x, y, z, t, a)
	return Float64(Float64(Float64(log(Float64(x + y)) + log(z)) - t) + Float64(Float64(a - 0.5) * log(t)))
end
function tmp = code(x, y, z, t, a)
	tmp = ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t
\end{array}

Alternative 1: 99.6% accurate, 0.8× speedup

\[\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right) \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (log z) t) (fma (+ a -0.5) (log t) (log (+ x y)))))
double code(double x, double y, double z, double t, double a) {
	return (log(z) - t) + fma((a + -0.5), log(t), log((x + y)));
}
function code(x, y, z, t, a)
	return Float64(Float64(log(z) - t) + fma(Float64(a + -0.5), log(t), log(Float64(x + y))))
end
code[x_, y_, z_, t_, a_] := N[(N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision] + N[(N[(a + -0.5), $MachinePrecision] * N[Log[t], $MachinePrecision] + N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+ 99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. +-commutative 99.6%

      \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    3. associate-+l+ 99.6%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    4. +-commutative 99.6%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
    5. fma-def 99.6%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
    6. sub-neg 99.6%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
    7. metadata-eval 99.6%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
  3. Simplified 99.6%

    \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
  4. Final simplification 99.6%

    \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right) \]
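
The fma-def step is the heart of this alternative: fma(a, b, c) computes a·b + c with a single rounding, so low-order bits of the product survive into the addition. A minimal C sketch of the effect (the inputs are contrived so that the fused and unfused results visibly differ; compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void) {
	double a = 1.0 + ldexp(1.0, -30);  /* 1 + 2^-30, so a*a = 1 + 2^-29 + 2^-60 exactly */
	double naive = a * a - 1.0;        /* the product rounds away the 2^-60 term */
	double fused = fma(a, a, -1.0);    /* a*a - 1 with one rounding keeps it */
	printf("naive: %.17e\nfused: %.17e\n", naive, fused);
	return 0;
}

On IEEE-754 binary64 the two printed values differ in their final digits, which is exactly the information the fused multiply-add preserves.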

Alternative 2: 99.6% accurate, 0.8× speedup

\[\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right) \]
(FPCore (x y z t a)
 :precision binary64
 (+ (log (+ x y)) (fma (+ a -0.5) (log t) (- (log z) t))))
double code(double x, double y, double z, double t, double a) {
	return log((x + y)) + fma((a + -0.5), log(t), (log(z) - t));
}
function code(x, y, z, t, a)
	return Float64(log(Float64(x + y)) + fma(Float64(a + -0.5), log(t), Float64(log(z) - t)))
end
code[x_, y_, z_, t_, a_] := N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(a + -0.5), $MachinePrecision] * N[Log[t], $MachinePrecision] + N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+ 99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. associate-+l+ 99.6%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    3. +-commutative 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
    4. fma-def 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
    5. remove-double-neg 99.6%

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
    6. remove-double-neg 99.6%

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
    7. sub-neg 99.6%

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
    8. metadata-eval 99.6%

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
  3. Simplified 99.6%

    \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
  4. Final simplification 99.6%

    \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right) \]

Alternative 3: 84.9% accurate, 1.0× speedup

\[\begin{array}{l} \mathbf{if}\;\log z \leq 165:\\ \;\;\;\;\left(\left(a + -0.5\right) \cdot \log t + \log \left(z \cdot \left(x + y\right)\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;a \cdot \log t - t\\ \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= (log z) 165.0)
   (- (+ (* (+ a -0.5) (log t)) (log (* z (+ x y)))) t)
   (- (* a (log t)) t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (log(z) <= 165.0) {
		tmp = (((a + -0.5) * log(t)) + log((z * (x + y)))) - t;
	} else {
		tmp = (a * log(t)) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (log(z) <= 165.0d0) then
        tmp = (((a + (-0.5d0)) * log(t)) + log((z * (x + y)))) - t
    else
        tmp = (a * log(t)) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (Math.log(z) <= 165.0) {
		tmp = (((a + -0.5) * Math.log(t)) + Math.log((z * (x + y)))) - t;
	} else {
		tmp = (a * Math.log(t)) - t;
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if math.log(z) <= 165.0:
		tmp = (((a + -0.5) * math.log(t)) + math.log((z * (x + y)))) - t
	else:
		tmp = (a * math.log(t)) - t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if (log(z) <= 165.0)
		tmp = Float64(Float64(Float64(Float64(a + -0.5) * log(t)) + log(Float64(z * Float64(x + y)))) - t);
	else
		tmp = Float64(Float64(a * log(t)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (log(z) <= 165.0)
		tmp = (((a + -0.5) * log(t)) + log((z * (x + y)))) - t;
	else
		tmp = (a * log(t)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[LessEqual[N[Log[z], $MachinePrecision], 165.0], N[(N[(N[(N[(a + -0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision] + N[Log[N[(z * N[(x + y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\log z \leq 165:\\
\;\;\;\;\left(\left(a + -0.5\right) \cdot \log t + \log \left(z \cdot \left(x + y\right)\right)\right) - t\\
\mathbf{else}:\\
\;\;\;\;a \cdot \log t - t
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (log.f64 z) < 165

    1. Initial program 99.5%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.5%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative 99.5%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+ 99.5%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative 99.5%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def 99.5%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg 99.5%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval 99.5%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified 99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Step-by-step derivation
      1. +-commutative 99.5%

        \[\leadsto \color{blue}{\mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right) + \left(\log z - t\right)} \]
      2. fma-udef 99.5%

        \[\leadsto \color{blue}{\left(\left(a + -0.5\right) \cdot \log t + \log \left(x + y\right)\right)} + \left(\log z - t\right) \]
      3. metadata-eval 99.5%

        \[\leadsto \left(\left(a + \color{blue}{\left(-0.5\right)}\right) \cdot \log t + \log \left(x + y\right)\right) + \left(\log z - t\right) \]
      4. sub-neg 99.5%

        \[\leadsto \left(\color{blue}{\left(a - 0.5\right)} \cdot \log t + \log \left(x + y\right)\right) + \left(\log z - t\right) \]
      5. associate-+r+ 99.5%

        \[\leadsto \color{blue}{\left(a - 0.5\right) \cdot \log t + \left(\log \left(x + y\right) + \left(\log z - t\right)\right)} \]
      6. associate--l+ 99.5%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right)} \]
      7. associate-+r- 99.5%

        \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log \left(x + y\right) + \log z\right)\right) - t} \]
      8. sub-neg 99.5%

        \[\leadsto \left(\color{blue}{\left(a + \left(-0.5\right)\right)} \cdot \log t + \left(\log \left(x + y\right) + \log z\right)\right) - t \]
      9. metadata-eval 99.5%

        \[\leadsto \left(\left(a + \color{blue}{-0.5}\right) \cdot \log t + \left(\log \left(x + y\right) + \log z\right)\right) - t \]
      10. +-commutative 99.5%

        \[\leadsto \left(\left(a + -0.5\right) \cdot \log t + \color{blue}{\left(\log z + \log \left(x + y\right)\right)}\right) - t \]
      11. sum-log 95.2%

        \[\leadsto \left(\left(a + -0.5\right) \cdot \log t + \color{blue}{\log \left(z \cdot \left(x + y\right)\right)}\right) - t \]
    5. Applied egg-rr 95.2%

      \[\leadsto \color{blue}{\left(\left(a + -0.5\right) \cdot \log t + \log \left(z \cdot \left(x + y\right)\right)\right) - t} \]

    if 165 < (log.f64 z)

    1. Initial program 99.7%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.7%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.7%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified 99.7%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around inf 99.7%

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
    5. Taylor expanded in a around inf 74.5%

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 87.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\log z \leq 165:\\ \;\;\;\;\left(\left(a + -0.5\right) \cdot \log t + \log \left(z \cdot \left(x + y\right)\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;a \cdot \log t - t\\ \end{array} \]
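
One plausible reading of the $\log z \leq 165$ guard (an illustration only; Herbie infers the threshold empirically from its sampled points): the sum-log rewrite replaces $\log z + \log(x+y)$ with $\log(z \cdot (x+y))$, and the guard keeps that fused product away from binary64 overflow, since

\[ \log z \leq 165 \;\Rightarrow\; z \cdot \left(x + y\right) \leq e^{165} \left(x + y\right) < 2^{1024} \approx e^{709.78} \quad \text{whenever} \quad \log \left(x + y\right) < 544, \]

while inputs with very large $z$ fall back to the asymptotic form $a \cdot \log t - t$.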

Alternative 4: 98.5% accurate, 1.0× speedup

\[\begin{array}{l} \mathbf{if}\;t \leq 380:\\ \;\;\;\;\log \left(x + y\right) + \left(\log z + \log t \cdot \left(a - 0.5\right)\right)\\ \mathbf{else}:\\ \;\;\;\;a \cdot \log t - t\\ \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 380.0)
   (+ (log (+ x y)) (+ (log z) (* (log t) (- a 0.5))))
   (- (* a (log t)) t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 380.0) {
		tmp = log((x + y)) + (log(z) + (log(t) * (a - 0.5)));
	} else {
		tmp = (a * log(t)) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (t <= 380.0d0) then
        tmp = log((x + y)) + (log(z) + (log(t) * (a - 0.5d0)))
    else
        tmp = (a * log(t)) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 380.0) {
		tmp = Math.log((x + y)) + (Math.log(z) + (Math.log(t) * (a - 0.5)));
	} else {
		tmp = (a * Math.log(t)) - t;
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if t <= 380.0:
		tmp = math.log((x + y)) + (math.log(z) + (math.log(t) * (a - 0.5)))
	else:
		tmp = (a * math.log(t)) - t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 380.0)
		tmp = Float64(log(Float64(x + y)) + Float64(log(z) + Float64(log(t) * Float64(a - 0.5))));
	else
		tmp = Float64(Float64(a * log(t)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (t <= 380.0)
		tmp = log((x + y)) + (log(z) + (log(t) * (a - 0.5)));
	else
		tmp = (a * log(t)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 380.0], N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[Log[z], $MachinePrecision] + N[(N[Log[t], $MachinePrecision] * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;t \leq 380:\\
\;\;\;\;\log \left(x + y\right) + \left(\log z + \log t \cdot \left(a - 0.5\right)\right)\\
\mathbf{else}:\\
\;\;\;\;a \cdot \log t - t
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < 380

    1. Initial program 99.2%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.2%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.2%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.2%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.2%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified 99.2%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in t around 0 97.9%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log z\right)} \]

    if 380 < t

    1. Initial program 99.9%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.9%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.9%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.9%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.9%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.9%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.9%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.9%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.9%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified 99.9%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around inf 99.9%

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
    5. Taylor expanded in a around inf 99.7%

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 98.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 380:\\ \;\;\;\;\log \left(x + y\right) + \left(\log z + \log t \cdot \left(a - 0.5\right)\right)\\ \mathbf{else}:\\ \;\;\;\;a \cdot \log t - t\\ \end{array} \]
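
A rough way to read the split on t (an illustration; the 380 cutoff itself is empirical, chosen from Herbie's samples): the first regime comes from the Taylor expansion in t around 0, which simply drops the $-t$ term,

\[ \left(\log \left(x + y\right) + \log z + \left(a - 0.5\right) \cdot \log t\right) - t \;\approx\; \log \left(x + y\right) + \log z + \left(a - 0.5\right) \cdot \log t \quad \left(t \to 0\right), \]

an $O(t)$ perturbation that is negligible for small t. For large t the roles reverse: $-t$ and $a \cdot \log t$ dominate, so the remaining log terms are dropped instead.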

Alternative 5: 99.6% accurate, 1.0× speedup

\[\left(\left(\log z + \log \left(x + y\right)\right) - t\right) + \log t \cdot \left(a - 0.5\right) \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (+ (log z) (log (+ x y))) t) (* (log t) (- a 0.5))))
double code(double x, double y, double z, double t, double a) {
	return ((log(z) + log((x + y))) - t) + (log(t) * (a - 0.5));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = ((log(z) + log((x + y))) - t) + (log(t) * (a - 0.5d0))
end function
public static double code(double x, double y, double z, double t, double a) {
	return ((Math.log(z) + Math.log((x + y))) - t) + (Math.log(t) * (a - 0.5));
}
def code(x, y, z, t, a):
	return ((math.log(z) + math.log((x + y))) - t) + (math.log(t) * (a - 0.5))
function code(x, y, z, t, a)
	return Float64(Float64(Float64(log(z) + log(Float64(x + y))) - t) + Float64(log(t) * Float64(a - 0.5)))
end
function tmp = code(x, y, z, t, a)
	tmp = ((log(z) + log((x + y))) - t) + (log(t) * (a - 0.5));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[(N[Log[z], $MachinePrecision] + N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision] + N[(N[Log[t], $MachinePrecision] * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(\left(\log z + \log \left(x + y\right)\right) - t\right) + \log t \cdot \left(a - 0.5\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Final simplification 99.6%

    \[\leadsto \left(\left(\log z + \log \left(x + y\right)\right) - t\right) + \log t \cdot \left(a - 0.5\right) \]

Alternative 6: 80.9% accurate, 1.0× speedup

\[\begin{array}{l} \mathbf{if}\;t \leq 350:\\ \;\;\;\;\log t \cdot \left(a - 0.5\right) + \left(\log z + \log y\right)\\ \mathbf{else}:\\ \;\;\;\;a \cdot \log t - t\\ \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 350.0)
   (+ (* (log t) (- a 0.5)) (+ (log z) (log y)))
   (- (* a (log t)) t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 350.0) {
		tmp = (log(t) * (a - 0.5)) + (log(z) + log(y));
	} else {
		tmp = (a * log(t)) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (t <= 350.0d0) then
        tmp = (log(t) * (a - 0.5d0)) + (log(z) + log(y))
    else
        tmp = (a * log(t)) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 350.0) {
		tmp = (Math.log(t) * (a - 0.5)) + (Math.log(z) + Math.log(y));
	} else {
		tmp = (a * Math.log(t)) - t;
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if t <= 350.0:
		tmp = (math.log(t) * (a - 0.5)) + (math.log(z) + math.log(y))
	else:
		tmp = (a * math.log(t)) - t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 350.0)
		tmp = Float64(Float64(log(t) * Float64(a - 0.5)) + Float64(log(z) + log(y)));
	else
		tmp = Float64(Float64(a * log(t)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (t <= 350.0)
		tmp = (log(t) * (a - 0.5)) + (log(z) + log(y));
	else
		tmp = (a * log(t)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 350.0], N[(N[(N[Log[t], $MachinePrecision] * N[(a - 0.5), $MachinePrecision]), $MachinePrecision] + N[(N[Log[z], $MachinePrecision] + N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;t \leq 350:\\
\;\;\;\;\log t \cdot \left(a - 0.5\right) + \left(\log z + \log y\right)\\
\mathbf{else}:\\
\;\;\;\;a \cdot \log t - t
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < 350

    1. Initial program 99.2%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.2%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.2%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.2%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.2%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified 99.2%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in t around 0 97.9%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log z\right)} \]
    5. Taylor expanded in x around 0 64.1%

      \[\leadsto \color{blue}{\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)} \]

    if 350 < t

    1. Initial program 99.9%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.9%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.9%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.9%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.9%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.9%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.9%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.9%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.9%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified 99.9%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around inf 99.9%

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
    5. Taylor expanded in a around inf 99.7%

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 82.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 350:\\ \;\;\;\;\log t \cdot \left(a - 0.5\right) + \left(\log z + \log y\right)\\ \mathbf{else}:\\ \;\;\;\;a \cdot \log t - t\\ \end{array} \]

Alternative 7: 69.2% accurate, 1.0× speedup

\[\left(\log z - t\right) + \left(\log t \cdot \left(a - 0.5\right) + \log y\right) \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (log z) t) (+ (* (log t) (- a 0.5)) (log y))))
double code(double x, double y, double z, double t, double a) {
	return (log(z) - t) + ((log(t) * (a - 0.5)) + log(y));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = (log(z) - t) + ((log(t) * (a - 0.5d0)) + log(y))
end function
public static double code(double x, double y, double z, double t, double a) {
	return (Math.log(z) - t) + ((Math.log(t) * (a - 0.5)) + Math.log(y));
}
def code(x, y, z, t, a):
	return (math.log(z) - t) + ((math.log(t) * (a - 0.5)) + math.log(y))
function code(x, y, z, t, a)
	return Float64(Float64(log(z) - t) + Float64(Float64(log(t) * Float64(a - 0.5)) + log(y)))
end
function tmp = code(x, y, z, t, a)
	tmp = (log(z) - t) + ((log(t) * (a - 0.5)) + log(y));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision] + N[(N[(N[Log[t], $MachinePrecision] * N[(a - 0.5), $MachinePrecision]), $MachinePrecision] + N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(\log z - t\right) + \left(\log t \cdot \left(a - 0.5\right) + \log y\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+ 99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. +-commutative 99.6%

      \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    3. associate-+l+ 99.6%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    4. +-commutative 99.6%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
    5. fma-def 99.6%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
    6. sub-neg 99.6%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
    7. metadata-eval 99.6%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
  3. Simplified 99.6%

    \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
  4. Taylor expanded in x around 0 66.3%

    \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log y\right)} \]
  5. Final simplification 66.3%

    \[\leadsto \left(\log z - t\right) + \left(\log t \cdot \left(a - 0.5\right) + \log y\right) \]
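
The step "Taylor expanded in x around 0" is where this alternative loses generality: it replaces $\log(x+y)$ with $\log y$, which is justified only when $|x| \ll y$, since

\[ \log \left(x + y\right) = \log y + \log \left(1 + \frac{x}{y}\right) \approx \log y \quad \left(\left|x\right| \ll y\right). \]

That matches the accuracy drop to 66.3% once the rewrite is applied: sampled inputs where x is comparable to y are no longer handled.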

Alternative 8: 86.9% accurate, 1.4× speedup

\[\begin{array}{l} \mathbf{if}\;a - 0.5 \leq -200 \lor \neg \left(a - 0.5 \leq -0.4\right):\\ \;\;\;\;a \cdot \log t - t\\ \mathbf{else}:\\ \;\;\;\;\left(\log \left(z \cdot \left(x + y\right)\right) + -0.5 \cdot \log t\right) - t\\ \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (or (<= (- a 0.5) -200.0) (not (<= (- a 0.5) -0.4)))
   (- (* a (log t)) t)
   (- (+ (log (* z (+ x y))) (* -0.5 (log t))) t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (((a - 0.5) <= -200.0) || !((a - 0.5) <= -0.4)) {
		tmp = (a * log(t)) - t;
	} else {
		tmp = (log((z * (x + y))) + (-0.5 * log(t))) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (((a - 0.5d0) <= (-200.0d0)) .or. (.not. ((a - 0.5d0) <= (-0.4d0)))) then
        tmp = (a * log(t)) - t
    else
        tmp = (log((z * (x + y))) + ((-0.5d0) * log(t))) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (((a - 0.5) <= -200.0) || !((a - 0.5) <= -0.4)) {
		tmp = (a * Math.log(t)) - t;
	} else {
		tmp = (Math.log((z * (x + y))) + (-0.5 * Math.log(t))) - t;
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if ((a - 0.5) <= -200.0) or not ((a - 0.5) <= -0.4):
		tmp = (a * math.log(t)) - t
	else:
		tmp = (math.log((z * (x + y))) + (-0.5 * math.log(t))) - t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if ((Float64(a - 0.5) <= -200.0) || !(Float64(a - 0.5) <= -0.4))
		tmp = Float64(Float64(a * log(t)) - t);
	else
		tmp = Float64(Float64(log(Float64(z * Float64(x + y))) + Float64(-0.5 * log(t))) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (((a - 0.5) <= -200.0) || ~(((a - 0.5) <= -0.4)))
		tmp = (a * log(t)) - t;
	else
		tmp = (log((z * (x + y))) + (-0.5 * log(t))) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[Or[LessEqual[N[(a - 0.5), $MachinePrecision], -200.0], N[Not[LessEqual[N[(a - 0.5), $MachinePrecision], -0.4]], $MachinePrecision]], N[(N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(N[Log[N[(z * N[(x + y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[(-0.5 * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;a - 0.5 \leq -200 \lor \neg \left(a - 0.5 \leq -0.4\right):\\
\;\;\;\;a \cdot \log t - t\\
\mathbf{else}:\\
\;\;\;\;\left(\log \left(z \cdot \left(x + y\right)\right) + -0.5 \cdot \log t\right) - t
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 a 1/2) < -200 or -0.40000000000000002 < (-.f64 a 1/2)

    1. Initial program 99.7%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.7%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.7%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified 99.7%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around inf 99.7%

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
    5. Taylor expanded in a around inf 98.3%

      \[\leadsto \color{blue}{a \cdot \log t} - t \]

    if -200 < (-.f64 a 1/2) < -0.40000000000000002

    1. Initial program 99.4%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.4%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.4%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.4%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.4%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.4%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.4%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.4%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.4%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified 99.4%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around 0 97.9%

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + -0.5 \cdot \log t\right)\right) - t} \]
    5. Step-by-step derivation
      1. associate-+r+ 97.9%

        \[\leadsto \color{blue}{\left(\left(\log \left(y + x\right) + \log z\right) + -0.5 \cdot \log t\right)} - t \]
      2. +-commutative 97.9%

        \[\leadsto \left(\color{blue}{\left(\log z + \log \left(y + x\right)\right)} + -0.5 \cdot \log t\right) - t \]
      3. associate-+r+ 97.9%

        \[\leadsto \color{blue}{\left(\log z + \left(\log \left(y + x\right) + -0.5 \cdot \log t\right)\right)} - t \]
      4. +-commutative 97.9%

        \[\leadsto \left(\log z + \left(\log \color{blue}{\left(x + y\right)} + -0.5 \cdot \log t\right)\right) - t \]
      5. associate-+r+ 97.9%

        \[\leadsto \color{blue}{\left(\left(\log z + \log \left(x + y\right)\right) + -0.5 \cdot \log t\right)} - t \]
      6. log-prod 76.1%

        \[\leadsto \left(\color{blue}{\log \left(z \cdot \left(x + y\right)\right)} + -0.5 \cdot \log t\right) - t \]
      7. +-commutative 76.1%

        \[\leadsto \left(\log \left(z \cdot \color{blue}{\left(y + x\right)}\right) + -0.5 \cdot \log t\right) - t \]
    6. Simplified 76.1%

      \[\leadsto \color{blue}{\left(\log \left(z \cdot \left(y + x\right)\right) + -0.5 \cdot \log t\right) - t} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 88.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;a - 0.5 \leq -200 \lor \neg \left(a - 0.5 \leq -0.4\right):\\ \;\;\;\;a \cdot \log t - t\\ \mathbf{else}:\\ \;\;\;\;\left(\log \left(z \cdot \left(x + y\right)\right) + -0.5 \cdot \log t\right) - t\\ \end{array} \]
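
The middle regime here comes from the Taylor expansion in a around 0, which freezes the coefficient of $\log t$ at $-0.5$:

\[ \left(a - 0.5\right) \cdot \log t = -0.5 \cdot \log t + a \cdot \log t \approx -0.5 \cdot \log t \quad \left(\left|a\right| \text{ small}\right), \]

which is one way to read the guard $-200 < a - 0.5 \leq -0.4$ (roughly $a \leq 0.1$ on the sampled points); outside that band the large-$\left|a\right|$ form $a \cdot \log t - t$ is used instead.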

Alternative 9: 73.4% accurate, 1.4× speedup

\[\begin{array}{l} \mathbf{if}\;a - 0.5 \leq -200 \lor \neg \left(a - 0.5 \leq -0.4\right):\\ \;\;\;\;a \cdot \log t - t\\ \mathbf{else}:\\ \;\;\;\;\left(-0.5 \cdot \log t + \log \left(z \cdot y\right)\right) - t\\ \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (or (<= (- a 0.5) -200.0) (not (<= (- a 0.5) -0.4)))
   (- (* a (log t)) t)
   (- (+ (* -0.5 (log t)) (log (* z y))) t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (((a - 0.5) <= -200.0) || !((a - 0.5) <= -0.4)) {
		tmp = (a * log(t)) - t;
	} else {
		tmp = ((-0.5 * log(t)) + log((z * y))) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (((a - 0.5d0) <= (-200.0d0)) .or. (.not. ((a - 0.5d0) <= (-0.4d0)))) then
        tmp = (a * log(t)) - t
    else
        tmp = (((-0.5d0) * log(t)) + log((z * y))) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (((a - 0.5) <= -200.0) || !((a - 0.5) <= -0.4)) {
		tmp = (a * Math.log(t)) - t;
	} else {
		tmp = ((-0.5 * Math.log(t)) + Math.log((z * y))) - t;
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if ((a - 0.5) <= -200.0) or not ((a - 0.5) <= -0.4):
		tmp = (a * math.log(t)) - t
	else:
		tmp = ((-0.5 * math.log(t)) + math.log((z * y))) - t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if ((Float64(a - 0.5) <= -200.0) || !(Float64(a - 0.5) <= -0.4))
		tmp = Float64(Float64(a * log(t)) - t);
	else
		tmp = Float64(Float64(Float64(-0.5 * log(t)) + log(Float64(z * y))) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (((a - 0.5) <= -200.0) || ~(((a - 0.5) <= -0.4)))
		tmp = (a * log(t)) - t;
	else
		tmp = ((-0.5 * log(t)) + log((z * y))) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[Or[LessEqual[N[(a - 0.5), $MachinePrecision], -200.0], N[Not[LessEqual[N[(a - 0.5), $MachinePrecision], -0.4]], $MachinePrecision]], N[(N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(N[(-0.5 * N[Log[t], $MachinePrecision]), $MachinePrecision] + N[Log[N[(z * y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;a - 0.5 \leq -200 \lor \neg \left(a - 0.5 \leq -0.4\right):\\
\;\;\;\;a \cdot \log t - t\\
\mathbf{else}:\\
\;\;\;\;\left(-0.5 \cdot \log t + \log \left(z \cdot y\right)\right) - t
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 a 1/2) < -200 or -0.40000000000000002 < (-.f64 a 1/2)

    1. Initial program 99.7%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.7%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.7%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.7%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified 99.7%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around inf 99.7%

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
    5. Taylor expanded in a around inf 98.3%

      \[\leadsto \color{blue}{a \cdot \log t} - t \]

    if -200 < (-.f64 a 1/2) < -0.40000000000000002

    1. Initial program 99.4%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.4%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.4%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.4%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.4%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.4%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.4%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.4%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.4%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified 99.4%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around inf 99.4%

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
    5. Taylor expanded in a around 0 97.9%

      \[\leadsto \color{blue}{\left(\log z + \left(\log \left(y + x\right) + -0.5 \cdot \log t\right)\right)} - t \]
    6. Step-by-step derivation
      1. associate-+r+ 97.9%

        \[\leadsto \color{blue}{\left(\left(\log z + \log \left(y + x\right)\right) + -0.5 \cdot \log t\right)} - t \]
      2. metadata-eval 97.9%

        \[\leadsto \left(\left(\log z + \log \left(y + x\right)\right) + \color{blue}{\left(0.5 \cdot -1\right)} \cdot \log t\right) - t \]
      3. associate-*r* 97.9%

        \[\leadsto \left(\left(\log z + \log \left(y + x\right)\right) + \color{blue}{0.5 \cdot \left(-1 \cdot \log t\right)}\right) - t \]
      4. mul-1-neg 97.9%

        \[\leadsto \left(\left(\log z + \log \left(y + x\right)\right) + 0.5 \cdot \color{blue}{\left(-\log t\right)}\right) - t \]
      5. log-rec 97.9%

        \[\leadsto \left(\left(\log z + \log \left(y + x\right)\right) + 0.5 \cdot \color{blue}{\log \left(\frac{1}{t}\right)}\right) - t \]
      6. +-commutative 97.9%

        \[\leadsto \color{blue}{\left(0.5 \cdot \log \left(\frac{1}{t}\right) + \left(\log z + \log \left(y + x\right)\right)\right)} - t \]
      7. +-commutative 97.9%

        \[\leadsto \left(0.5 \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\log \left(y + x\right) + \log z\right)}\right) - t \]
      8. log-rec 97.9%

        \[\leadsto \left(0.5 \cdot \color{blue}{\left(-\log t\right)} + \left(\log \left(y + x\right) + \log z\right)\right) - t \]
      9. mul-1-neg 97.9%

        \[\leadsto \left(0.5 \cdot \color{blue}{\left(-1 \cdot \log t\right)} + \left(\log \left(y + x\right) + \log z\right)\right) - t \]
      10. associate-*r* 97.9%

        \[\leadsto \left(\color{blue}{\left(0.5 \cdot -1\right) \cdot \log t} + \left(\log \left(y + x\right) + \log z\right)\right) - t \]
      11. metadata-eval 97.9%

        \[\leadsto \left(\color{blue}{-0.5} \cdot \log t + \left(\log \left(y + x\right) + \log z\right)\right) - t \]
      12. *-commutative 97.9%

        \[\leadsto \left(\color{blue}{\log t \cdot -0.5} + \left(\log \left(y + x\right) + \log z\right)\right) - t \]
      13. log-prod 76.1%

        \[\leadsto \left(\log t \cdot -0.5 + \color{blue}{\log \left(\left(y + x\right) \cdot z\right)}\right) - t \]
      14. *-commutative 76.1%

        \[\leadsto \left(\log t \cdot -0.5 + \log \color{blue}{\left(z \cdot \left(y + x\right)\right)}\right) - t \]
      15. +-commutative 76.1%

        \[\leadsto \left(\log t \cdot -0.5 + \log \left(z \cdot \color{blue}{\left(x + y\right)}\right)\right) - t \]
    7. Simplified 76.1%

      \[\leadsto \color{blue}{\left(\log t \cdot -0.5 + \log \left(z \cdot \left(x + y\right)\right)\right)} - t \]
    8. Taylor expanded in x around 0 47.1%

      \[\leadsto \left(\log t \cdot -0.5 + \color{blue}{\log \left(y \cdot z\right)}\right) - t \]
    9. Step-by-step derivation
      1. *-commutative 47.1%

        \[\leadsto \left(\log t \cdot -0.5 + \log \color{blue}{\left(z \cdot y\right)}\right) - t \]
    10. Simplified 47.1%

      \[\leadsto \left(\log t \cdot -0.5 + \color{blue}{\log \left(z \cdot y\right)}\right) - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 74.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;a - 0.5 \leq -200 \lor \neg \left(a - 0.5 \leq -0.4\right):\\ \;\;\;\;a \cdot \log t - t\\ \mathbf{else}:\\ \;\;\;\;\left(-0.5 \cdot \log t + \log \left(z \cdot y\right)\right) - t\\ \end{array} \]

Alternative 10: 73.4% accurate, 1.5× speedup

\[\begin{array}{l} \mathbf{if}\;t \leq 1.7 \cdot 10^{-24}:\\ \;\;\;\;\left(a + -0.5\right) \cdot \log t + \log \left(z \cdot y\right)\\ \mathbf{else}:\\ \;\;\;\;a \cdot \log t - t\\ \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 1.7e-24)
   (+ (* (+ a -0.5) (log t)) (log (* z y)))
   (- (* a (log t)) t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 1.7e-24) {
		tmp = ((a + -0.5) * log(t)) + log((z * y));
	} else {
		tmp = (a * log(t)) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (t <= 1.7d-24) then
        tmp = ((a + (-0.5d0)) * log(t)) + log((z * y))
    else
        tmp = (a * log(t)) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 1.7e-24) {
		tmp = ((a + -0.5) * Math.log(t)) + Math.log((z * y));
	} else {
		tmp = (a * Math.log(t)) - t;
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if t <= 1.7e-24:
		tmp = ((a + -0.5) * math.log(t)) + math.log((z * y))
	else:
		tmp = (a * math.log(t)) - t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 1.7e-24)
		tmp = Float64(Float64(Float64(a + -0.5) * log(t)) + log(Float64(z * y)));
	else
		tmp = Float64(Float64(a * log(t)) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (t <= 1.7e-24)
		tmp = ((a + -0.5) * log(t)) + log((z * y));
	else
		tmp = (a * log(t)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 1.7e-24], N[(N[(N[(a + -0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision] + N[Log[N[(z * y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;t \leq 1.7 \cdot 10^{-24}:\\
\;\;\;\;\left(a + -0.5\right) \cdot \log t + \log \left(z \cdot y\right)\\
\mathbf{else}:\\
\;\;\;\;a \cdot \log t - t
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < 1.69999999999999996e-24

    1. Initial program 99.2%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.2%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.2%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.2%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def 99.2%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval 99.2%

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified99.2%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in t around 0 99.2%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log z\right)} \]
    5. Taylor expanded in x around 0 66.2%

      \[\leadsto \color{blue}{\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)} \]
    6. Taylor expanded in t around inf 66.2%

      \[\leadsto \color{blue}{-1 \cdot \left(\log \left(\frac{1}{t}\right) \cdot \left(a - 0.5\right)\right) + \left(\log z + \log y\right)} \]
    7. Step-by-step derivation
      1. log-prod (49.0%)

        \[\leadsto -1 \cdot \left(\log \left(\frac{1}{t}\right) \cdot \left(a - 0.5\right)\right) + \color{blue}{\log \left(z \cdot y\right)} \]
      2. +-commutative (49.0%)

        \[\leadsto \color{blue}{\log \left(z \cdot y\right) + -1 \cdot \left(\log \left(\frac{1}{t}\right) \cdot \left(a - 0.5\right)\right)} \]
      3. mul-1-neg (49.0%)

        \[\leadsto \log \left(z \cdot y\right) + \color{blue}{\left(-\log \left(\frac{1}{t}\right) \cdot \left(a - 0.5\right)\right)} \]
      4. unsub-neg (49.0%)

        \[\leadsto \color{blue}{\log \left(z \cdot y\right) - \log \left(\frac{1}{t}\right) \cdot \left(a - 0.5\right)} \]
      5. log-rec (49.0%)

        \[\leadsto \log \left(z \cdot y\right) - \color{blue}{\left(-\log t\right)} \cdot \left(a - 0.5\right) \]
      6. distribute-lft-neg-in (49.0%)

        \[\leadsto \log \left(z \cdot y\right) - \color{blue}{\left(-\log t \cdot \left(a - 0.5\right)\right)} \]
      7. distribute-rgt-neg-in (49.0%)

        \[\leadsto \log \left(z \cdot y\right) - \color{blue}{\log t \cdot \left(-\left(a - 0.5\right)\right)} \]
      8. sub-neg (49.0%)

        \[\leadsto \log \left(z \cdot y\right) - \log t \cdot \left(-\color{blue}{\left(a + \left(-0.5\right)\right)}\right) \]
      9. metadata-eval (49.0%)

        \[\leadsto \log \left(z \cdot y\right) - \log t \cdot \left(-\left(a + \color{blue}{-0.5}\right)\right) \]
    8. Simplified (49.0%)

      \[\leadsto \color{blue}{\log \left(z \cdot y\right) - \log t \cdot \left(-\left(a + -0.5\right)\right)} \]

    if 1.69999999999999996e-24 < t

    1. Initial program (99.9%)

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ (99.9%)

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ (99.9%)

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative (99.9%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def (99.9%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified (99.9%)

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around inf (99.9%)

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
    5. Taylor expanded in a around inf (98.4%)

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (76.8%)

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 1.7 \cdot 10^{-24}:\\ \;\;\;\;\left(a + -0.5\right) \cdot \log t + \log \left(z \cdot y\right)\\ \mathbf{else}:\\ \;\;\;\;a \cdot \log t - t\\ \end{array} \]
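
A quick sanity check on this split (a minimal sketch with arbitrarily chosen sample values; only math.log is assumed):

import math

def original(x, y, z, t, a):
	# initial program, transcribed from the FPCore spec
	return ((math.log(x + y) + math.log(z)) - t) + (a - 0.5) * math.log(t)

def alternative(x, y, z, t, a):
	# the two-regime rewrite above
	if t <= 1.7e-24:
		return (a + -0.5) * math.log(t) + math.log(z * y)
	return a * math.log(t) - t

# Small-t regime with x much smaller than y, where the rewrite is justified:
# both print roughly -239.98.
args = (1e-30, 2.0, 3.0, 1e-30, 4.0)
print(original(*args), alternative(*args))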

Alternative 11: 62.4% accurate, 2.9× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq 3.25 \cdot 10^{+50}:\\ \;\;\;\;t + a \cdot \log t\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 3.25e+50) (+ t (* a (log t))) (- t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 3.25e+50) {
		tmp = t + (a * log(t));
	} else {
		tmp = -t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (t <= 3.25d+50) then
        tmp = t + (a * log(t))
    else
        tmp = -t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 3.25e+50) {
		tmp = t + (a * Math.log(t));
	} else {
		tmp = -t;
	}
	return tmp;
}
import math

def code(x, y, z, t, a):
	tmp = 0
	if t <= 3.25e+50:
		tmp = t + (a * math.log(t))
	else:
		tmp = -t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 3.25e+50)
		tmp = Float64(t + Float64(a * log(t)));
	else
		tmp = Float64(-t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (t <= 3.25e+50)
		tmp = t + (a * log(t));
	else
		tmp = -t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 3.25e+50], N[(t + N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-t)]
Derivation
  1. Split input into 2 regimes
  2. if t < 3.2500000000000001e50

    1. Initial program (99.3%)

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ (99.3%)

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ (99.3%)

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative (99.3%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def (99.3%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg (99.3%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg (99.3%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg (99.3%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval (99.3%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified (99.3%)

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around inf (99.3%)

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
    5. Taylor expanded in a around inf (56.1%)

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
    6. Step-by-step derivation
      1. *-commutative (56.1%)

        \[\leadsto \color{blue}{\log t \cdot a} - t \]
      2. fma-neg (56.1%)

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
    7. Applied egg-rr (56.1%)

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
    8. Step-by-step derivation
      1. fma-udef (56.1%)

        \[\leadsto \color{blue}{\log t \cdot a + \left(-t\right)} \]
      2. add-sqr-sqrt (0.0%)

        \[\leadsto \log t \cdot a + \color{blue}{\sqrt{-t} \cdot \sqrt{-t}} \]
      3. sqrt-unprod (52.0%)

        \[\leadsto \log t \cdot a + \color{blue}{\sqrt{\left(-t\right) \cdot \left(-t\right)}} \]
      4. sqr-neg (52.0%)

        \[\leadsto \log t \cdot a + \sqrt{\color{blue}{t \cdot t}} \]
      5. sqrt-unprod (52.1%)

        \[\leadsto \log t \cdot a + \color{blue}{\sqrt{t} \cdot \sqrt{t}} \]
      6. add-sqr-sqrt (52.1%)

        \[\leadsto \log t \cdot a + \color{blue}{t} \]
    9. Applied egg-rr (52.1%)

      \[\leadsto \color{blue}{\log t \cdot a + t} \]

    if 3.2500000000000001e50 < t

    1. Initial program (99.9%)

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ (99.9%)

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ (99.9%)

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative (99.9%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def (99.9%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified (99.9%)

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Step-by-step derivation
      1. expm1-log1p-u (7.6%)

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)\right)\right)} \]
      2. +-commutative (7.6%)

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(a + -0.5, \log t, \log z - t\right) + \log \left(x + y\right)}\right)\right) \]
    5. Applied egg-rr (7.6%)

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(a + -0.5, \log t, \log z - t\right) + \log \left(x + y\right)\right)\right)} \]
    6. Taylor expanded in t around inf (74.9%)

      \[\leadsto \color{blue}{-1 \cdot t} \]
    7. Step-by-step derivation
      1. mul-1-neg (74.9%)

        \[\leadsto \color{blue}{-t} \]
    8. Simplified (74.9%)

      \[\leadsto \color{blue}{-t} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (62.5%)

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 3.25 \cdot 10^{+50}:\\ \;\;\;\;t + a \cdot \log t\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \]
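
Note the sign flip in the low-t regime: steps 8.2–8.6 rewrite − t as √(−t) · √(−t) (NaN whenever t > 0, hence the 0.0% step), then resolve it through √(t · t) to + t, so the regime ends at log t · a + t instead of the a · log t − t it started from. A two-line illustration of where the sign disappears (sample value chosen arbitrarily):

import math

t = 2.0
print(math.sqrt(t * t))  # 2.0: sqrt(t*t) is |t|, so the minus sign of -t is lost
print(-t)                # -2.0: the term the derivation started from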

Alternative 12: 62.0% accurate, 3.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq 1.8 \cdot 10^{+57}:\\ \;\;\;\;a \cdot \log t\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 1.8e+57) (* a (log t)) (- t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 1.8e+57) {
		tmp = a * log(t);
	} else {
		tmp = -t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (t <= 1.8d+57) then
        tmp = a * log(t)
    else
        tmp = -t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 1.8e+57) {
		tmp = a * Math.log(t);
	} else {
		tmp = -t;
	}
	return tmp;
}
import math

def code(x, y, z, t, a):
	tmp = 0
	if t <= 1.8e+57:
		tmp = a * math.log(t)
	else:
		tmp = -t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 1.8e+57)
		tmp = Float64(a * log(t));
	else
		tmp = Float64(-t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (t <= 1.8e+57)
		tmp = a * log(t);
	else
		tmp = -t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 1.8e+57], N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision], (-t)]
Derivation
  1. Split input into 2 regimes
  2. if t < 1.8000000000000001e57

    1. Initial program (99.3%)

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ (99.3%)

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ (99.3%)

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative (99.3%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def (99.3%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg (99.3%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg (99.3%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg (99.3%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval (99.3%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified (99.3%)

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Taylor expanded in a around inf (99.3%)

      \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
    5. Taylor expanded in a around inf (57.3%)

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
    6. Taylor expanded in a around inf (51.9%)

      \[\leadsto \color{blue}{a \cdot \log t} \]

    if 1.8000000000000001e57 < t

    1. Initial program (99.9%)

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ (99.9%)

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ (99.9%)

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative (99.9%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. fma-def (99.9%)

        \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
      5. remove-double-neg (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
      6. remove-double-neg (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
      7. sub-neg (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
      8. metadata-eval (99.9%)

        \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
    3. Simplified (99.9%)

      \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
    4. Step-by-step derivation
      1. expm1-log1p-u (7.1%)

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)\right)\right)} \]
      2. +-commutative (7.1%)

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(a + -0.5, \log t, \log z - t\right) + \log \left(x + y\right)}\right)\right) \]
    5. Applied egg-rr (7.1%)

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(a + -0.5, \log t, \log z - t\right) + \log \left(x + y\right)\right)\right)} \]
    6. Taylor expanded in t around inf (75.8%)

      \[\leadsto \color{blue}{-1 \cdot t} \]
    7. Step-by-step derivation
      1. mul-1-neg (75.8%)

        \[\leadsto \color{blue}{-t} \]
    8. Simplified (75.8%)

      \[\leadsto \color{blue}{-t} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (62.5%)

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 1.8 \cdot 10^{+57}:\\ \;\;\;\;a \cdot \log t\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \]
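
The threshold is roughly where the linear term takes over: at the branch point,

\[ \log \left(1.8 \cdot 10^{+57}\right) = 57 \log 10 + \log 1.8 \approx 131.2 + 0.6 \approx 131.8, \]

so for larger t the − t term dwarfs a · log t unless |a| is within a few orders of magnitude of t / log t, and keeping only the dominant term on each side is the entire approximation.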

Alternative 13: 74.7% accurate, 3.0× speedup

\[\begin{array}{l} \\ a \cdot \log t - t \end{array} \]
(FPCore (x y z t a) :precision binary64 (- (* a (log t)) t))
double code(double x, double y, double z, double t, double a) {
	return (a * log(t)) - t;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = (a * log(t)) - t
end function
public static double code(double x, double y, double z, double t, double a) {
	return (a * Math.log(t)) - t;
}
import math

def code(x, y, z, t, a):
	return (a * math.log(t)) - t
function code(x, y, z, t, a)
	return Float64(Float64(a * log(t)) - t)
end
function tmp = code(x, y, z, t, a)
	tmp = (a * log(t)) - t;
end
code[x_, y_, z_, t_, a_] := N[(N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
Derivation
  1. Initial program (99.6%)

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+ (99.6%)

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. associate-+l+ (99.6%)

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    3. +-commutative (99.6%)

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
    4. fma-def (99.6%)

      \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
    5. remove-double-neg (99.6%)

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
    6. remove-double-neg (99.6%)

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
    7. sub-neg (99.6%)

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
    8. metadata-eval (99.6%)

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
  3. Simplified (99.6%)

    \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
  4. Taylor expanded in a around inf (99.6%)

    \[\leadsto \color{blue}{\left(\log \left(y + x\right) + \left(\log z + \left(a \cdot \log t + -0.5 \cdot \log t\right)\right)\right) - t} \]
  5. Taylor expanded in a around inf (76.1%)

    \[\leadsto \color{blue}{a \cdot \log t} - t \]
  6. Final simplification (76.1%)

    \[\leadsto a \cdot \log t - t \]
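
Because this alternative ignores x, y, and z entirely, it is only trustworthy where a · log t − t dominates the dropped log terms. A spot check (a minimal sketch; sample values chosen arbitrarily):

import math

def original(x, y, z, t, a):
	return ((math.log(x + y) + math.log(z)) - t) + (a - 0.5) * math.log(t)

def alt(t, a):
	return a * math.log(t) - t

# With a and t large, the dropped log(x + y) + log(z) - 0.5*log(t) is tiny
# next to a*log(t) - t; the relative error printed here is about 3e-10.
exact = original(1.0, 2.0, 3.0, 1e6, 1e9)
print(abs(alt(1e6, 1e9) - exact) / abs(exact))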

Alternative 14: 37.6% accurate, 156.5× speedup

\[\begin{array}{l} \\ -t \end{array} \]
(FPCore (x y z t a) :precision binary64 (- t))
double code(double x, double y, double z, double t, double a) {
	return -t;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = -t
end function
public static double code(double x, double y, double z, double t, double a) {
	return -t;
}
def code(x, y, z, t, a):
	return -t
function code(x, y, z, t, a)
	return Float64(-t)
end
function tmp = code(x, y, z, t, a)
	tmp = -t;
end
code[x_, y_, z_, t_, a_] := (-t)
Derivation
  1. Initial program (99.6%)

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+ (99.6%)

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. associate-+l+ (99.6%)

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    3. +-commutative (99.6%)

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
    4. fma-def (99.6%)

      \[\leadsto \log \left(x + y\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z - t\right)} \]
    5. remove-double-neg (99.6%)

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{-\left(-\left(a - 0.5\right)\right)}, \log t, \log z - t\right) \]
    6. remove-double-neg (99.6%)

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a - 0.5}, \log t, \log z - t\right) \]
    7. sub-neg (99.6%)

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z - t\right) \]
    8. metadata-eval (99.6%)

      \[\leadsto \log \left(x + y\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z - t\right) \]
  3. Simplified (99.6%)

    \[\leadsto \color{blue}{\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)} \]
  4. Step-by-step derivation
    1. expm1-log1p-u (33.0%)

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\log \left(x + y\right) + \mathsf{fma}\left(a + -0.5, \log t, \log z - t\right)\right)\right)} \]
    2. +-commutative (33.0%)

      \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(a + -0.5, \log t, \log z - t\right) + \log \left(x + y\right)}\right)\right) \]
  5. Applied egg-rr (33.0%)

    \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(a + -0.5, \log t, \log z - t\right) + \log \left(x + y\right)\right)\right)} \]
  6. Taylor expanded in t around inf (37.7%)

    \[\leadsto \color{blue}{-1 \cdot t} \]
  7. Step-by-step derivation
    1. mul-1-neg (37.7%)

      \[\leadsto \color{blue}{-t} \]
  8. Simplified (37.7%)

    \[\leadsto \color{blue}{-t} \]
  9. Final simplification (37.7%)

    \[\leadsto -t \]
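
This is the degenerate end of the tradeoff: − t is the Taylor limit of the whole expression as t → ∞, and with no log calls left it is by far the fastest alternative, but it is usable only where t overwhelms every other term.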

Developer target: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right) \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (log (+ x y)) (+ (- (log z) t) (* (- a 0.5) (log t)))))
double code(double x, double y, double z, double t, double a) {
	return log((x + y)) + ((log(z) - t) + ((a - 0.5) * log(t)));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = log((x + y)) + ((log(z) - t) + ((a - 0.5d0) * log(t)))
end function
public static double code(double x, double y, double z, double t, double a) {
	return Math.log((x + y)) + ((Math.log(z) - t) + ((a - 0.5) * Math.log(t)));
}
import math

def code(x, y, z, t, a):
	return math.log((x + y)) + ((math.log(z) - t) + ((a - 0.5) * math.log(t)))
function code(x, y, z, t, a)
	return Float64(log(Float64(x + y)) + Float64(Float64(log(z) - t) + Float64(Float64(a - 0.5) * log(t))))
end
function tmp = code(x, y, z, t, a)
	tmp = log((x + y)) + ((log(z) - t) + ((a - 0.5) * log(t)));
end
code[x_, y_, z_, t_, a_] := N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
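
The developer's version is built from the same terms as the initial program, just reassociated so that log z − t is formed before the (a − 0.5) · log t product is added; it is essentially the grouping the derivations above reach at their Simplified step, with the fma written out. A direct comparison (a minimal sketch; sample values chosen arbitrarily):

import math

def initial(x, y, z, t, a):
	return ((math.log(x + y) + math.log(z)) - t) + (a - 0.5) * math.log(t)

def target(x, y, z, t, a):
	return math.log(x + y) + ((math.log(z) - t) + (a - 0.5) * math.log(t))

# Same terms, different grouping; results may differ by a few ulps at most.
args = (0.5, 1.5, 2.0, 3.0, 4.0)
print(initial(*args), target(*args))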

Reproduce

herbie shell --seed 2023196 
(FPCore (x y z t a)
  :name "Numeric.SpecFunctions:logGammaL from math-functions-0.1.5.2"
  :precision binary64

  :herbie-target
  (+ (log (+ x y)) (+ (- (log z) t) (* (- a 0.5) (log t))))

  (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
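
One way to rerun this locally (assuming a working Herbie install and that the FPCore above is saved to a file; the name logGammaL.fpcore is arbitrary) is to feed it to the shell on standard input:

herbie shell --seed 2023196 < logGammaL.fpcore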