Numeric.SpecFunctions:logGammaL from math-functions-0.1.5.2

Percentage Accurate: 99.6% → 99.6%
Time: 16.3s
Alternatives: 12
Speedup: N/A

Specification

\[\begin{array}{l} \\ \left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
double code(double x, double y, double z, double t, double a) {
	return ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = ((log((x + y)) + log(z)) - t) + ((a - 0.5d0) * log(t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return ((Math.log((x + y)) + Math.log(z)) - t) + ((a - 0.5) * Math.log(t));
}
def code(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))
function code(x, y, z, t, a)
	return Float64(Float64(Float64(log(Float64(x + y)) + log(z)) - t) + Float64(Float64(a - 0.5) * log(t)))
end
function tmp = code(x, y, z, t, a)
	tmp = ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t
\end{array}
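
As a quick smoke test, the Python translation above can be run directly. This is a minimal sketch, assuming arbitrary positive sample inputs (so every logarithm is defined) rather than points from Herbie's sampled distribution; note that the listings above assume import math.

import math

def code(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))

# Arbitrary positive sample point, not drawn from Herbie's input distribution.
print(code(1.0, 2.0, 3.0, 4.0, 5.0))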

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs. input variable

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable (the variable is chosen in the title); the vertical axis is accuracy, and higher is better. Red represents the original program, while blue represents Herbie's suggestion; these can be toggled with buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 12 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
double code(double x, double y, double z, double t, double a) {
	return ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = ((log((x + y)) + log(z)) - t) + ((a - 0.5d0) * log(t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return ((Math.log((x + y)) + Math.log(z)) - t) + ((a - 0.5) * Math.log(t));
}
def code(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))
function code(x, y, z, t, a)
	return Float64(Float64(Float64(log(Float64(x + y)) + log(z)) - t) + Float64(Float64(a - 0.5) * log(t)))
end
function tmp = code(x, y, z, t, a)
	tmp = ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t
\end{array}

Alternative 1: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \log z + \left(\log \left(x + y\right) + \left(\log t \cdot \left(a - 0.5\right) - t\right)\right) \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (log z) (+ (log (+ x y)) (- (* (log t) (- a 0.5)) t))))
double code(double x, double y, double z, double t, double a) {
	return log(z) + (log((x + y)) + ((log(t) * (a - 0.5)) - t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = log(z) + (log((x + y)) + ((log(t) * (a - 0.5d0)) - t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return Math.log(z) + (Math.log((x + y)) + ((Math.log(t) * (a - 0.5)) - t));
}
def code(x, y, z, t, a):
	return math.log(z) + (math.log((x + y)) + ((math.log(t) * (a - 0.5)) - t))
function code(x, y, z, t, a)
	return Float64(log(z) + Float64(log(Float64(x + y)) + Float64(Float64(log(t) * Float64(a - 0.5)) - t)))
end
function tmp = code(x, y, z, t, a)
	tmp = log(z) + (log((x + y)) + ((log(t) * (a - 0.5)) - t));
end
code[x_, y_, z_, t_, a_] := N[(N[Log[z], $MachinePrecision] + N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(N[Log[t], $MachinePrecision] * N[(a - 0.5), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\log z + \left(\log \left(x + y\right) + \left(\log t \cdot \left(a - 0.5\right) - t\right)\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+ 99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. associate-+l+ 99.6%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    3. +-commutative 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
    4. associate-+r- 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
    5. fma-def 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
    6. sub-neg 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
    7. metadata-eval 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
  3. Simplified 99.6%

    \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
  4. Taylor expanded in t around 0 99.6%

    \[\leadsto \color{blue}{\log z + \left(\log \left(x + y\right) + \left(-1 \cdot t + \log t \cdot \left(a - 0.5\right)\right)\right)} \]
  4. Final simplification 99.6%

    \[\leadsto \log z + \left(\log \left(x + y\right) + \left(\log t \cdot \left(a - 0.5\right) - t\right)\right) \]
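
A minimal sketch for comparing the initial program with this rearrangement at a single point; the sample inputs below are arbitrary assumptions, and since both programs score 99.6%, any difference at a given point comes from the reordering alone.

import math

def initial(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))

def alternative1(x, y, z, t, a):
	return math.log(z) + (math.log((x + y)) + ((math.log(t) * (a - 0.5)) - t))

pt = (1.5, 2.5, 0.7, 3.0, 1.2)  # arbitrary sample point
print(initial(*pt), alternative1(*pt))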

Alternative 2: 80.7% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;a - 0.5 \leq -50 \lor \neg \left(a - 0.5 \leq -0.5\right):\\ \;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\log z - t\right) + \left(\log y + \log t \cdot -0.5\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (or (<= (- a 0.5) -50.0) (not (<= (- a 0.5) -0.5)))
   (+ (log (+ x y)) (- (* (log t) a) t))
   (+ (- (log z) t) (+ (log y) (* (log t) -0.5)))))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (((a - 0.5) <= -50.0) || !((a - 0.5) <= -0.5)) {
		tmp = log((x + y)) + ((log(t) * a) - t);
	} else {
		tmp = (log(z) - t) + (log(y) + (log(t) * -0.5));
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (((a - 0.5d0) <= (-50.0d0)) .or. (.not. ((a - 0.5d0) <= (-0.5d0)))) then
        tmp = log((x + y)) + ((log(t) * a) - t)
    else
        tmp = (log(z) - t) + (log(y) + (log(t) * (-0.5d0)))
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (((a - 0.5) <= -50.0) || !((a - 0.5) <= -0.5)) {
		tmp = Math.log((x + y)) + ((Math.log(t) * a) - t);
	} else {
		tmp = (Math.log(z) - t) + (Math.log(y) + (Math.log(t) * -0.5));
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if ((a - 0.5) <= -50.0) or not ((a - 0.5) <= -0.5):
		tmp = math.log((x + y)) + ((math.log(t) * a) - t)
	else:
		tmp = (math.log(z) - t) + (math.log(y) + (math.log(t) * -0.5))
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if ((Float64(a - 0.5) <= -50.0) || !(Float64(a - 0.5) <= -0.5))
		tmp = Float64(log(Float64(x + y)) + Float64(Float64(log(t) * a) - t));
	else
		tmp = Float64(Float64(log(z) - t) + Float64(log(y) + Float64(log(t) * -0.5)));
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (((a - 0.5) <= -50.0) || ~(((a - 0.5) <= -0.5)))
		tmp = log((x + y)) + ((log(t) * a) - t);
	else
		tmp = (log(z) - t) + (log(y) + (log(t) * -0.5));
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[Or[LessEqual[N[(a - 0.5), $MachinePrecision], -50.0], N[Not[LessEqual[N[(a - 0.5), $MachinePrecision], -0.5]], $MachinePrecision]], N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(N[Log[t], $MachinePrecision] * a), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision], N[(N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision] + N[(N[Log[y], $MachinePrecision] + N[(N[Log[t], $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;a - 0.5 \leq -50 \lor \neg \left(a - 0.5 \leq -0.5\right):\\
\;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\

\mathbf{else}:\\
\;\;\;\;\left(\log z - t\right) + \left(\log y + \log t \cdot -0.5\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 a 1/2) < -50 or -0.5 < (-.f64 a 1/2)

    1. Initial program 99.7%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.7%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.7%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. associate-+r- 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
      5. fma-def 99.7%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
      6. sub-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
      7. metadata-eval 99.7%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
    3. Simplified 99.7%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
    4. Taylor expanded in a around inf 98.7%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{a \cdot \log t} - t\right) \]
    5. Step-by-step derivation
      1. *-commutative 98.7%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]
    6. Simplified 98.7%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]

    if -50 < (-.f64 a 1/2) < -0.5

    1. Initial program 99.4%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.4%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative 99.4%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+ 99.5%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative 99.5%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def 99.5%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg 99.5%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval 99.5%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified 99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in a around 0 99.3%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\log \left(x + y\right) + -0.5 \cdot \log t\right)} \]
    5. Step-by-step derivation
      1. +-commutative 99.3%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(-0.5 \cdot \log t + \log \left(x + y\right)\right)} \]
      2. +-commutative 99.3%

        \[\leadsto \left(\log z - t\right) + \left(-0.5 \cdot \log t + \log \color{blue}{\left(y + x\right)}\right) \]
    6. Simplified 99.3%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(-0.5 \cdot \log t + \log \left(y + x\right)\right)} \]
    7. Taylor expanded in y around inf 66.0%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(-1 \cdot \log \left(\frac{1}{y}\right) + -0.5 \cdot \log t\right)} \]
    8. Step-by-step derivation
      1. +-commutative 66.0%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(-0.5 \cdot \log t + -1 \cdot \log \left(\frac{1}{y}\right)\right)} \]
      2. mul-1-neg 66.0%

        \[\leadsto \left(\log z - t\right) + \left(-0.5 \cdot \log t + \color{blue}{\left(-\log \left(\frac{1}{y}\right)\right)}\right) \]
      3. log-rec 66.0%

        \[\leadsto \left(\log z - t\right) + \left(-0.5 \cdot \log t + \left(-\color{blue}{\left(-\log y\right)}\right)\right) \]
      4. remove-double-neg 66.0%

        \[\leadsto \left(\log z - t\right) + \left(-0.5 \cdot \log t + \color{blue}{\log y}\right) \]
    9. Simplified 66.0%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(-0.5 \cdot \log t + \log y\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 83.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;a - 0.5 \leq -50 \lor \neg \left(a - 0.5 \leq -0.5\right):\\ \;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\log z - t\right) + \left(\log y + \log t \cdot -0.5\right)\\ \end{array} \]
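
Step 7 of the second regime (the Taylor expansion in y around infinity) in effect uses the identity

\[\log \left(x + y\right) = \log y + \log \left(1 + \frac{x}{y}\right) \approx \log y \quad \text{when } x \ll y,\]

dropping x entirely. The approximation is only sound when y dominates x, which is consistent with the accuracy recorded at that step falling from 99.3% to 66.0%.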

Alternative 3: 98.5% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \log \left(x + y\right)\\ \mathbf{if}\;a \leq -0.84 \lor \neg \left(a \leq 9 \cdot 10^{-15}\right):\\ \;\;\;\;t_1 + \left(\log t \cdot a - t\right)\\ \mathbf{else}:\\ \;\;\;\;t_1 + \left(\left(\log z + \log t \cdot -0.5\right) - t\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (let* ((t_1 (log (+ x y))))
   (if (or (<= a -0.84) (not (<= a 9e-15)))
     (+ t_1 (- (* (log t) a) t))
     (+ t_1 (- (+ (log z) (* (log t) -0.5)) t)))))
double code(double x, double y, double z, double t, double a) {
	double t_1 = log((x + y));
	double tmp;
	if ((a <= -0.84) || !(a <= 9e-15)) {
		tmp = t_1 + ((log(t) * a) - t);
	} else {
		tmp = t_1 + ((log(z) + (log(t) * -0.5)) - t);
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: t_1
    real(8) :: tmp
    t_1 = log((x + y))
    if ((a <= (-0.84d0)) .or. (.not. (a <= 9d-15))) then
        tmp = t_1 + ((log(t) * a) - t)
    else
        tmp = t_1 + ((log(z) + (log(t) * (-0.5d0))) - t)
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double t_1 = Math.log((x + y));
	double tmp;
	if ((a <= -0.84) || !(a <= 9e-15)) {
		tmp = t_1 + ((Math.log(t) * a) - t);
	} else {
		tmp = t_1 + ((Math.log(z) + (Math.log(t) * -0.5)) - t);
	}
	return tmp;
}
def code(x, y, z, t, a):
	t_1 = math.log((x + y))
	tmp = 0
	if (a <= -0.84) or not (a <= 9e-15):
		tmp = t_1 + ((math.log(t) * a) - t)
	else:
		tmp = t_1 + ((math.log(z) + (math.log(t) * -0.5)) - t)
	return tmp
function code(x, y, z, t, a)
	t_1 = log(Float64(x + y))
	tmp = 0.0
	if ((a <= -0.84) || !(a <= 9e-15))
		tmp = Float64(t_1 + Float64(Float64(log(t) * a) - t));
	else
		tmp = Float64(t_1 + Float64(Float64(log(z) + Float64(log(t) * -0.5)) - t));
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	t_1 = log((x + y));
	tmp = 0.0;
	if ((a <= -0.84) || ~((a <= 9e-15)))
		tmp = t_1 + ((log(t) * a) - t);
	else
		tmp = t_1 + ((log(z) + (log(t) * -0.5)) - t);
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := Block[{t$95$1 = N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision]}, If[Or[LessEqual[a, -0.84], N[Not[LessEqual[a, 9e-15]], $MachinePrecision]], N[(t$95$1 + N[(N[(N[Log[t], $MachinePrecision] * a), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision], N[(t$95$1 + N[(N[(N[Log[z], $MachinePrecision] + N[(N[Log[t], $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \log \left(x + y\right)\\
\mathbf{if}\;a \leq -0.84 \lor \neg \left(a \leq 9 \cdot 10^{-15}\right):\\
\;\;\;\;t_1 + \left(\log t \cdot a - t\right)\\

\mathbf{else}:\\
\;\;\;\;t_1 + \left(\left(\log z + \log t \cdot -0.5\right) - t\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if a < -0.839999999999999969 or 8.9999999999999995e-15 < a

    1. Initial program 99.7%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.7%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.7%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. associate-+r- 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
      5. fma-def 99.7%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
      6. sub-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
      7. metadata-eval 99.7%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
    3. Simplified 99.7%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
    4. Taylor expanded in a around inf 98.7%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{a \cdot \log t} - t\right) \]
    5. Step-by-step derivation
      1. *-commutative 98.7%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]
    6. Simplified 98.7%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]

    if -0.839999999999999969 < a < 8.9999999999999995e-15

    1. Initial program 99.4%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.4%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.4%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.4%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. associate-+r- 99.5%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
      5. fma-def 99.5%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
      6. sub-neg 99.5%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
      7. metadata-eval 99.5%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
    3. Simplified 99.5%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
    4. Taylor expanded in a around 0 99.3%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\left(\log z + -0.5 \cdot \log t\right)} - t\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 99.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;a \leq -0.84 \lor \neg \left(a \leq 9 \cdot 10^{-15}\right):\\ \;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + y\right) + \left(\left(\log z + \log t \cdot -0.5\right) - t\right)\\ \end{array} \]
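
The else branch comes from the Taylor expansion in a around 0, which keeps only the constant term of the product:

\[\left(a - 0.5\right) \cdot \log t = -0.5 \cdot \log t + a \cdot \log t \approx -0.5 \cdot \log t,\]

an approximation that holds when the magnitude of a · log t is small next to the retained terms, hence the bounds on a in the regime test.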

Alternative 4: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \log t \cdot \left(a + -0.5\right) \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (+ (log (+ x y)) (- (log z) t)) (* (log t) (+ a -0.5))))
double code(double x, double y, double z, double t, double a) {
	return (log((x + y)) + (log(z) - t)) + (log(t) * (a + -0.5));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = (log((x + y)) + (log(z) - t)) + (log(t) * (a + (-0.5d0)))
end function
public static double code(double x, double y, double z, double t, double a) {
	return (Math.log((x + y)) + (Math.log(z) - t)) + (Math.log(t) * (a + -0.5));
}
def code(x, y, z, t, a):
	return (math.log((x + y)) + (math.log(z) - t)) + (math.log(t) * (a + -0.5))
function code(x, y, z, t, a)
	return Float64(Float64(log(Float64(x + y)) + Float64(log(z) - t)) + Float64(log(t) * Float64(a + -0.5)))
end
function tmp = code(x, y, z, t, a)
	tmp = (log((x + y)) + (log(z) - t)) + (log(t) * (a + -0.5));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision] + N[(N[Log[t], $MachinePrecision] * N[(a + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \log t \cdot \left(a + -0.5\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. cancel-sign-sub 99.6%

      \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) - \left(-\left(a - 0.5\right)\right) \cdot \log t} \]
    2. cancel-sign-sub-inv 99.6%

      \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t} \]
    3. associate--l+ 99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t \]
    4. remove-double-neg 99.6%

      \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a - 0.5\right)} \cdot \log t \]
    5. sub-neg 99.6%

      \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a + \left(-0.5\right)\right)} \cdot \log t \]
    6. metadata-eval 99.6%

      \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + \color{blue}{-0.5}\right) \cdot \log t \]
  3. Simplified 99.6%

    \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t} \]
  4. Final simplification 99.6%

    \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \log t \cdot \left(a + -0.5\right) \]
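
The sub-neg and metadata-eval steps are exact rather than approximate: in IEEE 754 arithmetic, a - 0.5 is defined as a + (-0.5) with a single rounding, so both spellings give bit-identical binary64 results. A minimal check, assuming an arbitrary sampling range:

import random

for _ in range(1000):
	a = random.uniform(-1e6, 1e6)
	assert (a - 0.5) == (a + -0.5)  # subtraction is addition of the negation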

Alternative 5: 68.8% accurate, 1.0× speedup

\[\begin{array}{l} \\ \log t \cdot \left(a + -0.5\right) + \left(\log z + \left(\log y - t\right)\right) \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (* (log t) (+ a -0.5)) (+ (log z) (- (log y) t))))
double code(double x, double y, double z, double t, double a) {
	return (log(t) * (a + -0.5)) + (log(z) + (log(y) - t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = (log(t) * (a + (-0.5d0))) + (log(z) + (log(y) - t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return (Math.log(t) * (a + -0.5)) + (Math.log(z) + (Math.log(y) - t));
}
def code(x, y, z, t, a):
	return (math.log(t) * (a + -0.5)) + (math.log(z) + (math.log(y) - t))
function code(x, y, z, t, a)
	return Float64(Float64(log(t) * Float64(a + -0.5)) + Float64(log(z) + Float64(log(y) - t)))
end
function tmp = code(x, y, z, t, a)
	tmp = (log(t) * (a + -0.5)) + (log(z) + (log(y) - t));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[Log[t], $MachinePrecision] * N[(a + -0.5), $MachinePrecision]), $MachinePrecision] + N[(N[Log[z], $MachinePrecision] + N[(N[Log[y], $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\log t \cdot \left(a + -0.5\right) + \left(\log z + \left(\log y - t\right)\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. cancel-sign-sub 99.6%

      \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) - \left(-\left(a - 0.5\right)\right) \cdot \log t} \]
    2. cancel-sign-sub-inv 99.6%

      \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t} \]
    3. associate--l+ 99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t \]
    4. remove-double-neg 99.6%

      \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a - 0.5\right)} \cdot \log t \]
    5. sub-neg 99.6%

      \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a + \left(-0.5\right)\right)} \cdot \log t \]
    6. metadata-eval 99.6%

      \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + \color{blue}{-0.5}\right) \cdot \log t \]
  3. Simplified 99.6%

    \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t} \]
  4. Taylor expanded in x around 0 71.5%

    \[\leadsto \color{blue}{\left(\left(\log y + \log z\right) - t\right)} + \left(a + -0.5\right) \cdot \log t \]
  5. Step-by-step derivation
    1. remove-double-neg 71.5%

      \[\leadsto \left(\left(\color{blue}{\left(-\left(-\log y\right)\right)} + \log z\right) - t\right) + \left(a + -0.5\right) \cdot \log t \]
    2. log-rec 71.5%

      \[\leadsto \left(\left(\left(-\color{blue}{\log \left(\frac{1}{y}\right)}\right) + \log z\right) - t\right) + \left(a + -0.5\right) \cdot \log t \]
    3. mul-1-neg 71.5%

      \[\leadsto \left(\left(\color{blue}{-1 \cdot \log \left(\frac{1}{y}\right)} + \log z\right) - t\right) + \left(a + -0.5\right) \cdot \log t \]
    4. +-commutative 71.5%

      \[\leadsto \left(\color{blue}{\left(\log z + -1 \cdot \log \left(\frac{1}{y}\right)\right)} - t\right) + \left(a + -0.5\right) \cdot \log t \]
    5. associate--l+ 71.5%

      \[\leadsto \color{blue}{\left(\log z + \left(-1 \cdot \log \left(\frac{1}{y}\right) - t\right)\right)} + \left(a + -0.5\right) \cdot \log t \]
    6. mul-1-neg 71.5%

      \[\leadsto \left(\log z + \left(\color{blue}{\left(-\log \left(\frac{1}{y}\right)\right)} - t\right)\right) + \left(a + -0.5\right) \cdot \log t \]
    7. log-rec 71.5%

      \[\leadsto \left(\log z + \left(\left(-\color{blue}{\left(-\log y\right)}\right) - t\right)\right) + \left(a + -0.5\right) \cdot \log t \]
    8. remove-double-neg 71.5%

      \[\leadsto \left(\log z + \left(\color{blue}{\log y} - t\right)\right) + \left(a + -0.5\right) \cdot \log t \]
  6. Simplified 71.5%

    \[\leadsto \color{blue}{\left(\log z + \left(\log y - t\right)\right)} + \left(a + -0.5\right) \cdot \log t \]
  7. Final simplification 71.5%

    \[\leadsto \log t \cdot \left(a + -0.5\right) + \left(\log z + \left(\log y - t\right)\right) \]
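
This variant replaces log(x + y) with log y (the Taylor expansion in x around 0), so it degrades whenever x is comparable to y. A minimal sketch at one such point; the inputs are arbitrary illustrations, not Herbie's sampled data:

import math

def initial(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))

def alternative5(x, y, z, t, a):
	return (math.log(t) * (a + -0.5)) + (math.log(z) + (math.log(y) - t))

pt = (1.0, 1.0, 1.0, 1.0, 1.0)  # x == y, so dropping x loses log(2)
print(initial(*pt), alternative5(*pt))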

Alternative 6: 68.4% accurate, 1.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x + y \leq 2 \cdot 10^{+132}:\\ \;\;\;\;\left(\log t \cdot \left(a - 0.5\right) + \log \left(z \cdot y\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= (+ x y) 2e+132)
   (- (+ (* (log t) (- a 0.5)) (log (* z y))) t)
   (+ (log (+ x y)) (- (* (log t) a) t))))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if ((x + y) <= 2e+132) {
		tmp = ((log(t) * (a - 0.5)) + log((z * y))) - t;
	} else {
		tmp = log((x + y)) + ((log(t) * a) - t);
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if ((x + y) <= 2d+132) then
        tmp = ((log(t) * (a - 0.5d0)) + log((z * y))) - t
    else
        tmp = log((x + y)) + ((log(t) * a) - t)
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if ((x + y) <= 2e+132) {
		tmp = ((Math.log(t) * (a - 0.5)) + Math.log((z * y))) - t;
	} else {
		tmp = Math.log((x + y)) + ((Math.log(t) * a) - t);
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if (x + y) <= 2e+132:
		tmp = ((math.log(t) * (a - 0.5)) + math.log((z * y))) - t
	else:
		tmp = math.log((x + y)) + ((math.log(t) * a) - t)
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if (Float64(x + y) <= 2e+132)
		tmp = Float64(Float64(Float64(log(t) * Float64(a - 0.5)) + log(Float64(z * y))) - t);
	else
		tmp = Float64(log(Float64(x + y)) + Float64(Float64(log(t) * a) - t));
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if ((x + y) <= 2e+132)
		tmp = ((log(t) * (a - 0.5)) + log((z * y))) - t;
	else
		tmp = log((x + y)) + ((log(t) * a) - t);
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[LessEqual[N[(x + y), $MachinePrecision], 2e+132], N[(N[(N[(N[Log[t], $MachinePrecision] * N[(a - 0.5), $MachinePrecision]), $MachinePrecision] + N[Log[N[(z * y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(N[Log[t], $MachinePrecision] * a), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x + y \leq 2 \cdot 10^{+132}:\\
\;\;\;\;\left(\log t \cdot \left(a - 0.5\right) + \log \left(z \cdot y\right)\right) - t\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (+.f64 x y) < 1.99999999999999998e132

    1. Initial program 99.4%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. cancel-sign-sub 99.4%

        \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) - \left(-\left(a - 0.5\right)\right) \cdot \log t} \]
      2. cancel-sign-sub-inv 99.4%

        \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t} \]
      3. associate--l+ 99.4%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t \]
      4. remove-double-neg 99.4%

        \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a - 0.5\right)} \cdot \log t \]
      5. sub-neg 99.4%

        \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a + \left(-0.5\right)\right)} \cdot \log t \]
      6. metadata-eval 99.4%

        \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + \color{blue}{-0.5}\right) \cdot \log t \]
    3. Simplified 99.4%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t} \]
    4. Step-by-step derivation
      1. add-sqr-sqrt 34.4%

        \[\leadsto \color{blue}{\sqrt{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t} \cdot \sqrt{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t}} \]
      2. pow2 34.4%

        \[\leadsto \color{blue}{{\left(\sqrt{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t}\right)}^{2}} \]
      3. +-commutative 34.4%

        \[\leadsto {\left(\sqrt{\color{blue}{\left(a + -0.5\right) \cdot \log t + \left(\log \left(x + y\right) + \left(\log z - t\right)\right)}}\right)}^{2} \]
      4. fma-def 34.4%

        \[\leadsto {\left(\sqrt{\color{blue}{\mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right) + \left(\log z - t\right)\right)}}\right)}^{2} \]
      5. associate-+r- 34.4%

        \[\leadsto {\left(\sqrt{\mathsf{fma}\left(a + -0.5, \log t, \color{blue}{\left(\log \left(x + y\right) + \log z\right) - t}\right)}\right)}^{2} \]
      6. +-commutative 34.4%

        \[\leadsto {\left(\sqrt{\mathsf{fma}\left(a + -0.5, \log t, \color{blue}{\left(\log z + \log \left(x + y\right)\right)} - t\right)}\right)}^{2} \]
      7. sum-log 30.9%

        \[\leadsto {\left(\sqrt{\mathsf{fma}\left(a + -0.5, \log t, \color{blue}{\log \left(z \cdot \left(x + y\right)\right)} - t\right)}\right)}^{2} \]
    5. Applied egg-rr 30.9%

      \[\leadsto \color{blue}{{\left(\sqrt{\mathsf{fma}\left(a + -0.5, \log t, \log \left(z \cdot \left(x + y\right)\right) - t\right)}\right)}^{2}} \]
    6. Taylor expanded in x around 0 58.7%

      \[\leadsto \color{blue}{\left(\log \left(y \cdot z\right) + \log t \cdot \left(a - 0.5\right)\right) - t} \]

    if 1.99999999999999998e132 < (+.f64 x y)

    1. Initial program 99.7%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.7%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.8%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.8%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. associate-+r- 99.8%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
      5. fma-def 99.8%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
      6. sub-neg 99.8%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
      7. metadata-eval 99.8%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
    3. Simplified 99.8%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
    4. Taylor expanded in a around inf 83.0%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{a \cdot \log t} - t\right) \]
    5. Step-by-step derivation
      1. *-commutative 83.0%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]
    6. Simplified 83.0%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 70.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x + y \leq 2 \cdot 10^{+132}:\\ \;\;\;\;\left(\log t \cdot \left(a - 0.5\right) + \log \left(z \cdot y\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\ \end{array} \]
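
The sum-log step in the first regime relies on the identity

\[\log z + \log \left(x + y\right) = \log \left(z \cdot \left(x + y\right)\right),\]

which is exact over the reals but not harmless in binary64: the product can overflow to infinity even when both logarithms are moderate, which plausibly explains why the combined form is only used under the x + y ≤ 2e+132 guard.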

Alternative 7: 73.8% accurate, 1.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;a \leq -8.5 \cdot 10^{-12} \lor \neg \left(a \leq 8.5 \cdot 10^{-24}\right):\\ \;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\log t \cdot -0.5 + \log \left(z \cdot y\right)\right) - t\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (or (<= a -8.5e-12) (not (<= a 8.5e-24)))
   (+ (log (+ x y)) (- (* (log t) a) t))
   (- (+ (* (log t) -0.5) (log (* z y))) t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if ((a <= -8.5e-12) || !(a <= 8.5e-24)) {
		tmp = log((x + y)) + ((log(t) * a) - t);
	} else {
		tmp = ((log(t) * -0.5) + log((z * y))) - t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if ((a <= (-8.5d-12)) .or. (.not. (a <= 8.5d-24))) then
        tmp = log((x + y)) + ((log(t) * a) - t)
    else
        tmp = ((log(t) * (-0.5d0)) + log((z * y))) - t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if ((a <= -8.5e-12) || !(a <= 8.5e-24)) {
		tmp = Math.log((x + y)) + ((Math.log(t) * a) - t);
	} else {
		tmp = ((Math.log(t) * -0.5) + Math.log((z * y))) - t;
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if (a <= -8.5e-12) or not (a <= 8.5e-24):
		tmp = math.log((x + y)) + ((math.log(t) * a) - t)
	else:
		tmp = ((math.log(t) * -0.5) + math.log((z * y))) - t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if ((a <= -8.5e-12) || !(a <= 8.5e-24))
		tmp = Float64(log(Float64(x + y)) + Float64(Float64(log(t) * a) - t));
	else
		tmp = Float64(Float64(Float64(log(t) * -0.5) + log(Float64(z * y))) - t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if ((a <= -8.5e-12) || ~((a <= 8.5e-24)))
		tmp = log((x + y)) + ((log(t) * a) - t);
	else
		tmp = ((log(t) * -0.5) + log((z * y))) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[Or[LessEqual[a, -8.5e-12], N[Not[LessEqual[a, 8.5e-24]], $MachinePrecision]], N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(N[Log[t], $MachinePrecision] * a), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[Log[t], $MachinePrecision] * -0.5), $MachinePrecision] + N[Log[N[(z * y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;a \leq -8.5 \cdot 10^{-12} \lor \neg \left(a \leq 8.5 \cdot 10^{-24}\right):\\
\;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\

\mathbf{else}:\\
\;\;\;\;\left(\log t \cdot -0.5 + \log \left(z \cdot y\right)\right) - t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if a < -8.4999999999999997e-12 or 8.5000000000000002e-24 < a

    1. Initial program 99.7%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.7%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.7%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. associate-+r- 99.7%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
      5. fma-def 99.7%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
      6. sub-neg 99.7%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
      7. metadata-eval 99.7%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
    3. Simplified 99.7%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
    4. Taylor expanded in a around inf 98.2%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{a \cdot \log t} - t\right) \]
    5. Step-by-step derivation
      1. *-commutative 98.2%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]
    6. Simplified 98.2%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]

    if -8.4999999999999997e-12 < a < 8.5000000000000002e-24

    1. Initial program 99.3%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.3%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.4%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.4%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. associate-+r- 99.4%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
      5. fma-def 99.4%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
      6. sub-neg 99.4%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
      7. metadata-eval 99.4%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
    3. Simplified 99.4%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
    4. Taylor expanded in a around 0 99.3%

      \[\leadsto \color{blue}{\left(\log z + \left(\log \left(x + y\right) + -0.5 \cdot \log t\right)\right) - t} \]
    5. Step-by-step derivation
      1. associate-+r+ 99.2%

        \[\leadsto \color{blue}{\left(\left(\log z + \log \left(x + y\right)\right) + -0.5 \cdot \log t\right)} - t \]
      2. log-prod 70.3%

        \[\leadsto \left(\color{blue}{\log \left(z \cdot \left(x + y\right)\right)} + -0.5 \cdot \log t\right) - t \]
      3. +-commutative 70.3%

        \[\leadsto \left(\log \left(z \cdot \color{blue}{\left(y + x\right)}\right) + -0.5 \cdot \log t\right) - t \]
    6. Simplified 70.3%

      \[\leadsto \color{blue}{\left(\log \left(z \cdot \left(y + x\right)\right) + -0.5 \cdot \log t\right) - t} \]
    7. Taylor expanded in y around inf 66.2%

      \[\leadsto \color{blue}{\left(\log z + \left(-1 \cdot \log \left(\frac{1}{y}\right) + -0.5 \cdot \log t\right)\right)} - t \]
    8. Step-by-step derivation
      1. associate-+r+ 66.2%

        \[\leadsto \color{blue}{\left(\left(\log z + -1 \cdot \log \left(\frac{1}{y}\right)\right) + -0.5 \cdot \log t\right)} - t \]
      2. +-commutative 66.2%

        \[\leadsto \left(\color{blue}{\left(-1 \cdot \log \left(\frac{1}{y}\right) + \log z\right)} + -0.5 \cdot \log t\right) - t \]
      3. mul-1-neg 66.2%

        \[\leadsto \left(\left(\color{blue}{\left(-\log \left(\frac{1}{y}\right)\right)} + \log z\right) + -0.5 \cdot \log t\right) - t \]
      4. log-rec 66.2%

        \[\leadsto \left(\left(\left(-\color{blue}{\left(-\log y\right)}\right) + \log z\right) + -0.5 \cdot \log t\right) - t \]
      5. remove-double-neg 66.2%

        \[\leadsto \left(\left(\color{blue}{\log y} + \log z\right) + -0.5 \cdot \log t\right) - t \]
      6. log-prod 50.2%

        \[\leadsto \left(\color{blue}{\log \left(y \cdot z\right)} + -0.5 \cdot \log t\right) - t \]
      7. +-commutative 50.2%

        \[\leadsto \color{blue}{\left(-0.5 \cdot \log t + \log \left(y \cdot z\right)\right)} - t \]
    9. Simplified 50.2%

      \[\leadsto \color{blue}{\left(-0.5 \cdot \log t + \log \left(y \cdot z\right)\right)} - t \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 76.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;a \leq -8.5 \cdot 10^{-12} \lor \neg \left(a \leq 8.5 \cdot 10^{-24}\right):\\ \;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\log t \cdot -0.5 + \log \left(z \cdot y\right)\right) - t\\ \end{array} \]
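
The regime test can be read directly off the FPCore above: outside a narrow band of a values the a-dependent branch runs, and inside it the constant -0.5 factor takes over. A minimal sketch of the predicate, with arbitrary probe values:

def wide_regime(a):
	return (a <= -8.5e-12) or not (a <= 8.5e-24)

for a in (-1.0, -1e-12, 0.0, 1e-23, 0.25):
	print(a, "log(t) * a branch" if wide_regime(a) else "log(t) * -0.5 branch")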

Alternative 8: 86.5% accurate, 1.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq 6.8 \cdot 10^{-28}:\\ \;\;\;\;\log t \cdot \left(a + -0.5\right) + \log \left(z \cdot \left(x + y\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 6.8e-28)
   (+ (* (log t) (+ a -0.5)) (log (* z (+ x y))))
   (+ (log (+ x y)) (- (* (log t) a) t))))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 6.8e-28) {
		tmp = (log(t) * (a + -0.5)) + log((z * (x + y)));
	} else {
		tmp = log((x + y)) + ((log(t) * a) - t);
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (t <= 6.8d-28) then
        tmp = (log(t) * (a + (-0.5d0))) + log((z * (x + y)))
    else
        tmp = log((x + y)) + ((log(t) * a) - t)
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 6.8e-28) {
		tmp = (Math.log(t) * (a + -0.5)) + Math.log((z * (x + y)));
	} else {
		tmp = Math.log((x + y)) + ((Math.log(t) * a) - t);
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if t <= 6.8e-28:
		tmp = (math.log(t) * (a + -0.5)) + math.log((z * (x + y)))
	else:
		tmp = math.log((x + y)) + ((math.log(t) * a) - t)
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 6.8e-28)
		tmp = Float64(Float64(log(t) * Float64(a + -0.5)) + log(Float64(z * Float64(x + y))));
	else
		tmp = Float64(log(Float64(x + y)) + Float64(Float64(log(t) * a) - t));
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (t <= 6.8e-28)
		tmp = (log(t) * (a + -0.5)) + log((z * (x + y)));
	else
		tmp = log((x + y)) + ((log(t) * a) - t);
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 6.8e-28], N[(N[(N[Log[t], $MachinePrecision] * N[(a + -0.5), $MachinePrecision]), $MachinePrecision] + N[Log[N[(z * N[(x + y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(N[Log[t], $MachinePrecision] * a), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq 6.8 \cdot 10^{-28}:\\
\;\;\;\;\log t \cdot \left(a + -0.5\right) + \log \left(z \cdot \left(x + y\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < 6.8000000000000001e-28

    1. Initial program 99.3%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. cancel-sign-sub 99.3%

        \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) - \left(-\left(a - 0.5\right)\right) \cdot \log t} \]
      2. cancel-sign-sub-inv 99.3%

        \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t} \]
      3. associate--l+ 99.3%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t \]
      4. remove-double-neg 99.3%

        \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a - 0.5\right)} \cdot \log t \]
      5. sub-neg 99.3%

        \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a + \left(-0.5\right)\right)} \cdot \log t \]
      6. metadata-eval 99.3%

        \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + \color{blue}{-0.5}\right) \cdot \log t \]
    3. Simplified 99.3%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t} \]
    4. Taylor expanded in t around 0 99.3%

      \[\leadsto \color{blue}{\left(\log z + \log \left(x + y\right)\right)} + \left(a + -0.5\right) \cdot \log t \]
    5. Step-by-step derivation
      1. log-prod 71.3%

        \[\leadsto \color{blue}{\log \left(z \cdot \left(x + y\right)\right)} + \left(a + -0.5\right) \cdot \log t \]
      2. +-commutative 71.3%

        \[\leadsto \log \left(z \cdot \color{blue}{\left(y + x\right)}\right) + \left(a + -0.5\right) \cdot \log t \]
    6. Simplified 71.3%

      \[\leadsto \color{blue}{\log \left(z \cdot \left(y + x\right)\right)} + \left(a + -0.5\right) \cdot \log t \]

    if 6.8000000000000001e-28 < t

    1. Initial program 99.8%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 99.8%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 99.8%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 99.8%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. associate-+r- 99.8%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
      5. fma-def 99.8%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
      6. sub-neg 99.8%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
      7. metadata-eval 99.8%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
    3. Simplified 99.8%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
    4. Taylor expanded in a around inf 94.1%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{a \cdot \log t} - t\right) \]
    5. Step-by-step derivation
      1. *-commutative 94.1%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]
    6. Simplified 94.1%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 84.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 6.8 \cdot 10^{-28}:\\ \;\;\;\;\log t \cdot \left(a + -0.5\right) + \log \left(z \cdot \left(x + y\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + y\right) + \left(\log t \cdot a - t\right)\\ \end{array} \]
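
The t ≤ 6.8e-28 guard is what makes the Taylor expansion in t around 0 safe here: in that regime

\[t \leq 6.8 \cdot 10^{-28} \quad \text{while} \quad \left|\log t\right| \geq \left|\log \left(6.8 \cdot 10^{-28}\right)\right| \approx 62.6,\]

so the dropped -t term is negligible next to the logarithmic terms that remain.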

Alternative 9: 61.0% accurate, 1.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq 2 \cdot 10^{+26}:\\ \;\;\;\;\log t \cdot \left(a - 0.5\right) + \log \left(z \cdot x\right)\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 2e+26) (+ (* (log t) (- a 0.5)) (log (* z x))) (- t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 2e+26) {
		tmp = (log(t) * (a - 0.5)) + log((z * x));
	} else {
		tmp = -t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if (t <= 2d+26) then
        tmp = (log(t) * (a - 0.5d0)) + log((z * x))
    else
        tmp = -t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 2e+26) {
		tmp = (Math.log(t) * (a - 0.5)) + Math.log((z * x));
	} else {
		tmp = -t;
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if t <= 2e+26:
		tmp = (math.log(t) * (a - 0.5)) + math.log((z * x))
	else:
		tmp = -t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 2e+26)
		tmp = Float64(Float64(log(t) * Float64(a - 0.5)) + log(Float64(z * x)));
	else
		tmp = Float64(-t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if (t <= 2e+26)
		tmp = (log(t) * (a - 0.5)) + log((z * x));
	else
		tmp = -t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 2e+26], N[(N[(N[Log[t], $MachinePrecision] * N[(a - 0.5), $MachinePrecision]), $MachinePrecision] + N[Log[N[(z * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], (-t)]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq 2 \cdot 10^{+26}:\\
\;\;\;\;\log t \cdot \left(a - 0.5\right) + \log \left(z \cdot x\right)\\

\mathbf{else}:\\
\;\;\;\;-t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < 2.0000000000000001e26

    1. Initial program 99.2%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. cancel-sign-sub 99.2%

        \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) - \left(-\left(a - 0.5\right)\right) \cdot \log t} \]
      2. cancel-sign-sub-inv 99.2%

        \[\leadsto \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t} \]
      3. associate--l+ 99.2%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(-\left(-\left(a - 0.5\right)\right)\right) \cdot \log t \]
      4. remove-double-neg 99.2%

        \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a - 0.5\right)} \cdot \log t \]
      5. sub-neg 99.2%

        \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \color{blue}{\left(a + \left(-0.5\right)\right)} \cdot \log t \]
      6. metadata-eval 99.2%

        \[\leadsto \left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + \color{blue}{-0.5}\right) \cdot \log t \]
    3. Simplified 99.2%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t} \]
    4. Step-by-step derivation
      1. add-sqr-sqrt 60.7%

        \[\leadsto \color{blue}{\sqrt{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t} \cdot \sqrt{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t}} \]
      2. pow2 60.7%

        \[\leadsto \color{blue}{{\left(\sqrt{\left(\log \left(x + y\right) + \left(\log z - t\right)\right) + \left(a + -0.5\right) \cdot \log t}\right)}^{2}} \]
      3. +-commutative 60.7%

        \[\leadsto {\left(\sqrt{\color{blue}{\left(a + -0.5\right) \cdot \log t + \left(\log \left(x + y\right) + \left(\log z - t\right)\right)}}\right)}^{2} \]
      4. fma-def 60.7%

        \[\leadsto {\left(\sqrt{\color{blue}{\mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right) + \left(\log z - t\right)\right)}}\right)}^{2} \]
      5. associate-+r- 60.7%

        \[\leadsto {\left(\sqrt{\mathsf{fma}\left(a + -0.5, \log t, \color{blue}{\left(\log \left(x + y\right) + \log z\right) - t}\right)}\right)}^{2} \]
      6. +-commutative 60.7%

        \[\leadsto {\left(\sqrt{\mathsf{fma}\left(a + -0.5, \log t, \color{blue}{\left(\log z + \log \left(x + y\right)\right)} - t\right)}\right)}^{2} \]
      7. sum-log 40.6%

        \[\leadsto {\left(\sqrt{\mathsf{fma}\left(a + -0.5, \log t, \color{blue}{\log \left(z \cdot \left(x + y\right)\right)} - t\right)}\right)}^{2} \]
    5. Applied egg-rr 40.6%

      \[\leadsto \color{blue}{{\left(\sqrt{\mathsf{fma}\left(a + -0.5, \log t, \log \left(z \cdot \left(x + y\right)\right) - t\right)}\right)}^{2}} \]
    6. Taylor expanded in y around 0 24.1%

      \[\leadsto {\color{blue}{\left(\sqrt{\left(\log \left(x \cdot z\right) + \log t \cdot \left(a - 0.5\right)\right) - t}\right)}}^{2} \]
    7. Taylor expanded in t around 0 43.7%

      \[\leadsto \color{blue}{\log \left(x \cdot z\right) + \log t \cdot \left(a - 0.5\right)} \]

    if 2.0000000000000001e26 < t

    1. Initial program 100.0%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+ 100.0%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. associate-+l+ 100.0%

        \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      3. +-commutative 100.0%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
      4. associate-+r- 100.0%

        \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
      5. fma-def 100.0%

        \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
      6. sub-neg 100.0%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
      7. metadata-eval 100.0%

        \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
    3. Simplified 100.0%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
    4. Taylor expanded in t around inf 80.8%

      \[\leadsto \color{blue}{-1 \cdot t} \]
    5. Step-by-step derivation
      1. neg-mul-1 80.8%

        \[\leadsto \color{blue}{-t} \]
    6. Simplified 80.8%

      \[\leadsto \color{blue}{-t} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 60.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 2 \cdot 10^{+26}:\\ \;\;\;\;\log t \cdot \left(a - 0.5\right) + \log \left(z \cdot x\right)\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \]
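
To see the regime split concretely, here is a minimal Python sketch (the sample point is made up for illustration, not taken from Herbie's sampled inputs):

import math

def original(x, y, z, t, a):
    # the initial program from this report
    return ((math.log(x + y) + math.log(z)) - t) + (a - 0.5) * math.log(t)

def alternative9(x, y, z, t, a):
    # below the cutoff, keep the log terms (with y dropped from x + y);
    # above it, -t dominates everything else
    if t <= 2e+26:
        return math.log(t) * (a - 0.5) + math.log(z * x)
    return -t

print(original(1.0, 2.0, 3.0, 1e30, 4.0))      # -1e+30: the log terms vanish in rounding
print(alternative9(1.0, 2.0, 3.0, 1e30, 4.0))  # -1e+30

In the large-t regime the two agree to full precision; the low overall accuracy of this alternative comes from the first branch, which drops y entirely (the "Taylor expanded in y around 0" step above).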

Alternative 10: 77.7% accurate, 1.5× speedup

\[\begin{array}{l} \\ \log \left(x + y\right) + \left(\log t \cdot a - t\right) \end{array} \]
(FPCore (x y z t a) :precision binary64 (+ (log (+ x y)) (- (* (log t) a) t)))
double code(double x, double y, double z, double t, double a) {
	return log((x + y)) + ((log(t) * a) - t);
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = log((x + y)) + ((log(t) * a) - t)
end function
public static double code(double x, double y, double z, double t, double a) {
	return Math.log((x + y)) + ((Math.log(t) * a) - t);
}
def code(x, y, z, t, a):
	return math.log((x + y)) + ((math.log(t) * a) - t)
function code(x, y, z, t, a)
	return Float64(log(Float64(x + y)) + Float64(Float64(log(t) * a) - t))
end
function tmp = code(x, y, z, t, a)
	tmp = log((x + y)) + ((log(t) * a) - t);
end
code[x_, y_, z_, t_, a_] := N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(N[Log[t], $MachinePrecision] * a), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + y\right) + \left(\log t \cdot a - t\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+ 99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. associate-+l+ 99.6%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    3. +-commutative 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
    4. associate-+r- 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
    5. fma-def 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
    6. sub-neg 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
    7. metadata-eval 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
  3. Simplified 99.6%

    \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
  4. Taylor expanded in a around inf 78.0%

    \[\leadsto \log \left(x + y\right) + \left(\color{blue}{a \cdot \log t} - t\right) \]
  5. Step-by-step derivation
    1. *-commutative 78.0%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]
  6. Simplified 78.0%

    \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\log t \cdot a} - t\right) \]
  7. Final simplification 78.0%

    \[\leadsto \log \left(x + y\right) + \left(\log t \cdot a - t\right) \]
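
The decisive step here is the Taylor expansion in a around infinity, which drops the constant 0.5 against a:

\[\left(a - 0.5\right) \cdot \log t = a \cdot \log t - 0.5 \cdot \log t \approx a \cdot \log t \quad \text{when } \left|a\right| \gg 0.5\]

so this alternative is accurate where a is large in magnitude and degrades when a is comparable to 0.5.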

Alternative 11: 41.0% accurate, 3.0× speedup

\[\begin{array}{l} \\ \log z - t \end{array} \]
(FPCore (x y z t a) :precision binary64 (- (log z) t))
double code(double x, double y, double z, double t, double a) {
	return log(z) - t;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = log(z) - t
end function
public static double code(double x, double y, double z, double t, double a) {
	return Math.log(z) - t;
}
def code(x, y, z, t, a):
	return math.log(z) - t
function code(x, y, z, t, a)
	return Float64(log(z) - t)
end
function tmp = code(x, y, z, t, a)
	tmp = log(z) - t;
end
code[x_, y_, z_, t_, a_] := N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\log z - t
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+ 99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. associate-+l+ 99.6%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    3. +-commutative 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
    4. associate-+r- 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
    5. fma-def 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
    6. sub-neg 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
    7. metadata-eval 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
  3. Simplified 99.6%

    \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
  4. Taylor expanded in t around 0 99.6%

    \[\leadsto \color{blue}{\log z + \left(\log \left(x + y\right) + \left(-1 \cdot t + \log t \cdot \left(a - 0.5\right)\right)\right)} \]
  5. Taylor expanded in t around inf 41.2%

    \[\leadsto \log z + \color{blue}{-1 \cdot t} \]
  6. Step-by-step derivation
    1. neg-mul-1 41.2%

      \[\leadsto \log z + \color{blue}{\left(-t\right)} \]
  7. Simplified 41.2%

    \[\leadsto \log z + \color{blue}{\left(-t\right)} \]
  8. Final simplification 41.2%

    \[\leadsto \log z - t \]
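
Both truncations in this derivation rely on t dominating the logarithmic terms: for fixed x, y, and a,

\[\lim_{t \to \infty} \frac{\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t}{t} = 0\]

so dropping those terms is harmless only when t is very large; the roughly 41% accuracy reflects how often the sampled inputs fall outside that regime.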

Alternative 12: 38.2% accurate, 156.5× speedup

\[\begin{array}{l} \\ -t \end{array} \]
(FPCore (x y z t a) :precision binary64 (- t))
double code(double x, double y, double z, double t, double a) {
	return -t;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = -t
end function
public static double code(double x, double y, double z, double t, double a) {
	return -t;
}
def code(x, y, z, t, a):
	return -t
function code(x, y, z, t, a)
	return Float64(-t)
end
function tmp = code(x, y, z, t, a)
	tmp = -t;
end
code[x_, y_, z_, t_, a_] := (-t)
\begin{array}{l}

\\
-t
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+ 99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. associate-+l+ 99.6%

      \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    3. +-commutative 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z - t\right)\right)} \]
    4. associate-+r- 99.6%

      \[\leadsto \log \left(x + y\right) + \color{blue}{\left(\left(\left(a - 0.5\right) \cdot \log t + \log z\right) - t\right)} \]
    5. fma-def 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log z\right)} - t\right) \]
    6. sub-neg 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log z\right) - t\right) \]
    7. metadata-eval 99.6%

      \[\leadsto \log \left(x + y\right) + \left(\mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log z\right) - t\right) \]
  3. Simplified 99.6%

    \[\leadsto \color{blue}{\log \left(x + y\right) + \left(\mathsf{fma}\left(a + -0.5, \log t, \log z\right) - t\right)} \]
  4. Taylor expanded in t around inf 38.0%

    \[\leadsto \color{blue}{-1 \cdot t} \]
  5. Step-by-step derivation
    1. neg-mul-1 38.0%

      \[\leadsto \color{blue}{-t} \]
  6. Simplified 38.0%

    \[\leadsto \color{blue}{-t} \]
  7. Final simplification 38.0%

    \[\leadsto -t \]
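
A worked example at a hypothetical point (x = 1, y = 2, z = 3, a = 4, chosen for illustration) shows the tradeoff. At t = 10 the discarded terms dominate:

\[2 \cdot \log 3 + 3.5 \cdot \log 10 - 10 \approx 0.256 \quad \text{versus} \quad -t = -10\]

but at t = 1e30 the logarithmic terms total only about 244, far below half an ulp of 1e30 in binary64 (about 7e13), so the sum rounds to exactly -1e30: the single negation is then faithful while skipping all three logarithm calls.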

Developer target: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right) \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (log (+ x y)) (+ (- (log z) t) (* (- a 0.5) (log t)))))
double code(double x, double y, double z, double t, double a) {
	return log((x + y)) + ((log(z) - t) + ((a - 0.5) * log(t)));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = log((x + y)) + ((log(z) - t) + ((a - 0.5d0) * log(t)))
end function
public static double code(double x, double y, double z, double t, double a) {
	return Math.log((x + y)) + ((Math.log(z) - t) + ((a - 0.5) * Math.log(t)));
}
def code(x, y, z, t, a):
	return math.log((x + y)) + ((math.log(z) - t) + ((a - 0.5) * math.log(t)))
function code(x, y, z, t, a)
	return Float64(log(Float64(x + y)) + Float64(Float64(log(z) - t) + Float64(Float64(a - 0.5) * log(t))))
end
function tmp = code(x, y, z, t, a)
	tmp = log((x + y)) + ((log(z) - t) + ((a - 0.5) * log(t)));
end
code[x_, y_, z_, t_, a_] := N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)
\end{array}

Reproduce

herbie shell --seed 2023291 
(FPCore (x y z t a)
  :name "Numeric.SpecFunctions:logGammaL from math-functions-0.1.5.2"
  :precision binary64

  :herbie-target
  (+ (log (+ x y)) (+ (- (log z) t) (* (- a 0.5) (log t))))

  (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
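
To reproduce, start the shell with the seed above and paste the FPCore expression into it; herbie shell reads FPCore from standard input and re-runs the improvement search (results may differ slightly across Herbie versions, even with the same seed).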