Numeric.SpecFunctions:logGammaL from math-functions-0.1.5.2

Percentage Accurate: 99.6% → 99.6%
Time: 31.0s
Alternatives: 12
Speedup: 1.0×

Specification

\[\begin{array}{l} \\ \left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
double code(double x, double y, double z, double t, double a) {
	return ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = ((log((x + y)) + log(z)) - t) + ((a - 0.5d0) * log(t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return ((Math.log((x + y)) + Math.log(z)) - t) + ((a - 0.5) * Math.log(t));
}
def code(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))
function code(x, y, z, t, a)
	return Float64(Float64(Float64(log(Float64(x + y)) + log(z)) - t) + Float64(Float64(a - 0.5) * log(t)))
end
function tmp = code(x, y, z, t, a)
	tmp = ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t
\end{array}
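As a quick sanity check of the translations above, the specification can be evaluated at a sample point. The driver below is illustrative only: the sample inputs are arbitrary assumptions, and the body of code is copied from the C translation listed above.

#include <math.h>
#include <stdio.h>

/* C translation of the specification, as listed above */
static double code(double x, double y, double z, double t, double a) {
	return ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
}

int main(void) {
	/* arbitrary sample point, chosen only for illustration */
	printf("%.17g\n", code(1.0, 2.0, 3.0, 4.0, 5.0));
	return 0;
}

(Compile with a C99 compiler and link against the math library, e.g. with -lm.)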

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable; the variable is chosen in the plot title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion; the two can be toggled with the buttons below the plot. The line is an average, and the dots represent individual samples.

Accuracy vs Speed

Herbie found 12 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
double code(double x, double y, double z, double t, double a) {
	return ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = ((log((x + y)) + log(z)) - t) + ((a - 0.5d0) * log(t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return ((Math.log((x + y)) + Math.log(z)) - t) + ((a - 0.5) * Math.log(t));
}
def code(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))
function code(x, y, z, t, a)
	return Float64(Float64(Float64(log(Float64(x + y)) + log(z)) - t) + Float64(Float64(a - 0.5) * log(t)))
end
function tmp = code(x, y, z, t, a)
	tmp = ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t
\end{array}

Alternative 1: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
double code(double x, double y, double z, double t, double a) {
	return ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = ((log((x + y)) + log(z)) - t) + ((a - 0.5d0) * log(t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return ((Math.log((x + y)) + Math.log(z)) - t) + ((a - 0.5) * Math.log(t));
}
def code(x, y, z, t, a):
	return ((math.log((x + y)) + math.log(z)) - t) + ((a - 0.5) * math.log(t))
function code(x, y, z, t, a)
	return Float64(Float64(Float64(log(Float64(x + y)) + log(z)) - t) + Float64(Float64(a - 0.5) * log(t)))
end
function tmp = code(x, y, z, t, a)
	tmp = ((log((x + y)) + log(z)) - t) + ((a - 0.5) * log(t));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Final simplification99.6%

    \[\leadsto \left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]

Alternative 2: 80.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq 0.16:\\ \;\;\;\;\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 0.16)
   (+ (* (- a 0.5) (log t)) (+ (log z) (log y)))
   (fma (log t) a (- t))))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 0.16) {
		tmp = ((a - 0.5) * log(t)) + (log(z) + log(y));
	} else {
		tmp = fma(log(t), a, -t);
	}
	return tmp;
}
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 0.16)
		tmp = Float64(Float64(Float64(a - 0.5) * log(t)) + Float64(log(z) + log(y)));
	else
		tmp = fma(log(t), a, Float64(-t));
	end
	return tmp
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 0.16], N[(N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision] + N[(N[Log[z], $MachinePrecision] + N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Log[t], $MachinePrecision] * a + (-t)), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq 0.16:\\
\;\;\;\;\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < 0.160000000000000003

    1. Initial program 99.3%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.3%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.3%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.2%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.2%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.2%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.2%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.2%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.2%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in x around 0 58.9%

      \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
    5. Taylor expanded in t around 0 58.9%

      \[\leadsto \color{blue}{\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)} \]

    if 0.160000000000000003 < t

    1. Initial program 99.8%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.8%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.8%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.8%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.8%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.8%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.8%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.8%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in x around 0 73.0%

      \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
    5. Taylor expanded in a around inf 99.2%

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
    6. Step-by-step derivation
      1. *-commutative27.1%

        \[\leadsto \color{blue}{\log t \cdot a} \]
    7. Simplified99.2%

      \[\leadsto \color{blue}{\log t \cdot a} - t \]
    8. Taylor expanded in t around 0 99.2%

      \[\leadsto \color{blue}{-1 \cdot t + a \cdot \log t} \]
    9. Step-by-step derivation
      1. neg-mul-199.2%

        \[\leadsto \color{blue}{\left(-t\right)} + a \cdot \log t \]
      2. +-commutative99.2%

        \[\leadsto \color{blue}{a \cdot \log t + \left(-t\right)} \]
      3. *-commutative99.2%

        \[\leadsto \color{blue}{\log t \cdot a} + \left(-t\right) \]
      4. fma-udef99.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
    10. Simplified99.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification81.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 0.16:\\ \;\;\;\;\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\ \end{array} \]
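The else branch above leans on the fused multiply-add introduced by the fma-def and fma-udef rewrites. As a reminder of what that operation guarantees (standard IEEE 754 behavior, not something specific to this report), fma rounds the product and sum only once:

\[\mathsf{fma}\left(p, q, r\right) = \mathrm{fl}\left(p \cdot q + r\right),\]

so fma(log t, a, -t) evaluates a * log(t) - t with a single rounding error instead of two.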

Alternative 3: 69.5% accurate, 1.0× speedup

\[\begin{array}{l} \\ \left(\log z - t\right) + \left(\left(a - 0.5\right) \cdot \log t + \log y\right) \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (- (log z) t) (+ (* (- a 0.5) (log t)) (log y))))
double code(double x, double y, double z, double t, double a) {
	return (log(z) - t) + (((a - 0.5) * log(t)) + log(y));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = (log(z) - t) + (((a - 0.5d0) * log(t)) + log(y))
end function
public static double code(double x, double y, double z, double t, double a) {
	return (Math.log(z) - t) + (((a - 0.5) * Math.log(t)) + Math.log(y));
}
def code(x, y, z, t, a):
	return (math.log(z) - t) + (((a - 0.5) * math.log(t)) + math.log(y))
function code(x, y, z, t, a)
	return Float64(Float64(log(z) - t) + Float64(Float64(Float64(a - 0.5) * log(t)) + log(y)))
end
function tmp = code(x, y, z, t, a)
	tmp = (log(z) - t) + (((a - 0.5) * log(t)) + log(y));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision] + N[(N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision] + N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\log z - t\right) + \left(\left(a - 0.5\right) \cdot \log t + \log y\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. +-commutative99.6%

      \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    3. associate-+l+99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    4. +-commutative99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
    5. fma-def99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
    6. sub-neg99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
    7. metadata-eval99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
  3. Simplified99.5%

    \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
  4. Taylor expanded in x around 0 66.9%

    \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log y\right)} \]
  5. Final simplification66.9%

    \[\leadsto \left(\log z - t\right) + \left(\left(a - 0.5\right) \cdot \log t + \log y\right) \]
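The accuracy drop at step 4 comes from truncating the series for log(x + y) after its constant term. Writing out the expansion (valid for y > 0 and useful only when |x| is much smaller than |y|, which is an assumption about the sampled inputs rather than a fact from the report) makes the discarded terms explicit:

\[\log \left(x + y\right) = \log y + \frac{x}{y} - \frac{x^2}{2 y^2} + \cdots \approx \log y.\]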

Alternative 4: 69.5% accurate, 1.0× speedup

\[\begin{array}{l} \\ \left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (- (+ (* (- a 0.5) (log t)) (+ (log z) (log y))) t))
double code(double x, double y, double z, double t, double a) {
	return (((a - 0.5) * log(t)) + (log(z) + log(y))) - t;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = (((a - 0.5d0) * log(t)) + (log(z) + log(y))) - t
end function
public static double code(double x, double y, double z, double t, double a) {
	return (((a - 0.5) * Math.log(t)) + (Math.log(z) + Math.log(y))) - t;
}
def code(x, y, z, t, a):
	return (((a - 0.5) * math.log(t)) + (math.log(z) + math.log(y))) - t
function code(x, y, z, t, a)
	return Float64(Float64(Float64(Float64(a - 0.5) * log(t)) + Float64(log(z) + log(y))) - t)
end
function tmp = code(x, y, z, t, a)
	tmp = (((a - 0.5) * log(t)) + (log(z) + log(y))) - t;
end
code[x_, y_, z_, t_, a_] := N[(N[(N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision] + N[(N[Log[z], $MachinePrecision] + N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. +-commutative99.6%

      \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    3. associate-+l+99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    4. +-commutative99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
    5. fma-def99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
    6. sub-neg99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
    7. metadata-eval99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
  3. Simplified99.5%

    \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
  4. Taylor expanded in x around 0 66.9%

    \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
  5. Final simplification66.9%

    \[\leadsto \left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t \]
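Alternatives 3 and 4 keep the same terms and differ only in how the additions are associated, which changes where rounding errors accumulate. In exact arithmetic the two forms coincide:

\[\left(\log z - t\right) + \left(\left(a - 0.5\right) \cdot \log t + \log y\right) = \left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t.\]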

Alternative 5: 87.4% accurate, 1.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq 0.16:\\ \;\;\;\;\left(\log t \cdot \left(a + -0.5\right) + \log \left(\left(x + y\right) \cdot z\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 0.16)
   (- (+ (* (log t) (+ a -0.5)) (log (* (+ x y) z))) t)
   (fma (log t) a (- t))))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 0.16) {
		tmp = ((log(t) * (a + -0.5)) + log(((x + y) * z))) - t;
	} else {
		tmp = fma(log(t), a, -t);
	}
	return tmp;
}
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 0.16)
		tmp = Float64(Float64(Float64(log(t) * Float64(a + -0.5)) + log(Float64(Float64(x + y) * z))) - t);
	else
		tmp = fma(log(t), a, Float64(-t));
	end
	return tmp
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 0.16], N[(N[(N[(N[Log[t], $MachinePrecision] * N[(a + -0.5), $MachinePrecision]), $MachinePrecision] + N[Log[N[(N[(x + y), $MachinePrecision] * z), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[Log[t], $MachinePrecision] * a + (-t)), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq 0.16:\\
\;\;\;\;\left(\log t \cdot \left(a + -0.5\right) + \log \left(\left(x + y\right) \cdot z\right)\right) - t\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < 0.160000000000000003

    1. Initial program 99.3%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.3%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.3%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.2%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.2%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.2%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.2%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.2%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.2%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Step-by-step derivation
      1. +-commutative99.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right) + \left(\log z - t\right)} \]
      2. fma-udef99.2%

        \[\leadsto \color{blue}{\left(\left(a + -0.5\right) \cdot \log t + \log \left(x + y\right)\right)} + \left(\log z - t\right) \]
      3. metadata-eval99.2%

        \[\leadsto \left(\left(a + \color{blue}{\left(-0.5\right)}\right) \cdot \log t + \log \left(x + y\right)\right) + \left(\log z - t\right) \]
      4. sub-neg99.2%

        \[\leadsto \left(\color{blue}{\left(a - 0.5\right)} \cdot \log t + \log \left(x + y\right)\right) + \left(\log z - t\right) \]
      5. associate-+r+99.3%

        \[\leadsto \color{blue}{\left(a - 0.5\right) \cdot \log t + \left(\log \left(x + y\right) + \left(\log z - t\right)\right)} \]
      6. associate-+r-99.3%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \color{blue}{\left(\left(\log \left(x + y\right) + \log z\right) - t\right)} \]
      7. associate-+r-99.3%

        \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log \left(x + y\right) + \log z\right)\right) - t} \]
      8. sub-neg99.3%

        \[\leadsto \left(\color{blue}{\left(a + \left(-0.5\right)\right)} \cdot \log t + \left(\log \left(x + y\right) + \log z\right)\right) - t \]
      9. metadata-eval99.3%

        \[\leadsto \left(\left(a + \color{blue}{-0.5}\right) \cdot \log t + \left(\log \left(x + y\right) + \log z\right)\right) - t \]
      10. sum-log75.0%

        \[\leadsto \left(\left(a + -0.5\right) \cdot \log t + \color{blue}{\log \left(\left(x + y\right) \cdot z\right)}\right) - t \]
    5. Applied egg-rr75.0%

      \[\leadsto \color{blue}{\left(\left(a + -0.5\right) \cdot \log t + \log \left(\left(x + y\right) \cdot z\right)\right) - t} \]

    if 0.160000000000000003 < t

    1. Initial program 99.8%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.8%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.8%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.8%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.8%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.8%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.8%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.8%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in x around 0 73.0%

      \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
    5. Taylor expanded in a around inf 99.2%

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
    6. Step-by-step derivation
      1. *-commutative27.1%

        \[\leadsto \color{blue}{\log t \cdot a} \]
    7. Simplified99.2%

      \[\leadsto \color{blue}{\log t \cdot a} - t \]
    8. Taylor expanded in t around 0 99.2%

      \[\leadsto \color{blue}{-1 \cdot t + a \cdot \log t} \]
    9. Step-by-step derivation
      1. neg-mul-199.2%

        \[\leadsto \color{blue}{\left(-t\right)} + a \cdot \log t \]
      2. +-commutative99.2%

        \[\leadsto \color{blue}{a \cdot \log t + \left(-t\right)} \]
      3. *-commutative99.2%

        \[\leadsto \color{blue}{\log t \cdot a} + \left(-t\right) \]
      4. fma-udef99.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
    10. Simplified99.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification88.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 0.16:\\ \;\;\;\;\left(\log t \cdot \left(a + -0.5\right) + \log \left(\left(x + y\right) \cdot z\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\ \end{array} \]
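The sum-log rewrite in the t ≤ 0.16 regime (step 4.10) is the ordinary product rule for logarithms. The identity is exact over the positive reals, but the product (x + y) * z can overflow or underflow in binary64 even when both logarithms are representable; whether that matters here depends on the sampled input range, which this note does not assert:

\[\log \left(x + y\right) + \log z = \log \left(\left(x + y\right) \cdot z\right).\]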

Alternative 6: 73.9% accurate, 1.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq 0.16:\\ \;\;\;\;\left(\left(a - 0.5\right) \cdot \log t + \log \left(y \cdot z\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 0.16)
   (- (+ (* (- a 0.5) (log t)) (log (* y z))) t)
   (fma (log t) a (- t))))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 0.16) {
		tmp = (((a - 0.5) * log(t)) + log((y * z))) - t;
	} else {
		tmp = fma(log(t), a, -t);
	}
	return tmp;
}
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 0.16)
		tmp = Float64(Float64(Float64(Float64(a - 0.5) * log(t)) + log(Float64(y * z))) - t);
	else
		tmp = fma(log(t), a, Float64(-t));
	end
	return tmp
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 0.16], N[(N[(N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision] + N[Log[N[(y * z), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[Log[t], $MachinePrecision] * a + (-t)), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq 0.16:\\
\;\;\;\;\left(\left(a - 0.5\right) \cdot \log t + \log \left(y \cdot z\right)\right) - t\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < 0.160000000000000003

    1. Initial program 99.3%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.3%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.3%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.2%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.2%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.2%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.2%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.2%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.2%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in x around 0 58.9%

      \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
    5. Taylor expanded in z around inf 58.9%

      \[\leadsto \left(\left(a - 0.5\right) \cdot \log t + \color{blue}{\left(-1 \cdot \log \left(\frac{1}{z}\right) + \log y\right)}\right) - t \]
    6. Step-by-step derivation
      1. mul-1-neg58.9%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \left(\color{blue}{\left(-\log \left(\frac{1}{z}\right)\right)} + \log y\right) \]
      2. log-rec58.9%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \left(\left(-\color{blue}{\left(-\log z\right)}\right) + \log y\right) \]
      3. remove-double-neg58.9%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \left(\color{blue}{\log z} + \log y\right) \]
      4. log-prod48.2%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \color{blue}{\log \left(z \cdot y\right)} \]
    7. Simplified48.2%

      \[\leadsto \left(\left(a - 0.5\right) \cdot \log t + \color{blue}{\log \left(z \cdot y\right)}\right) - t \]

    if 0.160000000000000003 < t

    1. Initial program 99.8%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.8%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.8%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.8%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.8%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.8%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.8%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.8%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in x around 0 73.0%

      \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
    5. Taylor expanded in a around inf 99.2%

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
    6. Step-by-step derivation
      1. *-commutative27.1%

        \[\leadsto \color{blue}{\log t \cdot a} \]
    7. Simplified99.2%

      \[\leadsto \color{blue}{\log t \cdot a} - t \]
    8. Taylor expanded in t around 0 99.2%

      \[\leadsto \color{blue}{-1 \cdot t + a \cdot \log t} \]
    9. Step-by-step derivation
      1. neg-mul-199.2%

        \[\leadsto \color{blue}{\left(-t\right)} + a \cdot \log t \]
      2. +-commutative99.2%

        \[\leadsto \color{blue}{a \cdot \log t + \left(-t\right)} \]
      3. *-commutative99.2%

        \[\leadsto \color{blue}{\log t \cdot a} + \left(-t\right) \]
      4. fma-udef99.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
    10. Simplified99.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification77.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 0.16:\\ \;\;\;\;\left(\left(a - 0.5\right) \cdot \log t + \log \left(y \cdot z\right)\right) - t\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\ \end{array} \]
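The short rewrite chain in the t ≤ 0.16 regime (mul-1-neg, log-rec, remove-double-neg, log-prod) undoes the -1 * log(1/z) form produced by the series expansion and then merges the two logarithms. Over the positive reals the whole chain amounts to:

\[-1 \cdot \log \left(\frac{1}{z}\right) + \log y = \log z + \log y = \log \left(z \cdot y\right).\]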

Alternative 7: 73.8% accurate, 1.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq 0.0125:\\ \;\;\;\;\left(a - 0.5\right) \cdot \log t + \log \left(y \cdot z\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (<= t 0.0125)
   (+ (* (- a 0.5) (log t)) (log (* y z)))
   (fma (log t) a (- t))))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if (t <= 0.0125) {
		tmp = ((a - 0.5) * log(t)) + log((y * z));
	} else {
		tmp = fma(log(t), a, -t);
	}
	return tmp;
}
function code(x, y, z, t, a)
	tmp = 0.0
	if (t <= 0.0125)
		tmp = Float64(Float64(Float64(a - 0.5) * log(t)) + log(Float64(y * z)));
	else
		tmp = fma(log(t), a, Float64(-t));
	end
	return tmp
end
code[x_, y_, z_, t_, a_] := If[LessEqual[t, 0.0125], N[(N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision] + N[Log[N[(y * z), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(N[Log[t], $MachinePrecision] * a + (-t)), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq 0.0125:\\
\;\;\;\;\left(a - 0.5\right) \cdot \log t + \log \left(y \cdot z\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < 0.012500000000000001

    1. Initial program 99.3%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.3%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.3%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.2%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.2%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.2%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.2%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.2%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.2%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in x around 0 58.9%

      \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
    5. Taylor expanded in t around 0 58.9%

      \[\leadsto \color{blue}{\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)} \]
    6. Taylor expanded in z around inf 58.9%

      \[\leadsto \left(a - 0.5\right) \cdot \log t + \color{blue}{\left(-1 \cdot \log \left(\frac{1}{z}\right) + \log y\right)} \]
    7. Step-by-step derivation
      1. mul-1-neg58.9%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \left(\color{blue}{\left(-\log \left(\frac{1}{z}\right)\right)} + \log y\right) \]
      2. log-rec58.9%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \left(\left(-\color{blue}{\left(-\log z\right)}\right) + \log y\right) \]
      3. remove-double-neg58.9%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \left(\color{blue}{\log z} + \log y\right) \]
      4. log-prod48.2%

        \[\leadsto \left(a - 0.5\right) \cdot \log t + \color{blue}{\log \left(z \cdot y\right)} \]
    8. Simplified48.2%

      \[\leadsto \left(a - 0.5\right) \cdot \log t + \color{blue}{\log \left(z \cdot y\right)} \]

    if 0.012500000000000001 < t

    1. Initial program 99.8%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.8%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.8%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.8%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.8%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.8%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.8%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.8%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in x around 0 73.0%

      \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
    5. Taylor expanded in a around inf 99.2%

      \[\leadsto \color{blue}{a \cdot \log t} - t \]
    6. Step-by-step derivation
      1. *-commutative27.1%

        \[\leadsto \color{blue}{\log t \cdot a} \]
    7. Simplified99.2%

      \[\leadsto \color{blue}{\log t \cdot a} - t \]
    8. Taylor expanded in t around 0 99.2%

      \[\leadsto \color{blue}{-1 \cdot t + a \cdot \log t} \]
    9. Step-by-step derivation
      1. neg-mul-199.2%

        \[\leadsto \color{blue}{\left(-t\right)} + a \cdot \log t \]
      2. +-commutative99.2%

        \[\leadsto \color{blue}{a \cdot \log t + \left(-t\right)} \]
      3. *-commutative99.2%

        \[\leadsto \color{blue}{\log t \cdot a} + \left(-t\right) \]
      4. fma-udef99.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
    10. Simplified99.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification77.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq 0.0125:\\ \;\;\;\;\left(a - 0.5\right) \cdot \log t + \log \left(y \cdot z\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, a, -t\right)\\ \end{array} \]
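Dropping the trailing -t in the t ≤ 0.0125 branch (the Taylor expansion in t around 0) is cheap whenever the (a - 0.5) * log(t) term dominates. At a purely illustrative sample point, not taken from the report's sampled inputs, say t = 0.01 and a = 3:

\[\left(a - 0.5\right) \cdot \log t \approx 2.5 \cdot \left(-4.605\right) \approx -11.51, \qquad \frac{t}{\left|\left(a - 0.5\right) \cdot \log t\right|} \approx \frac{0.01}{11.51} \approx 9 \cdot 10^{-4},\]

so the omitted term shifts that contribution by well under a tenth of a percent at this point.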

Alternative 8: 77.3% accurate, 1.5× speedup

\[\begin{array}{l} \\ \left(\log z - t\right) + a \cdot \log t \end{array} \]
(FPCore (x y z t a) :precision binary64 (+ (- (log z) t) (* a (log t))))
double code(double x, double y, double z, double t, double a) {
	return (log(z) - t) + (a * log(t));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = (log(z) - t) + (a * log(t))
end function
public static double code(double x, double y, double z, double t, double a) {
	return (Math.log(z) - t) + (a * Math.log(t));
}
def code(x, y, z, t, a):
	return (math.log(z) - t) + (a * math.log(t))
function code(x, y, z, t, a)
	return Float64(Float64(log(z) - t) + Float64(a * log(t)))
end
function tmp = code(x, y, z, t, a)
	tmp = (log(z) - t) + (a * log(t));
end
code[x_, y_, z_, t_, a_] := N[(N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision] + N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\log z - t\right) + a \cdot \log t
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. +-commutative99.6%

      \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    3. associate-+l+99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    4. +-commutative99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
    5. fma-def99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
    6. sub-neg99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
    7. metadata-eval99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
  3. Simplified99.5%

    \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
  4. Taylor expanded in a around inf 77.4%

    \[\leadsto \left(\log z - t\right) + \color{blue}{a \cdot \log t} \]
  5. Step-by-step derivation
    1. *-commutative77.4%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\log t \cdot a} \]
  6. Simplified77.4%

    \[\leadsto \left(\log z - t\right) + \color{blue}{\log t \cdot a} \]
  7. Final simplification77.4%

    \[\leadsto \left(\log z - t\right) + a \cdot \log t \]
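Step 4's expansion in a around infinity simply drops the constant 0.5 from the coefficient. The relative change in that term is 0.5 / |a|, so the approximation is only sound when |a| is large; this bound is elementary algebra, not a figure taken from the report:

\[\left(a - 0.5\right) \cdot \log t = a \cdot \log t \cdot \left(1 - \frac{0.5}{a}\right) \approx a \cdot \log t \quad \text{for } \left|a\right| \gg 0.5.\]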

Alternative 9: 75.0% accurate, 1.5× speedup

\[\begin{array}{l} \\ \mathsf{fma}\left(\log t, a, -t\right) \end{array} \]
(FPCore (x y z t a) :precision binary64 (fma (log t) a (- t)))
double code(double x, double y, double z, double t, double a) {
	return fma(log(t), a, -t);
}
function code(x, y, z, t, a)
	return fma(log(t), a, Float64(-t))
end
code[x_, y_, z_, t_, a_] := N[(N[Log[t], $MachinePrecision] * a + (-t)), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(\log t, a, -t\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. +-commutative99.6%

      \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    3. associate-+l+99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    4. +-commutative99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
    5. fma-def99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
    6. sub-neg99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
    7. metadata-eval99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
  3. Simplified99.5%

    \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
  4. Taylor expanded in x around 0 66.9%

    \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
  5. Taylor expanded in a around inf 74.6%

    \[\leadsto \color{blue}{a \cdot \log t} - t \]
  6. Step-by-step derivation
    1. *-commutative33.8%

      \[\leadsto \color{blue}{\log t \cdot a} \]
  7. Simplified74.6%

    \[\leadsto \color{blue}{\log t \cdot a} - t \]
  8. Taylor expanded in t around 0 74.6%

    \[\leadsto \color{blue}{-1 \cdot t + a \cdot \log t} \]
  9. Step-by-step derivation
    1. neg-mul-174.6%

      \[\leadsto \color{blue}{\left(-t\right)} + a \cdot \log t \]
    2. +-commutative74.6%

      \[\leadsto \color{blue}{a \cdot \log t + \left(-t\right)} \]
    3. *-commutative74.6%

      \[\leadsto \color{blue}{\log t \cdot a} + \left(-t\right) \]
    4. fma-udef74.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
  10. Simplified74.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, a, -t\right)} \]
  11. Final simplification74.6%

    \[\leadsto \mathsf{fma}\left(\log t, a, -t\right) \]
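Alternative 9 reduces to a single fused multiply-add, which in C is the C99 fma from <math.h>. The sketch below is an assumption-laden usage example (the inputs are arbitrary, and x, y, z are unused in this alternative); on targets without a hardware FMA instruction the libm fallback can be slow, so the reported speedup is worth re-measuring locally.

#include <math.h>
#include <stdio.h>

int main(void) {
	/* illustrative inputs only */
	double t = 2.0, a = 3.0;
	/* a * log(t) - t, with the multiply and add rounded once */
	double r = fma(log(t), a, -t);
	printf("%.17g\n", r);
	return 0;
}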

Alternative 10: 60.9% accurate, 2.9× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;a \leq -2.1 \cdot 10^{+100} \lor \neg \left(a \leq 5 \cdot 10^{+53}\right):\\ \;\;\;\;a \cdot \log t\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (if (or (<= a -2.1e+100) (not (<= a 5e+53))) (* a (log t)) (- t)))
double code(double x, double y, double z, double t, double a) {
	double tmp;
	if ((a <= -2.1e+100) || !(a <= 5e+53)) {
		tmp = a * log(t);
	} else {
		tmp = -t;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8) :: tmp
    if ((a <= (-2.1d+100)) .or. (.not. (a <= 5d+53))) then
        tmp = a * log(t)
    else
        tmp = -t
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t, double a) {
	double tmp;
	if ((a <= -2.1e+100) || !(a <= 5e+53)) {
		tmp = a * Math.log(t);
	} else {
		tmp = -t;
	}
	return tmp;
}
def code(x, y, z, t, a):
	tmp = 0
	if (a <= -2.1e+100) or not (a <= 5e+53):
		tmp = a * math.log(t)
	else:
		tmp = -t
	return tmp
function code(x, y, z, t, a)
	tmp = 0.0
	if ((a <= -2.1e+100) || !(a <= 5e+53))
		tmp = Float64(a * log(t));
	else
		tmp = Float64(-t);
	end
	return tmp
end
function tmp_2 = code(x, y, z, t, a)
	tmp = 0.0;
	if ((a <= -2.1e+100) || ~((a <= 5e+53)))
		tmp = a * log(t);
	else
		tmp = -t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_] := If[Or[LessEqual[a, -2.1e+100], N[Not[LessEqual[a, 5e+53]], $MachinePrecision]], N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision], (-t)]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;a \leq -2.1 \cdot 10^{+100} \lor \neg \left(a \leq 5 \cdot 10^{+53}\right):\\
\;\;\;\;a \cdot \log t\\

\mathbf{else}:\\
\;\;\;\;-t\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if a < -2.0999999999999999e100 or 5.0000000000000004e53 < a

    1. Initial program 99.5%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.5%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.5%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.5%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.5%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.5%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.5%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.5%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in x around 0 70.0%

      \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
    5. Taylor expanded in t around 0 57.0%

      \[\leadsto \color{blue}{\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)} \]
    6. Taylor expanded in a around inf 83.9%

      \[\leadsto \color{blue}{a \cdot \log t} \]
    7. Step-by-step derivation
      1. *-commutative83.9%

        \[\leadsto \color{blue}{\log t \cdot a} \]
    8. Simplified83.9%

      \[\leadsto \color{blue}{\log t \cdot a} \]

    if -2.0999999999999999e100 < a < 5.0000000000000004e53

    1. Initial program 99.6%

      \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
    2. Step-by-step derivation
      1. associate--l+99.6%

        \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      2. +-commutative99.6%

        \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
      3. associate-+l+99.5%

        \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
      4. +-commutative99.5%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
      5. fma-def99.5%

        \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
      6. sub-neg99.5%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
      7. metadata-eval99.5%

        \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
    3. Simplified99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
    4. Taylor expanded in t around inf 55.6%

      \[\leadsto \color{blue}{-1 \cdot t} \]
    5. Step-by-step derivation
      1. neg-mul-155.6%

        \[\leadsto \color{blue}{-t} \]
    6. Simplified55.6%

      \[\leadsto \color{blue}{-t} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification64.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;a \leq -2.1 \cdot 10^{+100} \lor \neg \left(a \leq 5 \cdot 10^{+53}\right):\\ \;\;\;\;a \cdot \log t\\ \mathbf{else}:\\ \;\;\;\;-t\\ \end{array} \]
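The else branch keeping only -t matches that regime's Taylor expansion in t around infinity: with the other inputs held fixed, the linear term dominates because log t grows more slowly than t, so

\[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t = -t \cdot \left(1 + o\left(1\right)\right) \quad \text{as } t \to \infty.\]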

Alternative 11: 75.0% accurate, 3.0× speedup

\[\begin{array}{l} \\ a \cdot \log t - t \end{array} \]
(FPCore (x y z t a) :precision binary64 (- (* a (log t)) t))
double code(double x, double y, double z, double t, double a) {
	return (a * log(t)) - t;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = (a * log(t)) - t
end function
public static double code(double x, double y, double z, double t, double a) {
	return (a * Math.log(t)) - t;
}
def code(x, y, z, t, a):
	return (a * math.log(t)) - t
function code(x, y, z, t, a)
	return Float64(Float64(a * log(t)) - t)
end
function tmp = code(x, y, z, t, a)
	tmp = (a * log(t)) - t;
end
code[x_, y_, z_, t_, a_] := N[(N[(a * N[Log[t], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}

\\
a \cdot \log t - t
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. +-commutative99.6%

      \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    3. associate-+l+99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    4. +-commutative99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
    5. fma-def99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
    6. sub-neg99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
    7. metadata-eval99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
  3. Simplified99.5%

    \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
  4. Taylor expanded in x around 0 66.9%

    \[\leadsto \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \left(\log z + \log y\right)\right) - t} \]
  5. Taylor expanded in a around inf 74.6%

    \[\leadsto \color{blue}{a \cdot \log t} - t \]
  6. Step-by-step derivation
    1. *-commutative33.8%

      \[\leadsto \color{blue}{\log t \cdot a} \]
  7. Simplified74.6%

    \[\leadsto \color{blue}{\log t \cdot a} - t \]
  8. Final simplification74.6%

    \[\leadsto a \cdot \log t - t \]

Alternative 12: 37.9% accurate, 156.5× speedup

\[\begin{array}{l} \\ -t \end{array} \]
(FPCore (x y z t a) :precision binary64 (- t))
double code(double x, double y, double z, double t, double a) {
	return -t;
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = -t
end function
public static double code(double x, double y, double z, double t, double a) {
	return -t;
}
def code(x, y, z, t, a):
	return -t
function code(x, y, z, t, a)
	return Float64(-t)
end
function tmp = code(x, y, z, t, a)
	tmp = -t;
end
code[x_, y_, z_, t_, a_] := (-t)
\begin{array}{l}

\\
-t
\end{array}
Derivation
  1. Initial program 99.6%

    \[\left(\left(\log \left(x + y\right) + \log z\right) - t\right) + \left(a - 0.5\right) \cdot \log t \]
  2. Step-by-step derivation
    1. associate--l+99.6%

      \[\leadsto \color{blue}{\left(\log \left(x + y\right) + \left(\log z - t\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    2. +-commutative99.6%

      \[\leadsto \color{blue}{\left(\left(\log z - t\right) + \log \left(x + y\right)\right)} + \left(a - 0.5\right) \cdot \log t \]
    3. associate-+l+99.5%

      \[\leadsto \color{blue}{\left(\log z - t\right) + \left(\log \left(x + y\right) + \left(a - 0.5\right) \cdot \log t\right)} \]
    4. +-commutative99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\left(\left(a - 0.5\right) \cdot \log t + \log \left(x + y\right)\right)} \]
    5. fma-def99.5%

      \[\leadsto \left(\log z - t\right) + \color{blue}{\mathsf{fma}\left(a - 0.5, \log t, \log \left(x + y\right)\right)} \]
    6. sub-neg99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(\color{blue}{a + \left(-0.5\right)}, \log t, \log \left(x + y\right)\right) \]
    7. metadata-eval99.5%

      \[\leadsto \left(\log z - t\right) + \mathsf{fma}\left(a + \color{blue}{-0.5}, \log t, \log \left(x + y\right)\right) \]
  3. Simplified99.5%

    \[\leadsto \color{blue}{\left(\log z - t\right) + \mathsf{fma}\left(a + -0.5, \log t, \log \left(x + y\right)\right)} \]
  4. Taylor expanded in t around inf 42.9%

    \[\leadsto \color{blue}{-1 \cdot t} \]
  5. Step-by-step derivation
    1. neg-mul-142.9%

      \[\leadsto \color{blue}{-t} \]
  6. Simplified42.9%

    \[\leadsto \color{blue}{-t} \]
  7. Final simplification42.9%

    \[\leadsto -t \]

Developer target: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right) \end{array} \]
(FPCore (x y z t a)
 :precision binary64
 (+ (log (+ x y)) (+ (- (log z) t) (* (- a 0.5) (log t)))))
double code(double x, double y, double z, double t, double a) {
	return log((x + y)) + ((log(z) - t) + ((a - 0.5) * log(t)));
}
real(8) function code(x, y, z, t, a)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    code = log((x + y)) + ((log(z) - t) + ((a - 0.5d0) * log(t)))
end function
public static double code(double x, double y, double z, double t, double a) {
	return Math.log((x + y)) + ((Math.log(z) - t) + ((a - 0.5) * Math.log(t)));
}
def code(x, y, z, t, a):
	return math.log((x + y)) + ((math.log(z) - t) + ((a - 0.5) * math.log(t)))
function code(x, y, z, t, a)
	return Float64(log(Float64(x + y)) + Float64(Float64(log(z) - t) + Float64(Float64(a - 0.5) * log(t))))
end
function tmp = code(x, y, z, t, a)
	tmp = log((x + y)) + ((log(z) - t) + ((a - 0.5) * log(t)));
end
code[x_, y_, z_, t_, a_] := N[(N[Log[N[(x + y), $MachinePrecision]], $MachinePrecision] + N[(N[(N[Log[z], $MachinePrecision] - t), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + y\right) + \left(\left(\log z - t\right) + \left(a - 0.5\right) \cdot \log t\right)
\end{array}

Reproduce

herbie shell --seed 2023279 
(FPCore (x y z t a)
  :name "Numeric.SpecFunctions:logGammaL from math-functions-0.1.5.2"
  :precision binary64

  :herbie-target
  (+ (log (+ x y)) (+ (- (log z) t) (* (- a 0.5) (log t))))

  (+ (- (+ (log (+ x y)) (log z)) t) (* (- a 0.5) (log t))))
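One plausible way to rerun this result, offered as a suggested workflow rather than something stated in the report (the file name is arbitrary, and it assumes the shell reads FPCore from standard input), is to save the FPCore above to a file and pipe it to the command shown:

herbie shell --seed 2023279 < logGammaL.fpcore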