Numeric.SpecFunctions:logBeta from math-functions-0.1.5.2, A

Percentage Accurate: 99.9% → 99.9%
Time: 13.2s
Alternatives: 15
Speedup: 1.0×

Specification

?
\[\begin{array}{l} \\ \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (- (+ (+ x y) z) (* z (log t))) (* (- a 0.5) b)))
double code(double x, double y, double z, double t, double a, double b) {
	return (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
}
! logBeta kernel: ((x + y) + z) - z*log(t) + (a - 0.5)*b, double precision.
real(8) function code(x, y, z, t, a, b)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: sum_xyz, log_term, tail
    sum_xyz = (x + y) + z
    log_term = z * log(t)
    tail = (a - 0.5d0) * b
    code = (sum_xyz - log_term) + tail
end function
/** logBeta kernel: ((x + y) + z) - z*log(t) + (a - 0.5)*b in double precision. */
public static double code(double x, double y, double z, double t, double a, double b) {
	double sumXyz = (x + y) + z;            // same association as the spec
	double logTerm = z * Math.log(t);
	double tail = (a - 0.5) * b;
	return (sumXyz - logTerm) + tail;
}
def code(x, y, z, t, a, b):
	"""logBeta kernel: ((x + y) + z) - z*log(t) + (a - 0.5)*b, same evaluation order."""
	total = (x + y) + z
	log_part = z * math.log(t)
	return (total - log_part) + ((a - 0.5) * b)
function code(x, y, z, t, a, b)
	# Every intermediate rounds to Float64, exactly as in the original.
	total = Float64(Float64(x + y) + z)
	logpart = Float64(z * log(t))
	tail = Float64(Float64(a - 0.5) * b)
	return Float64(Float64(total - logpart) + tail)
end
function tmp = code(x, y, z, t, a, b)
	% logBeta kernel: ((x+y)+z) - z*log(t) + (a-0.5)*b, same association.
	total = (x + y) + z;
	logpart = z * log(t);
	tmp = (total - logpart) + ((a - 0.5) * b);
end
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(N[(x + y), $MachinePrecision] + z), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 15 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (- (+ (+ x y) z) (* z (log t))) (* (- a 0.5) b)))
double code(double x, double y, double z, double t, double a, double b) {
	return (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
}
! Initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b, double precision.
real(8) function code(x, y, z, t, a, b)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: sum_xyz, log_term, tail
    sum_xyz = (x + y) + z
    log_term = z * log(t)
    tail = (a - 0.5d0) * b
    code = (sum_xyz - log_term) + tail
end function
/** Initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b in double precision. */
public static double code(double x, double y, double z, double t, double a, double b) {
	double sumXyz = (x + y) + z;            // same association as the spec
	double logTerm = z * Math.log(t);
	double tail = (a - 0.5) * b;
	return (sumXyz - logTerm) + tail;
}
def code(x, y, z, t, a, b):
	"""Initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b, same evaluation order."""
	total = (x + y) + z
	log_part = z * math.log(t)
	return (total - log_part) + ((a - 0.5) * b)
function code(x, y, z, t, a, b)
	# Every intermediate rounds to Float64, exactly as in the original.
	total = Float64(Float64(x + y) + z)
	logpart = Float64(z * log(t))
	tail = Float64(Float64(a - 0.5) * b)
	return Float64(Float64(total - logpart) + tail)
end
function tmp = code(x, y, z, t, a, b)
	% Initial program: ((x+y)+z) - z*log(t) + (a-0.5)*b, same association.
	total = (x + y) + z;
	logpart = z * log(t);
	tmp = (total - logpart) + ((a - 0.5) * b);
end
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(N[(x + y), $MachinePrecision] + z), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b
\end{array}

Alternative 1: 99.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (- (+ z (+ x y)) (* z (log t))) (fma b a (* b -0.5))))
double code(double x, double y, double z, double t, double a, double b) {
	return ((z + (x + y)) - (z * log(t))) + fma(b, a, (b * -0.5));
}
function code(x, y, z, t, a, b)
	# Tail (a - 0.5)*b is fused as fma(b, a, -0.5b); head rounding is unchanged.
	head = Float64(Float64(z + Float64(x + y)) - Float64(z * log(t)))
	tail = fma(b, a, Float64(b * -0.5))
	return Float64(head + tail)
end
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(z + N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(b * a + N[(b * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot -0.5\right)
\end{array}
Derivation
  1. Initial program 99.8%

    \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(a - \frac{1}{2}\right)} \cdot b \]
    2. *-commutativeN/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
    3. lift--.f64N/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
    4. sub-negN/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
    5. distribute-lft-inN/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
    6. lower-fma.f64N/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
    7. lower-*.f64N/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
    8. metadata-eval99.9

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
  4. Applied egg-rr99.9%

    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
  5. Final simplification99.9%

    \[\leadsto \left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
  6. Add Preprocessing

Alternative 2: 93.2% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := 1 - \log t\\ t_2 := b \cdot \left(a - 0.5\right)\\ t_3 := \mathsf{fma}\left(z, t\_1, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\ \mathbf{if}\;t\_2 \leq -2 \cdot 10^{+157}:\\ \;\;\;\;t\_3\\ \mathbf{elif}\;t\_2 \leq 10^{+85}:\\ \;\;\;\;\mathsf{fma}\left(z, t\_1, y\right) + \mathsf{fma}\left(b, -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;t\_3\\ \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (- 1.0 (log t)))
        (t_2 (* b (- a 0.5)))
        (t_3 (fma z t_1 (fma b (+ a -0.5) y))))
   (if (<= t_2 -2e+157)
     t_3
     (if (<= t_2 1e+85) (+ (fma z t_1 y) (fma b -0.5 x)) t_3))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = 1.0 - log(t);
	double t_2 = b * (a - 0.5);
	double t_3 = fma(z, t_1, fma(b, (a + -0.5), y));
	double tmp;
	if (t_2 <= -2e+157) {
		tmp = t_3;
	} else if (t_2 <= 1e+85) {
		tmp = fma(z, t_1, y) + fma(b, -0.5, x);
	} else {
		tmp = t_3;
	}
	return tmp;
}
function code(x, y, z, t, a, b)
	# Regime split on t_2 = b*(a - 0.5); middle regime (-2e157, 1e85] uses the
	# split fma form, both outer regimes (and NaN) fall back to t_3.
	t_1 = Float64(1.0 - log(t))
	t_2 = Float64(b * Float64(a - 0.5))
	t_3 = fma(z, t_1, fma(b, Float64(a + -0.5), y))
	if -2e+157 < t_2 <= 1e+85
		return Float64(fma(z, t_1, y) + fma(b, -0.5, x))
	end
	return t_3
end
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$3 = N[(z * t$95$1 + N[(b * N[(a + -0.5), $MachinePrecision] + y), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$2, -2e+157], t$95$3, If[LessEqual[t$95$2, 1e+85], N[(N[(z * t$95$1 + y), $MachinePrecision] + N[(b * -0.5 + x), $MachinePrecision]), $MachinePrecision], t$95$3]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := 1 - \log t\\
t_2 := b \cdot \left(a - 0.5\right)\\
t_3 := \mathsf{fma}\left(z, t\_1, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\
\mathbf{if}\;t\_2 \leq -2 \cdot 10^{+157}:\\
\;\;\;\;t\_3\\

\mathbf{elif}\;t\_2 \leq 10^{+85}:\\
\;\;\;\;\mathsf{fma}\left(z, t\_1, y\right) + \mathsf{fma}\left(b, -0.5, x\right)\\

\mathbf{else}:\\
\;\;\;\;t\_3\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < -1.99999999999999997e157 or 1e85 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - \color{blue}{\log t \cdot z} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
      3. log-recN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
      4. *-commutativeN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
      5. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right)} \]
      6. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(z + b \cdot \left(a - \frac{1}{2}\right)\right) + y\right)} \]
      7. associate-+l+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(z + \left(b \cdot \left(a - \frac{1}{2}\right) + y\right)\right)} \]
      8. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(z + \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right)}\right) \]
      9. associate-+r+N/A

        \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + z\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      10. *-rgt-identityN/A

        \[\leadsto \left(z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{z \cdot 1}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      11. distribute-lft-inN/A

        \[\leadsto \color{blue}{z \cdot \left(\log \left(\frac{1}{t}\right) + 1\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      12. +-commutativeN/A

        \[\leadsto z \cdot \color{blue}{\left(1 + \log \left(\frac{1}{t}\right)\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      13. log-recN/A

        \[\leadsto z \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      14. sub-negN/A

        \[\leadsto z \cdot \color{blue}{\left(1 - \log t\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
    5. Simplified89.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)} \]

    if -1.99999999999999997e157 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < 1e85

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in a around 0

      \[\leadsto \color{blue}{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - \color{blue}{\log t \cdot z} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
      3. log-recN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
      4. *-commutativeN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
      5. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right)} \]
      6. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(y + \left(z + \frac{-1}{2} \cdot b\right)\right) + x\right)} \]
      7. associate-+r+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(\color{blue}{\left(\left(y + z\right) + \frac{-1}{2} \cdot b\right)} + x\right) \]
      8. associate-+l+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(y + z\right) + \left(\frac{-1}{2} \cdot b + x\right)\right)} \]
      9. associate-+r+N/A

        \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + \left(y + z\right)\right) + \left(\frac{-1}{2} \cdot b + x\right)} \]
      10. +-commutativeN/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) + z \cdot \log \left(\frac{1}{t}\right)\right)} + \left(\frac{-1}{2} \cdot b + x\right) \]
      11. *-commutativeN/A

        \[\leadsto \left(\left(y + z\right) + \color{blue}{\log \left(\frac{1}{t}\right) \cdot z}\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      12. log-recN/A

        \[\leadsto \left(\left(y + z\right) + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)} \cdot z\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      13. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) - \log t \cdot z\right)} + \left(\frac{-1}{2} \cdot b + x\right) \]
      14. *-commutativeN/A

        \[\leadsto \left(\left(y + z\right) - \color{blue}{z \cdot \log t}\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      15. lower-+.f64N/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) - z \cdot \log t\right) + \left(\frac{-1}{2} \cdot b + x\right)} \]
    5. Simplified96.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y\right) + \mathsf{fma}\left(b, -0.5, x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification93.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;b \cdot \left(a - 0.5\right) \leq -2 \cdot 10^{+157}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\ \mathbf{elif}\;b \cdot \left(a - 0.5\right) \leq 10^{+85}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, y\right) + \mathsf{fma}\left(b, -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 92.9% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := b \cdot \left(a - 0.5\right)\\ t_2 := \mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{if}\;t\_1 \leq -5 \cdot 10^{+120}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+123}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, y\right) + \mathsf{fma}\left(b, -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (* b (- a 0.5))) (t_2 (+ (fma b a (* b -0.5)) (+ x y))))
   (if (<= t_1 -5e+120)
     t_2
     (if (<= t_1 5e+123) (+ (fma z (- 1.0 (log t)) y) (fma b -0.5 x)) t_2))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = b * (a - 0.5);
	double t_2 = fma(b, a, (b * -0.5)) + (x + y);
	double tmp;
	if (t_1 <= -5e+120) {
		tmp = t_2;
	} else if (t_1 <= 5e+123) {
		tmp = fma(z, (1.0 - log(t)), y) + fma(b, -0.5, x);
	} else {
		tmp = t_2;
	}
	return tmp;
}
function code(x, y, z, t, a, b)
	# Regime split on t_1 = b*(a - 0.5); middle regime (-5e120, 5e123] keeps
	# the z-dependent fma form, both outer regimes (and NaN) use t_2.
	t_1 = Float64(b * Float64(a - 0.5))
	t_2 = Float64(fma(b, a, Float64(b * -0.5)) + Float64(x + y))
	if -5e+120 < t_1 <= 5e+123
		return Float64(fma(z, Float64(1.0 - log(t)), y) + fma(b, -0.5, x))
	end
	return t_2
end
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(b * a + N[(b * -0.5), $MachinePrecision]), $MachinePrecision] + N[(x + y), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -5e+120], t$95$2, If[LessEqual[t$95$1, 5e+123], N[(N[(z * N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(b * -0.5 + x), $MachinePrecision]), $MachinePrecision], t$95$2]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := b \cdot \left(a - 0.5\right)\\
t_2 := \mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\
\mathbf{if}\;t\_1 \leq -5 \cdot 10^{+120}:\\
\;\;\;\;t\_2\\

\mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+123}:\\
\;\;\;\;\mathsf{fma}\left(z, 1 - \log t, y\right) + \mathsf{fma}\left(b, -0.5, x\right)\\

\mathbf{else}:\\
\;\;\;\;t\_2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < -5.00000000000000019e120 or 4.99999999999999974e123 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(a - \frac{1}{2}\right)} \cdot b \]
      2. *-commutativeN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      3. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
      4. sub-negN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      5. distribute-lft-inN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      6. lower-fma.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      7. lower-*.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      8. metadata-eval99.9

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
    4. Applied egg-rr99.9%

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
    5. Taylor expanded in z around 0

      \[\leadsto \color{blue}{\left(x + y\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
    6. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
      2. lower-+.f6492.4

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
    7. Simplified92.4%

      \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]

    if -5.00000000000000019e120 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < 4.99999999999999974e123

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in a around 0

      \[\leadsto \color{blue}{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - \color{blue}{\log t \cdot z} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
      3. log-recN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
      4. *-commutativeN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
      5. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right)} \]
      6. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(y + \left(z + \frac{-1}{2} \cdot b\right)\right) + x\right)} \]
      7. associate-+r+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(\color{blue}{\left(\left(y + z\right) + \frac{-1}{2} \cdot b\right)} + x\right) \]
      8. associate-+l+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(y + z\right) + \left(\frac{-1}{2} \cdot b + x\right)\right)} \]
      9. associate-+r+N/A

        \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + \left(y + z\right)\right) + \left(\frac{-1}{2} \cdot b + x\right)} \]
      10. +-commutativeN/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) + z \cdot \log \left(\frac{1}{t}\right)\right)} + \left(\frac{-1}{2} \cdot b + x\right) \]
      11. *-commutativeN/A

        \[\leadsto \left(\left(y + z\right) + \color{blue}{\log \left(\frac{1}{t}\right) \cdot z}\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      12. log-recN/A

        \[\leadsto \left(\left(y + z\right) + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)} \cdot z\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      13. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) - \log t \cdot z\right)} + \left(\frac{-1}{2} \cdot b + x\right) \]
      14. *-commutativeN/A

        \[\leadsto \left(\left(y + z\right) - \color{blue}{z \cdot \log t}\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      15. lower-+.f64N/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) - z \cdot \log t\right) + \left(\frac{-1}{2} \cdot b + x\right)} \]
    5. Simplified96.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y\right) + \mathsf{fma}\left(b, -0.5, x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification94.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;b \cdot \left(a - 0.5\right) \leq -5 \cdot 10^{+120}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{elif}\;b \cdot \left(a - 0.5\right) \leq 5 \cdot 10^{+123}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, y\right) + \mathsf{fma}\left(b, -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 90.5% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := b \cdot \left(a - 0.5\right)\\ t_2 := \mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{if}\;t\_1 \leq -1 \cdot 10^{+84}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+123}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, x + y\right)\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (* b (- a 0.5))) (t_2 (+ (fma b a (* b -0.5)) (+ x y))))
   (if (<= t_1 -1e+84)
     t_2
     (if (<= t_1 5e+123) (fma z (- 1.0 (log t)) (+ x y)) t_2))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = b * (a - 0.5);
	double t_2 = fma(b, a, (b * -0.5)) + (x + y);
	double tmp;
	if (t_1 <= -1e+84) {
		tmp = t_2;
	} else if (t_1 <= 5e+123) {
		tmp = fma(z, (1.0 - log(t)), (x + y));
	} else {
		tmp = t_2;
	}
	return tmp;
}
function code(x, y, z, t, a, b)
	# Regime split on t_1 = b*(a - 0.5); middle regime (-1e84, 5e123] uses
	# fma(z, 1 - log t, x + y), both outer regimes (and NaN) use t_2.
	t_1 = Float64(b * Float64(a - 0.5))
	t_2 = Float64(fma(b, a, Float64(b * -0.5)) + Float64(x + y))
	if -1e+84 < t_1 <= 5e+123
		return fma(z, Float64(1.0 - log(t)), Float64(x + y))
	end
	return t_2
end
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(b * a + N[(b * -0.5), $MachinePrecision]), $MachinePrecision] + N[(x + y), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -1e+84], t$95$2, If[LessEqual[t$95$1, 5e+123], N[(z * N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] + N[(x + y), $MachinePrecision]), $MachinePrecision], t$95$2]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := b \cdot \left(a - 0.5\right)\\
t_2 := \mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\
\mathbf{if}\;t\_1 \leq -1 \cdot 10^{+84}:\\
\;\;\;\;t\_2\\

\mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+123}:\\
\;\;\;\;\mathsf{fma}\left(z, 1 - \log t, x + y\right)\\

\mathbf{else}:\\
\;\;\;\;t\_2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < -1.00000000000000006e84 or 4.99999999999999974e123 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(a - \frac{1}{2}\right)} \cdot b \]
      2. *-commutativeN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      3. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
      4. sub-negN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      5. distribute-lft-inN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      6. lower-fma.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      7. lower-*.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      8. metadata-eval99.9

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
    4. Applied egg-rr99.9%

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
    5. Taylor expanded in z around 0

      \[\leadsto \color{blue}{\left(x + y\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
    6. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
      2. lower-+.f6491.9

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
    7. Simplified91.9%

      \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]

    if -1.00000000000000006e84 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < 4.99999999999999974e123

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in b around 0

      \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t} \]
      2. associate-+r+N/A

        \[\leadsto \color{blue}{\left(\left(x + y\right) + z\right)} + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t \]
      3. associate-+l+N/A

        \[\leadsto \color{blue}{\left(x + y\right) + \left(z + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t\right)} \]
      4. cancel-sign-sub-invN/A

        \[\leadsto \left(x + y\right) + \color{blue}{\left(z - z \cdot \log t\right)} \]
      5. *-rgt-identityN/A

        \[\leadsto \left(x + y\right) + \left(\color{blue}{z \cdot 1} - z \cdot \log t\right) \]
      6. distribute-lft-out--N/A

        \[\leadsto \left(x + y\right) + \color{blue}{z \cdot \left(1 - \log t\right)} \]
      7. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + \left(x + y\right)} \]
      8. *-commutativeN/A

        \[\leadsto \color{blue}{\left(1 - \log t\right) \cdot z} + \left(x + y\right) \]
      9. sub-negN/A

        \[\leadsto \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} \cdot z + \left(x + y\right) \]
      10. mul-1-negN/A

        \[\leadsto \left(1 + \color{blue}{-1 \cdot \log t}\right) \cdot z + \left(x + y\right) \]
      11. *-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \left(1 + -1 \cdot \log t\right)} + \left(x + y\right) \]
      12. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 + -1 \cdot \log t, x + y\right)} \]
      13. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(z, 1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}, x + y\right) \]
      14. sub-negN/A

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
      15. lower--.f64N/A

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
      16. lower-log.f64N/A

        \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, x + y\right) \]
      17. +-commutativeN/A

        \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
      18. lower-+.f6492.8

        \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
    5. Simplified92.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y + x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification92.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;b \cdot \left(a - 0.5\right) \leq -1 \cdot 10^{+84}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{elif}\;b \cdot \left(a - 0.5\right) \leq 5 \cdot 10^{+123}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, x + y\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 99.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot \left(a - 0.5\right) \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (- (+ z (+ x y)) (* z (log t))) (* b (- a 0.5))))
double code(double x, double y, double z, double t, double a, double b) {
	return ((z + (x + y)) - (z * log(t))) + (b * (a - 0.5));
}
! Herbie alternative 5: ((z + (x + y)) - z*log(t)) + b*(a - 0.5d0),
! the original expression with the sums reassociated; binary64 throughout.
real(8) function code(x, y, z, t, a, b)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = ((z + (x + y)) - (z * log(t))) + (b * (a - 0.5d0))
end function
/** Herbie alternative 5: ((z + (x + y)) - z*ln(t)) + b*(a - 0.5), binary64. */
public static double code(double x, double y, double z, double t, double a, double b) {
	double base = (z + (x + y)) - (z * Math.log(t));
	double correction = b * (a - 0.5);
	return base + correction;
}
def code(x, y, z, t, a, b):
	# Herbie alternative 5: ((z + (x + y)) - z*log(t)) + b*(a - 0.5).
	# Identical operations in identical order to the one-line original;
	# the halves are named only for readability.
	base = (z + (x + y)) - (z * math.log(t))
	correction = b * (a - 0.5)
	return base + correction
# Herbie alternative 5: ((z + (x + y)) - z*log(t)) + b*(a - 0.5).
# The Float64() wrappers force binary64 rounding after every intermediate.
function code(x, y, z, t, a, b)
	return Float64(Float64(Float64(z + Float64(x + y)) - Float64(z * log(t))) + Float64(b * Float64(a - 0.5)))
end
% Herbie alternative 5: ((z + (x + y)) - z*log(t)) + b*(a - 0.5),
% evaluated in double precision.
function tmp = code(x, y, z, t, a, b)
	tmp = ((z + (x + y)) - (z * log(t))) + (b * (a - 0.5));
end
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(z + N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot \left(a - 0.5\right)
\end{array}
Derivation
  1. Initial program 99.8%

    \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
  2. Add Preprocessing
  3. Final simplification (99.8% accuracy)

    \[\leadsto \left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot \left(a - 0.5\right) \]
  4. Add Preprocessing

Alternative 6: 85.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \mathsf{fma}\left(z, 1 - \log t, y\right)\\ \mathbf{if}\;z \leq -1.3 \cdot 10^{+121}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;z \leq 1.85 \cdot 10^{+194}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
;; Herbie alternative 6 (85.8% accurate): regime split on z.
;; Both outer regimes use fma(z, 1 - log t, y) (x and the b*(a-0.5) term
;; dropped by Taylor expansion); the middle regime drops the z terms and
;; evaluates b*(a-0.5) as fma(b, a, b*-0.5).
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (fma z (- 1.0 (log t)) y)))
   (if (<= z -1.3e+121)
     t_1
     (if (<= z 1.85e+194) (+ (fma b a (* b -0.5)) (+ x y)) t_1))))
/* Herbie alternative 6 (85.8% accurate): regime split on z.
 * For z <= -1.3e121 or z > 1.85e194 the x and b*(a-0.5) terms are dropped
 * (Taylor expansion in x, then b, around 0) and the remainder is fused as
 * fma(z, 1 - log(t), y).  In the middle regime the z terms are dropped
 * (Taylor expansion in z around 0) instead. */
double code(double x, double y, double z, double t, double a, double b) {
	/* shared value for both large-|z| branches */
	double t_1 = fma(z, (1.0 - log(t)), y);
	double tmp;
	if (z <= -1.3e+121) {
		tmp = t_1;
	} else if (z <= 1.85e+194) {
		/* middle regime: b*(a - 0.5) evaluated as fma(b, a, b*-0.5) */
		tmp = fma(b, a, (b * -0.5)) + (x + y);
	} else {
		tmp = t_1;
	}
	return tmp;
}
# Herbie alternative 6 (85.8% accurate): regime split on z.
# Outer regimes use fma(z, 1 - log(t), y); the middle regime drops the
# z terms and evaluates b*(a-0.5) as fma(b, a, b*-0.5).
function code(x, y, z, t, a, b)
	# shared value for both large-|z| branches
	t_1 = fma(z, Float64(1.0 - log(t)), y)
	tmp = 0.0
	if (z <= -1.3e+121)
		tmp = t_1;
	elseif (z <= 1.85e+194)
		# middle regime: z contribution dropped (Taylor expansion around z = 0)
		tmp = Float64(fma(b, a, Float64(b * -0.5)) + Float64(x + y));
	else
		tmp = t_1;
	end
	return tmp
end
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(z * N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision]}, If[LessEqual[z, -1.3e+121], t$95$1, If[LessEqual[z, 1.85e+194], N[(N[(b * a + N[(b * -0.5), $MachinePrecision]), $MachinePrecision] + N[(x + y), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \mathsf{fma}\left(z, 1 - \log t, y\right)\\
\mathbf{if}\;z \leq -1.3 \cdot 10^{+121}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;z \leq 1.85 \cdot 10^{+194}:\\
\;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -1.2999999999999999e121 or 1.8500000000000001e194 < z

    1. Initial program 99.6%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - \color{blue}{\log t \cdot z} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
      3. log-recN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
      4. *-commutativeN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
      5. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right)} \]
      6. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(z + b \cdot \left(a - \frac{1}{2}\right)\right) + y\right)} \]
      7. associate-+l+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(z + \left(b \cdot \left(a - \frac{1}{2}\right) + y\right)\right)} \]
      8. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(z + \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right)}\right) \]
      9. associate-+r+N/A

        \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + z\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      10. *-rgt-identityN/A

        \[\leadsto \left(z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{z \cdot 1}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      11. distribute-lft-inN/A

        \[\leadsto \color{blue}{z \cdot \left(\log \left(\frac{1}{t}\right) + 1\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      12. +-commutativeN/A

        \[\leadsto z \cdot \color{blue}{\left(1 + \log \left(\frac{1}{t}\right)\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      13. log-recN/A

        \[\leadsto z \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      14. sub-negN/A

        \[\leadsto z \cdot \color{blue}{\left(1 - \log t\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
    5. Simplified96.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)} \]
    6. Taylor expanded in b around 0

      \[\leadsto \color{blue}{y + z \cdot \left(1 - \log t\right)} \]
    7. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + y} \]
      2. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y\right)} \]
      3. lower--.f64N/A

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, y\right) \]
      4. lower-log.f64 (76.8% accuracy)

        \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, y\right) \]
    8. Simplified (76.8% accuracy)

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y\right)} \]

    if -1.2999999999999999e121 < z < 1.8500000000000001e194

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(a - \frac{1}{2}\right)} \cdot b \]
      2. *-commutativeN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      3. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
      4. sub-negN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      5. distribute-lft-inN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      6. lower-fma.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      7. lower-*.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      8. metadata-eval99.9

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
    4. Applied egg-rr99.9%

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
    5. Taylor expanded in z around 0

      \[\leadsto \color{blue}{\left(x + y\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
    6. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
      2. lower-+.f6487.3

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
    7. Simplified87.3%

      \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (84.9% accuracy)

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -1.3 \cdot 10^{+121}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, y\right)\\ \mathbf{elif}\;z \leq 1.85 \cdot 10^{+194}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 84.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -3 \cdot 10^{+192}:\\ \;\;\;\;z - z \cdot \log t\\ \mathbf{elif}\;z \leq 1.12 \cdot 10^{+207}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, -z, z\right)\\ \end{array} \end{array} \]
;; Herbie alternative 7 (84.9% accurate): three regimes on z.
;; Both outer regimes evaluate z*(1 - log t) -- as z - z*log(t) on the
;; negative side, as fma(log t, -z, z) on the positive side -- with the
;; x, y and b*(a-0.5) terms dropped; the middle regime drops the z terms.
(FPCore (x y z t a b)
 :precision binary64
 (if (<= z -3e+192)
   (- z (* z (log t)))
   (if (<= z 1.12e+207)
     (+ (fma b a (* b -0.5)) (+ x y))
     (fma (log t) (- z) z))))
/* Herbie alternative 7 (84.9% accurate): three regimes on z.
 * Both outer regimes evaluate z*(1 - log t) -- as z - z*log(t) on the
 * negative side and as fma(log(t), -z, z) on the positive side -- with
 * the x, y and b*(a-0.5) terms dropped (Taylor expansion in z around
 * infinity); the middle regime drops the z terms instead. */
double code(double x, double y, double z, double t, double a, double b) {
	double tmp;
	if (z <= -3e+192) {
		tmp = z - (z * log(t));
	} else if (z <= 1.12e+207) {
		/* middle regime: b*(a - 0.5) as fma(b, a, b*-0.5) */
		tmp = fma(b, a, (b * -0.5)) + (x + y);
	} else {
		tmp = fma(log(t), -z, z);
	}
	return tmp;
}
# Herbie alternative 7 (84.9% accurate): three regimes on z.
# Outer regimes evaluate z*(1 - log(t)) in two algebraic forms; the
# middle regime drops the z terms entirely.
function code(x, y, z, t, a, b)
	tmp = 0.0
	if (z <= -3e+192)
		tmp = Float64(z - Float64(z * log(t)));
	elseif (z <= 1.12e+207)
		# middle regime: b*(a - 0.5) as fma(b, a, b*-0.5)
		tmp = Float64(fma(b, a, Float64(b * -0.5)) + Float64(x + y));
	else
		tmp = fma(log(t), Float64(-z), z);
	end
	return tmp
end
code[x_, y_, z_, t_, a_, b_] := If[LessEqual[z, -3e+192], N[(z - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[z, 1.12e+207], N[(N[(b * a + N[(b * -0.5), $MachinePrecision]), $MachinePrecision] + N[(x + y), $MachinePrecision]), $MachinePrecision], N[(N[Log[t], $MachinePrecision] * (-z) + z), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq -3 \cdot 10^{+192}:\\
\;\;\;\;z - z \cdot \log t\\

\mathbf{elif}\;z \leq 1.12 \cdot 10^{+207}:\\
\;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\log t, -z, z\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if z < -3e192

    1. Initial program 99.6%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in z around inf

      \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right)} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \color{blue}{\left(1 - \log t\right) \cdot z} \]
      2. sub-negN/A

        \[\leadsto \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} \cdot z \]
      3. log-recN/A

        \[\leadsto \left(1 + \color{blue}{\log \left(\frac{1}{t}\right)}\right) \cdot z \]
      4. +-commutativeN/A

        \[\leadsto \color{blue}{\left(\log \left(\frac{1}{t}\right) + 1\right)} \cdot z \]
      5. distribute-lft1-inN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{t}\right) \cdot z + z} \]
      6. log-recN/A

        \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)} \cdot z + z \]
      7. distribute-lft-neg-inN/A

        \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t \cdot z\right)\right)} + z \]
      8. distribute-rgt-neg-inN/A

        \[\leadsto \color{blue}{\log t \cdot \left(\mathsf{neg}\left(z\right)\right)} + z \]
      9. neg-mul-1N/A

        \[\leadsto \log t \cdot \color{blue}{\left(-1 \cdot z\right)} + z \]
      10. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -1 \cdot z, z\right)} \]
      11. lower-log.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\log t}, -1 \cdot z, z\right) \]
      12. neg-mul-1N/A

        \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{\mathsf{neg}\left(z\right)}, z\right) \]
      13. lower-neg.f6479.9

        \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{-z}, z\right) \]
    5. Simplified79.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -z, z\right)} \]
    6. Step-by-step derivation
      1. lift-log.f64N/A

        \[\leadsto \color{blue}{\log t} \cdot \left(\mathsf{neg}\left(z\right)\right) + z \]
      2. lift-neg.f64N/A

        \[\leadsto \log t \cdot \color{blue}{\left(\mathsf{neg}\left(z\right)\right)} + z \]
      3. +-commutativeN/A

        \[\leadsto \color{blue}{z + \log t \cdot \left(\mathsf{neg}\left(z\right)\right)} \]
      4. *-commutativeN/A

        \[\leadsto z + \color{blue}{\left(\mathsf{neg}\left(z\right)\right) \cdot \log t} \]
      5. lift-neg.f64N/A

        \[\leadsto z + \color{blue}{\left(\mathsf{neg}\left(z\right)\right)} \cdot \log t \]
      6. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{z - z \cdot \log t} \]
      7. lift-*.f64N/A

        \[\leadsto z - \color{blue}{z \cdot \log t} \]
      8. lower--.f6479.9

        \[\leadsto \color{blue}{z - z \cdot \log t} \]
    7. Applied egg-rr79.9%

      \[\leadsto \color{blue}{z - z \cdot \log t} \]

    if -3e192 < z < 1.1199999999999999e207

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(a - \frac{1}{2}\right)} \cdot b \]
      2. *-commutativeN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      3. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
      4. sub-negN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      5. distribute-lft-inN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      6. lower-fma.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      7. lower-*.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      8. metadata-eval99.9

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
    4. Applied egg-rr99.9%

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
    5. Taylor expanded in z around 0

      \[\leadsto \color{blue}{\left(x + y\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
    6. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
      2. lower-+.f6483.8

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
    7. Simplified83.8%

      \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]

    if 1.1199999999999999e207 < z

    1. Initial program 99.5%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in z around inf

      \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right)} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \color{blue}{\left(1 - \log t\right) \cdot z} \]
      2. sub-negN/A

        \[\leadsto \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} \cdot z \]
      3. log-recN/A

        \[\leadsto \left(1 + \color{blue}{\log \left(\frac{1}{t}\right)}\right) \cdot z \]
      4. +-commutativeN/A

        \[\leadsto \color{blue}{\left(\log \left(\frac{1}{t}\right) + 1\right)} \cdot z \]
      5. distribute-lft1-inN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{t}\right) \cdot z + z} \]
      6. log-recN/A

        \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)} \cdot z + z \]
      7. distribute-lft-neg-inN/A

        \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t \cdot z\right)\right)} + z \]
      8. distribute-rgt-neg-inN/A

        \[\leadsto \color{blue}{\log t \cdot \left(\mathsf{neg}\left(z\right)\right)} + z \]
      9. neg-mul-1N/A

        \[\leadsto \log t \cdot \color{blue}{\left(-1 \cdot z\right)} + z \]
      10. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -1 \cdot z, z\right)} \]
      11. lower-log.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\log t}, -1 \cdot z, z\right) \]
      12. neg-mul-1N/A

        \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{\mathsf{neg}\left(z\right)}, z\right) \]
      13. lower-neg.f6475.7

        \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{-z}, z\right) \]
    5. Simplified75.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -z, z\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification (83.0% accuracy)

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -3 \cdot 10^{+192}:\\ \;\;\;\;z - z \cdot \log t\\ \mathbf{elif}\;z \leq 1.12 \cdot 10^{+207}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log t, -z, z\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 84.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := z - z \cdot \log t\\ \mathbf{if}\;z \leq -3 \cdot 10^{+192}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;z \leq 1.12 \cdot 10^{+207}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
;; Herbie alternative 8 (84.9% accurate): two regimes on z.
;; Outside (-3e192, 1.12e207] the result is z - z*log(t) (x, y and the
;; b*(a-0.5) term dropped); inside, the z terms are dropped and
;; b*(a-0.5) is evaluated as fma(b, a, b*-0.5).
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (- z (* z (log t)))))
   (if (<= z -3e+192)
     t_1
     (if (<= z 1.12e+207) (+ (fma b a (* b -0.5)) (+ x y)) t_1))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = z - (z * log(t));
	double tmp;
	if (z <= -3e+192) {
		tmp = t_1;
	} else if (z <= 1.12e+207) {
		tmp = fma(b, a, (b * -0.5)) + (x + y);
	} else {
		tmp = t_1;
	}
	return tmp;
}
# Herbie alternative 8 (84.9% accurate): two regimes on z.
# Outer regimes return z - z*log(t); the middle regime drops the z terms
# and evaluates b*(a-0.5) as fma(b, a, b*-0.5).
function code(x, y, z, t, a, b)
	# shared value for both large-|z| branches
	t_1 = Float64(z - Float64(z * log(t)))
	tmp = 0.0
	if (z <= -3e+192)
		tmp = t_1;
	elseif (z <= 1.12e+207)
		# middle regime: z contribution dropped (Taylor expansion around z = 0)
		tmp = Float64(fma(b, a, Float64(b * -0.5)) + Float64(x + y));
	else
		tmp = t_1;
	end
	return tmp
end
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(z - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -3e+192], t$95$1, If[LessEqual[z, 1.12e+207], N[(N[(b * a + N[(b * -0.5), $MachinePrecision]), $MachinePrecision] + N[(x + y), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := z - z \cdot \log t\\
\mathbf{if}\;z \leq -3 \cdot 10^{+192}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;z \leq 1.12 \cdot 10^{+207}:\\
\;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -3e192 or 1.1199999999999999e207 < z

    1. Initial program 99.6%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in z around inf

      \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right)} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \color{blue}{\left(1 - \log t\right) \cdot z} \]
      2. sub-negN/A

        \[\leadsto \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} \cdot z \]
      3. log-recN/A

        \[\leadsto \left(1 + \color{blue}{\log \left(\frac{1}{t}\right)}\right) \cdot z \]
      4. +-commutativeN/A

        \[\leadsto \color{blue}{\left(\log \left(\frac{1}{t}\right) + 1\right)} \cdot z \]
      5. distribute-lft1-inN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{t}\right) \cdot z + z} \]
      6. log-recN/A

        \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)} \cdot z + z \]
      7. distribute-lft-neg-inN/A

        \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t \cdot z\right)\right)} + z \]
      8. distribute-rgt-neg-inN/A

        \[\leadsto \color{blue}{\log t \cdot \left(\mathsf{neg}\left(z\right)\right)} + z \]
      9. neg-mul-1N/A

        \[\leadsto \log t \cdot \color{blue}{\left(-1 \cdot z\right)} + z \]
      10. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -1 \cdot z, z\right)} \]
      11. lower-log.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\log t}, -1 \cdot z, z\right) \]
      12. neg-mul-1N/A

        \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{\mathsf{neg}\left(z\right)}, z\right) \]
      13. lower-neg.f6478.0

        \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{-z}, z\right) \]
    5. Simplified78.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -z, z\right)} \]
    6. Step-by-step derivation
      1. lift-log.f64N/A

        \[\leadsto \color{blue}{\log t} \cdot \left(\mathsf{neg}\left(z\right)\right) + z \]
      2. lift-neg.f64N/A

        \[\leadsto \log t \cdot \color{blue}{\left(\mathsf{neg}\left(z\right)\right)} + z \]
      3. +-commutativeN/A

        \[\leadsto \color{blue}{z + \log t \cdot \left(\mathsf{neg}\left(z\right)\right)} \]
      4. *-commutativeN/A

        \[\leadsto z + \color{blue}{\left(\mathsf{neg}\left(z\right)\right) \cdot \log t} \]
      5. lift-neg.f64N/A

        \[\leadsto z + \color{blue}{\left(\mathsf{neg}\left(z\right)\right)} \cdot \log t \]
      6. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{z - z \cdot \log t} \]
      7. lift-*.f64N/A

        \[\leadsto z - \color{blue}{z \cdot \log t} \]
      8. lower--.f6478.0

        \[\leadsto \color{blue}{z - z \cdot \log t} \]
    7. Applied egg-rr78.0%

      \[\leadsto \color{blue}{z - z \cdot \log t} \]

    if -3e192 < z < 1.1199999999999999e207

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(a - \frac{1}{2}\right)} \cdot b \]
      2. *-commutativeN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      3. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
      4. sub-negN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      5. distribute-lft-inN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      6. lower-fma.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      7. lower-*.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      8. metadata-eval99.9

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
    4. Applied egg-rr99.9%

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
    5. Taylor expanded in z around 0

      \[\leadsto \color{blue}{\left(x + y\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
    6. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
      2. lower-+.f6483.8

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
    7. Simplified83.8%

      \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (83.0% accuracy)

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -3 \cdot 10^{+192}:\\ \;\;\;\;z - z \cdot \log t\\ \mathbf{elif}\;z \leq 1.12 \cdot 10^{+207}:\\ \;\;\;\;\mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)\\ \mathbf{else}:\\ \;\;\;\;z - z \cdot \log t\\ \end{array} \]
  5. Add Preprocessing

Alternative 9: 72.4% accurate, 3.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := b \cdot \left(a - 0.5\right)\\ t_2 := \mathsf{fma}\left(b, a + -0.5, y\right)\\ \mathbf{if}\;t\_1 \leq -4 \cdot 10^{+152}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 10^{+94}:\\ \;\;\;\;y + \mathsf{fma}\left(-0.5, b, x\right)\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
;; Herbie alternative 9 (72.4% accurate, 3.3x speedup): z and t are dropped
;; entirely; regimes split on t_1 = b*(a - 0.5).  Outer regimes return
;; fma(b, a - 0.5, y) (x dropped); the middle regime returns
;; y + fma(-0.5, b, x) (the a*b term dropped).
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (* b (- a 0.5))) (t_2 (fma b (+ a -0.5) y)))
   (if (<= t_1 -4e+152) t_2 (if (<= t_1 1e+94) (+ y (fma -0.5 b x)) t_2))))
/* Herbie alternative 9 (72.4% accurate, 3.3x speedup): the z terms and
 * log(t) are dropped entirely, so z and t are unused.  Regimes split on
 * t_1 = b*(a - 0.5): both outer regimes return fma(b, a - 0.5, y)
 * (x dropped), the middle regime returns y + fma(-0.5, b, x). */
double code(double x, double y, double z, double t, double a, double b) {
	/* regime selector b*(a - 0.5) */
	double t_1 = b * (a - 0.5);
	/* shared value for both outer regimes */
	double t_2 = fma(b, (a + -0.5), y);
	double tmp;
	if (t_1 <= -4e+152) {
		tmp = t_2;
	} else if (t_1 <= 1e+94) {
		tmp = y + fma(-0.5, b, x);
	} else {
		tmp = t_2;
	}
	return tmp;
}
# Herbie alternative 9 (Julia): regime split on b*(a - 0.5).
function code(x, y, z, t, a, b)
	t_1 = Float64(b * Float64(a - 0.5))  # regime selector
	t_2 = fma(b, Float64(a + -0.5), y)   # result shared by both outer regimes
	tmp = 0.0
	if (t_1 <= -4e+152)
		tmp = t_2;
	elseif (t_1 <= 1e+94)
		tmp = Float64(y + fma(-0.5, b, x));  # middle regime: z terms dropped
	else
		tmp = t_2;
	end
	return tmp
end
(* Herbie alternative 9 (Mathematica): t$95$1 is the regime selector b*(a-0.5); t$95$2 the shared outer-regime form. *)
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(b * N[(a + -0.5), $MachinePrecision] + y), $MachinePrecision]}, If[LessEqual[t$95$1, -4e+152], t$95$2, If[LessEqual[t$95$1, 1e+94], N[(y + N[(-0.5 * b + x), $MachinePrecision]), $MachinePrecision], t$95$2]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := b \cdot \left(a - 0.5\right)\\
t_2 := \mathsf{fma}\left(b, a + -0.5, y\right)\\
\mathbf{if}\;t_1 \leq -4 \cdot 10^{+152}:\\
\;\;\;\;t_2\\

\mathbf{elif}\;t_1 \leq 10^{+94}:\\
\;\;\;\;y + \mathsf{fma}\left(-0.5, b, x\right)\\

\mathbf{else}:\\
\;\;\;\;t_2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < -4.0000000000000002e152 or 1e94 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - \color{blue}{\log t \cdot z} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
      3. log-recN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
      4. *-commutativeN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
      5. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right)} \]
      6. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(z + b \cdot \left(a - \frac{1}{2}\right)\right) + y\right)} \]
      7. associate-+l+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(z + \left(b \cdot \left(a - \frac{1}{2}\right) + y\right)\right)} \]
      8. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(z + \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right)}\right) \]
      9. associate-+r+N/A

        \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + z\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      10. *-rgt-identityN/A

        \[\leadsto \left(z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{z \cdot 1}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      11. distribute-lft-inN/A

        \[\leadsto \color{blue}{z \cdot \left(\log \left(\frac{1}{t}\right) + 1\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      12. +-commutativeN/A

        \[\leadsto z \cdot \color{blue}{\left(1 + \log \left(\frac{1}{t}\right)\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      13. log-recN/A

        \[\leadsto z \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      14. sub-negN/A

        \[\leadsto z \cdot \color{blue}{\left(1 - \log t\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
    5. Simplified91.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)} \]
    6. Taylor expanded in z around 0

      \[\leadsto \color{blue}{y + b \cdot \left(a - \frac{1}{2}\right)} \]
    7. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right) + y} \]
      2. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
      3. sub-negN/A

        \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
      4. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
      5. lower-+.f6479.3

        \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
    8. Simplified79.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left(b, a + -0.5, y\right)} \]

    if -4.0000000000000002e152 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < 1e94

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in a around 0

      \[\leadsto \color{blue}{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - \color{blue}{\log t \cdot z} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
      3. log-recN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
      4. *-commutativeN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
      5. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right)} \]
      6. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(y + \left(z + \frac{-1}{2} \cdot b\right)\right) + x\right)} \]
      7. associate-+r+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(\color{blue}{\left(\left(y + z\right) + \frac{-1}{2} \cdot b\right)} + x\right) \]
      8. associate-+l+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(y + z\right) + \left(\frac{-1}{2} \cdot b + x\right)\right)} \]
      9. associate-+r+N/A

        \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + \left(y + z\right)\right) + \left(\frac{-1}{2} \cdot b + x\right)} \]
      10. +-commutativeN/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) + z \cdot \log \left(\frac{1}{t}\right)\right)} + \left(\frac{-1}{2} \cdot b + x\right) \]
      11. *-commutativeN/A

        \[\leadsto \left(\left(y + z\right) + \color{blue}{\log \left(\frac{1}{t}\right) \cdot z}\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      12. log-recN/A

        \[\leadsto \left(\left(y + z\right) + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)} \cdot z\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      13. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) - \log t \cdot z\right)} + \left(\frac{-1}{2} \cdot b + x\right) \]
      14. *-commutativeN/A

        \[\leadsto \left(\left(y + z\right) - \color{blue}{z \cdot \log t}\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      15. lower-+.f64N/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) - z \cdot \log t\right) + \left(\frac{-1}{2} \cdot b + x\right)} \]
    5. Simplified96.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y\right) + \mathsf{fma}\left(b, -0.5, x\right)} \]
    6. Taylor expanded in z around 0

      \[\leadsto \color{blue}{x + \left(y + \frac{-1}{2} \cdot b\right)} \]
    7. Step-by-step derivation
      1. associate-+r+N/A

        \[\leadsto \color{blue}{\left(x + y\right) + \frac{-1}{2} \cdot b} \]
      2. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + x\right)} + \frac{-1}{2} \cdot b \]
      3. associate-+l+N/A

        \[\leadsto \color{blue}{y + \left(x + \frac{-1}{2} \cdot b\right)} \]
      4. lower-+.f64N/A

        \[\leadsto \color{blue}{y + \left(x + \frac{-1}{2} \cdot b\right)} \]
      5. +-commutativeN/A

        \[\leadsto y + \color{blue}{\left(\frac{-1}{2} \cdot b + x\right)} \]
      6. lower-fma.f6461.2

        \[\leadsto y + \color{blue}{\mathsf{fma}\left(-0.5, b, x\right)} \]
    8. Simplified61.2%

      \[\leadsto \color{blue}{y + \mathsf{fma}\left(-0.5, b, x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification69.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;b \cdot \left(a - 0.5\right) \leq -4 \cdot 10^{+152}:\\ \;\;\;\;\mathsf{fma}\left(b, a + -0.5, y\right)\\ \mathbf{elif}\;b \cdot \left(a - 0.5\right) \leq 10^{+94}:\\ \;\;\;\;y + \mathsf{fma}\left(-0.5, b, x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(b, a + -0.5, y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 10: 70.3% accurate, 3.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := b \cdot \left(a - 0.5\right)\\ t_2 := b \cdot \left(a + -0.5\right)\\ \mathbf{if}\;t_1 \leq -2 \cdot 10^{+193}:\\ \;\;\;\;t_2\\ \mathbf{elif}\;t_1 \leq 10^{+186}:\\ \;\;\;\;y + \mathsf{fma}\left(-0.5, b, x\right)\\ \mathbf{else}:\\ \;\;\;\;t_2\\ \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (* b (- a 0.5))) (t_2 (* b (+ a -0.5))))
   (if (<= t_1 -2e+193) t_2 (if (<= t_1 1e+186) (+ y (fma -0.5 b x)) t_2))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = b * (a - 0.5);
	double t_2 = b * (a + -0.5);
	double tmp;
	if (t_1 <= -2e+193) {
		tmp = t_2;
	} else if (t_1 <= 1e+186) {
		tmp = y + fma(-0.5, b, x);
	} else {
		tmp = t_2;
	}
	return tmp;
}
# Herbie alternative 10 (Julia): regime split on b*(a - 0.5).
function code(x, y, z, t, a, b)
	t_1 = Float64(b * Float64(a - 0.5))  # regime selector
	t_2 = Float64(b * Float64(a + -0.5)) # result shared by both outer regimes
	tmp = 0.0
	if (t_1 <= -2e+193)
		tmp = t_2;
	elseif (t_1 <= 1e+186)
		tmp = Float64(y + fma(-0.5, b, x));  # middle regime
	else
		tmp = t_2;
	end
	return tmp
end
(* Herbie alternative 10 (Mathematica): t$95$1 is the regime selector b*(a-0.5); t$95$2 the shared outer-regime product. *)
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(b * N[(a + -0.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -2e+193], t$95$2, If[LessEqual[t$95$1, 1e+186], N[(y + N[(-0.5 * b + x), $MachinePrecision]), $MachinePrecision], t$95$2]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := b \cdot \left(a - 0.5\right)\\
t_2 := b \cdot \left(a + -0.5\right)\\
\mathbf{if}\;t_1 \leq -2 \cdot 10^{+193}:\\
\;\;\;\;t_2\\

\mathbf{elif}\;t_1 \leq 10^{+186}:\\
\;\;\;\;y + \mathsf{fma}\left(-0.5, b, x\right)\\

\mathbf{else}:\\
\;\;\;\;t_2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < -2.00000000000000013e193 or 9.9999999999999998e185 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in b around inf

      \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      2. sub-negN/A

        \[\leadsto b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      3. metadata-evalN/A

        \[\leadsto b \cdot \left(a + \color{blue}{\frac{-1}{2}}\right) \]
      4. lower-+.f6487.8

        \[\leadsto b \cdot \color{blue}{\left(a + -0.5\right)} \]
    5. Simplified87.8%

      \[\leadsto \color{blue}{b \cdot \left(a + -0.5\right)} \]

    if -2.00000000000000013e193 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < 9.9999999999999998e185

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in a around 0

      \[\leadsto \color{blue}{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - \color{blue}{\log t \cdot z} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
      3. log-recN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
      4. *-commutativeN/A

        \[\leadsto \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
      5. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right)} \]
      6. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(y + \left(z + \frac{-1}{2} \cdot b\right)\right) + x\right)} \]
      7. associate-+r+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(\color{blue}{\left(\left(y + z\right) + \frac{-1}{2} \cdot b\right)} + x\right) \]
      8. associate-+l+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(y + z\right) + \left(\frac{-1}{2} \cdot b + x\right)\right)} \]
      9. associate-+r+N/A

        \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + \left(y + z\right)\right) + \left(\frac{-1}{2} \cdot b + x\right)} \]
      10. +-commutativeN/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) + z \cdot \log \left(\frac{1}{t}\right)\right)} + \left(\frac{-1}{2} \cdot b + x\right) \]
      11. *-commutativeN/A

        \[\leadsto \left(\left(y + z\right) + \color{blue}{\log \left(\frac{1}{t}\right) \cdot z}\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      12. log-recN/A

        \[\leadsto \left(\left(y + z\right) + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)} \cdot z\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      13. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) - \log t \cdot z\right)} + \left(\frac{-1}{2} \cdot b + x\right) \]
      14. *-commutativeN/A

        \[\leadsto \left(\left(y + z\right) - \color{blue}{z \cdot \log t}\right) + \left(\frac{-1}{2} \cdot b + x\right) \]
      15. lower-+.f64N/A

        \[\leadsto \color{blue}{\left(\left(y + z\right) - z \cdot \log t\right) + \left(\frac{-1}{2} \cdot b + x\right)} \]
    5. Simplified93.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y\right) + \mathsf{fma}\left(b, -0.5, x\right)} \]
    6. Taylor expanded in z around 0

      \[\leadsto \color{blue}{x + \left(y + \frac{-1}{2} \cdot b\right)} \]
    7. Step-by-step derivation
      1. associate-+r+N/A

        \[\leadsto \color{blue}{\left(x + y\right) + \frac{-1}{2} \cdot b} \]
      2. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + x\right)} + \frac{-1}{2} \cdot b \]
      3. associate-+l+N/A

        \[\leadsto \color{blue}{y + \left(x + \frac{-1}{2} \cdot b\right)} \]
      4. lower-+.f64N/A

        \[\leadsto \color{blue}{y + \left(x + \frac{-1}{2} \cdot b\right)} \]
      5. +-commutativeN/A

        \[\leadsto y + \color{blue}{\left(\frac{-1}{2} \cdot b + x\right)} \]
      6. lower-fma.f6458.7

        \[\leadsto y + \color{blue}{\mathsf{fma}\left(-0.5, b, x\right)} \]
    8. Simplified58.7%

      \[\leadsto \color{blue}{y + \mathsf{fma}\left(-0.5, b, x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification68.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;b \cdot \left(a - 0.5\right) \leq -2 \cdot 10^{+193}:\\ \;\;\;\;b \cdot \left(a + -0.5\right)\\ \mathbf{elif}\;b \cdot \left(a - 0.5\right) \leq 10^{+186}:\\ \;\;\;\;y + \mathsf{fma}\left(-0.5, b, x\right)\\ \mathbf{else}:\\ \;\;\;\;b \cdot \left(a + -0.5\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 11: 38.1% accurate, 7.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;a \leq -0.004:\\ \;\;\;\;b \cdot a\\ \mathbf{elif}\;a \leq 0.00064:\\ \;\;\;\;b \cdot -0.5\\ \mathbf{else}:\\ \;\;\;\;b \cdot a\\ \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (if (<= a -0.004) (* b a) (if (<= a 0.00064) (* b -0.5) (* b a))))
/* Herbie alternative 11: only the (a - 0.5)*b term survives; near a == 0
   the factor (a - 0.5) is replaced by the constant -0.5. */
double code(double x, double y, double z, double t, double a, double b) {
	if (a > -0.004 && a <= 0.00064) {
		return b * -0.5; /* middle regime */
	}
	return b * a; /* both outer regimes */
}
! Herbie alternative 11 (Fortran): only the (a - 0.5)*b term survives;
! near a == 0 the factor (a - 0.5) is replaced by the constant -0.5.
real(8) function code(x, y, z, t, a, b)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: tmp
    if (a <= (-0.004d0)) then
        tmp = b * a
    else if (a <= 0.00064d0) then
        tmp = b * (-0.5d0)
    else
        tmp = b * a
    end if
    code = tmp
end function
/** Herbie alternative 11: only the (a - 0.5)*b term survives; near a == 0
 *  the factor (a - 0.5) is replaced by the constant -0.5. */
public static double code(double x, double y, double z, double t, double a, double b) {
	final boolean nearZero = (a > -0.004) && (a <= 0.00064);
	return nearZero ? (b * -0.5) : (b * a);
}
def code(x, y, z, t, a, b):
	# Herbie alternative 11: only the (a - 0.5)*b term survives; near a == 0
	# the factor (a - 0.5) is replaced by the constant -0.5.
	if -0.004 < a <= 0.00064:
		return b * -0.5
	return b * a
# Herbie alternative 11 (Julia): b*a in the outer regimes, b*(-0.5) near a == 0.
function code(x, y, z, t, a, b)
	tmp = 0.0
	if (a <= -0.004)
		tmp = Float64(b * a);
	elseif (a <= 0.00064)
		tmp = Float64(b * -0.5);  # middle regime: a treated as 0
	else
		tmp = Float64(b * a);
	end
	return tmp
end
% Herbie alternative 11 (MATLAB): b*a in the outer regimes, b*(-0.5) near a == 0.
function tmp_2 = code(x, y, z, t, a, b)
	tmp = 0.0;
	if (a <= -0.004)
		tmp = b * a;
	elseif (a <= 0.00064)
		tmp = b * -0.5;  % middle regime: a treated as 0
	else
		tmp = b * a;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_, b_] := If[LessEqual[a, -0.004], N[(b * a), $MachinePrecision], If[LessEqual[a, 0.00064], N[(b * -0.5), $MachinePrecision], N[(b * a), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;a \leq -0.004:\\
\;\;\;\;b \cdot a\\

\mathbf{elif}\;a \leq 0.00064:\\
\;\;\;\;b \cdot -0.5\\

\mathbf{else}:\\
\;\;\;\;b \cdot a\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if a < -0.0040000000000000001 or 6.40000000000000052e-4 < a

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in a around inf

      \[\leadsto \color{blue}{a \cdot b} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \color{blue}{b \cdot a} \]
      2. lower-*.f6452.1

        \[\leadsto \color{blue}{b \cdot a} \]
    5. Simplified52.1%

      \[\leadsto \color{blue}{b \cdot a} \]

    if -0.0040000000000000001 < a < 6.40000000000000052e-4

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in b around inf

      \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      2. sub-negN/A

        \[\leadsto b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      3. metadata-evalN/A

        \[\leadsto b \cdot \left(a + \color{blue}{\frac{-1}{2}}\right) \]
      4. lower-+.f6427.2

        \[\leadsto b \cdot \color{blue}{\left(a + -0.5\right)} \]
    5. Simplified27.2%

      \[\leadsto \color{blue}{b \cdot \left(a + -0.5\right)} \]
    6. Taylor expanded in a around 0

      \[\leadsto b \cdot \color{blue}{\frac{-1}{2}} \]
    7. Step-by-step derivation
      1. Simplified27.0%

        \[\leadsto b \cdot \color{blue}{-0.5} \]
    8. Recombined 2 regimes into one program.
    9. Add Preprocessing

    Alternative 12: 78.9% accurate, 7.0× speedup?

    \[\begin{array}{l} \\ \mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right) \end{array} \]
    (FPCore (x y z t a b) :precision binary64 (+ (fma b a (* b -0.5)) (+ x y)))
    double code(double x, double y, double z, double t, double a, double b) {
    	return fma(b, a, (b * -0.5)) + (x + y);
    }
    
    # Herbie alternative 12 (Julia): fma computes b*(a - 0.5); z/t terms dropped.
    function code(x, y, z, t, a, b)
    	return Float64(fma(b, a, Float64(b * -0.5)) + Float64(x + y))
    end
    
    (* Herbie alternative 12 (Mathematica): b*a + b*(-0.5) plus x + y; z/t terms dropped. *)
    code[x_, y_, z_, t_, a_, b_] := N[(N[(b * a + N[(b * -0.5), $MachinePrecision]), $MachinePrecision] + N[(x + y), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right)
    \end{array}
    
    Derivation
    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(a - \frac{1}{2}\right)} \cdot b \]
      2. *-commutativeN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      3. lift--.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
      4. sub-negN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      5. distribute-lft-inN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      6. lower-fma.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      7. lower-*.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      8. metadata-eval99.9

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
    4. Applied egg-rr99.9%

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
    5. Taylor expanded in z around 0

      \[\leadsto \color{blue}{\left(x + y\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
    6. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot \frac{-1}{2}\right) \]
      2. lower-+.f6475.1

        \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
    7. Simplified75.1%

      \[\leadsto \color{blue}{\left(y + x\right)} + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
    8. Final simplification75.1%

      \[\leadsto \mathsf{fma}\left(b, a, b \cdot -0.5\right) + \left(x + y\right) \]
    9. Add Preprocessing

    Alternative 13: 78.9% accurate, 9.7× speedup?

    \[\begin{array}{l} \\ y + \mathsf{fma}\left(b, a + -0.5, x\right) \end{array} \]
    (FPCore (x y z t a b) :precision binary64 (+ y (fma b (+ a -0.5) x)))
    double code(double x, double y, double z, double t, double a, double b) {
    	return y + fma(b, (a + -0.5), x);
    }
    
    # Herbie alternative 13 (Julia): y + fma(b, a - 0.5, x); z/t terms dropped.
    function code(x, y, z, t, a, b)
    	return Float64(y + fma(b, Float64(a + -0.5), x))
    end
    
    (* Herbie alternative 13 (Mathematica): y + (b*(a - 0.5) + x); z/t terms dropped. *)
    code[x_, y_, z_, t_, a_, b_] := N[(y + N[(b * N[(a + -0.5), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    y + \mathsf{fma}\left(b, a + -0.5, x\right)
    \end{array}
    
    Derivation
    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in z around 0

      \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
    4. Step-by-step derivation
      1. associate-+r+N/A

        \[\leadsto \color{blue}{\left(x + y\right) + b \cdot \left(a - \frac{1}{2}\right)} \]
      2. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + x\right)} + b \cdot \left(a - \frac{1}{2}\right) \]
      3. associate-+l+N/A

        \[\leadsto \color{blue}{y + \left(x + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      4. lower-+.f64N/A

        \[\leadsto \color{blue}{y + \left(x + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      5. +-commutativeN/A

        \[\leadsto y + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
      6. lower-fma.f64N/A

        \[\leadsto y + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} \]
      7. sub-negN/A

        \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) \]
      8. metadata-evalN/A

        \[\leadsto y + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) \]
      9. lower-+.f6475.1

        \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) \]
    5. Simplified75.1%

      \[\leadsto \color{blue}{y + \mathsf{fma}\left(b, a + -0.5, x\right)} \]
    6. Add Preprocessing

    Alternative 14: 38.7% accurate, 14.0× speedup?

    \[\begin{array}{l} \\ b \cdot \left(a + -0.5\right) \end{array} \]
    (FPCore (x y z t a b) :precision binary64 (* b (+ a -0.5)))
    /* Herbie alternative 14: only the (a - 0.5)*b term survives (b -> inf expansion). */
    double code(double x, double y, double z, double t, double a, double b) {
    	return (a + -0.5) * b; /* IEEE multiply is commutative, so order is free */
    }
    
    ! Herbie alternative 14 (Fortran): only the (a - 0.5)*b term survives.
    real(8) function code(x, y, z, t, a, b)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        real(8), intent (in) :: z
        real(8), intent (in) :: t
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        code = b * (a + (-0.5d0))
    end function
    
    /** Herbie alternative 14: only the (a - 0.5)*b term survives. */
    public static double code(double x, double y, double z, double t, double a, double b) {
    	return (a + -0.5) * b; // IEEE multiply is commutative, so order is free
    }
    
    def code(x, y, z, t, a, b):
    	# Herbie alternative 14: only the (a - 0.5)*b term survives.
    	return (a + -0.5) * b
    
    # Herbie alternative 14 (Julia): only the b*(a - 0.5) term survives.
    function code(x, y, z, t, a, b)
    	return Float64(b * Float64(a + -0.5))
    end
    
    % Herbie alternative 14 (MATLAB): only the b*(a - 0.5) term survives.
    function tmp = code(x, y, z, t, a, b)
    	tmp = b * (a + -0.5);
    end
    
    (* Herbie alternative 14 (Mathematica): only the b*(a - 0.5) term survives. *)
    code[x_, y_, z_, t_, a_, b_] := N[(b * N[(a + -0.5), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    b \cdot \left(a + -0.5\right)
    \end{array}
    
    Derivation
    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in b around inf

      \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      2. sub-negN/A

        \[\leadsto b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      3. metadata-evalN/A

        \[\leadsto b \cdot \left(a + \color{blue}{\frac{-1}{2}}\right) \]
      4. lower-+.f6440.0

        \[\leadsto b \cdot \color{blue}{\left(a + -0.5\right)} \]
    5. Simplified40.0%

      \[\leadsto \color{blue}{b \cdot \left(a + -0.5\right)} \]
    6. Add Preprocessing

    Alternative 15: 14.0% accurate, 21.0× speedup?

    \[\begin{array}{l} \\ b \cdot -0.5 \end{array} \]
    (FPCore (x y z t a b) :precision binary64 (* b -0.5))
    /* Herbie alternative 15: every term except -0.5*b was Taylor-dropped. */
    double code(double x, double y, double z, double t, double a, double b) {
    	return -0.5 * b;
    }
    
    ! Herbie alternative 15 (Fortran): every term except -0.5*b was dropped.
    real(8) function code(x, y, z, t, a, b)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        real(8), intent (in) :: z
        real(8), intent (in) :: t
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        code = b * (-0.5d0)
    end function
    
    /** Herbie alternative 15: every term except -0.5*b was Taylor-dropped. */
    public static double code(double x, double y, double z, double t, double a, double b) {
    	return -0.5 * b;
    }
    
    def code(x, y, z, t, a, b):
    	# Herbie alternative 15: every term except -0.5*b was Taylor-dropped.
    	return -0.5 * b
    
    # Herbie alternative 15 (Julia): every term except b*(-0.5) was dropped.
    function code(x, y, z, t, a, b)
    	return Float64(b * -0.5)
    end
    
    % Herbie alternative 15 (MATLAB): every term except b*(-0.5) was dropped.
    function tmp = code(x, y, z, t, a, b)
    	tmp = b * -0.5;
    end
    
    (* Herbie alternative 15 (Mathematica): every term except b*(-0.5) was dropped. *)
    code[x_, y_, z_, t_, a_, b_] := N[(b * -0.5), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    b \cdot -0.5
    \end{array}
    
    Derivation
    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in b around inf

      \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      2. sub-negN/A

        \[\leadsto b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      3. metadata-evalN/A

        \[\leadsto b \cdot \left(a + \color{blue}{\frac{-1}{2}}\right) \]
      4. lower-+.f6440.0

        \[\leadsto b \cdot \color{blue}{\left(a + -0.5\right)} \]
    5. Simplified40.0%

      \[\leadsto \color{blue}{b \cdot \left(a + -0.5\right)} \]
    6. Taylor expanded in a around 0

      \[\leadsto b \cdot \color{blue}{\frac{-1}{2}} \]
    7. Step-by-step derivation
      1. Simplified14.8%

        \[\leadsto b \cdot \color{blue}{-0.5} \]
      2. Add Preprocessing

      Developer Target 1: 99.6% accurate, 0.4× speedup?

      \[\begin{array}{l} \\ \left(\left(x + y\right) + \frac{\left(1 - {\log t}^{2}\right) \cdot z}{1 + \log t}\right) + \left(a - 0.5\right) \cdot b \end{array} \]
      (FPCore (x y z t a b)
       :precision binary64
       (+
        (+ (+ x y) (/ (* (- 1.0 (pow (log t) 2.0)) z) (+ 1.0 (log t))))
        (* (- a 0.5) b)))
      double code(double x, double y, double z, double t, double a, double b) {
      	return ((x + y) + (((1.0 - pow(log(t), 2.0)) * z) / (1.0 + log(t)))) + ((a - 0.5) * b);
      }
      
      ! Developer target (Fortran): (1 - log(t)**2)/(1 + log(t)) replaces z - z*log(t).
      real(8) function code(x, y, z, t, a, b)
          real(8), intent (in) :: x
          real(8), intent (in) :: y
          real(8), intent (in) :: z
          real(8), intent (in) :: t
          real(8), intent (in) :: a
          real(8), intent (in) :: b
          code = ((x + y) + (((1.0d0 - (log(t) ** 2.0d0)) * z) / (1.0d0 + log(t)))) + ((a - 0.5d0) * b)
      end function
      
      /** Developer target: (1 - log(t)^2)/(1 + log(t)) replaces the raw z - z*log(t). */
      public static double code(double x, double y, double z, double t, double a, double b) {
      	final double lt = Math.log(t); // hoisted: log(t) appears twice in the target
      	final double zTerm = ((1.0 - Math.pow(lt, 2.0)) * z) / (1.0 + lt);
      	return ((x + y) + zTerm) + ((a - 0.5) * b);
      }
      
      def code(x, y, z, t, a, b):
      	# Developer target: (1 - log(t)**2)/(1 + log(t)) replaces the raw z - z*log(t).
      	lt = math.log(t)  # hoisted: log(t) appears twice in the target
      	z_term = ((1.0 - math.pow(lt, 2.0)) * z) / (1.0 + lt)
      	return ((x + y) + z_term) + ((a - 0.5) * b)
      
      # Developer target (Julia): (1 - log(t)^2)/(1 + log(t)) replaces z - z*log(t).
      function code(x, y, z, t, a, b)
      	return Float64(Float64(Float64(x + y) + Float64(Float64(Float64(1.0 - (log(t) ^ 2.0)) * z) / Float64(1.0 + log(t)))) + Float64(Float64(a - 0.5) * b))
      end
      
      % Developer target (MATLAB): (1 - log(t)^2)/(1 + log(t)) replaces z - z*log(t).
      function tmp = code(x, y, z, t, a, b)
      	tmp = ((x + y) + (((1.0 - (log(t) ^ 2.0)) * z) / (1.0 + log(t)))) + ((a - 0.5) * b);
      end
      
      (* Developer target (Mathematica): (1 - log(t)^2)/(1 + log(t)) replaces z - z*log(t). *)
      code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(x + y), $MachinePrecision] + N[(N[(N[(1.0 - N[Power[N[Log[t], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] / N[(1.0 + N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \left(\left(x + y\right) + \frac{\left(1 - {\log t}^{2}\right) \cdot z}{1 + \log t}\right) + \left(a - 0.5\right) \cdot b
      \end{array}
      

      Reproduce

      ?
      herbie shell --seed 2024207 
      (FPCore (x y z t a b)
        :name "Numeric.SpecFunctions:logBeta from math-functions-0.1.5.2, A"
        :precision binary64
      
        :alt
        (! :herbie-platform default (+ (+ (+ x y) (/ (* (- 1 (pow (log t) 2)) z) (+ 1 (log t)))) (* (- a 1/2) b)))
      
        (+ (- (+ (+ x y) z) (* z (log t))) (* (- a 0.5) b)))