Numeric.SpecFunctions:logBeta from math-functions-0.1.5.2, A

Percentage Accurate: 99.8% → 99.8%
Time: 14.1s
Alternatives: 23
Speedup: 1.0×

Specification

?
\[\begin{array}{l} \\ \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (- (+ (+ x y) z) (* z (log t))) (* (- a 0.5) b)))
double code(double x, double y, double z, double t, double a, double b) {
	return (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
}
real(8) function code(x, y, z, t, a, b)
    ! Initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b,
    ! evaluated in double precision (binary64).
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = (((x + y) + z) - (z * log(t))) + ((a - 0.5d0) * b)
end function
/**
 * Initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b,
 * evaluated left to right with binary64 rounding at each step.
 */
public static double code(double x, double y, double z, double t, double a, double b) {
	final double sum = (x + y) + z;
	final double logTerm = z * Math.log(t);
	final double tail = (a - 0.5) * b;
	return (sum - logTerm) + tail;
}
def code(x, y, z, t, a, b):
	"""Initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b.

	Operations are performed in the same left-to-right order as the
	binary64 FPCore specification so rounding behavior is identical.
	"""
	shifted = (x + y) + z
	log_term = z * math.log(t)
	tail = (a - 0.5) * b
	return (shifted - log_term) + tail
# Initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b, with every
# intermediate explicitly rounded to Float64 to mirror the binary64 spec.
function code(x, y, z, t, a, b)
	return Float64(Float64(Float64(Float64(x + y) + z) - Float64(z * log(t))) + Float64(Float64(a - 0.5) * b))
end
% Initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b in double precision.
function tmp = code(x, y, z, t, a, b)
	tmp = (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
end
(* Initial program: ((x + y) + z) - z*Log[t] + (a - 0.5)*b, rounding each step to $MachinePrecision. *)
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(N[(x + y), $MachinePrecision] + z), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 23 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (- (+ (+ x y) z) (* z (log t))) (* (- a 0.5) b)))
/* Initial program (repeated listing): ((x + y) + z) - z*log(t) + (a - 0.5)*b. */
double code(double x, double y, double z, double t, double a, double b) {
	return (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
}
real(8) function code(x, y, z, t, a, b)
    ! Initial program (repeated listing): ((x + y) + z) - z*log(t) + (a - 0.5)*b.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = (((x + y) + z) - (z * log(t))) + ((a - 0.5d0) * b)
end function
/** Initial program (repeated listing): ((x + y) + z) - z*log(t) + (a - 0.5)*b. */
public static double code(double x, double y, double z, double t, double a, double b) {
	return (((x + y) + z) - (z * Math.log(t))) + ((a - 0.5) * b);
}
def code(x, y, z, t, a, b):
	"""Initial program (repeated listing): ((x + y) + z) - z*log(t) + (a - 0.5)*b."""
	return (((x + y) + z) - (z * math.log(t))) + ((a - 0.5) * b)
# Initial program (repeated listing): ((x + y) + z) - z*log(t) + (a - 0.5)*b,
# with every intermediate rounded to Float64.
function code(x, y, z, t, a, b)
	return Float64(Float64(Float64(Float64(x + y) + z) - Float64(z * log(t))) + Float64(Float64(a - 0.5) * b))
end
% Initial program (repeated listing): ((x + y) + z) - z*log(t) + (a - 0.5)*b.
function tmp = code(x, y, z, t, a, b)
	tmp = (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
end
(* Initial program (repeated listing): ((x + y) + z) - z*Log[t] + (a - 0.5)*b at $MachinePrecision. *)
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(N[(x + y), $MachinePrecision] + z), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b
\end{array}

Alternative 1: 99.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (- (+ z (+ x y)) (* z (log t))) (fma b a (* b -0.5))))
double code(double x, double y, double z, double t, double a, double b) {
	return ((z + (x + y)) - (z * log(t))) + fma(b, a, (b * -0.5));
}
# Alternative 1: (z + (x + y)) - z*log(t) + fma(b, a, b*-0.5); the
# (a - 0.5)*b tail is fused into an fma, intermediates rounded to Float64.
function code(x, y, z, t, a, b)
	return Float64(Float64(Float64(z + Float64(x + y)) - Float64(z * log(t))) + fma(b, a, Float64(b * -0.5)))
end
(* Alternative 1: (z + (x + y)) - z*Log[t] + (b*a + b*-0.5) at $MachinePrecision. *)
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(z + N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(b * a + N[(b * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot -0.5\right)
\end{array}
Derivation
  1. Initial program 99.9%

    \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
    2. sub-negN/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
    3. distribute-lft-inN/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
    4. accelerator-lowering-fma.f64N/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
    5. *-lowering-*.f64N/A

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
    6. metadata-eval99.9

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
  4. Applied egg-rr99.9%

    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
  5. Final simplification99.9%

    \[\leadsto \left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot -0.5\right) \]
  6. Add Preprocessing

Alternative 2: 89.8% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := b \cdot \left(a - 0.5\right)\\ \mathbf{if}\;t\_1 \leq -1 \cdot 10^{+80}:\\ \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\ \mathbf{elif}\;t\_1 \leq 10^{+28}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, x + y\right)\\ \mathbf{else}:\\ \;\;\;\;x + \mathsf{fma}\left(b, a + -0.5, y\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (* b (- a 0.5))))
   (if (<= t_1 -1e+80)
     (+ y (fma b (+ a -0.5) x))
     (if (<= t_1 1e+28)
       (fma z (- 1.0 (log t)) (+ x y))
       (+ x (fma b (+ a -0.5) y))))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = b * (a - 0.5);
	double tmp;
	if (t_1 <= -1e+80) {
		tmp = y + fma(b, (a + -0.5), x);
	} else if (t_1 <= 1e+28) {
		tmp = fma(z, (1.0 - log(t)), (x + y));
	} else {
		tmp = x + fma(b, (a + -0.5), y);
	}
	return tmp;
}
function code(x, y, z, t, a, b)
	t_1 = Float64(b * Float64(a - 0.5))
	tmp = 0.0
	if (t_1 <= -1e+80)
		tmp = Float64(y + fma(b, Float64(a + -0.5), x));
	elseif (t_1 <= 1e+28)
		tmp = fma(z, Float64(1.0 - log(t)), Float64(x + y));
	else
		tmp = Float64(x + fma(b, Float64(a + -0.5), y));
	end
	return tmp
end
(* Alternative 2: regime split on t_1 = b*(a - 0.5) with fma-style rewrites per branch. *)
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -1e+80], N[(y + N[(b * N[(a + -0.5), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$1, 1e+28], N[(z * N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] + N[(x + y), $MachinePrecision]), $MachinePrecision], N[(x + N[(b * N[(a + -0.5), $MachinePrecision] + y), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := b \cdot \left(a - 0.5\right)\\
\mathbf{if}\;t\_1 \leq -1 \cdot 10^{+80}:\\
\;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\

\mathbf{elif}\;t\_1 \leq 10^{+28}:\\
\;\;\;\;\mathsf{fma}\left(z, 1 - \log t, x + y\right)\\

\mathbf{else}:\\
\;\;\;\;x + \mathsf{fma}\left(b, a + -0.5, y\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < -1e80

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in z around 0

      \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right) + x} \]
      2. associate-+l+N/A

        \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
      3. +-lowering-+.f64N/A

        \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
      4. accelerator-lowering-fma.f64N/A

        \[\leadsto y + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} \]
      5. sub-negN/A

        \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) \]
      6. metadata-evalN/A

        \[\leadsto y + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) \]
      7. +-lowering-+.f6491.9

        \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) \]
    5. Simplified91.9%

      \[\leadsto \color{blue}{y + \mathsf{fma}\left(b, a + -0.5, x\right)} \]

    if -1e80 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < 9.99999999999999958e27

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in b around 0

      \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t} \]
      2. associate-+r+N/A

        \[\leadsto \color{blue}{\left(\left(x + y\right) + z\right)} + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t \]
      3. associate-+l+N/A

        \[\leadsto \color{blue}{\left(x + y\right) + \left(z + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t\right)} \]
      4. cancel-sign-sub-invN/A

        \[\leadsto \left(x + y\right) + \color{blue}{\left(z - z \cdot \log t\right)} \]
      5. *-rgt-identityN/A

        \[\leadsto \left(x + y\right) + \left(\color{blue}{z \cdot 1} - z \cdot \log t\right) \]
      6. distribute-lft-out--N/A

        \[\leadsto \left(x + y\right) + \color{blue}{z \cdot \left(1 - \log t\right)} \]
      7. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + \left(x + y\right)} \]
      8. sub-negN/A

        \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} + \left(x + y\right) \]
      9. mul-1-negN/A

        \[\leadsto z \cdot \left(1 + \color{blue}{-1 \cdot \log t}\right) + \left(x + y\right) \]
      10. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 + -1 \cdot \log t, x + y\right)} \]
      11. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(z, 1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}, x + y\right) \]
      12. sub-negN/A

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
      13. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
      14. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, x + y\right) \]
      15. +-commutativeN/A

        \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
      16. +-lowering-+.f6497.2

        \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
    5. Simplified97.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y + x\right)} \]

    if 9.99999999999999958e27 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)

    1. Initial program 100.0%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      2. sub-negN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      3. distribute-lft-inN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      4. accelerator-lowering-fma.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      5. *-lowering-*.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      6. metadata-eval100.0

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
    4. Applied egg-rr100.0%

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
    5. Taylor expanded in z around 0

      \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
    6. Step-by-step derivation
      1. associate-+r+N/A

        \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
      2. +-commutativeN/A

        \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
      3. distribute-rgt-inN/A

        \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
      4. metadata-evalN/A

        \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      5. sub-negN/A

        \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
      6. associate-+r+N/A

        \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      7. +-lowering-+.f64N/A

        \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      8. +-commutativeN/A

        \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
      9. accelerator-lowering-fma.f64N/A

        \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
      10. sub-negN/A

        \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
      11. metadata-evalN/A

        \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
      12. +-lowering-+.f6489.8

        \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
    7. Simplified89.8%

      \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification93.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;b \cdot \left(a - 0.5\right) \leq -1 \cdot 10^{+80}:\\ \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\ \mathbf{elif}\;b \cdot \left(a - 0.5\right) \leq 10^{+28}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, x + y\right)\\ \mathbf{else}:\\ \;\;\;\;x + \mathsf{fma}\left(b, a + -0.5, y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 94.1% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \mathsf{fma}\left(b, a + -0.5, y\right)\\ \mathbf{if}\;b \leq -2.7 \cdot 10^{+122}:\\ \;\;\;\;x + t\_1\\ \mathbf{elif}\;b \leq 1.7 \cdot 10^{+60}:\\ \;\;\;\;\left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot a\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, t\_1\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (fma b (+ a -0.5) y)))
   (if (<= b -2.7e+122)
     (+ x t_1)
     (if (<= b 1.7e+60)
       (+ (- (+ z (+ x y)) (* z (log t))) (* b a))
       (fma z (- 1.0 (log t)) t_1)))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = fma(b, (a + -0.5), y);
	double tmp;
	if (b <= -2.7e+122) {
		tmp = x + t_1;
	} else if (b <= 1.7e+60) {
		tmp = ((z + (x + y)) - (z * log(t))) + (b * a);
	} else {
		tmp = fma(z, (1.0 - log(t)), t_1);
	}
	return tmp;
}
function code(x, y, z, t, a, b)
	t_1 = fma(b, Float64(a + -0.5), y)
	tmp = 0.0
	if (b <= -2.7e+122)
		tmp = Float64(x + t_1);
	elseif (b <= 1.7e+60)
		tmp = Float64(Float64(Float64(z + Float64(x + y)) - Float64(z * log(t))) + Float64(b * a));
	else
		tmp = fma(z, Float64(1.0 - log(t)), t_1);
	end
	return tmp
end
(* Alternative 3: regime split on b, sharing t_1 = b*(a + -0.5) + y between the extreme branches. *)
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(b * N[(a + -0.5), $MachinePrecision] + y), $MachinePrecision]}, If[LessEqual[b, -2.7e+122], N[(x + t$95$1), $MachinePrecision], If[LessEqual[b, 1.7e+60], N[(N[(N[(z + N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(b * a), $MachinePrecision]), $MachinePrecision], N[(z * N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] + t$95$1), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \mathsf{fma}\left(b, a + -0.5, y\right)\\
\mathbf{if}\;b \leq -2.7 \cdot 10^{+122}:\\
\;\;\;\;x + t\_1\\

\mathbf{elif}\;b \leq 1.7 \cdot 10^{+60}:\\
\;\;\;\;\left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot a\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(z, 1 - \log t, t\_1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if b < -2.6999999999999998e122

    1. Initial program 100.0%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
      2. sub-negN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      3. distribute-lft-inN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      4. accelerator-lowering-fma.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
      5. *-lowering-*.f64N/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      6. metadata-eval100.0

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
    4. Applied egg-rr100.0%

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
    5. Taylor expanded in z around 0

      \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
    6. Step-by-step derivation
      1. associate-+r+N/A

        \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
      2. +-commutativeN/A

        \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
      3. distribute-rgt-inN/A

        \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
      4. metadata-evalN/A

        \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
      5. sub-negN/A

        \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
      6. associate-+r+N/A

        \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      7. +-lowering-+.f64N/A

        \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      8. +-commutativeN/A

        \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
      9. accelerator-lowering-fma.f64N/A

        \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
      10. sub-negN/A

        \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
      11. metadata-evalN/A

        \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
      12. +-lowering-+.f6498.1

        \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
    7. Simplified98.1%

      \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]

    if -2.6999999999999998e122 < b < 1.7e60

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in a around inf

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{a \cdot b} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot a} \]
      2. *-lowering-*.f6498.5

        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot a} \]
    5. Simplified98.5%

      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot a} \]

    if 1.7e60 < b

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - z \cdot \log t} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - \color{blue}{\log t \cdot z} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
      3. log-recN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
      4. *-commutativeN/A

        \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
      5. +-commutativeN/A

        \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right)} \]
      6. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(z + b \cdot \left(a - \frac{1}{2}\right)\right) + y\right)} \]
      7. associate-+l+N/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(z + \left(b \cdot \left(a - \frac{1}{2}\right) + y\right)\right)} \]
      8. +-commutativeN/A

        \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(z + \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right)}\right) \]
      9. associate-+r+N/A

        \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + z\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
      10. *-rgt-identityN/A

        \[\leadsto \left(z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{z \cdot 1}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      11. distribute-lft-inN/A

        \[\leadsto \color{blue}{z \cdot \left(\log \left(\frac{1}{t}\right) + 1\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      12. +-commutativeN/A

        \[\leadsto z \cdot \color{blue}{\left(1 + \log \left(\frac{1}{t}\right)\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      13. log-recN/A

        \[\leadsto z \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      14. sub-negN/A

        \[\leadsto z \cdot \color{blue}{\left(1 - \log t\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      15. sub-negN/A

        \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
      16. mul-1-negN/A

        \[\leadsto z \cdot \left(1 + \color{blue}{-1 \cdot \log t}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
    5. Simplified87.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification96.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;b \leq -2.7 \cdot 10^{+122}:\\ \;\;\;\;x + \mathsf{fma}\left(b, a + -0.5, y\right)\\ \mathbf{elif}\;b \leq 1.7 \cdot 10^{+60}:\\ \;\;\;\;\left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot a\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 21.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot \left(a - 0.5\right) \leq -4 \cdot 10^{-128}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;y\\ \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (if (<= (+ (- (+ z (+ x y)) (* z (log t))) (* b (- a 0.5))) -4e-128) x y))
double code(double x, double y, double z, double t, double a, double b) {
	double tmp;
	if ((((z + (x + y)) - (z * log(t))) + (b * (a - 0.5))) <= -4e-128) {
		tmp = x;
	} else {
		tmp = y;
	}
	return tmp;
}
real(8) function code(x, y, z, t, a, b)
    ! Alternative 4 (low accuracy): the full expression serves only as a
    ! branch guard; the result is the bare Taylor remainder x or y.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: tmp
    if ((((z + (x + y)) - (z * log(t))) + (b * (a - 0.5d0))) <= (-4d-128)) then
        tmp = x
    else
        tmp = y
    end if
    code = tmp
end function
/**
 * Alternative 4 (low accuracy): the full expression is evaluated only as a
 * branch guard; the result is the bare Taylor remainder x or y.
 */
public static double code(double x, double y, double z, double t, double a, double b) {
	final double guard = ((z + (x + y)) - (z * Math.log(t))) + (b * (a - 0.5));
	return (guard <= -4e-128) ? x : y;
}
def code(x, y, z, t, a, b):
	"""Alternative 4 (low accuracy): the full expression is used only as a
	branch guard; the result is the bare Taylor remainder x or y."""
	guard = ((z + (x + y)) - (z * math.log(t))) + (b * (a - 0.5))
	return x if guard <= -4e-128 else y
# Alternative 4 (low accuracy): the full expression is evaluated only as a
# branch guard; the result is the bare Taylor remainder x or y.
function code(x, y, z, t, a, b)
	tmp = 0.0
	if (Float64(Float64(Float64(z + Float64(x + y)) - Float64(z * log(t))) + Float64(b * Float64(a - 0.5))) <= -4e-128)
		tmp = x;
	else
		tmp = y;
	end
	return tmp
end
% Herbie alternative 4: returns x when the original expression is <= -4e-128, else y.
function tmp_2 = code(x, y, z, t, a, b)
	tmp = 0.0;
	if ((((z + (x + y)) - (z * log(t))) + (b * (a - 0.5))) <= -4e-128)
		tmp = x;
	else
		tmp = y;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_, a_, b_] := If[LessEqual[N[(N[(N[(z + N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -4e-128], x, y]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot \left(a - 0.5\right) \leq -4 \cdot 10^{-128}:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;y\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (+.f64 (-.f64 (+.f64 (+.f64 x y) z) (*.f64 z (log.f64 t))) (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)) < -4.00000000000000022e-128

    1. Initial program 99.8%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x} \]
    4. Step-by-step derivation
      1. Simplified 22.4%

        \[\leadsto \color{blue}{x} \]

      if -4.00000000000000022e-128 < (+.f64 (-.f64 (+.f64 (+.f64 x y) z) (*.f64 z (log.f64 t))) (*.f64 (-.f64 a #s(literal 1/2 binary64)) b))

      1. Initial program 99.9%

        \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
      2. Add Preprocessing
      3. Taylor expanded in y around inf

        \[\leadsto \color{blue}{y} \]
      4. Step-by-step derivation
        1. Simplified 23.3%

          \[\leadsto \color{blue}{y} \]
      5. Recombined 2 regimes into one program.
      6. Final simplification 22.9%

        \[\leadsto \begin{array}{l} \mathbf{if}\;\left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot \left(a - 0.5\right) \leq -4 \cdot 10^{-128}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;y\\ \end{array} \]
      7. Add Preprocessing

      Alternative 5: 91.5% accurate, 1.0× speedup?

      \[\begin{array}{l} \\ \begin{array}{l} t_1 := \mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\ \mathbf{if}\;z \leq -3.3 \cdot 10^{+86}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;z \leq 8.4 \cdot 10^{+155}:\\ \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
      ; Herbie alternative 5 (report labels it 91.5% accurate): regime split
      ; on z; the middle regime drops the z*(1 - log t) term, the outer
      ; regimes use the fma form t_1.
      (FPCore (x y z t a b)
       :precision binary64
       (let* ((t_1 (fma z (- 1.0 (log t)) (fma b (+ a -0.5) y))))
         (if (<= z -3.3e+86)
           t_1
           (if (<= z 8.4e+155) (+ y (fma b (+ a -0.5) x)) t_1))))
      /* Herbie alternative 5: regime split on z. For moderate z use
       * y + fma(b, a-0.5, x); for z outside (-3.3e86, 8.4e155] use the
       * fma form that folds z*(1 - log t) into the sum. */
      double code(double x, double y, double z, double t, double a, double b) {
      	double t_1 = fma(z, (1.0 - log(t)), fma(b, (a + -0.5), y));
      	double tmp;
      	if (z <= -3.3e+86) {
      		tmp = t_1;
      	} else if (z <= 8.4e+155) {
      		tmp = y + fma(b, (a + -0.5), x);
      	} else {
      		tmp = t_1;
      	}
      	return tmp;
      }
      
      # Herbie alternative 5: regime split on z; outer regimes use the fma
      # form t_1 = fma(z, 1 - log(t), fma(b, a - 0.5, y)).
      function code(x, y, z, t, a, b)
      	t_1 = fma(z, Float64(1.0 - log(t)), fma(b, Float64(a + -0.5), y))
      	tmp = 0.0
      	if (z <= -3.3e+86)
      		tmp = t_1;
      	elseif (z <= 8.4e+155)
      		tmp = Float64(y + fma(b, Float64(a + -0.5), x));
      	else
      		tmp = t_1;
      	end
      	return tmp
      end
      
      code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(z * N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] + N[(b * N[(a + -0.5), $MachinePrecision] + y), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -3.3e+86], t$95$1, If[LessEqual[z, 8.4e+155], N[(y + N[(b * N[(a + -0.5), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision], t$95$1]]]
      
      \begin{array}{l}
      
      \\
      \begin{array}{l}
      t_1 := \mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\
      \mathbf{if}\;z \leq -3.3 \cdot 10^{+86}:\\
      \;\;\;\;t\_1\\
      
      \mathbf{elif}\;z \leq 8.4 \cdot 10^{+155}:\\
      \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\
      
      \mathbf{else}:\\
      \;\;\;\;t\_1\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if z < -3.2999999999999999e86 or 8.4e155 < z

        1. Initial program 99.7%

          \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
        2. Add Preprocessing
        3. Taylor expanded in x around 0

          \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - z \cdot \log t} \]
        4. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - \color{blue}{\log t \cdot z} \]
          2. cancel-sign-sub-invN/A

            \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
          3. log-recN/A

            \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
          4. *-commutativeN/A

            \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
          5. +-commutativeN/A

            \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right)} \]
          6. +-commutativeN/A

            \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(z + b \cdot \left(a - \frac{1}{2}\right)\right) + y\right)} \]
          7. associate-+l+N/A

            \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(z + \left(b \cdot \left(a - \frac{1}{2}\right) + y\right)\right)} \]
          8. +-commutativeN/A

            \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(z + \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right)}\right) \]
          9. associate-+r+N/A

            \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + z\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
          10. *-rgt-identityN/A

            \[\leadsto \left(z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{z \cdot 1}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
          11. distribute-lft-inN/A

            \[\leadsto \color{blue}{z \cdot \left(\log \left(\frac{1}{t}\right) + 1\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
          12. +-commutativeN/A

            \[\leadsto z \cdot \color{blue}{\left(1 + \log \left(\frac{1}{t}\right)\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
          13. log-recN/A

            \[\leadsto z \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
          14. sub-negN/A

            \[\leadsto z \cdot \color{blue}{\left(1 - \log t\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
          15. sub-negN/A

            \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
          16. mul-1-negN/A

            \[\leadsto z \cdot \left(1 + \color{blue}{-1 \cdot \log t}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
        5. Simplified 91.3%

          \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)} \]

        if -3.2999999999999999e86 < z < 8.4e155

        1. Initial program 99.9%

          \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
        2. Add Preprocessing
        3. Taylor expanded in z around 0

          \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
        4. Step-by-step derivation
          1. +-commutativeN/A

            \[\leadsto \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right) + x} \]
          2. associate-+l+N/A

            \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
          3. +-lowering-+.f64N/A

            \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
          4. accelerator-lowering-fma.f64N/A

            \[\leadsto y + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} \]
          5. sub-negN/A

            \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) \]
          6. metadata-evalN/A

            \[\leadsto y + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) \]
          7. +-lowering-+.f64 95.0

            \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) \]
        5. Simplified 95.0%

          \[\leadsto \color{blue}{y + \mathsf{fma}\left(b, a + -0.5, x\right)} \]
      3. Recombined 2 regimes into one program.
      4. Add Preprocessing

      Alternative 6: 57.0% accurate, 1.0× speedup?

      \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(z + \left(x + y\right)\right) - z \cdot \log t \leq -4 \cdot 10^{-128}:\\ \;\;\;\;\mathsf{fma}\left(a + -0.5, b, x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(b, a + -0.5, y\right)\\ \end{array} \end{array} \]
      ; Herbie alternative 6 (report labels it 57.0% accurate): branch on the
      ; sign/magnitude of (z + (x + y)) - z*log(t), using an fma of b*(a-0.5)
      ; with x or y respectively.
      (FPCore (x y z t a b)
       :precision binary64
       (if (<= (- (+ z (+ x y)) (* z (log t))) -4e-128)
         (fma (+ a -0.5) b x)
         (fma b (+ a -0.5) y)))
      /* Herbie alternative 6: branch on (z + (x + y)) - z*log(t) <= -4e-128;
       * either side returns fma of b*(a - 0.5) added to x or y. */
      double code(double x, double y, double z, double t, double a, double b) {
      	double tmp;
      	if (((z + (x + y)) - (z * log(t))) <= -4e-128) {
      		tmp = fma((a + -0.5), b, x);
      	} else {
      		tmp = fma(b, (a + -0.5), y);
      	}
      	return tmp;
      }
      
      # Herbie alternative 6: branch on (z + (x + y)) - z*log(t) <= -4e-128;
      # both sides compute fma of b*(a - 0.5) with x or y.
      function code(x, y, z, t, a, b)
      	tmp = 0.0
      	if (Float64(Float64(z + Float64(x + y)) - Float64(z * log(t))) <= -4e-128)
      		tmp = fma(Float64(a + -0.5), b, x);
      	else
      		tmp = fma(b, Float64(a + -0.5), y);
      	end
      	return tmp
      end
      
      code[x_, y_, z_, t_, a_, b_] := If[LessEqual[N[(N[(z + N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -4e-128], N[(N[(a + -0.5), $MachinePrecision] * b + x), $MachinePrecision], N[(b * N[(a + -0.5), $MachinePrecision] + y), $MachinePrecision]]
      
      \begin{array}{l}
      
      \\
      \begin{array}{l}
      \mathbf{if}\;\left(z + \left(x + y\right)\right) - z \cdot \log t \leq -4 \cdot 10^{-128}:\\
      \;\;\;\;\mathsf{fma}\left(a + -0.5, b, x\right)\\
      
      \mathbf{else}:\\
      \;\;\;\;\mathsf{fma}\left(b, a + -0.5, y\right)\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if (-.f64 (+.f64 (+.f64 x y) z) (*.f64 z (log.f64 t))) < -4.00000000000000022e-128

        1. Initial program 99.8%

          \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
        2. Add Preprocessing
        3. Taylor expanded in x around inf

          \[\leadsto \color{blue}{x} + \left(a - \frac{1}{2}\right) \cdot b \]
        4. Step-by-step derivation
          1. Simplified 56.2%

            \[\leadsto \color{blue}{x} + \left(a - 0.5\right) \cdot b \]
          2. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \color{blue}{\left(a - \frac{1}{2}\right) \cdot b + x} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(a - \frac{1}{2}, b, x\right)} \]
            3. sub-negN/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, b, x\right) \]
            4. metadata-evalN/A

              \[\leadsto \mathsf{fma}\left(a + \color{blue}{\frac{-1}{2}}, b, x\right) \]
            5. +-lowering-+.f64 56.2

              \[\leadsto \mathsf{fma}\left(\color{blue}{a + -0.5}, b, x\right) \]
          3. Applied egg-rr 56.2%

            \[\leadsto \color{blue}{\mathsf{fma}\left(a + -0.5, b, x\right)} \]

          if -4.00000000000000022e-128 < (-.f64 (+.f64 (+.f64 x y) z) (*.f64 z (log.f64 t)))

          1. Initial program 99.9%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
            2. sub-negN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            3. distribute-lft-inN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            4. accelerator-lowering-fma.f64N/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            5. *-lowering-*.f64N/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
            6. metadata-eval 99.9

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
          4. Applied egg-rr 99.9%

            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
          5. Taylor expanded in z around 0

            \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
          6. Step-by-step derivation
            1. associate-+r+N/A

              \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
            2. +-commutativeN/A

              \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
            3. distribute-rgt-inN/A

              \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
            4. metadata-evalN/A

              \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
            5. sub-negN/A

              \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
            6. associate-+r+N/A

              \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
            7. +-lowering-+.f64N/A

              \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
            8. +-commutativeN/A

              \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
            9. accelerator-lowering-fma.f64N/A

              \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
            10. sub-negN/A

              \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
            11. metadata-evalN/A

              \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
            12. +-lowering-+.f64 81.3

              \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
          7. Simplified 81.3%

            \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
          8. Taylor expanded in x around 0

            \[\leadsto \color{blue}{y + b \cdot \left(a - \frac{1}{2}\right)} \]
          9. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right) + y} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
            3. sub-negN/A

              \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
            4. metadata-evalN/A

              \[\leadsto \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
            5. +-lowering-+.f64 59.7

              \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
          10. Simplified 59.7%

            \[\leadsto \color{blue}{\mathsf{fma}\left(b, a + -0.5, y\right)} \]
        5. Recombined 2 regimes into one program.
        6. Final simplification 58.0%

          \[\leadsto \begin{array}{l} \mathbf{if}\;\left(z + \left(x + y\right)\right) - z \cdot \log t \leq -4 \cdot 10^{-128}:\\ \;\;\;\;\mathsf{fma}\left(a + -0.5, b, x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(b, a + -0.5, y\right)\\ \end{array} \]
        7. Add Preprocessing

        Alternative 7: 76.5% accurate, 1.0× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x + y \leq -2 \cdot 10^{+84}:\\ \;\;\;\;b \cdot a + \left(\left(x + z\right) - z \cdot \log t\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\ \end{array} \end{array} \]
        ; Herbie alternative 7 (report labels it 76.5% accurate): regime
        ; split on x + y; large-negative regime drops y and the 0.5 term.
        (FPCore (x y z t a b)
         :precision binary64
         (if (<= (+ x y) -2e+84)
           (+ (* b a) (- (+ x z) (* z (log t))))
           (fma z (- 1.0 (log t)) (fma b (+ a -0.5) y))))
        /* Herbie alternative 7: when x + y <= -2e84 use b*a + ((x + z) -
         * z*log(t)); otherwise use the nested-fma form. */
        double code(double x, double y, double z, double t, double a, double b) {
        	double tmp;
        	if ((x + y) <= -2e+84) {
        		tmp = (b * a) + ((x + z) - (z * log(t)));
        	} else {
        		tmp = fma(z, (1.0 - log(t)), fma(b, (a + -0.5), y));
        	}
        	return tmp;
        }
        
        # Herbie alternative 7: regime split on x + y at -2e84; the else
        # branch uses the nested-fma form fma(z, 1 - log(t), fma(b, a-0.5, y)).
        function code(x, y, z, t, a, b)
        	tmp = 0.0
        	if (Float64(x + y) <= -2e+84)
        		tmp = Float64(Float64(b * a) + Float64(Float64(x + z) - Float64(z * log(t))));
        	else
        		tmp = fma(z, Float64(1.0 - log(t)), fma(b, Float64(a + -0.5), y));
        	end
        	return tmp
        end
        
        code[x_, y_, z_, t_, a_, b_] := If[LessEqual[N[(x + y), $MachinePrecision], -2e+84], N[(N[(b * a), $MachinePrecision] + N[(N[(x + z), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(z * N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] + N[(b * N[(a + -0.5), $MachinePrecision] + y), $MachinePrecision]), $MachinePrecision]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        \mathbf{if}\;x + y \leq -2 \cdot 10^{+84}:\\
        \;\;\;\;b \cdot a + \left(\left(x + z\right) - z \cdot \log t\right)\\
        
        \mathbf{else}:\\
        \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if (+.f64 x y) < -2.00000000000000012e84

          1. Initial program 99.9%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in a around inf

            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{a \cdot b} \]
          4. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot a} \]
            2. *-lowering-*.f64 94.6

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot a} \]
          5. Simplified 94.6%

            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot a} \]
          6. Taylor expanded in y around 0

            \[\leadsto \left(\color{blue}{\left(x + z\right)} - z \cdot \log t\right) + b \cdot a \]
          7. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \left(\color{blue}{\left(z + x\right)} - z \cdot \log t\right) + b \cdot a \]
            2. +-lowering-+.f64 62.9

              \[\leadsto \left(\color{blue}{\left(z + x\right)} - z \cdot \log t\right) + b \cdot a \]
          8. Simplified 62.9%

            \[\leadsto \left(\color{blue}{\left(z + x\right)} - z \cdot \log t\right) + b \cdot a \]

          if -2.00000000000000012e84 < (+.f64 x y)

          1. Initial program 99.8%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in x around 0

            \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - z \cdot \log t} \]
          4. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) - \color{blue}{\log t \cdot z} \]
            2. cancel-sign-sub-invN/A

              \[\leadsto \color{blue}{\left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \left(\mathsf{neg}\left(\log t\right)\right) \cdot z} \]
            3. log-recN/A

              \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{\log \left(\frac{1}{t}\right)} \cdot z \]
            4. *-commutativeN/A

              \[\leadsto \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right) + \color{blue}{z \cdot \log \left(\frac{1}{t}\right)} \]
            5. +-commutativeN/A

              \[\leadsto \color{blue}{z \cdot \log \left(\frac{1}{t}\right) + \left(y + \left(z + b \cdot \left(a - \frac{1}{2}\right)\right)\right)} \]
            6. +-commutativeN/A

              \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(\left(z + b \cdot \left(a - \frac{1}{2}\right)\right) + y\right)} \]
            7. associate-+l+N/A

              \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{\left(z + \left(b \cdot \left(a - \frac{1}{2}\right) + y\right)\right)} \]
            8. +-commutativeN/A

              \[\leadsto z \cdot \log \left(\frac{1}{t}\right) + \left(z + \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right)}\right) \]
            9. associate-+r+N/A

              \[\leadsto \color{blue}{\left(z \cdot \log \left(\frac{1}{t}\right) + z\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
            10. *-rgt-identityN/A

              \[\leadsto \left(z \cdot \log \left(\frac{1}{t}\right) + \color{blue}{z \cdot 1}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
            11. distribute-lft-inN/A

              \[\leadsto \color{blue}{z \cdot \left(\log \left(\frac{1}{t}\right) + 1\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
            12. +-commutativeN/A

              \[\leadsto z \cdot \color{blue}{\left(1 + \log \left(\frac{1}{t}\right)\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
            13. log-recN/A

              \[\leadsto z \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
            14. sub-negN/A

              \[\leadsto z \cdot \color{blue}{\left(1 - \log t\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
            15. sub-negN/A

              \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
            16. mul-1-negN/A

              \[\leadsto z \cdot \left(1 + \color{blue}{-1 \cdot \log t}\right) + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right) \]
          5. Simplified 82.9%

            \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)} \]
        3. Recombined 2 regimes into one program.
        4. Final simplification 77.5%

          \[\leadsto \begin{array}{l} \mathbf{if}\;x + y \leq -2 \cdot 10^{+84}:\\ \;\;\;\;b \cdot a + \left(\left(x + z\right) - z \cdot \log t\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, \mathsf{fma}\left(b, a + -0.5, y\right)\right)\\ \end{array} \]
        5. Add Preprocessing

        Alternative 8: 99.8% accurate, 1.0× speedup?

        \[\begin{array}{l} \\ \left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot \left(a - 0.5\right) \end{array} \]
        ; Herbie alternative 8 (report labels it 99.8% accurate): the original
        ; expression with sums/products reassociated — no regime split.
        (FPCore (x y z t a b)
         :precision binary64
         (+ (- (+ z (+ x y)) (* z (log t))) (* b (- a 0.5))))
        /* Herbie alternative 8: original expression, reassociated as
         * ((z + (x + y)) - z*log(t)) + b*(a - 0.5). */
        double code(double x, double y, double z, double t, double a, double b) {
        	return ((z + (x + y)) - (z * log(t))) + (b * (a - 0.5));
        }
        
        ! Herbie alternative 8: original expression reassociated as
        ! ((z + (x + y)) - z*log(t)) + b*(a - 0.5d0).
        real(8) function code(x, y, z, t, a, b)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            real(8), intent (in) :: z
            real(8), intent (in) :: t
            real(8), intent (in) :: a
            real(8), intent (in) :: b
            code = ((z + (x + y)) - (z * log(t))) + (b * (a - 0.5d0))
        end function
        
        // Herbie alternative 8: original expression reassociated as
        // ((z + (x + y)) - z*log(t)) + b*(a - 0.5).
        public static double code(double x, double y, double z, double t, double a, double b) {
        	return ((z + (x + y)) - (z * Math.log(t))) + (b * (a - 0.5));
        }
        
        # Herbie alternative 8: original expression reassociated.
        def code(x, y, z, t, a, b):
        	return ((z + (x + y)) - (z * math.log(t))) + (b * (a - 0.5))
        
        # Herbie alternative 8: original expression reassociated.
        function code(x, y, z, t, a, b)
        	return Float64(Float64(Float64(z + Float64(x + y)) - Float64(z * log(t))) + Float64(b * Float64(a - 0.5)))
        end
        
        % Herbie alternative 8: original expression reassociated.
        function tmp = code(x, y, z, t, a, b)
        	tmp = ((z + (x + y)) - (z * log(t))) + (b * (a - 0.5));
        end
        
        code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(z + N[(x + y), $MachinePrecision]), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot \left(a - 0.5\right)
        \end{array}
        
        Derivation
        1. Initial program 99.9%

          \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
        2. Add Preprocessing
        3. Final simplification 99.9%

          \[\leadsto \left(\left(z + \left(x + y\right)\right) - z \cdot \log t\right) + b \cdot \left(a - 0.5\right) \]
        4. Add Preprocessing

        Alternative 9: 85.9% accurate, 1.0× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} t_1 := \mathsf{fma}\left(z, 1 - \log t, y\right)\\ \mathbf{if}\;z \leq -1.05 \cdot 10^{+182}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;z \leq 1.32 \cdot 10^{+156}:\\ \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
        ; Herbie alternative 9 (report labels it 85.9% accurate): regime split
        ; on z; outer regimes drop the b*(a - 0.5) term entirely.
        (FPCore (x y z t a b)
         :precision binary64
         (let* ((t_1 (fma z (- 1.0 (log t)) y)))
           (if (<= z -1.05e+182)
             t_1
             (if (<= z 1.32e+156) (+ y (fma b (+ a -0.5) x)) t_1))))
        /* Herbie alternative 9: regime split on z; outer regimes use
         * fma(z, 1 - log(t), y) — the b*(a - 0.5) term is dropped there. */
        double code(double x, double y, double z, double t, double a, double b) {
        	double t_1 = fma(z, (1.0 - log(t)), y);
        	double tmp;
        	if (z <= -1.05e+182) {
        		tmp = t_1;
        	} else if (z <= 1.32e+156) {
        		tmp = y + fma(b, (a + -0.5), x);
        	} else {
        		tmp = t_1;
        	}
        	return tmp;
        }
        
        # Herbie alternative 9: regime split on z; outer regimes use
        # fma(z, 1 - log(t), y), dropping the b*(a - 0.5) term.
        function code(x, y, z, t, a, b)
        	t_1 = fma(z, Float64(1.0 - log(t)), y)
        	tmp = 0.0
        	if (z <= -1.05e+182)
        		tmp = t_1;
        	elseif (z <= 1.32e+156)
        		tmp = Float64(y + fma(b, Float64(a + -0.5), x));
        	else
        		tmp = t_1;
        	end
        	return tmp
        end
        
        code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(z * N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision]}, If[LessEqual[z, -1.05e+182], t$95$1, If[LessEqual[z, 1.32e+156], N[(y + N[(b * N[(a + -0.5), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision], t$95$1]]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        t_1 := \mathsf{fma}\left(z, 1 - \log t, y\right)\\
        \mathbf{if}\;z \leq -1.05 \cdot 10^{+182}:\\
        \;\;\;\;t\_1\\
        
        \mathbf{elif}\;z \leq 1.32 \cdot 10^{+156}:\\
        \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\
        
        \mathbf{else}:\\
        \;\;\;\;t\_1\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if z < -1.0499999999999999e182 or 1.3199999999999999e156 < z

          1. Initial program 99.6%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in b around 0

            \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) - z \cdot \log t} \]
          4. Step-by-step derivation
            1. cancel-sign-sub-invN/A

              \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t} \]
            2. associate-+r+N/A

              \[\leadsto \color{blue}{\left(\left(x + y\right) + z\right)} + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t \]
            3. associate-+l+N/A

              \[\leadsto \color{blue}{\left(x + y\right) + \left(z + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t\right)} \]
            4. cancel-sign-sub-invN/A

              \[\leadsto \left(x + y\right) + \color{blue}{\left(z - z \cdot \log t\right)} \]
            5. *-rgt-identityN/A

              \[\leadsto \left(x + y\right) + \left(\color{blue}{z \cdot 1} - z \cdot \log t\right) \]
            6. distribute-lft-out--N/A

              \[\leadsto \left(x + y\right) + \color{blue}{z \cdot \left(1 - \log t\right)} \]
            7. +-commutativeN/A

              \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + \left(x + y\right)} \]
            8. sub-negN/A

              \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} + \left(x + y\right) \]
            9. mul-1-negN/A

              \[\leadsto z \cdot \left(1 + \color{blue}{-1 \cdot \log t}\right) + \left(x + y\right) \]
            10. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 + -1 \cdot \log t, x + y\right)} \]
            11. mul-1-negN/A

              \[\leadsto \mathsf{fma}\left(z, 1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}, x + y\right) \]
            12. sub-negN/A

              \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
            13. --lowering--.f64N/A

              \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
            14. log-lowering-log.f64N/A

              \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, x + y\right) \]
            15. +-commutativeN/A

              \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
            16. +-lowering-+.f64 — 75.3

              \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
          5. Simplified — 75.3%

            \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y + x\right)} \]
          6. Taylor expanded in x around 0

            \[\leadsto \color{blue}{y + z \cdot \left(1 - \log t\right)} \]
          7. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + y} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y\right)} \]
            3. --lowering--.f64N/A

              \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, y\right) \]
            4. log-lowering-log.f6471.3

              \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, y\right) \]
          8. Simplified71.3%

            \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y\right)} \]

          if -1.0499999999999999e182 < z < 1.3199999999999999e156

          1. Initial program 99.9%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in z around 0

            \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
          4. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right) + x} \]
            2. associate-+l+N/A

              \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
            3. +-lowering-+.f64N/A

              \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
            4. accelerator-lowering-fma.f64N/A

              \[\leadsto y + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} \]
            5. sub-negN/A

              \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) \]
            6. metadata-evalN/A

              \[\leadsto y + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) \]
            7. +-lowering-+.f6492.2

              \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) \]
          5. Simplified92.2%

            \[\leadsto \color{blue}{y + \mathsf{fma}\left(b, a + -0.5, x\right)} \]
        3. Recombined 2 regimes into one program.
        4. Add Preprocessing

        Alternative 10: 85.2% accurate, 1.0× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -3 \cdot 10^{+216}:\\ \;\;\;\;\mathsf{fma}\left(\log t, -z, z\right)\\ \mathbf{elif}\;z \leq 3.6 \cdot 10^{+205}:\\ \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, x\right)\\ \end{array} \end{array} \]
        (FPCore (x y z t a b)
         :precision binary64
         (if (<= z -3e+216)
           (fma (log t) (- z) z)
           (if (<= z 3.6e+205) (+ y (fma b (+ a -0.5) x)) (fma z (- 1.0 (log t)) x))))
        double code(double x, double y, double z, double t, double a, double b) {
        	double tmp;
        	if (z <= -3e+216) {
        		tmp = fma(log(t), -z, z);
        	} else if (z <= 3.6e+205) {
        		tmp = y + fma(b, (a + -0.5), x);
        	} else {
        		tmp = fma(z, (1.0 - log(t)), x);
        	}
        	return tmp;
        }
        
        # Alternative 10 (Julia): same three z-regimes as the C listing above,
        # written with early returns instead of a mutable accumulator.
        function code(x, y, z, t, a, b)
        	if z <= -3e+216
        		return fma(log(t), Float64(-z), z)
        	elseif z <= 3.6e+205
        		return Float64(y + fma(b, Float64(a + -0.5), x))
        	else
        		return fma(z, Float64(1.0 - log(t)), x)
        	end
        end
        
        code[x_, y_, z_, t_, a_, b_] := If[LessEqual[z, -3e+216], N[(N[Log[t], $MachinePrecision] * (-z) + z), $MachinePrecision], If[LessEqual[z, 3.6e+205], N[(y + N[(b * N[(a + -0.5), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision], N[(z * N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        \mathbf{if}\;z \leq -3 \cdot 10^{+216}:\\
        \;\;\;\;\mathsf{fma}\left(\log t, -z, z\right)\\
        
        \mathbf{elif}\;z \leq 3.6 \cdot 10^{+205}:\\
        \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\
        
        \mathbf{else}:\\
        \;\;\;\;\mathsf{fma}\left(z, 1 - \log t, x\right)\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 3 regimes
        2. if z < -2.9999999999999998e216

          1. Initial program 99.4%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
            2. sub-negN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            3. distribute-lft-inN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            4. accelerator-lowering-fma.f64N/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            5. *-lowering-*.f64N/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
            6. metadata-eval99.4

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
          4. Applied egg-rr — 99.4%

            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
          5. Taylor expanded in z around inf

            \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right)} \]
          6. Step-by-step derivation
            1. sub-negN/A

              \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} \]
            2. log-recN/A

              \[\leadsto z \cdot \left(1 + \color{blue}{\log \left(\frac{1}{t}\right)}\right) \]
            3. +-commutativeN/A

              \[\leadsto z \cdot \color{blue}{\left(\log \left(\frac{1}{t}\right) + 1\right)} \]
            4. distribute-rgt-inN/A

              \[\leadsto \color{blue}{\log \left(\frac{1}{t}\right) \cdot z + 1 \cdot z} \]
            5. *-lft-identityN/A

              \[\leadsto \log \left(\frac{1}{t}\right) \cdot z + \color{blue}{z} \]
            6. log-recN/A

              \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)} \cdot z + z \]
            7. distribute-lft-neg-inN/A

              \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t \cdot z\right)\right)} + z \]
            8. distribute-rgt-neg-inN/A

              \[\leadsto \color{blue}{\log t \cdot \left(\mathsf{neg}\left(z\right)\right)} + z \]
            9. neg-mul-1N/A

              \[\leadsto \log t \cdot \color{blue}{\left(-1 \cdot z\right)} + z \]
            10. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -1 \cdot z, z\right)} \]
            11. log-lowering-log.f64N/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{\log t}, -1 \cdot z, z\right) \]
            12. neg-mul-1N/A

              \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{\mathsf{neg}\left(z\right)}, z\right) \]
            13. neg-lowering-neg.f6487.0

              \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{-z}, z\right) \]
          7. Simplified87.0%

            \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -z, z\right)} \]

          if -2.9999999999999998e216 < z < 3.60000000000000002e205

          1. Initial program 99.9%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in z around 0

            \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
          4. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right) + x} \]
            2. associate-+l+N/A

              \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
            3. +-lowering-+.f64N/A

              \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
            4. accelerator-lowering-fma.f64N/A

              \[\leadsto y + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} \]
            5. sub-negN/A

              \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) \]
            6. metadata-evalN/A

              \[\leadsto y + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) \]
            7. +-lowering-+.f6487.5

              \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) \]
          5. Simplified87.5%

            \[\leadsto \color{blue}{y + \mathsf{fma}\left(b, a + -0.5, x\right)} \]

          if 3.60000000000000002e205 < z

          1. Initial program 99.5%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in b around 0

            \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) - z \cdot \log t} \]
          4. Step-by-step derivation
            1. cancel-sign-sub-invN/A

              \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t} \]
            2. associate-+r+N/A

              \[\leadsto \color{blue}{\left(\left(x + y\right) + z\right)} + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t \]
            3. associate-+l+N/A

              \[\leadsto \color{blue}{\left(x + y\right) + \left(z + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t\right)} \]
            4. cancel-sign-sub-invN/A

              \[\leadsto \left(x + y\right) + \color{blue}{\left(z - z \cdot \log t\right)} \]
            5. *-rgt-identityN/A

              \[\leadsto \left(x + y\right) + \left(\color{blue}{z \cdot 1} - z \cdot \log t\right) \]
            6. distribute-lft-out--N/A

              \[\leadsto \left(x + y\right) + \color{blue}{z \cdot \left(1 - \log t\right)} \]
            7. +-commutativeN/A

              \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + \left(x + y\right)} \]
            8. sub-negN/A

              \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} + \left(x + y\right) \]
            9. mul-1-negN/A

              \[\leadsto z \cdot \left(1 + \color{blue}{-1 \cdot \log t}\right) + \left(x + y\right) \]
            10. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 + -1 \cdot \log t, x + y\right)} \]
            11. mul-1-negN/A

              \[\leadsto \mathsf{fma}\left(z, 1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}, x + y\right) \]
            12. sub-negN/A

              \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
            13. --lowering--.f64N/A

              \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
            14. log-lowering-log.f64N/A

              \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, x + y\right) \]
            15. +-commutativeN/A

              \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
            16. +-lowering-+.f6481.2

              \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
          5. Simplified81.2%

            \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y + x\right)} \]
          6. Taylor expanded in y around 0

            \[\leadsto \color{blue}{x + z \cdot \left(1 - \log t\right)} \]
          7. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + x} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, x\right)} \]
            3. --lowering--.f64N/A

              \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x\right) \]
            4. log-lowering-log.f64 — 76.8

              \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, x\right) \]
          8. Simplified — 76.8%

            \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, x\right)} \]
        3. Recombined 3 regimes into one program.
        4. Add Preprocessing

        Alternative 11: 84.6% accurate, 1.0× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} t_1 := \mathsf{fma}\left(\log t, -z, z\right)\\ \mathbf{if}\;z \leq -1.8 \cdot 10^{+217}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;z \leq 2.2 \cdot 10^{+206}:\\ \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
        (FPCore (x y z t a b)
         :precision binary64
         (let* ((t_1 (fma (log t) (- z) z)))
           (if (<= z -1.8e+217)
             t_1
             (if (<= z 2.2e+206) (+ y (fma b (+ a -0.5) x)) t_1))))
        double code(double x, double y, double z, double t, double a, double b) {
        	double t_1 = fma(log(t), -z, z);
        	double tmp;
        	if (z <= -1.8e+217) {
        		tmp = t_1;
        	} else if (z <= 2.2e+206) {
        		tmp = y + fma(b, (a + -0.5), x);
        	} else {
        		tmp = t_1;
        	}
        	return tmp;
        }
        
        # Alternative 11 (Julia): duplicate extreme-z branches merged into a
        # single fallback; a chained comparison selects the mid band.
        function code(x, y, z, t, a, b)
        	t_1 = fma(log(t), Float64(-z), z)
        	if -1.8e+217 < z <= 2.2e+206
        		return Float64(y + fma(b, Float64(a + -0.5), x))
        	end
        	return t_1
        end
        
        code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[Log[t], $MachinePrecision] * (-z) + z), $MachinePrecision]}, If[LessEqual[z, -1.8e+217], t$95$1, If[LessEqual[z, 2.2e+206], N[(y + N[(b * N[(a + -0.5), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision], t$95$1]]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        t_1 := \mathsf{fma}\left(\log t, -z, z\right)\\
        \mathbf{if}\;z \leq -1.8 \cdot 10^{+217}:\\
        \;\;\;\;t\_1\\
        
        \mathbf{elif}\;z \leq 2.2 \cdot 10^{+206}:\\
        \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\
        
        \mathbf{else}:\\
        \;\;\;\;t\_1\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if z < -1.8000000000000001e217 or 2.20000000000000001e206 < z

          1. Initial program 99.5%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
            2. sub-negN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            3. distribute-lft-inN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            4. accelerator-lowering-fma.f64N/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            5. *-lowering-*.f64N/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
            6. metadata-eval99.5

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
          4. Applied egg-rr99.5%

            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
          5. Taylor expanded in z around inf

            \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right)} \]
          6. Step-by-step derivation
            1. sub-negN/A

              \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} \]
            2. log-recN/A

              \[\leadsto z \cdot \left(1 + \color{blue}{\log \left(\frac{1}{t}\right)}\right) \]
            3. +-commutativeN/A

              \[\leadsto z \cdot \color{blue}{\left(\log \left(\frac{1}{t}\right) + 1\right)} \]
            4. distribute-rgt-inN/A

              \[\leadsto \color{blue}{\log \left(\frac{1}{t}\right) \cdot z + 1 \cdot z} \]
            5. *-lft-identityN/A

              \[\leadsto \log \left(\frac{1}{t}\right) \cdot z + \color{blue}{z} \]
            6. log-recN/A

              \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)} \cdot z + z \]
            7. distribute-lft-neg-inN/A

              \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log t \cdot z\right)\right)} + z \]
            8. distribute-rgt-neg-inN/A

              \[\leadsto \color{blue}{\log t \cdot \left(\mathsf{neg}\left(z\right)\right)} + z \]
            9. neg-mul-1N/A

              \[\leadsto \log t \cdot \color{blue}{\left(-1 \cdot z\right)} + z \]
            10. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -1 \cdot z, z\right)} \]
            11. log-lowering-log.f64N/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{\log t}, -1 \cdot z, z\right) \]
            12. neg-mul-1N/A

              \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{\mathsf{neg}\left(z\right)}, z\right) \]
            13. neg-lowering-neg.f6481.3

              \[\leadsto \mathsf{fma}\left(\log t, \color{blue}{-z}, z\right) \]
          7. Simplified81.3%

            \[\leadsto \color{blue}{\mathsf{fma}\left(\log t, -z, z\right)} \]

          if -1.8000000000000001e217 < z < 2.20000000000000001e206

          1. Initial program 99.9%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in z around 0

            \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
          4. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right) + x} \]
            2. associate-+l+N/A

              \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
            3. +-lowering-+.f64N/A

              \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
            4. accelerator-lowering-fma.f64N/A

              \[\leadsto y + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} \]
            5. sub-negN/A

              \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) \]
            6. metadata-evalN/A

              \[\leadsto y + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) \]
            7. +-lowering-+.f6487.5

              \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) \]
          5. Simplified87.5%

            \[\leadsto \color{blue}{y + \mathsf{fma}\left(b, a + -0.5, x\right)} \]
        3. Recombined 2 regimes into one program.
        4. Add Preprocessing

        Alternative 12: 84.6% accurate, 1.0× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} t_1 := z - z \cdot \log t\\ \mathbf{if}\;z \leq -2.9 \cdot 10^{+217}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;z \leq 4 \cdot 10^{+204}:\\ \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
        (FPCore (x y z t a b)
         :precision binary64
         (let* ((t_1 (- z (* z (log t)))))
           (if (<= z -2.9e+217)
             t_1
             (if (<= z 4e+204) (+ y (fma b (+ a -0.5) x)) t_1))))
        double code(double x, double y, double z, double t, double a, double b) {
        	double t_1 = z - (z * log(t));
        	double tmp;
        	if (z <= -2.9e+217) {
        		tmp = t_1;
        	} else if (z <= 4e+204) {
        		tmp = y + fma(b, (a + -0.5), x);
        	} else {
        		tmp = t_1;
        	}
        	return tmp;
        }
        
        # Alternative 12 (Julia): shared extreme-z expression z - z*log(t);
        # mid band uses the Taylor-in-z form, selected by a chained comparison.
        function code(x, y, z, t, a, b)
        	t_1 = Float64(z - Float64(z * log(t)))
        	if -2.9e+217 < z <= 4e+204
        		return Float64(y + fma(b, Float64(a + -0.5), x))
        	end
        	return t_1
        end
        
        code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(z - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -2.9e+217], t$95$1, If[LessEqual[z, 4e+204], N[(y + N[(b * N[(a + -0.5), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision], t$95$1]]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        t_1 := z - z \cdot \log t\\
        \mathbf{if}\;z \leq -2.9 \cdot 10^{+217}:\\
        \;\;\;\;t\_1\\
        
        \mathbf{elif}\;z \leq 4 \cdot 10^{+204}:\\
        \;\;\;\;y + \mathsf{fma}\left(b, a + -0.5, x\right)\\
        
        \mathbf{else}:\\
        \;\;\;\;t\_1\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if z < -2.89999999999999985e217 or 3.99999999999999996e204 < z

          1. Initial program 99.5%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in z around inf

            \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right)} \]
          4. Step-by-step derivation
            1. sub-negN/A

              \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} \]
            2. log-recN/A

              \[\leadsto z \cdot \left(1 + \color{blue}{\log \left(\frac{1}{t}\right)}\right) \]
            3. distribute-lft-inN/A

              \[\leadsto \color{blue}{z \cdot 1 + z \cdot \log \left(\frac{1}{t}\right)} \]
            4. *-rgt-identityN/A

              \[\leadsto \color{blue}{z} + z \cdot \log \left(\frac{1}{t}\right) \]
            5. remove-double-negN/A

              \[\leadsto z + \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(z \cdot \log \left(\frac{1}{t}\right)\right)\right)\right)\right)} \]
            6. mul-1-negN/A

              \[\leadsto z + \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \left(z \cdot \log \left(\frac{1}{t}\right)\right)}\right)\right) \]
            7. sub-negN/A

              \[\leadsto \color{blue}{z - -1 \cdot \left(z \cdot \log \left(\frac{1}{t}\right)\right)} \]
            8. --lowering--.f64N/A

              \[\leadsto \color{blue}{z - -1 \cdot \left(z \cdot \log \left(\frac{1}{t}\right)\right)} \]
            9. mul-1-negN/A

              \[\leadsto z - \color{blue}{\left(\mathsf{neg}\left(z \cdot \log \left(\frac{1}{t}\right)\right)\right)} \]
            10. distribute-rgt-neg-inN/A

              \[\leadsto z - \color{blue}{z \cdot \left(\mathsf{neg}\left(\log \left(\frac{1}{t}\right)\right)\right)} \]
            11. log-recN/A

              \[\leadsto z - z \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}\right)\right) \]
            12. remove-double-negN/A

              \[\leadsto z - z \cdot \color{blue}{\log t} \]
            13. *-lowering-*.f64N/A

              \[\leadsto z - \color{blue}{z \cdot \log t} \]
            14. log-lowering-log.f6481.3

              \[\leadsto z - z \cdot \color{blue}{\log t} \]
          5. Simplified81.3%

            \[\leadsto \color{blue}{z - z \cdot \log t} \]

          if -2.89999999999999985e217 < z < 3.99999999999999996e204

          1. Initial program 99.9%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in z around 0

            \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
          4. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right) + x} \]
            2. associate-+l+N/A

              \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
            3. +-lowering-+.f64N/A

              \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
            4. accelerator-lowering-fma.f64N/A

              \[\leadsto y + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} \]
            5. sub-negN/A

              \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) \]
            6. metadata-evalN/A

              \[\leadsto y + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) \]
            7. +-lowering-+.f6487.5

              \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) \]
          5. Simplified87.5%

            \[\leadsto \color{blue}{y + \mathsf{fma}\left(b, a + -0.5, x\right)} \]
        3. Recombined 2 regimes into one program.
        4. Add Preprocessing

        Alternative 13: 68.9% accurate, 3.3× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} t_1 := b \cdot \left(a - 0.5\right)\\ t_2 := b \cdot \left(a + -0.5\right)\\ \mathbf{if}\;t\_1 \leq -5 \cdot 10^{+252}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+177}:\\ \;\;\;\;x + \mathsf{fma}\left(b, -0.5, y\right)\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
        (FPCore (x y z t a b)
         :precision binary64
         (let* ((t_1 (* b (- a 0.5))) (t_2 (* b (+ a -0.5))))
           (if (<= t_1 -5e+252) t_2 (if (<= t_1 5e+177) (+ x (fma b -0.5 y)) t_2))))
        double code(double x, double y, double z, double t, double a, double b) {
        	double t_1 = b * (a - 0.5);
        	double t_2 = b * (a + -0.5);
        	double tmp;
        	if (t_1 <= -5e+252) {
        		tmp = t_2;
        	} else if (t_1 <= 5e+177) {
        		tmp = x + fma(b, -0.5, y);
        	} else {
        		tmp = t_2;
        	}
        	return tmp;
        }
        
        # Alternative 13 (Julia): selector t_1 = b*(a - 0.5); both extreme
        # branches agree, so the fallback computes b*(a + -0.5) directly.
        function code(x, y, z, t, a, b)
        	t_1 = Float64(b * Float64(a - 0.5))
        	if -5e+252 < t_1 <= 5e+177
        		return Float64(x + fma(b, -0.5, y))
        	end
        	return Float64(b * Float64(a + -0.5))
        end
        
        code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(b * N[(a + -0.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -5e+252], t$95$2, If[LessEqual[t$95$1, 5e+177], N[(x + N[(b * -0.5 + y), $MachinePrecision]), $MachinePrecision], t$95$2]]]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        t_1 := b \cdot \left(a - 0.5\right)\\
        t_2 := b \cdot \left(a + -0.5\right)\\
        \mathbf{if}\;t\_1 \leq -5 \cdot 10^{+252}:\\
        \;\;\;\;t\_2\\
        
        \mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+177}:\\
        \;\;\;\;x + \mathsf{fma}\left(b, -0.5, y\right)\\
        
        \mathbf{else}:\\
        \;\;\;\;t\_2\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < -4.9999999999999997e252 or 5.0000000000000003e177 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)

          1. Initial program 99.9%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Taylor expanded in b around inf

            \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
          4. Step-by-step derivation
            1. *-lowering-*.f64N/A

              \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
            2. sub-negN/A

              \[\leadsto b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            3. metadata-evalN/A

              \[\leadsto b \cdot \left(a + \color{blue}{\frac{-1}{2}}\right) \]
            4. +-lowering-+.f6491.3

              \[\leadsto b \cdot \color{blue}{\left(a + -0.5\right)} \]
          5. Simplified91.3%

            \[\leadsto \color{blue}{b \cdot \left(a + -0.5\right)} \]

          if -4.9999999999999997e252 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < 5.0000000000000003e177

          1. Initial program 99.8%

            \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
          2. Add Preprocessing
          3. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
            2. sub-negN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            3. distribute-lft-inN/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            4. accelerator-lowering-fma.f64N/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
            5. *-lowering-*.f64N/A

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
            6. metadata-eval99.8

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
          4. Applied egg-rr99.8%

            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
          5. Taylor expanded in z around 0

            \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
          6. Step-by-step derivation
            1. associate-+r+N/A

              \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
            2. +-commutativeN/A

              \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
            3. distribute-rgt-inN/A

              \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
            4. metadata-evalN/A

              \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
            5. sub-negN/A

              \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
            6. associate-+r+N/A

              \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
            7. +-lowering-+.f64N/A

              \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
            8. +-commutativeN/A

              \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
            9. accelerator-lowering-fma.f64N/A

              \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
            10. sub-negN/A

              \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
            11. metadata-evalN/A

              \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
            12. +-lowering-+.f6470.5

              \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
          7. Simplified70.5%

            \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
          8. Taylor expanded in a around 0

            \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{\frac{-1}{2}}, y\right) \]
          9. Step-by-step derivation
            1. Simplified65.2%

              \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{-0.5}, y\right) \]
          10. Recombined 2 regimes into one program.
          11. Final simplification73.1%

            \[\leadsto \begin{array}{l} \mathbf{if}\;b \cdot \left(a - 0.5\right) \leq -5 \cdot 10^{+252}:\\ \;\;\;\;b \cdot \left(a + -0.5\right)\\ \mathbf{elif}\;b \cdot \left(a - 0.5\right) \leq 5 \cdot 10^{+177}:\\ \;\;\;\;x + \mathsf{fma}\left(b, -0.5, y\right)\\ \mathbf{else}:\\ \;\;\;\;b \cdot \left(a + -0.5\right)\\ \end{array} \]
          12. Add Preprocessing

          Alternative 14: 62.7% accurate, 3.4× speedup?

          \[\begin{array}{l} \\ \begin{array}{l} t_1 := b \cdot \left(a - 0.5\right)\\ t_2 := b \cdot \left(a + -0.5\right)\\ \mathbf{if}\;t\_1 \leq -5 \cdot 10^{+252}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 2 \cdot 10^{+88}:\\ \;\;\;\;x + y\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
          ;; Herbie alternative 14: regime split on t_1 = b*(a - 1/2).
          ;; Middle regime keeps only x + y; outer regimes keep b*(a + -0.5).
          ;; z and t appear in the signature but are unused here.
          (FPCore (x y z t a b)
           :precision binary64
           (let* ((t_1 (* b (- a 0.5))) (t_2 (* b (+ a -0.5))))
             (if (<= t_1 -5e+252) t_2 (if (<= t_1 2e+88) (+ x y) t_2))))
          /* Herbie alternative 14 (regime split): keeps only x + y when the
           * product term b*(a - 1/2) is small, and only that product when it
           * dominates.  z and t are accepted for interface compatibility but
           * never read. */
          double code(double x, double y, double z, double t, double a, double b) {
          	const double scale = b * (a - 0.5);
          	/* Middle regime: product term is negligible. */
          	if (scale > -5e+252 && scale <= 2e+88) {
          		return x + y;
          	}
          	/* Outer regimes; NaN comparisons are false, so NaN also lands
          	 * here, exactly as in the original if/else-if/else chain. */
          	return b * (a + -0.5);
          }
          
          ! Herbie alternative 14 (regime split): x + y in the middle regime,
          ! b*(a + -0.5) in the outer regimes.  z and t are unused here.
          real(8) function code(x, y, z, t, a, b)
              real(8), intent (in) :: x
              real(8), intent (in) :: y
              real(8), intent (in) :: z
              real(8), intent (in) :: t
              real(8), intent (in) :: a
              real(8), intent (in) :: b
              real(8) :: t_1
              real(8) :: t_2
              real(8) :: tmp
              ! regime selector: the (a - 1/2)*b term of the original expression
              t_1 = b * (a - 0.5d0)
              t_2 = b * (a + (-0.5d0))
              if (t_1 <= (-5d+252)) then
                  tmp = t_2
              else if (t_1 <= 2d+88) then
                  ! product term negligible here; keep only x + y
                  tmp = x + y
              else
                  tmp = t_2
              end if
              code = tmp
          end function
          
          /**
           * Herbie alternative 14 (regime split): returns x + y in the middle
           * regime and b * (a + -0.5) in the outer regimes.  z and t are part
           * of the interface but never read.
           */
          public static double code(double x, double y, double z, double t, double a, double b) {
          	double scale = b * (a - 0.5);
          	if (scale > -5e+252 && scale <= 2e+88) {
          		return x + y;   // product term negligible here
          	}
          	// Outer regimes; NaN comparisons are false, so NaN lands here
          	// too, matching the original if/else-if/else chain.
          	return b * (a + -0.5);
          }
          
          def code(x, y, z, t, a, b):
          	"""Herbie alternative 14 (regime split).

          	Returns x + y in the middle regime and b * (a + -0.5) in the
          	outer regimes; z and t are accepted but unused.
          	"""
          	scale = b * (a - 0.5)
          	if -5e+252 < scale <= 2e+88:
          		return x + y  # product term negligible here
          	# Outer regimes; NaN comparisons are False, so NaN lands here too.
          	return b * (a + -0.5)
          
          # Herbie alternative 14 (regime split): x + y in the middle band,
          # b * (a + -0.5) in the outer bands.  z and t are unused here.
          function code(x, y, z, t, a, b)
          	t_1 = Float64(b * Float64(a - 0.5))  # regime selector
          	t_2 = Float64(b * Float64(a + -0.5))
          	tmp = 0.0
          	if (t_1 <= -5e+252)
          		tmp = t_2;
          	elseif (t_1 <= 2e+88)
          		tmp = Float64(x + y);  # product term negligible here
          	else
          		tmp = t_2;
          	end
          	return tmp
          end
          
          % Herbie alternative 14 (regime split): x + y in the middle band,
          % b * (a + -0.5) in the outer bands.  z and t are unused here.
          function tmp_2 = code(x, y, z, t, a, b)
          	t_1 = b * (a - 0.5);  % regime selector
          	t_2 = b * (a + -0.5);
          	tmp = 0.0;
          	if (t_1 <= -5e+252)
          		tmp = t_2;
          	elseif (t_1 <= 2e+88)
          		tmp = x + y;  % product term negligible here
          	else
          		tmp = t_2;
          	end
          	tmp_2 = tmp;
          end
          
          (* Herbie alternative 14 (regime split): x + y in the middle regime,
             b*(a + -0.5) in the outer regimes; z and t are unused. *)
          code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(b * N[(a + -0.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -5e+252], t$95$2, If[LessEqual[t$95$1, 2e+88], N[(x + y), $MachinePrecision], t$95$2]]]]
          
          \begin{array}{l}
          
          \\
          \begin{array}{l}
          t_1 := b \cdot \left(a - 0.5\right)\\
          t_2 := b \cdot \left(a + -0.5\right)\\
          \mathbf{if}\;t\_1 \leq -5 \cdot 10^{+252}:\\
          \;\;\;\;t\_2\\
          
          \mathbf{elif}\;t\_1 \leq 2 \cdot 10^{+88}:\\
          \;\;\;\;x + y\\
          
          \mathbf{else}:\\
          \;\;\;\;t\_2\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 2 regimes
          2. if (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < -4.9999999999999997e252 or 1.99999999999999992e88 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)

            1. Initial program 99.9%

              \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
            2. Add Preprocessing
            3. Taylor expanded in b around inf

              \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
            4. Step-by-step derivation
              1. *-lowering-*.f64N/A

                \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
              2. sub-negN/A

                \[\leadsto b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
              3. metadata-evalN/A

                \[\leadsto b \cdot \left(a + \color{blue}{\frac{-1}{2}}\right) \]
              4. +-lowering-+.f6484.2

                \[\leadsto b \cdot \color{blue}{\left(a + -0.5\right)} \]
            5. Simplified84.2%

              \[\leadsto \color{blue}{b \cdot \left(a + -0.5\right)} \]

            if -4.9999999999999997e252 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < 1.99999999999999992e88

            1. Initial program 99.8%

              \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
            2. Add Preprocessing
            3. Taylor expanded in b around 0

              \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) - z \cdot \log t} \]
            4. Step-by-step derivation
              1. cancel-sign-sub-invN/A

                \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t} \]
              2. associate-+r+N/A

                \[\leadsto \color{blue}{\left(\left(x + y\right) + z\right)} + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t \]
              3. associate-+l+N/A

                \[\leadsto \color{blue}{\left(x + y\right) + \left(z + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t\right)} \]
              4. cancel-sign-sub-invN/A

                \[\leadsto \left(x + y\right) + \color{blue}{\left(z - z \cdot \log t\right)} \]
              5. *-rgt-identityN/A

                \[\leadsto \left(x + y\right) + \left(\color{blue}{z \cdot 1} - z \cdot \log t\right) \]
              6. distribute-lft-out--N/A

                \[\leadsto \left(x + y\right) + \color{blue}{z \cdot \left(1 - \log t\right)} \]
              7. +-commutativeN/A

                \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + \left(x + y\right)} \]
              8. sub-negN/A

                \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} + \left(x + y\right) \]
              9. mul-1-negN/A

                \[\leadsto z \cdot \left(1 + \color{blue}{-1 \cdot \log t}\right) + \left(x + y\right) \]
              10. accelerator-lowering-fma.f64N/A

                \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 + -1 \cdot \log t, x + y\right)} \]
              11. mul-1-negN/A

                \[\leadsto \mathsf{fma}\left(z, 1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}, x + y\right) \]
              12. sub-negN/A

                \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
              13. --lowering--.f64N/A

                \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
              14. log-lowering-log.f64N/A

                \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, x + y\right) \]
              15. +-commutativeN/A

                \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
              16. +-lowering-+.f6490.9

                \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
            5. Simplified90.9%

              \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y + x\right)} \]
            6. Taylor expanded in z around 0

              \[\leadsto \color{blue}{x + y} \]
            7. Step-by-step derivation
              1. +-commutativeN/A

                \[\leadsto \color{blue}{y + x} \]
              2. +-lowering-+.f6461.4

                \[\leadsto \color{blue}{y + x} \]
            8. Simplified61.4%

              \[\leadsto \color{blue}{y + x} \]
          3. Recombined 2 regimes into one program.
          4. Final simplification69.6%

            \[\leadsto \begin{array}{l} \mathbf{if}\;b \cdot \left(a - 0.5\right) \leq -5 \cdot 10^{+252}:\\ \;\;\;\;b \cdot \left(a + -0.5\right)\\ \mathbf{elif}\;b \cdot \left(a - 0.5\right) \leq 2 \cdot 10^{+88}:\\ \;\;\;\;x + y\\ \mathbf{else}:\\ \;\;\;\;b \cdot \left(a + -0.5\right)\\ \end{array} \]
          5. Add Preprocessing

          Alternative 15: 57.4% accurate, 3.7× speedup?

          \[\begin{array}{l} \\ \begin{array}{l} t_1 := b \cdot \left(a - 0.5\right)\\ \mathbf{if}\;t\_1 \leq -5 \cdot 10^{+252}:\\ \;\;\;\;b \cdot a\\ \mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+177}:\\ \;\;\;\;x + y\\ \mathbf{else}:\\ \;\;\;\;b \cdot a\\ \end{array} \end{array} \]
          ;; Herbie alternative 15: regime split on t_1 = b*(a - 1/2).
          ;; Middle regime keeps only x + y; outer regimes keep b*a.
          ;; z and t appear in the signature but are unused here.
          (FPCore (x y z t a b)
           :precision binary64
           (let* ((t_1 (* b (- a 0.5))))
             (if (<= t_1 -5e+252) (* b a) (if (<= t_1 5e+177) (+ x y) (* b a)))))
          /* Herbie alternative 15 (regime split): keeps only x + y when the
           * product term b*(a - 1/2) is small, and only b*a when it dominates.
           * z and t are accepted for interface compatibility but never read. */
          double code(double x, double y, double z, double t, double a, double b) {
          	const double scale = b * (a - 0.5);
          	/* Middle regime: product term is negligible. */
          	if (scale > -5e+252 && scale <= 5e+177) {
          		return x + y;
          	}
          	/* Outer regimes; NaN comparisons are false, so NaN lands here
          	 * too, matching the original if/else-if/else chain. */
          	return b * a;
          }
          
          ! Herbie alternative 15 (regime split): x + y in the middle regime,
          ! b*a in the outer regimes.  z and t are unused here.
          real(8) function code(x, y, z, t, a, b)
              real(8), intent (in) :: x
              real(8), intent (in) :: y
              real(8), intent (in) :: z
              real(8), intent (in) :: t
              real(8), intent (in) :: a
              real(8), intent (in) :: b
              real(8) :: t_1
              real(8) :: tmp
              ! regime selector: the (a - 1/2)*b term of the original expression
              t_1 = b * (a - 0.5d0)
              if (t_1 <= (-5d+252)) then
                  tmp = b * a
              else if (t_1 <= 5d+177) then
                  ! product term negligible here; keep only x + y
                  tmp = x + y
              else
                  tmp = b * a
              end if
              code = tmp
          end function
          
          /**
           * Herbie alternative 15 (regime split): returns x + y in the middle
           * regime and b * a in the outer regimes.  z and t are part of the
           * interface but never read.
           */
          public static double code(double x, double y, double z, double t, double a, double b) {
          	double scale = b * (a - 0.5);
          	if (scale > -5e+252 && scale <= 5e+177) {
          		return x + y;   // product term negligible here
          	}
          	// Outer regimes; NaN comparisons are false, so NaN lands here
          	// too, matching the original if/else-if/else chain.
          	return b * a;
          }
          
          def code(x, y, z, t, a, b):
          	"""Herbie alternative 15 (regime split).

          	Returns x + y in the middle regime and b * a in the outer
          	regimes; z and t are accepted but unused.
          	"""
          	scale = b * (a - 0.5)
          	if -5e+252 < scale <= 5e+177:
          		return x + y  # product term negligible here
          	# Outer regimes; NaN comparisons are False, so NaN lands here too.
          	return b * a
          
          # Herbie alternative 15 (regime split): x + y in the middle band,
          # b * a in the outer bands.  z and t are unused here.
          function code(x, y, z, t, a, b)
          	t_1 = Float64(b * Float64(a - 0.5))  # regime selector
          	tmp = 0.0
          	if (t_1 <= -5e+252)
          		tmp = Float64(b * a);
          	elseif (t_1 <= 5e+177)
          		tmp = Float64(x + y);  # product term negligible here
          	else
          		tmp = Float64(b * a);
          	end
          	return tmp
          end
          
          % Herbie alternative 15 (regime split): x + y in the middle band,
          % b * a in the outer bands.  z and t are unused here.
          function tmp_2 = code(x, y, z, t, a, b)
          	t_1 = b * (a - 0.5);  % regime selector
          	tmp = 0.0;
          	if (t_1 <= -5e+252)
          		tmp = b * a;
          	elseif (t_1 <= 5e+177)
          		tmp = x + y;  % product term negligible here
          	else
          		tmp = b * a;
          	end
          	tmp_2 = tmp;
          end
          
          (* Herbie alternative 15 (regime split): x + y in the middle regime,
             b*a in the outer regimes; z and t are unused. *)
          code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(b * N[(a - 0.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -5e+252], N[(b * a), $MachinePrecision], If[LessEqual[t$95$1, 5e+177], N[(x + y), $MachinePrecision], N[(b * a), $MachinePrecision]]]]
          
          \begin{array}{l}
          
          \\
          \begin{array}{l}
          t_1 := b \cdot \left(a - 0.5\right)\\
          \mathbf{if}\;t\_1 \leq -5 \cdot 10^{+252}:\\
          \;\;\;\;b \cdot a\\
          
          \mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+177}:\\
          \;\;\;\;x + y\\
          
          \mathbf{else}:\\
          \;\;\;\;b \cdot a\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 2 regimes
          2. if (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < -4.9999999999999997e252 or 5.0000000000000003e177 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b)

            1. Initial program 99.9%

              \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
            2. Add Preprocessing
            3. Taylor expanded in a around inf

              \[\leadsto \color{blue}{a \cdot b} \]
            4. Step-by-step derivation
              1. *-commutativeN/A

                \[\leadsto \color{blue}{b \cdot a} \]
              2. *-lowering-*.f6472.1

                \[\leadsto \color{blue}{b \cdot a} \]
            5. Simplified72.1%

              \[\leadsto \color{blue}{b \cdot a} \]

            if -4.9999999999999997e252 < (*.f64 (-.f64 a #s(literal 1/2 binary64)) b) < 5.0000000000000003e177

            1. Initial program 99.8%

              \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
            2. Add Preprocessing
            3. Taylor expanded in b around 0

              \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) - z \cdot \log t} \]
            4. Step-by-step derivation
              1. cancel-sign-sub-invN/A

                \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t} \]
              2. associate-+r+N/A

                \[\leadsto \color{blue}{\left(\left(x + y\right) + z\right)} + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t \]
              3. associate-+l+N/A

                \[\leadsto \color{blue}{\left(x + y\right) + \left(z + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t\right)} \]
              4. cancel-sign-sub-invN/A

                \[\leadsto \left(x + y\right) + \color{blue}{\left(z - z \cdot \log t\right)} \]
              5. *-rgt-identityN/A

                \[\leadsto \left(x + y\right) + \left(\color{blue}{z \cdot 1} - z \cdot \log t\right) \]
              6. distribute-lft-out--N/A

                \[\leadsto \left(x + y\right) + \color{blue}{z \cdot \left(1 - \log t\right)} \]
              7. +-commutativeN/A

                \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + \left(x + y\right)} \]
              8. sub-negN/A

                \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} + \left(x + y\right) \]
              9. mul-1-negN/A

                \[\leadsto z \cdot \left(1 + \color{blue}{-1 \cdot \log t}\right) + \left(x + y\right) \]
              10. accelerator-lowering-fma.f64N/A

                \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 + -1 \cdot \log t, x + y\right)} \]
              11. mul-1-negN/A

                \[\leadsto \mathsf{fma}\left(z, 1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}, x + y\right) \]
              12. sub-negN/A

                \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
              13. --lowering--.f64N/A

                \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
              14. log-lowering-log.f64N/A

                \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, x + y\right) \]
              15. +-commutativeN/A

                \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
              16. +-lowering-+.f6488.4

                \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
            5. Simplified88.4%

              \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y + x\right)} \]
            6. Taylor expanded in z around 0

              \[\leadsto \color{blue}{x + y} \]
            7. Step-by-step derivation
              1. +-commutativeN/A

                \[\leadsto \color{blue}{y + x} \]
              2. +-lowering-+.f6459.0

                \[\leadsto \color{blue}{y + x} \]
            8. Simplified59.0%

              \[\leadsto \color{blue}{y + x} \]
          3. Recombined 2 regimes into one program.
          4. Final simplification63.0%

            \[\leadsto \begin{array}{l} \mathbf{if}\;b \cdot \left(a - 0.5\right) \leq -5 \cdot 10^{+252}:\\ \;\;\;\;b \cdot a\\ \mathbf{elif}\;b \cdot \left(a - 0.5\right) \leq 5 \cdot 10^{+177}:\\ \;\;\;\;x + y\\ \mathbf{else}:\\ \;\;\;\;b \cdot a\\ \end{array} \]
          5. Add Preprocessing

          Alternative 16: 77.4% accurate, 4.5× speedup?

          \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;a - 0.5 \leq -1 \cdot 10^{+16}:\\ \;\;\;\;y + \mathsf{fma}\left(b, a, x\right)\\ \mathbf{elif}\;a - 0.5 \leq -0.4:\\ \;\;\;\;y + \mathsf{fma}\left(b, -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;x + \mathsf{fma}\left(b, a, y\right)\\ \end{array} \end{array} \]
          ;; Herbie alternative 16: three regimes selected by a - 0.5, each
          ;; reassociated into a single fma.  z and t are unused here.
          (FPCore (x y z t a b)
           :precision binary64
           (if (<= (- a 0.5) -1e+16)
             (+ y (fma b a x))
             (if (<= (- a 0.5) -0.4) (+ y (fma b -0.5 x)) (+ x (fma b a y)))))
          double code(double x, double y, double z, double t, double a, double b) {
          	double tmp;
          	if ((a - 0.5) <= -1e+16) {
          		tmp = y + fma(b, a, x);
          	} else if ((a - 0.5) <= -0.4) {
          		tmp = y + fma(b, -0.5, x);
          	} else {
          		tmp = x + fma(b, a, y);
          	}
          	return tmp;
          }
          
          # Herbie alternative 16: one of three fma forms selected by a - 0.5.
          # z and t are unused here.
          function code(x, y, z, t, a, b)
          	tmp = 0.0
          	if (Float64(a - 0.5) <= -1e+16)
          		tmp = Float64(y + fma(b, a, x));  # a dwarfs the -0.5 offset
          	elseif (Float64(a - 0.5) <= -0.4)
          		tmp = Float64(y + fma(b, -0.5, x));  # a negligible next to -0.5
          	else
          		tmp = Float64(x + fma(b, a, y));
          	end
          	return tmp
          end
          
          (* Herbie alternative 16: one of three fused multiply-add forms
             selected by a - 0.5; z and t are unused. *)
          code[x_, y_, z_, t_, a_, b_] := If[LessEqual[N[(a - 0.5), $MachinePrecision], -1e+16], N[(y + N[(b * a + x), $MachinePrecision]), $MachinePrecision], If[LessEqual[N[(a - 0.5), $MachinePrecision], -0.4], N[(y + N[(b * -0.5 + x), $MachinePrecision]), $MachinePrecision], N[(x + N[(b * a + y), $MachinePrecision]), $MachinePrecision]]]
          
          \begin{array}{l}
          
          \\
          \begin{array}{l}
          \mathbf{if}\;a - 0.5 \leq -1 \cdot 10^{+16}:\\
          \;\;\;\;y + \mathsf{fma}\left(b, a, x\right)\\
          
          \mathbf{elif}\;a - 0.5 \leq -0.4:\\
          \;\;\;\;y + \mathsf{fma}\left(b, -0.5, x\right)\\
          
          \mathbf{else}:\\
          \;\;\;\;x + \mathsf{fma}\left(b, a, y\right)\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 3 regimes
          2. if (-.f64 a #s(literal 1/2 binary64)) < -1e16

            1. Initial program 99.9%

              \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
            2. Add Preprocessing
            3. Step-by-step derivation
              1. *-commutativeN/A

                \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
              2. sub-negN/A

                \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
              3. distribute-lft-inN/A

                \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
              4. accelerator-lowering-fma.f64N/A

                \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
              5. *-lowering-*.f64N/A

                \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
              6. metadata-eval99.9

                \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
            4. Applied egg-rr99.9%

              \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
            5. Taylor expanded in z around 0

              \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
            6. Step-by-step derivation
              1. associate-+r+N/A

                \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
              2. +-commutativeN/A

                \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
              3. distribute-rgt-inN/A

                \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
              4. metadata-evalN/A

                \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
              5. sub-negN/A

                \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
              6. associate-+r+N/A

                \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
              7. +-lowering-+.f64N/A

                \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
              8. +-commutativeN/A

                \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
              9. accelerator-lowering-fma.f64N/A

                \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
              10. sub-negN/A

                \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
              11. metadata-evalN/A

                \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
              12. +-lowering-+.f6483.3

                \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
            7. Simplified83.3%

              \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
            8. Step-by-step derivation
              1. associate-+r+N/A

                \[\leadsto \color{blue}{\left(x + b \cdot \left(a + \frac{-1}{2}\right)\right) + y} \]
              2. *-commutativeN/A

                \[\leadsto \left(x + \color{blue}{\left(a + \frac{-1}{2}\right) \cdot b}\right) + y \]
              3. +-commutativeN/A

                \[\leadsto \color{blue}{\left(\left(a + \frac{-1}{2}\right) \cdot b + x\right)} + y \]
              4. +-lowering-+.f64N/A

                \[\leadsto \color{blue}{\left(\left(a + \frac{-1}{2}\right) \cdot b + x\right) + y} \]
              5. *-commutativeN/A

                \[\leadsto \left(\color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} + x\right) + y \]
              6. metadata-evalN/A

                \[\leadsto \left(b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) + x\right) + y \]
              7. sub-negN/A

                \[\leadsto \left(b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} + x\right) + y \]
              8. accelerator-lowering-fma.f64N/A

                \[\leadsto \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} + y \]
              9. sub-negN/A

                \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) + y \]
              10. metadata-evalN/A

                \[\leadsto \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) + y \]
              11. +-lowering-+.f6483.3

                \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) + y \]
            9. Applied egg-rr83.3%

              \[\leadsto \color{blue}{\mathsf{fma}\left(b, a + -0.5, x\right) + y} \]
            10. Taylor expanded in a around inf

              \[\leadsto \mathsf{fma}\left(b, \color{blue}{a}, x\right) + y \]
            11. Step-by-step derivation
              1. Simplified83.3%

                \[\leadsto \mathsf{fma}\left(b, \color{blue}{a}, x\right) + y \]

              if -1e16 < (-.f64 a #s(literal 1/2 binary64)) < -0.40000000000000002

              1. Initial program 99.8%

                \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
              2. Add Preprocessing
              3. Step-by-step derivation
                1. *-commutativeN/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                2. sub-negN/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                3. distribute-lft-inN/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                4. accelerator-lowering-fma.f64N/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                5. *-lowering-*.f64N/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                6. metadata-eval99.8

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
              4. Applied egg-rr99.8%

                \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
              5. Taylor expanded in z around 0

                \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
              6. Step-by-step derivation
                1. associate-+r+N/A

                  \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
                2. +-commutativeN/A

                  \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
                3. distribute-rgt-inN/A

                  \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
                4. metadata-evalN/A

                  \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                5. sub-negN/A

                  \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
                6. associate-+r+N/A

                  \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                7. +-lowering-+.f64N/A

                  \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                8. +-commutativeN/A

                  \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
                9. accelerator-lowering-fma.f64N/A

                  \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
                10. sub-negN/A

                  \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
                11. metadata-evalN/A

                  \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
                12. +-lowering-+.f6473.7

                  \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
              7. Simplified73.7%

                \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
              8. Step-by-step derivation
                1. associate-+r+N/A

                  \[\leadsto \color{blue}{\left(x + b \cdot \left(a + \frac{-1}{2}\right)\right) + y} \]
                2. *-commutativeN/A

                  \[\leadsto \left(x + \color{blue}{\left(a + \frac{-1}{2}\right) \cdot b}\right) + y \]
                3. +-commutativeN/A

                  \[\leadsto \color{blue}{\left(\left(a + \frac{-1}{2}\right) \cdot b + x\right)} + y \]
                4. +-lowering-+.f64N/A

                  \[\leadsto \color{blue}{\left(\left(a + \frac{-1}{2}\right) \cdot b + x\right) + y} \]
                5. *-commutativeN/A

                  \[\leadsto \left(\color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} + x\right) + y \]
                6. metadata-evalN/A

                  \[\leadsto \left(b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) + x\right) + y \]
                7. sub-negN/A

                  \[\leadsto \left(b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} + x\right) + y \]
                8. accelerator-lowering-fma.f64N/A

                  \[\leadsto \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} + y \]
                9. sub-negN/A

                  \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) + y \]
                10. metadata-evalN/A

                  \[\leadsto \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) + y \]
                11. +-lowering-+.f6473.7

                  \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) + y \]
              9. Applied egg-rr73.7%

                \[\leadsto \color{blue}{\mathsf{fma}\left(b, a + -0.5, x\right) + y} \]
              10. Taylor expanded in a around 0

                \[\leadsto \color{blue}{\left(x + \frac{-1}{2} \cdot b\right)} + y \]
              11. Step-by-step derivation
                1. +-commutativeN/A

                  \[\leadsto \color{blue}{\left(\frac{-1}{2} \cdot b + x\right)} + y \]
                2. *-commutativeN/A

                  \[\leadsto \left(\color{blue}{b \cdot \frac{-1}{2}} + x\right) + y \]
                3. accelerator-lowering-fma.f6473.3

                  \[\leadsto \color{blue}{\mathsf{fma}\left(b, -0.5, x\right)} + y \]
              12. Simplified73.3%

                \[\leadsto \color{blue}{\mathsf{fma}\left(b, -0.5, x\right)} + y \]

              if -0.40000000000000002 < (-.f64 a #s(literal 1/2 binary64))

              1. Initial program 99.8%

                \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
              2. Add Preprocessing
              3. Step-by-step derivation
                1. *-commutativeN/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                2. sub-negN/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                3. distribute-lft-inN/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                4. accelerator-lowering-fma.f64N/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                5. *-lowering-*.f64N/A

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                6. metadata-eval99.8

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
              4. Applied egg-rr99.8%

                \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
              5. Taylor expanded in z around 0

                \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
              6. Step-by-step derivation
                1. associate-+r+N/A

                  \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
                2. +-commutativeN/A

                  \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
                3. distribute-rgt-inN/A

                  \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
                4. metadata-evalN/A

                  \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                5. sub-negN/A

                  \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
                6. associate-+r+N/A

                  \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                7. +-lowering-+.f64N/A

                  \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                8. +-commutativeN/A

                  \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
                9. accelerator-lowering-fma.f64N/A

                  \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
                10. sub-negN/A

                  \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
                11. metadata-evalN/A

                  \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
                12. +-lowering-+.f6481.1

                  \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
              7. Simplified81.1%

                \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
              8. Taylor expanded in a around inf

                \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a}, y\right) \]
              9. Step-by-step derivation
                1. Simplified81.1%

                  \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a}, y\right) \]
              10. Recombined 3 regimes into one program.
              11. Final simplification77.8%

                \[\leadsto \begin{array}{l} \mathbf{if}\;a - 0.5 \leq -1 \cdot 10^{+16}:\\ \;\;\;\;y + \mathsf{fma}\left(b, a, x\right)\\ \mathbf{elif}\;a - 0.5 \leq -0.4:\\ \;\;\;\;y + \mathsf{fma}\left(b, -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;x + \mathsf{fma}\left(b, a, y\right)\\ \end{array} \]
              12. Add Preprocessing

              Alternative 17: 77.4% accurate, 4.5× speedup?

               \[\begin{array}{l} \\ \begin{array}{l} t_1 := x + \mathsf{fma}\left(b, a, y\right)\\ \mathbf{if}\;a - 0.5 \leq -1 \cdot 10^{+16}:\\ \;\;\;\;t_1\\ \mathbf{elif}\;a - 0.5 \leq -0.4:\\ \;\;\;\;y + \mathsf{fma}\left(b, -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;t_1\\ \end{array} \end{array} \]
               ;; Herbie alternative 17 for the logBeta subexpression.
               ;; z and t are unused: the Taylor expansion in z around 0 dropped
               ;; the z and z*log(t) terms.  t_1 is shared by both outer regimes;
               ;; the middle regime replaces (a - 0.5) with -0.5.
               (FPCore (x y z t a b)
                :precision binary64
                (let* ((t_1 (+ x (fma b a y))))
                  (if (<= (- a 0.5) -1e+16)
                    t_1
                    (if (<= (- a 0.5) -0.4) (+ y (fma b -0.5 x)) t_1))))
              double code(double x, double y, double z, double t, double a, double b) {
              	double t_1 = x + fma(b, a, y);
              	double tmp;
              	if ((a - 0.5) <= -1e+16) {
              		tmp = t_1;
              	} else if ((a - 0.5) <= -0.4) {
              		tmp = y + fma(b, -0.5, x);
              	} else {
              		tmp = t_1;
              	}
              	return tmp;
              }
              
              function code(x, y, z, t, a, b)
              	t_1 = Float64(x + fma(b, a, y))
              	tmp = 0.0
              	if (Float64(a - 0.5) <= -1e+16)
              		tmp = t_1;
              	elseif (Float64(a - 0.5) <= -0.4)
              		tmp = Float64(y + fma(b, -0.5, x));
              	else
              		tmp = t_1;
              	end
              	return tmp
              end
              
               (* Herbie alternative 17: t$95$1 holds the expression shared by both
                  outer regimes; the middle regime replaces (a - 0.5) with -0.5.
                  z and t are unused after Taylor expansion in z around 0. *)
               code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(x + N[(b * a + y), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(a - 0.5), $MachinePrecision], -1e+16], t$95$1, If[LessEqual[N[(a - 0.5), $MachinePrecision], -0.4], N[(y + N[(b * -0.5 + x), $MachinePrecision]), $MachinePrecision], t$95$1]]]
              
              \begin{array}{l}
              
              \\
              \begin{array}{l}
              t_1 := x + \mathsf{fma}\left(b, a, y\right)\\
               \mathbf{if}\;a - 0.5 \leq -1 \cdot 10^{+16}:\\
               \;\;\;\;t_1\\
               
               \mathbf{elif}\;a - 0.5 \leq -0.4:\\
               \;\;\;\;y + \mathsf{fma}\left(b, -0.5, x\right)\\
               
               \mathbf{else}:\\
               \;\;\;\;t_1\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if (-.f64 a #s(literal 1/2 binary64)) < -1e16 or -0.40000000000000002 < (-.f64 a #s(literal 1/2 binary64))

                1. Initial program 99.9%

                  \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                2. Add Preprocessing
                3. Step-by-step derivation
                  1. *-commutativeN/A

                    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                  2. sub-negN/A

                    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                  3. distribute-lft-inN/A

                    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                  4. accelerator-lowering-fma.f64N/A

                    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                  5. *-lowering-*.f64N/A

                    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                  6. metadata-eval99.9

                    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
                4. Applied egg-rr99.9%

                  \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
                5. Taylor expanded in z around 0

                  \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
                6. Step-by-step derivation
                  1. associate-+r+N/A

                    \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
                  2. +-commutativeN/A

                    \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
                  3. distribute-rgt-inN/A

                    \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
                  4. metadata-evalN/A

                    \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                  5. sub-negN/A

                    \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
                  6. associate-+r+N/A

                    \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                  7. +-lowering-+.f64N/A

                    \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                  8. +-commutativeN/A

                    \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
                  9. accelerator-lowering-fma.f64N/A

                    \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
                  10. sub-negN/A

                    \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
                  11. metadata-evalN/A

                    \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
                  12. +-lowering-+.f6482.3

                    \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
                7. Simplified82.3%

                  \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
                8. Taylor expanded in a around inf

                  \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a}, y\right) \]
                9. Step-by-step derivation
                  1. Simplified82.3%

                    \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a}, y\right) \]

                  if -1e16 < (-.f64 a #s(literal 1/2 binary64)) < -0.40000000000000002

                  1. Initial program 99.8%

                    \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                  2. Add Preprocessing
                  3. Step-by-step derivation
                    1. *-commutativeN/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                    2. sub-negN/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                    3. distribute-lft-inN/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                    4. accelerator-lowering-fma.f64N/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                    5. *-lowering-*.f64N/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                    6. metadata-eval99.8

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
                  4. Applied egg-rr99.8%

                    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
                  5. Taylor expanded in z around 0

                    \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
                  6. Step-by-step derivation
                    1. associate-+r+N/A

                      \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
                    2. +-commutativeN/A

                      \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
                    3. distribute-rgt-inN/A

                      \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
                    4. metadata-evalN/A

                      \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                    5. sub-negN/A

                      \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
                    6. associate-+r+N/A

                      \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                    7. +-lowering-+.f64N/A

                      \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                    8. +-commutativeN/A

                      \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
                    9. accelerator-lowering-fma.f64N/A

                      \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
                    10. sub-negN/A

                      \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
                    11. metadata-evalN/A

                      \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
                    12. +-lowering-+.f6473.7

                      \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
                  7. Simplified73.7%

                    \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
                  8. Step-by-step derivation
                    1. associate-+r+N/A

                      \[\leadsto \color{blue}{\left(x + b \cdot \left(a + \frac{-1}{2}\right)\right) + y} \]
                    2. *-commutativeN/A

                      \[\leadsto \left(x + \color{blue}{\left(a + \frac{-1}{2}\right) \cdot b}\right) + y \]
                    3. +-commutativeN/A

                      \[\leadsto \color{blue}{\left(\left(a + \frac{-1}{2}\right) \cdot b + x\right)} + y \]
                    4. +-lowering-+.f64N/A

                      \[\leadsto \color{blue}{\left(\left(a + \frac{-1}{2}\right) \cdot b + x\right) + y} \]
                    5. *-commutativeN/A

                      \[\leadsto \left(\color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} + x\right) + y \]
                    6. metadata-evalN/A

                      \[\leadsto \left(b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) + x\right) + y \]
                    7. sub-negN/A

                      \[\leadsto \left(b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} + x\right) + y \]
                    8. accelerator-lowering-fma.f64N/A

                      \[\leadsto \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} + y \]
                    9. sub-negN/A

                      \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) + y \]
                    10. metadata-evalN/A

                      \[\leadsto \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) + y \]
                    11. +-lowering-+.f6473.7

                      \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) + y \]
                  9. Applied egg-rr73.7%

                    \[\leadsto \color{blue}{\mathsf{fma}\left(b, a + -0.5, x\right) + y} \]
                  10. Taylor expanded in a around 0

                    \[\leadsto \color{blue}{\left(x + \frac{-1}{2} \cdot b\right)} + y \]
                  11. Step-by-step derivation
                    1. +-commutativeN/A

                      \[\leadsto \color{blue}{\left(\frac{-1}{2} \cdot b + x\right)} + y \]
                    2. *-commutativeN/A

                      \[\leadsto \left(\color{blue}{b \cdot \frac{-1}{2}} + x\right) + y \]
                    3. accelerator-lowering-fma.f6473.3

                      \[\leadsto \color{blue}{\mathsf{fma}\left(b, -0.5, x\right)} + y \]
                  12. Simplified73.3%

                    \[\leadsto \color{blue}{\mathsf{fma}\left(b, -0.5, x\right)} + y \]
                10. Recombined 2 regimes into one program.
                11. Final simplification77.8%

                  \[\leadsto \begin{array}{l} \mathbf{if}\;a - 0.5 \leq -1 \cdot 10^{+16}:\\ \;\;\;\;x + \mathsf{fma}\left(b, a, y\right)\\ \mathbf{elif}\;a - 0.5 \leq -0.4:\\ \;\;\;\;y + \mathsf{fma}\left(b, -0.5, x\right)\\ \mathbf{else}:\\ \;\;\;\;x + \mathsf{fma}\left(b, a, y\right)\\ \end{array} \]
                12. Add Preprocessing

                Alternative 18: 77.4% accurate, 4.5× speedup?

                 \[\begin{array}{l} \\ \begin{array}{l} t_1 := x + \mathsf{fma}\left(b, a, y\right)\\ \mathbf{if}\;a - 0.5 \leq -1 \cdot 10^{+16}:\\ \;\;\;\;t_1\\ \mathbf{elif}\;a - 0.5 \leq -0.4:\\ \;\;\;\;x + \mathsf{fma}\left(b, -0.5, y\right)\\ \mathbf{else}:\\ \;\;\;\;t_1\\ \end{array} \end{array} \]
                 ;; Herbie alternative 18 for the logBeta subexpression.
                 ;; Same regime split as alternative 17, but the middle regime
                 ;; accumulates via (+ x (fma b -0.5 y)) instead of (+ y (fma b -0.5 x)).
                 ;; z and t are unused after Taylor expansion in z around 0.
                 (FPCore (x y z t a b)
                  :precision binary64
                  (let* ((t_1 (+ x (fma b a y))))
                    (if (<= (- a 0.5) -1e+16)
                      t_1
                      (if (<= (- a 0.5) -0.4) (+ x (fma b -0.5 y)) t_1))))
                double code(double x, double y, double z, double t, double a, double b) {
                	double t_1 = x + fma(b, a, y);
                	double tmp;
                	if ((a - 0.5) <= -1e+16) {
                		tmp = t_1;
                	} else if ((a - 0.5) <= -0.4) {
                		tmp = x + fma(b, -0.5, y);
                	} else {
                		tmp = t_1;
                	}
                	return tmp;
                }
                
                function code(x, y, z, t, a, b)
                	t_1 = Float64(x + fma(b, a, y))
                	tmp = 0.0
                	if (Float64(a - 0.5) <= -1e+16)
                		tmp = t_1;
                	elseif (Float64(a - 0.5) <= -0.4)
                		tmp = Float64(x + fma(b, -0.5, y));
                	else
                		tmp = t_1;
                	end
                	return tmp
                end
                
                 (* Herbie alternative 18: t$95$1 holds the expression shared by both
                    outer regimes; the middle regime replaces (a - 0.5) with -0.5 and
                    sums as x + (b*-0.5 + y).  z and t are unused. *)
                 code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(x + N[(b * a + y), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(a - 0.5), $MachinePrecision], -1e+16], t$95$1, If[LessEqual[N[(a - 0.5), $MachinePrecision], -0.4], N[(x + N[(b * -0.5 + y), $MachinePrecision]), $MachinePrecision], t$95$1]]]
                
                \begin{array}{l}
                
                \\
                \begin{array}{l}
                t_1 := x + \mathsf{fma}\left(b, a, y\right)\\
                 \mathbf{if}\;a - 0.5 \leq -1 \cdot 10^{+16}:\\
                 \;\;\;\;t_1\\
                 
                 \mathbf{elif}\;a - 0.5 \leq -0.4:\\
                 \;\;\;\;x + \mathsf{fma}\left(b, -0.5, y\right)\\
                 
                 \mathbf{else}:\\
                 \;\;\;\;t_1\\
                
                
                \end{array}
                \end{array}
                
                Derivation
                1. Split input into 2 regimes
                2. if (-.f64 a #s(literal 1/2 binary64)) < -1e16 or -0.40000000000000002 < (-.f64 a #s(literal 1/2 binary64))

                  1. Initial program 99.9%

                    \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                  2. Add Preprocessing
                  3. Step-by-step derivation
                    1. *-commutativeN/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                    2. sub-negN/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                    3. distribute-lft-inN/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                    4. accelerator-lowering-fma.f64N/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                    5. *-lowering-*.f64N/A

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                    6. metadata-eval99.9

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
                  4. Applied egg-rr99.9%

                    \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
                  5. Taylor expanded in z around 0

                    \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
                  6. Step-by-step derivation
                    1. associate-+r+N/A

                      \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
                    2. +-commutativeN/A

                      \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
                    3. distribute-rgt-inN/A

                      \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
                    4. metadata-evalN/A

                      \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                    5. sub-negN/A

                      \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
                    6. associate-+r+N/A

                      \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                    7. +-lowering-+.f64N/A

                      \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                    8. +-commutativeN/A

                      \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
                    9. accelerator-lowering-fma.f64N/A

                      \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
                    10. sub-negN/A

                      \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
                    11. metadata-evalN/A

                      \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
                    12. +-lowering-+.f6482.3

                      \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
                  7. Simplified82.3%

                    \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
                  8. Taylor expanded in a around inf

                    \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a}, y\right) \]
                  9. Step-by-step derivation
                    1. Simplified82.3%

                      \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a}, y\right) \]

                    if -1e16 < (-.f64 a #s(literal 1/2 binary64)) < -0.40000000000000002

                    1. Initial program 99.8%

                      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                    2. Add Preprocessing
                    3. Step-by-step derivation
                      1. *-commutativeN/A

                        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                      2. sub-negN/A

                        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                      3. distribute-lft-inN/A

                        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                      4. accelerator-lowering-fma.f64N/A

                        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                      5. *-lowering-*.f64N/A

                        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                      6. metadata-eval99.8

                        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
                    4. Applied egg-rr99.8%

                      \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
                    5. Taylor expanded in z around 0

                      \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
                    6. Step-by-step derivation
                      1. associate-+r+N/A

                        \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
                      2. +-commutativeN/A

                        \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
                      3. distribute-rgt-inN/A

                        \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
                      4. metadata-evalN/A

                        \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                      5. sub-negN/A

                        \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
                      6. associate-+r+N/A

                        \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                      7. +-lowering-+.f64N/A

                        \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                      8. +-commutativeN/A

                        \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
                      9. accelerator-lowering-fma.f64N/A

                        \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
                      10. sub-negN/A

                        \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
                      11. metadata-evalN/A

                        \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
                      12. +-lowering-+.f6473.7

                        \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
                    7. Simplified73.7%

                      \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
                    8. Taylor expanded in a around 0

                      \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{\frac{-1}{2}}, y\right) \]
                    9. Step-by-step derivation
                      1. Simplified73.2%

                        \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{-0.5}, y\right) \]
                    10. Recombined 2 regimes into one program.
                    11. Add Preprocessing

                    Alternative 19: 49.2% accurate, 4.7× speedup?

                    \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x + y \leq -2 \cdot 10^{+85}:\\ \;\;\;\;x + b \cdot a\\ \mathbf{elif}\;x + y \leq 2 \cdot 10^{-24}:\\ \;\;\;\;b \cdot \left(a + -0.5\right)\\ \mathbf{else}:\\ \;\;\;\;y + b \cdot a\\ \end{array} \end{array} \]
                    (FPCore (x y z t a b)
                     :precision binary64
                     (if (<= (+ x y) -2e+85)
                       (+ x (* b a))
                       (if (<= (+ x y) 2e-24) (* b (+ a -0.5)) (+ y (* b a)))))
                    double code(double x, double y, double z, double t, double a, double b) {
                    	double tmp;
                    	if ((x + y) <= -2e+85) {
                    		tmp = x + (b * a);
                    	} else if ((x + y) <= 2e-24) {
                    		tmp = b * (a + -0.5);
                    	} else {
                    		tmp = y + (b * a);
                    	}
                    	return tmp;
                    }
                    
                     real(8) function code(x, y, z, t, a, b)
                         ! Herbie alternative 19 (49.2% accurate per report): piecewise
                         ! model branching on x + y; arguments z and t are unused.
                         real(8), intent (in) :: x
                         real(8), intent (in) :: y
                         real(8), intent (in) :: z
                         real(8), intent (in) :: t
                         real(8), intent (in) :: a
                         real(8), intent (in) :: b
                         real(8) :: tmp
                         if ((x + y) <= (-2d+85)) then
                             ! x dominates the linear terms
                             tmp = x + (b * a)
                         else if ((x + y) <= 2d-24) then
                             ! x + y negligible: only the (a - 0.5)*b term survives
                             tmp = b * (a + (-0.5d0))
                         else
                             ! y dominates the linear terms
                             tmp = y + (b * a)
                         end if
                         code = tmp
                     end function
                    
                    public static double code(double x, double y, double z, double t, double a, double b) {
                    	double tmp;
                    	if ((x + y) <= -2e+85) {
                    		tmp = x + (b * a);
                    	} else if ((x + y) <= 2e-24) {
                    		tmp = b * (a + -0.5);
                    	} else {
                    		tmp = y + (b * a);
                    	}
                    	return tmp;
                    }
                    
                    def code(x, y, z, t, a, b):
                    	tmp = 0
                    	if (x + y) <= -2e+85:
                    		tmp = x + (b * a)
                    	elif (x + y) <= 2e-24:
                    		tmp = b * (a + -0.5)
                    	else:
                    		tmp = y + (b * a)
                    	return tmp
                    
                     # Herbie alternative 19 (49.2% accurate per report): piecewise
                     # model branching on x + y; arguments z and t are unused.
                     function code(x, y, z, t, a, b)
                     	tmp = 0.0
                     	if (Float64(x + y) <= -2e+85)
                     		# x dominates the linear terms
                     		tmp = Float64(x + Float64(b * a));
                     	elseif (Float64(x + y) <= 2e-24)
                     		# x + y negligible: only the (a - 0.5)*b term survives
                     		tmp = Float64(b * Float64(a + -0.5));
                     	else
                     		# y dominates the linear terms
                     		tmp = Float64(y + Float64(b * a));
                     	end
                     	return tmp
                     end
                    
                     function tmp_2 = code(x, y, z, t, a, b)
                     	% Herbie alternative 19 (49.2% accurate per report): piecewise
                     	% model branching on x + y; arguments z and t are unused.
                     	tmp = 0.0;
                     	if ((x + y) <= -2e+85)
                     		% x dominates the linear terms
                     		tmp = x + (b * a);
                     	elseif ((x + y) <= 2e-24)
                     		% only the (a - 0.5)*b term survives
                     		tmp = b * (a + -0.5);
                     	else
                     		% y dominates the linear terms
                     		tmp = y + (b * a);
                     	end
                     	tmp_2 = tmp;
                     end
                    
                     (* Herbie alternative 19: three-regime piecewise form selected by x + y; z and t are unused. *)
                     code[x_, y_, z_, t_, a_, b_] := If[LessEqual[N[(x + y), $MachinePrecision], -2e+85], N[(x + N[(b * a), $MachinePrecision]), $MachinePrecision], If[LessEqual[N[(x + y), $MachinePrecision], 2e-24], N[(b * N[(a + -0.5), $MachinePrecision]), $MachinePrecision], N[(y + N[(b * a), $MachinePrecision]), $MachinePrecision]]]
                    
                    \begin{array}{l}
                    
                    \\
                    \begin{array}{l}
                    \mathbf{if}\;x + y \leq -2 \cdot 10^{+85}:\\
                    \;\;\;\;x + b \cdot a\\
                    
                    \mathbf{elif}\;x + y \leq 2 \cdot 10^{-24}:\\
                    \;\;\;\;b \cdot \left(a + -0.5\right)\\
                    
                    \mathbf{else}:\\
                    \;\;\;\;y + b \cdot a\\
                    
                    
                    \end{array}
                    \end{array}
                    
                    Derivation
                    1. Split input into 3 regimes
                    2. if (+.f64 x y) < -2e85

                      1. Initial program 99.9%

                        \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                      2. Add Preprocessing
                      3. Taylor expanded in x around inf

                        \[\leadsto \color{blue}{x} + \left(a - \frac{1}{2}\right) \cdot b \]
                      4. Step-by-step derivation
                        1. Simplified54.0%

                          \[\leadsto \color{blue}{x} + \left(a - 0.5\right) \cdot b \]
                        2. Taylor expanded in a around inf

                          \[\leadsto x + \color{blue}{a \cdot b} \]
                        3. Step-by-step derivation
                          1. *-commutativeN/A

                            \[\leadsto x + \color{blue}{b \cdot a} \]
                          2. *-lowering-*.f6449.5

                            \[\leadsto x + \color{blue}{b \cdot a} \]
                        4. Simplified49.5%

                          \[\leadsto x + \color{blue}{b \cdot a} \]

                        if -2e85 < (+.f64 x y) < 1.99999999999999985e-24

                        1. Initial program 99.8%

                          \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                        2. Add Preprocessing
                        3. Taylor expanded in b around inf

                          \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                        4. Step-by-step derivation
                          1. *-lowering-*.f64N/A

                            \[\leadsto \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                          2. sub-negN/A

                            \[\leadsto b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                          3. metadata-evalN/A

                            \[\leadsto b \cdot \left(a + \color{blue}{\frac{-1}{2}}\right) \]
                          4. +-lowering-+.f6457.6

                            \[\leadsto b \cdot \color{blue}{\left(a + -0.5\right)} \]
                        5. Simplified57.6%

                          \[\leadsto \color{blue}{b \cdot \left(a + -0.5\right)} \]

                        if 1.99999999999999985e-24 < (+.f64 x y)

                        1. Initial program 99.9%

                          \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                        2. Add Preprocessing
                        3. Step-by-step derivation
                          1. *-commutativeN/A

                            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                          2. sub-negN/A

                            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                          3. distribute-lft-inN/A

                            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                          4. accelerator-lowering-fma.f64N/A

                            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                          5. *-lowering-*.f64N/A

                            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                          6. metadata-eval99.9

                            \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
                        4. Applied egg-rr99.9%

                          \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
                        5. Taylor expanded in z around 0

                          \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
                        6. Step-by-step derivation
                          1. associate-+r+N/A

                            \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
                          2. +-commutativeN/A

                            \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
                          3. distribute-rgt-inN/A

                            \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
                          4. metadata-evalN/A

                            \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                          5. sub-negN/A

                            \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
                          6. associate-+r+N/A

                            \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                          7. +-lowering-+.f64N/A

                            \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                          8. +-commutativeN/A

                            \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
                          9. accelerator-lowering-fma.f64N/A

                            \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
                          10. sub-negN/A

                            \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
                          11. metadata-evalN/A

                            \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
                          12. +-lowering-+.f6483.2

                            \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
                        7. Simplified83.2%

                          \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
                        8. Step-by-step derivation
                          1. associate-+r+N/A

                            \[\leadsto \color{blue}{\left(x + b \cdot \left(a + \frac{-1}{2}\right)\right) + y} \]
                          2. *-commutativeN/A

                            \[\leadsto \left(x + \color{blue}{\left(a + \frac{-1}{2}\right) \cdot b}\right) + y \]
                          3. +-commutativeN/A

                            \[\leadsto \color{blue}{\left(\left(a + \frac{-1}{2}\right) \cdot b + x\right)} + y \]
                          4. +-lowering-+.f64N/A

                            \[\leadsto \color{blue}{\left(\left(a + \frac{-1}{2}\right) \cdot b + x\right) + y} \]
                          5. *-commutativeN/A

                            \[\leadsto \left(\color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} + x\right) + y \]
                          6. metadata-evalN/A

                            \[\leadsto \left(b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) + x\right) + y \]
                          7. sub-negN/A

                            \[\leadsto \left(b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} + x\right) + y \]
                          8. accelerator-lowering-fma.f64N/A

                            \[\leadsto \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} + y \]
                          9. sub-negN/A

                            \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) + y \]
                          10. metadata-evalN/A

                            \[\leadsto \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) + y \]
                          11. +-lowering-+.f6483.2

                            \[\leadsto \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) + y \]
                        9. Applied egg-rr83.2%

                          \[\leadsto \color{blue}{\mathsf{fma}\left(b, a + -0.5, x\right) + y} \]
                        10. Taylor expanded in a around inf

                          \[\leadsto \color{blue}{a \cdot b} + y \]
                        11. Step-by-step derivation
                          1. *-lowering-*.f6450.4

                            \[\leadsto \color{blue}{a \cdot b} + y \]
                        12. Simplified50.4%

                          \[\leadsto \color{blue}{a \cdot b} + y \]
                      5. Recombined 3 regimes into one program.
                      6. Final simplification52.4%

                        \[\leadsto \begin{array}{l} \mathbf{if}\;x + y \leq -2 \cdot 10^{+85}:\\ \;\;\;\;x + b \cdot a\\ \mathbf{elif}\;x + y \leq 2 \cdot 10^{-24}:\\ \;\;\;\;b \cdot \left(a + -0.5\right)\\ \mathbf{else}:\\ \;\;\;\;y + b \cdot a\\ \end{array} \]
                      7. Add Preprocessing

                      Alternative 20: 78.0% accurate, 9.7× speedup?

                      \[\begin{array}{l} \\ y + \mathsf{fma}\left(b, a + -0.5, x\right) \end{array} \]
                      (FPCore (x y z t a b) :precision binary64 (+ y (fma b (+ a -0.5) x)))
                      double code(double x, double y, double z, double t, double a, double b) {
                      	return y + fma(b, (a + -0.5), x);
                      }
                      
                       # Herbie alternative 20 (78.0% accurate per report):
                       # fma(b, a - 0.5, x) accumulated into y; z and t are unused.
                       function code(x, y, z, t, a, b)
                       	return Float64(y + fma(b, Float64(a + -0.5), x))
                       end
                      
                       (* Herbie alternative 20: b*(a - 0.5) + x, then accumulated into y; z and t are unused. *)
                       code[x_, y_, z_, t_, a_, b_] := N[(y + N[(b * N[(a + -0.5), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]
                      
                      \begin{array}{l}
                      
                      \\
                      y + \mathsf{fma}\left(b, a + -0.5, x\right)
                      \end{array}
                      
                      Derivation
                      1. Initial program 99.9%

                        \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                      2. Add Preprocessing
                      3. Taylor expanded in z around 0

                        \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                      4. Step-by-step derivation
                        1. +-commutativeN/A

                          \[\leadsto \color{blue}{\left(y + b \cdot \left(a - \frac{1}{2}\right)\right) + x} \]
                        2. associate-+l+N/A

                          \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
                        3. +-lowering-+.f64N/A

                          \[\leadsto \color{blue}{y + \left(b \cdot \left(a - \frac{1}{2}\right) + x\right)} \]
                        4. accelerator-lowering-fma.f64N/A

                          \[\leadsto y + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, x\right)} \]
                        5. sub-negN/A

                          \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, x\right) \]
                        6. metadata-evalN/A

                          \[\leadsto y + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, x\right) \]
                        7. +-lowering-+.f6478.1

                          \[\leadsto y + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, x\right) \]
                      5. Simplified78.1%

                        \[\leadsto \color{blue}{y + \mathsf{fma}\left(b, a + -0.5, x\right)} \]
                      6. Add Preprocessing

                      Alternative 21: 78.0% accurate, 9.7× speedup?

                      \[\begin{array}{l} \\ x + \mathsf{fma}\left(b, a + -0.5, y\right) \end{array} \]
                      (FPCore (x y z t a b) :precision binary64 (+ x (fma b (+ a -0.5) y)))
                      double code(double x, double y, double z, double t, double a, double b) {
                      	return x + fma(b, (a + -0.5), y);
                      }
                      
                       # Herbie alternative 21 (78.0% accurate per report):
                       # fma(b, a - 0.5, y) accumulated into x; z and t are unused.
                       function code(x, y, z, t, a, b)
                       	return Float64(x + fma(b, Float64(a + -0.5), y))
                       end
                      
                       (* Herbie alternative 21: b*(a - 0.5) + y, then accumulated into x; z and t are unused. *)
                       code[x_, y_, z_, t_, a_, b_] := N[(x + N[(b * N[(a + -0.5), $MachinePrecision] + y), $MachinePrecision]), $MachinePrecision]
                      
                      \begin{array}{l}
                      
                      \\
                      x + \mathsf{fma}\left(b, a + -0.5, y\right)
                      \end{array}
                      
                      Derivation
                      1. Initial program 99.9%

                        \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                      2. Add Preprocessing
                      3. Step-by-step derivation
                        1. *-commutativeN/A

                          \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{b \cdot \left(a - \frac{1}{2}\right)} \]
                        2. sub-negN/A

                          \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + b \cdot \color{blue}{\left(a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                        3. distribute-lft-inN/A

                          \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\left(b \cdot a + b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                        4. accelerator-lowering-fma.f64N/A

                          \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} \]
                        5. *-lowering-*.f64N/A

                          \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, \color{blue}{b \cdot \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                        6. metadata-eval99.9

                          \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \mathsf{fma}\left(b, a, b \cdot \color{blue}{-0.5}\right) \]
                      4. Applied egg-rr99.9%

                        \[\leadsto \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \color{blue}{\mathsf{fma}\left(b, a, b \cdot -0.5\right)} \]
                      5. Taylor expanded in z around 0

                        \[\leadsto \color{blue}{x + \left(y + \left(\frac{-1}{2} \cdot b + a \cdot b\right)\right)} \]
                      6. Step-by-step derivation
                        1. associate-+r+N/A

                          \[\leadsto \color{blue}{\left(x + y\right) + \left(\frac{-1}{2} \cdot b + a \cdot b\right)} \]
                        2. +-commutativeN/A

                          \[\leadsto \left(x + y\right) + \color{blue}{\left(a \cdot b + \frac{-1}{2} \cdot b\right)} \]
                        3. distribute-rgt-inN/A

                          \[\leadsto \left(x + y\right) + \color{blue}{b \cdot \left(a + \frac{-1}{2}\right)} \]
                        4. metadata-evalN/A

                          \[\leadsto \left(x + y\right) + b \cdot \left(a + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right) \]
                        5. sub-negN/A

                          \[\leadsto \left(x + y\right) + b \cdot \color{blue}{\left(a - \frac{1}{2}\right)} \]
                        6. associate-+r+N/A

                          \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                        7. +-lowering-+.f64N/A

                          \[\leadsto \color{blue}{x + \left(y + b \cdot \left(a - \frac{1}{2}\right)\right)} \]
                        8. +-commutativeN/A

                          \[\leadsto x + \color{blue}{\left(b \cdot \left(a - \frac{1}{2}\right) + y\right)} \]
                        9. accelerator-lowering-fma.f64N/A

                          \[\leadsto x + \color{blue}{\mathsf{fma}\left(b, a - \frac{1}{2}, y\right)} \]
                        10. sub-negN/A

                          \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, y\right) \]
                        11. metadata-evalN/A

                          \[\leadsto x + \mathsf{fma}\left(b, a + \color{blue}{\frac{-1}{2}}, y\right) \]
                        12. +-lowering-+.f6478.1

                          \[\leadsto x + \mathsf{fma}\left(b, \color{blue}{a + -0.5}, y\right) \]
                      7. Simplified78.1%

                        \[\leadsto \color{blue}{x + \mathsf{fma}\left(b, a + -0.5, y\right)} \]
                      8. Add Preprocessing

                      Alternative 22: 42.4% accurate, 31.5× speedup?

                      \[\begin{array}{l} \\ x + y \end{array} \]
                      (FPCore (x y z t a b) :precision binary64 (+ x y))
                      double code(double x, double y, double z, double t, double a, double b) {
                      	return x + y;
                      }
                      
                       real(8) function code(x, y, z, t, a, b)
                           ! Herbie alternative 22 (42.4% accurate per report):
                           ! keeps only the x + y term; all other arguments unused.
                           real(8), intent (in) :: x
                           real(8), intent (in) :: y
                           real(8), intent (in) :: z
                           real(8), intent (in) :: t
                           real(8), intent (in) :: a
                           real(8), intent (in) :: b
                           code = x + y
                       end function
                      
                       public static double code(double x, double y, double z, double t, double a, double b) {
                       	// Herbie alternative 22 (42.4% accurate per report):
                       	// keeps only the x + y term; all other arguments unused.
                       	return x + y;
                       }
                      
                       def code(x, y, z, t, a, b):
                       	# Herbie alternative 22 (42.4% accurate per report):
                       	# keeps only the x + y term; all other arguments unused.
                       	return x + y
                      
                       # Herbie alternative 22 (42.4% accurate per report):
                       # keeps only the x + y term; all other arguments unused.
                       function code(x, y, z, t, a, b)
                       	return Float64(x + y)
                       end
                      
                       function tmp = code(x, y, z, t, a, b)
                       	% Herbie alternative 22 (42.4% accurate per report):
                       	% keeps only the x + y term; all other arguments unused.
                       	tmp = x + y;
                       end
                      
                       (* Herbie alternative 22: keeps only the x + y term; all other arguments unused. *)
                       code[x_, y_, z_, t_, a_, b_] := N[(x + y), $MachinePrecision]
                      
                      \begin{array}{l}
                      
                      \\
                      x + y
                      \end{array}
                      
                      Derivation
                      1. Initial program 99.9%

                        \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                      2. Add Preprocessing
                      3. Taylor expanded in b around 0

                        \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) - z \cdot \log t} \]
                      4. Step-by-step derivation
                        1. cancel-sign-sub-invN/A

                          \[\leadsto \color{blue}{\left(x + \left(y + z\right)\right) + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t} \]
                        2. associate-+r+N/A

                          \[\leadsto \color{blue}{\left(\left(x + y\right) + z\right)} + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t \]
                        3. associate-+l+N/A

                          \[\leadsto \color{blue}{\left(x + y\right) + \left(z + \left(\mathsf{neg}\left(z\right)\right) \cdot \log t\right)} \]
                        4. cancel-sign-sub-invN/A

                          \[\leadsto \left(x + y\right) + \color{blue}{\left(z - z \cdot \log t\right)} \]
                        5. *-rgt-identityN/A

                          \[\leadsto \left(x + y\right) + \left(\color{blue}{z \cdot 1} - z \cdot \log t\right) \]
                        6. distribute-lft-out--N/A

                          \[\leadsto \left(x + y\right) + \color{blue}{z \cdot \left(1 - \log t\right)} \]
                        7. +-commutativeN/A

                          \[\leadsto \color{blue}{z \cdot \left(1 - \log t\right) + \left(x + y\right)} \]
                        8. sub-negN/A

                          \[\leadsto z \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\log t\right)\right)\right)} + \left(x + y\right) \]
                        9. mul-1-negN/A

                          \[\leadsto z \cdot \left(1 + \color{blue}{-1 \cdot \log t}\right) + \left(x + y\right) \]
                        10. accelerator-lowering-fma.f64N/A

                          \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 + -1 \cdot \log t, x + y\right)} \]
                        11. mul-1-negN/A

                          \[\leadsto \mathsf{fma}\left(z, 1 + \color{blue}{\left(\mathsf{neg}\left(\log t\right)\right)}, x + y\right) \]
                        12. sub-negN/A

                          \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
                        13. --lowering--.f64N/A

                          \[\leadsto \mathsf{fma}\left(z, \color{blue}{1 - \log t}, x + y\right) \]
                        14. log-lowering-log.f64N/A

                          \[\leadsto \mathsf{fma}\left(z, 1 - \color{blue}{\log t}, x + y\right) \]
                        15. +-commutativeN/A

                          \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
                        16. +-lowering-+.f6464.0

                          \[\leadsto \mathsf{fma}\left(z, 1 - \log t, \color{blue}{y + x}\right) \]
                      5. Simplified64.0%

                        \[\leadsto \color{blue}{\mathsf{fma}\left(z, 1 - \log t, y + x\right)} \]
                      6. Taylor expanded in z around 0

                        \[\leadsto \color{blue}{x + y} \]
                      7. Step-by-step derivation
                        1. +-commutativeN/A

                          \[\leadsto \color{blue}{y + x} \]
                        2. +-lowering-+.f6442.6

                          \[\leadsto \color{blue}{y + x} \]
                      8. Simplified42.6%

                        \[\leadsto \color{blue}{y + x} \]
                      9. Final simplification42.6%

                        \[\leadsto x + y \]
                      10. Add Preprocessing

                      Alternative 23: 22.3% accurate, 126.0× speedup?

                      \[\begin{array}{l} \\ x \end{array} \]
                      (FPCore (x y z t a b) :precision binary64 x)
                      double code(double x, double y, double z, double t, double a, double b) {
                      	return x;
                      }
                      
                       real(8) function code(x, y, z, t, a, b)
                           ! Herbie alternative 23 (22.3% accurate per report):
                           ! keeps only x; all other arguments unused.
                           real(8), intent (in) :: x
                           real(8), intent (in) :: y
                           real(8), intent (in) :: z
                           real(8), intent (in) :: t
                           real(8), intent (in) :: a
                           real(8), intent (in) :: b
                           code = x
                       end function
                      
                       public static double code(double x, double y, double z, double t, double a, double b) {
                       	// Herbie alternative 23 (22.3% accurate per report):
                       	// keeps only x; all other arguments unused.
                       	return x;
                       }
                      
                       def code(x, y, z, t, a, b):
                       	# Herbie alternative 23 (22.3% accurate per report):
                       	# keeps only x; all other arguments unused.
                       	return x
                      
                       # Herbie alternative 23 (22.3% accurate per report):
                       # keeps only x; all other arguments unused.
                       function code(x, y, z, t, a, b)
                       	return x
                       end
                      
                       function tmp = code(x, y, z, t, a, b)
                       	% Herbie alternative 23 (22.3% accurate per report):
                       	% keeps only x; all other arguments unused.
                       	tmp = x;
                       end
                      
                       (* Herbie alternative 23: keeps only x; all other arguments unused. *)
                       code[x_, y_, z_, t_, a_, b_] := x
                      
                      \begin{array}{l}
                      
                      \\
                      x
                      \end{array}
                      
                      Derivation
                      1. Initial program 99.9%

                        \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
                      2. Add Preprocessing
                      3. Taylor expanded in x around inf

                        \[\leadsto \color{blue}{x} \]
                      4. Step-by-step derivation
                        1. Simplified21.9%

                          \[\leadsto \color{blue}{x} \]
                        2. Add Preprocessing

                        Developer Target 1: 99.5% accurate, 0.4× speedup?

                        \[\begin{array}{l} \\ \left(\left(x + y\right) + \frac{\left(1 - {\log t}^{2}\right) \cdot z}{1 + \log t}\right) + \left(a - 0.5\right) \cdot b \end{array} \]
                        (FPCore (x y z t a b)
                         :precision binary64
                         (+
                          (+ (+ x y) (/ (* (- 1.0 (pow (log t) 2.0)) z) (+ 1.0 (log t))))
                          (* (- a 0.5) b)))
                        double code(double x, double y, double z, double t, double a, double b) {
                        	return ((x + y) + (((1.0 - pow(log(t), 2.0)) * z) / (1.0 + log(t)))) + ((a - 0.5) * b);
                        }
                        
                         real(8) function code(x, y, z, t, a, b)
                             ! Developer target (99.5% accurate per report): the z - z*log(t)
                             ! term rewritten as (1 - log(t)**2)*z / (1 + log(t)), which is
                             ! algebraically equal to z*(1 - log(t)).
                             real(8), intent (in) :: x
                             real(8), intent (in) :: y
                             real(8), intent (in) :: z
                             real(8), intent (in) :: t
                             real(8), intent (in) :: a
                             real(8), intent (in) :: b
                             code = ((x + y) + (((1.0d0 - (log(t) ** 2.0d0)) * z) / (1.0d0 + log(t)))) + ((a - 0.5d0) * b)
                         end function
                        
                        public static double code(double x, double y, double z, double t, double a, double b) {
                        	return ((x + y) + (((1.0 - Math.pow(Math.log(t), 2.0)) * z) / (1.0 + Math.log(t)))) + ((a - 0.5) * b);
                        }
                        
                        def code(x, y, z, t, a, b):
                        	return ((x + y) + (((1.0 - math.pow(math.log(t), 2.0)) * z) / (1.0 + math.log(t)))) + ((a - 0.5) * b)
                        
                        function code(x, y, z, t, a, b)
                        	return Float64(Float64(Float64(x + y) + Float64(Float64(Float64(1.0 - (log(t) ^ 2.0)) * z) / Float64(1.0 + log(t)))) + Float64(Float64(a - 0.5) * b))
                        end
                        
                        function tmp = code(x, y, z, t, a, b)
                        	tmp = ((x + y) + (((1.0 - (log(t) ^ 2.0)) * z) / (1.0 + log(t)))) + ((a - 0.5) * b);
                        end
                        
                        code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(x + y), $MachinePrecision] + N[(N[(N[(1.0 - N[Power[N[Log[t], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] / N[(1.0 + N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]
                        
                        \begin{array}{l}
                        
                        \\
                        \left(\left(x + y\right) + \frac{\left(1 - {\log t}^{2}\right) \cdot z}{1 + \log t}\right) + \left(a - 0.5\right) \cdot b
                        \end{array}
                        

                        Reproduce

                        ?
                        herbie shell --seed 2024204 
                         ; Reproduction input for `herbie shell` (seed 2024204): the original
                         ; logBeta expression, with the developer-target rewrite attached as an
                         ; :alt annotation for comparison.
                         (FPCore (x y z t a b)
                           :name "Numeric.SpecFunctions:logBeta from math-functions-0.1.5.2, A"
                           :precision binary64
                         
                           :alt
                           (! :herbie-platform default (+ (+ (+ x y) (/ (* (- 1 (pow (log t) 2)) z) (+ 1 (log t)))) (* (- a 1/2) b)))
                         
                           (+ (- (+ (+ x y) z) (* z (log t))) (* (- a 0.5) b)))