Octave 3.8, jcobi/3

Percentage Accurate: 94.4% → 99.8%
Time: 9.9s
Alternatives: 18
Speedup: 1.7×

Specification

?
\[\alpha > -1 \land \beta > -1\]
\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1} \end{array} \end{array} \]
; jcobi/3 specification: t_0 = (alpha + beta) + 2; result =
; (((alpha + beta) + beta*alpha + 1) / t_0 / t_0) / (t_0 + 1).
; Report precondition: alpha > -1 and beta > -1.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* Computes ((alpha + beta) + beta*alpha + 1) / (t * t * (t + 1)) with
 * t = alpha + beta + 2, in the same binary64 evaluation order as the
 * FPCore specification above.
 * Report precondition: alpha > -1 and beta > -1 (so t > 0). */
double code(double alpha, double beta) {
	double pair_sum = alpha + beta;
	double t = pair_sum + (2.0 * 1.0);
	double numerator = (pair_sum + (beta * alpha)) + 1.0;
	double quotient = numerator / t;
	quotient = quotient / t;
	return quotient / (t + 1.0);
}
! Computes ((alpha+beta) + beta*alpha + 1) / (t_0*t_0*(t_0+1)) with
! t_0 = alpha + beta + 2, step for step in double precision.
! Report precondition: alpha > -1 and beta > -1 (so t_0 > 0).
real(8) function code(alpha, beta)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    ! The (2.0d0 * 1.0d0) product is kept from the generated source.
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
/**
 * Computes ((alpha + beta) + beta*alpha + 1) / (t * t * (t + 1)) with
 * t = alpha + beta + 2, in the same double-precision evaluation order
 * as the FPCore specification.
 * Report precondition: alpha > -1 and beta > -1.
 */
public static double code(double alpha, double beta) {
	double pairSum = alpha + beta;
	double t = pairSum + (2.0 * 1.0);
	double numerator = (pairSum + (beta * alpha)) + 1.0;
	double quotient = numerator / t;
	quotient = quotient / t;
	return quotient / (t + 1.0);
}
def code(alpha, beta):
	"""Binary64 evaluation of ((a+b) + b*a + 1) / (t*t*(t+1)) with t = a+b+2.

	Report precondition: alpha > -1 and beta > -1 (so t > 0).
	Evaluation order matches the FPCore specification exactly.
	"""
	pair_sum = alpha + beta
	t = pair_sum + (2.0 * 1.0)
	numerator = (pair_sum + (beta * alpha)) + 1.0
	value = numerator / t
	value = value / t
	return value / (t + 1.0)
# Computes ((alpha+beta) + beta*alpha + 1) / (t_0^2 * (t_0+1)) with
# t_0 = alpha + beta + 2, rounding every intermediate to Float64 to
# mirror the binary64 FPCore specification.
# Report precondition: alpha > -1 and beta > -1.
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
% Computes ((alpha+beta) + beta*alpha + 1) / (t_0^2 * (t_0+1)) with
% t_0 = alpha + beta + 2, in the same order as the binary64 spec.
% Report precondition: alpha > -1 and beta > -1.
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
(* jcobi/3 original program: t0 = alpha + beta + 2; result =
   (((alpha + beta) + beta*alpha + 1) / t0 / t0) / (t0 + 1),
   with every intermediate rounded via N[..., $MachinePrecision]. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 18 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternatives. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative.The line shows the best available speed-accuracy tradeoffs.

Initial Program: 94.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1} \end{array} \end{array} \]
; Initial program (94.4% accurate): t_0 = (alpha + beta) + 2; result =
; (((alpha + beta) + beta*alpha + 1) / t_0 / t_0) / (t_0 + 1).
; Report precondition: alpha > -1 and beta > -1.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* Initial program: ((alpha + beta) + beta*alpha + 1) / (t * t * (t + 1))
 * with t = alpha + beta + 2, evaluated in the spec's binary64 order.
 * Report precondition: alpha > -1 and beta > -1 (so t > 0). */
double code(double alpha, double beta) {
	double pair_sum = alpha + beta;
	double t = pair_sum + (2.0 * 1.0);
	double numerator = (pair_sum + (beta * alpha)) + 1.0;
	double quotient = numerator / t;
	quotient = quotient / t;
	return quotient / (t + 1.0);
}
! Initial program: ((alpha+beta) + beta*alpha + 1) / (t_0*t_0*(t_0+1))
! with t_0 = alpha + beta + 2, step for step in double precision.
! Report precondition: alpha > -1 and beta > -1 (so t_0 > 0).
real(8) function code(alpha, beta)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    ! The (2.0d0 * 1.0d0) product is kept from the generated source.
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
/**
 * Initial program: ((alpha + beta) + beta*alpha + 1) / (t * t * (t + 1))
 * with t = alpha + beta + 2, evaluated in the spec's double-precision order.
 * Report precondition: alpha > -1 and beta > -1.
 */
public static double code(double alpha, double beta) {
	double pairSum = alpha + beta;
	double t = pairSum + (2.0 * 1.0);
	double numerator = (pairSum + (beta * alpha)) + 1.0;
	double quotient = numerator / t;
	quotient = quotient / t;
	return quotient / (t + 1.0);
}
def code(alpha, beta):
	"""Initial program: ((a+b) + b*a + 1) / (t*t*(t+1)) with t = a+b+2.

	Report precondition: alpha > -1 and beta > -1 (so t > 0).
	Evaluation order matches the binary64 FPCore specification exactly.
	"""
	pair_sum = alpha + beta
	t = pair_sum + (2.0 * 1.0)
	numerator = (pair_sum + (beta * alpha)) + 1.0
	value = numerator / t
	value = value / t
	return value / (t + 1.0)
# Initial program: ((alpha+beta) + beta*alpha + 1) / (t_0^2 * (t_0+1))
# with t_0 = alpha + beta + 2, rounding every intermediate to Float64
# to mirror the binary64 FPCore specification.
# Report precondition: alpha > -1 and beta > -1.
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
% Initial program: ((alpha+beta) + beta*alpha + 1) / (t_0^2 * (t_0+1))
% with t_0 = alpha + beta + 2, in the same order as the binary64 spec.
% Report precondition: alpha > -1 and beta > -1.
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
(* Initial program: t0 = alpha + beta + 2; result =
   (((alpha + beta) + beta*alpha + 1) / t0 / t0) / (t0 + 1),
   with every intermediate rounded via N[..., $MachinePrecision]. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}

Alternative 1: 99.8% accurate, 0.2× speedup?

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\alpha \leq 7000000000000:\\ \;\;\;\;\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{{\left(\frac{\mathsf{fma}\left(-1, \frac{2}{-1 - \beta} + \frac{\beta}{-1 - \beta}, \frac{-1 - \beta}{{\left(-1 - \beta\right)}^{2}}\right)}{\alpha} - {\left(-1 - \beta\right)}^{-1}\right)}^{-1}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 1\right) + 2}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
; Herbie alternative 1 (99.8% accurate, 0.2x speedup).
; Precondition: inputs must be pre-sorted so that alpha <= beta.
; alpha <= 7e12: factored form — fma-based numerator scaled by (t0)^-2
;                with t0 = beta + alpha + 2.
; alpha  > 7e12: expansion of the original in alpha (report derivation,
;                step 10) to avoid the inaccurate direct evaluation.
(FPCore (alpha beta)
 :precision binary64
 (if (<= alpha 7000000000000.0)
   (/
    (*
     (+ (fma beta alpha (+ beta alpha)) 1.0)
     (pow (+ (+ beta alpha) 2.0) -2.0))
    (+ 3.0 (+ beta alpha)))
   (/
    (/
     (pow
      (-
       (/
        (fma
         -1.0
         (+ (/ 2.0 (- -1.0 beta)) (/ beta (- -1.0 beta)))
         (/ (- -1.0 beta) (pow (- -1.0 beta) 2.0)))
        alpha)
       (pow (- -1.0 beta) -1.0))
      -1.0)
     (+ (+ alpha beta) 2.0))
    (+ (+ (+ alpha beta) 1.0) 2.0))))
assert(alpha < beta);
/* Herbie alternative 1 (99.8% accurate, 0.2x speedup): same quantity as
 * the original jcobi expression, split into two regimes for accuracy.
 * Precondition: callers must pass the arguments sorted so that
 * alpha <= beta (the report's preprocessing step sorts the inputs). */
double code(double alpha, double beta) {
	double tmp;
	if (alpha <= 7000000000000.0) {
		/* Moderate alpha: fma-based numerator scaled by (t0)^-2,
		 * where t0 = beta + alpha + 2. */
		tmp = ((fma(beta, alpha, (beta + alpha)) + 1.0) * pow(((beta + alpha) + 2.0), -2.0)) / (3.0 + (beta + alpha));
	} else {
		/* Very large alpha: expansion of the original in alpha
		 * (report derivation step 10) to sidestep the inaccurate
		 * direct evaluation in this regime. */
		tmp = (pow(((fma(-1.0, ((2.0 / (-1.0 - beta)) + (beta / (-1.0 - beta))), ((-1.0 - beta) / pow((-1.0 - beta), 2.0))) / alpha) - pow((-1.0 - beta), -1.0)), -1.0) / ((alpha + beta) + 2.0)) / (((alpha + beta) + 1.0) + 2.0);
	}
	return tmp;
}
alpha, beta = sort([alpha, beta])
# Herbie alternative 1 (99.8% accurate): same quantity as the original
# jcobi expression, split into two regimes at alpha = 7e12 for accuracy.
# Precondition: callers must sort the inputs so that alpha <= beta.
function code(alpha, beta)
	tmp = 0.0
	if (alpha <= 7000000000000.0)
		# Moderate alpha: fma numerator scaled by (beta+alpha+2)^-2.
		tmp = Float64(Float64(Float64(fma(beta, alpha, Float64(beta + alpha)) + 1.0) * (Float64(Float64(beta + alpha) + 2.0) ^ -2.0)) / Float64(3.0 + Float64(beta + alpha)));
	else
		# Very large alpha: expansion of the original in alpha
		# (report derivation step 10).
		tmp = Float64(Float64((Float64(Float64(fma(-1.0, Float64(Float64(2.0 / Float64(-1.0 - beta)) + Float64(beta / Float64(-1.0 - beta))), Float64(Float64(-1.0 - beta) / (Float64(-1.0 - beta) ^ 2.0))) / alpha) - (Float64(-1.0 - beta) ^ -1.0)) ^ -1.0) / Float64(Float64(alpha + beta) + 2.0)) / Float64(Float64(Float64(alpha + beta) + 1.0) + 2.0));
	end
	return tmp
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(* Herbie alternative 1 (99.8% accurate): regime split at alpha = 7e12;
   inputs must be pre-sorted so that alpha <= beta. The first branch uses
   an fma-style numerator with a (beta+alpha+2)^-2 factor; the second is
   an expansion of the original in alpha for very large alpha. *)
code[alpha_, beta_] := If[LessEqual[alpha, 7000000000000.0], N[(N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * N[Power[N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision], -2.0], $MachinePrecision]), $MachinePrecision] / N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[Power[N[(N[(N[(-1.0 * N[(N[(2.0 / N[(-1.0 - beta), $MachinePrecision]), $MachinePrecision] + N[(beta / N[(-1.0 - beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(-1.0 - beta), $MachinePrecision] / N[Power[N[(-1.0 - beta), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] - N[Power[N[(-1.0 - beta), $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + 1.0), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 7000000000000:\\
\;\;\;\;\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{{\left(\frac{\mathsf{fma}\left(-1, \frac{2}{-1 - \beta} + \frac{\beta}{-1 - \beta}, \frac{-1 - \beta}{{\left(-1 - \beta\right)}^{2}}\right)}{\alpha} - {\left(-1 - \beta\right)}^{-1}\right)}^{-1}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 1\right) + 2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if alpha < 7e12

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. Applied rewrites99.9%

        \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]

      if 7e12 < alpha

      1. Initial program 76.3%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Add Preprocessing
      3. Step-by-step derivation
        1. lift-/.f64N/A

          \[\leadsto \frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. clear-numN/A

          \[\leadsto \frac{\frac{\color{blue}{\frac{1}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        3. lower-/.f64N/A

          \[\leadsto \frac{\frac{\color{blue}{\frac{1}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        4. lower-/.f6476.3

          \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        5. lift-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\frac{\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        6. +-commutativeN/A

          \[\leadsto \frac{\frac{\frac{1}{\frac{\color{blue}{\left(\beta + \alpha\right)} + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        7. lower-+.f6476.3

          \[\leadsto \frac{\frac{\frac{1}{\frac{\color{blue}{\left(\beta + \alpha\right)} + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        8. lift-*.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + \color{blue}{2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        9. metadata-eval76.3

          \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + \color{blue}{2}}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        10. lift-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        11. +-commutativeN/A

          \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        12. lift-*.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\left(\color{blue}{\beta \cdot \alpha} + \left(\alpha + \beta\right)\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        13. lower-fma.f6476.3

          \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\color{blue}{\mathsf{fma}\left(\beta, \alpha, \alpha + \beta\right)} + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        14. lift-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \color{blue}{\alpha + \beta}\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        15. +-commutativeN/A

          \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \color{blue}{\beta + \alpha}\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        16. lower-+.f6476.3

          \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \color{blue}{\beta + \alpha}\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. Applied rewrites76.3%

        \[\leadsto \frac{\frac{\color{blue}{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. Taylor expanded in alpha around 0

        \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\alpha \cdot \left(\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)\right) + \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. Step-by-step derivation
        1. *-commutativeN/A

          \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\left(\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)\right) \cdot \alpha} + \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. lower-fma.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        3. lower--.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\color{blue}{\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}, \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        4. lower-/.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\color{blue}{\frac{1}{1 + \beta}} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        5. lower-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{\color{blue}{1 + \beta}} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        6. +-commutativeN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \color{blue}{\left(\frac{\beta}{1 + \beta} + 2 \cdot \frac{1}{1 + \beta}\right)}, \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        7. lower-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \color{blue}{\left(\frac{\beta}{1 + \beta} + 2 \cdot \frac{1}{1 + \beta}\right)}, \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        8. lower-/.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\color{blue}{\frac{\beta}{1 + \beta}} + 2 \cdot \frac{1}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        9. lower-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{\color{blue}{1 + \beta}} + 2 \cdot \frac{1}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        10. associate-*r/N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \color{blue}{\frac{2 \cdot 1}{1 + \beta}}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        11. metadata-evalN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{\color{blue}{2}}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        12. lower-/.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \color{blue}{\frac{2}{1 + \beta}}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        13. lower-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{\color{blue}{1 + \beta}}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        14. +-commutativeN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \color{blue}{\frac{\beta}{1 + \beta} + 2 \cdot \frac{1}{1 + \beta}}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      7. Applied rewrites55.6%

        \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      8. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
        2. +-commutativeN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
        3. lift-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{1 + \color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
        4. lift-*.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right)} \]
        5. metadata-evalN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
        6. associate-+r+N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
        7. +-commutativeN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 1\right)} + 2} \]
        8. metadata-evalN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{\left(2 - 1\right)}\right) + 2} \]
        9. associate--l+N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\left(\alpha + \beta\right) + 2\right) - 1\right)} + 2} \]
        10. metadata-evalN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) - 1\right) + 2} \]
        11. lift-*.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) - 1\right) + 2} \]
        12. lift-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} - 1\right) + 2} \]
        13. lower-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1\right) + 2}} \]
        14. lift-+.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} - 1\right) + 2} \]
        15. lift-*.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) - 1\right) + 2} \]
        16. metadata-evalN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2}\right) - 1\right) + 2} \]
        17. associate--l+N/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + \left(2 - 1\right)\right)} + 2} \]
        18. metadata-evalN/A

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{1}\right) + 2} \]
        19. lower-+.f6455.6

          \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 1\right)} + 2} \]
      9. Applied rewrites55.6%

        \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 1\right) + 2}} \]
      10. Taylor expanded in alpha around -inf

        \[\leadsto \frac{\frac{\frac{1}{\color{blue}{-1 \cdot \frac{\left(2 \cdot \frac{1}{-1 \cdot \beta - 1} + \frac{\beta}{-1 \cdot \beta - 1}\right) - -1 \cdot \frac{1 + \beta}{{\left(-1 \cdot \beta - 1\right)}^{2}}}{\alpha} - \frac{1}{-1 \cdot \beta - 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
      11. Step-by-step derivation
        1. lower--.f64N/A

          \[\leadsto \frac{\frac{\frac{1}{\color{blue}{-1 \cdot \frac{\left(2 \cdot \frac{1}{-1 \cdot \beta - 1} + \frac{\beta}{-1 \cdot \beta - 1}\right) - -1 \cdot \frac{1 + \beta}{{\left(-1 \cdot \beta - 1\right)}^{2}}}{\alpha} - \frac{1}{-1 \cdot \beta - 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
      12. Applied rewrites99.8%

        \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\frac{\mathsf{fma}\left(-1, \frac{2}{-1 - \beta} + \frac{\beta}{-1 - \beta}, \frac{-1 - \beta}{{\left(-1 - \beta\right)}^{2}}\right)}{\alpha} - \frac{1}{-1 - \beta}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
    4. Recombined 2 regimes into one program.
    5. Final simplification99.9%

      \[\leadsto \begin{array}{l} \mathbf{if}\;\alpha \leq 7000000000000:\\ \;\;\;\;\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{{\left(\frac{\mathsf{fma}\left(-1, \frac{2}{-1 - \beta} + \frac{\beta}{-1 - \beta}, \frac{-1 - \beta}{{\left(-1 - \beta\right)}^{2}}\right)}{\alpha} - {\left(-1 - \beta\right)}^{-1}\right)}^{-1}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 1\right) + 2}\\ \end{array} \]
    6. Add Preprocessing

    Alternative 2: 99.7% accurate, 0.5× speedup?

    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\alpha + \beta\right)\\ \mathbf{if}\;\beta \leq 2.55 \cdot 10^{+90}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot t\_0\right) \cdot t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\left(\left(1 + \left(\alpha + {\beta}^{-1}\right)\right) + \frac{\alpha}{\beta}\right) - \left(1 + \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)}\\ \end{array} \end{array} \]
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    ; Herbie alternative 2 (99.7% accurate, 0.5x speedup).
    ; Precondition: inputs must be pre-sorted so that alpha <= beta.
    ; beta <= 2.55e90: fma-based numerator; note (fma alpha alpha -1)/(alpha-1)
    ;                  is algebraically alpha + 1, rewritten for accuracy.
    ; beta  > 2.55e90: expansion in 1/beta for very large beta.
    (FPCore (alpha beta)
     :precision binary64
     (let* ((t_0 (+ 2.0 (+ alpha beta))))
       (if (<= beta 2.55e+90)
         (/
          (fma (+ alpha 1.0) beta (/ (fma alpha alpha -1.0) (- alpha 1.0)))
          (* (* (+ 3.0 (+ alpha beta)) t_0) t_0))
         (/
          (/
           (-
            (+ (+ 1.0 (+ alpha (pow beta -1.0))) (/ alpha beta))
            (* (+ 1.0 alpha) (/ (fma 2.0 alpha 4.0) beta)))
           beta)
          (+ 3.0 (+ beta alpha))))))
    assert(alpha < beta);
    /* Herbie alternative 2 (99.7% accurate, 0.5x speedup): same quantity
     * as the original jcobi expression, split on beta at 2.55e90.
     * Precondition: callers must pass the arguments sorted so that
     * alpha <= beta (the report's preprocessing step sorts the inputs). */
    double code(double alpha, double beta) {
    	double t_0 = 2.0 + (alpha + beta);
    	double tmp;
    	if (beta <= 2.55e+90) {
    		/* fma-based numerator; fma(alpha, alpha, -1)/(alpha - 1) is
    		 * algebraically alpha + 1.
    		 * NOTE(review): divides by (alpha - 1.0); at alpha == 1 this
    		 * relies on IEEE division-by-zero semantics — confirm inputs. */
    		tmp = fma((alpha + 1.0), beta, (fma(alpha, alpha, -1.0) / (alpha - 1.0))) / (((3.0 + (alpha + beta)) * t_0) * t_0);
    	} else {
    		/* Very large beta: expansion in 1/beta avoids overflow in the
    		 * direct evaluation. */
    		tmp = ((((1.0 + (alpha + pow(beta, -1.0))) + (alpha / beta)) - ((1.0 + alpha) * (fma(2.0, alpha, 4.0) / beta))) / beta) / (3.0 + (beta + alpha));
    	}
    	return tmp;
    }
    
    # Precondition: arguments must be sorted ascending before calling code().
    alpha, beta = sort([alpha, beta])
    # Herbie-rewritten evaluation of
    #   ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)^2 * (alpha+beta+3))
    # split into two regimes at beta = 2.55e90. The explicit Float64(...)
    # wrapping and operation order are accuracy-critical — do not simplify.
    function code(alpha, beta)
    	t_0 = Float64(2.0 + Float64(alpha + beta))
    	tmp = 0.0
    	if (beta <= 2.55e+90)
    		# Moderate beta: fma(alpha,alpha,-1)/(alpha-1) equals alpha+1
    		# mathematically; evaluated this way per the derivation.
    		# NOTE(review): alpha == 1.0 makes the inner denominator zero -> NaN.
    		tmp = Float64(fma(Float64(alpha + 1.0), beta, Float64(fma(alpha, alpha, -1.0) / Float64(alpha - 1.0))) / Float64(Float64(Float64(3.0 + Float64(alpha + beta)) * t_0) * t_0));
    	else
    		# Huge beta: Taylor expansion in 1/beta (around beta = inf).
    		tmp = Float64(Float64(Float64(Float64(Float64(1.0 + Float64(alpha + (beta ^ -1.0))) + Float64(alpha / beta)) - Float64(Float64(1.0 + alpha) * Float64(fma(2.0, alpha, 4.0) / beta))) / beta) / Float64(3.0 + Float64(beta + alpha)));
    	end
    	return tmp
    end
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    (* Herbie-rewritten evaluation (Alternative 2), split at beta = 2.55*10^90.
       Inputs must be sorted ascending; the N[..., $MachinePrecision] nesting
       fixes the operation order, which is accuracy-critical — do not simplify. *)
    code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 2.55e+90], N[(N[(N[(alpha + 1.0), $MachinePrecision] * beta + N[(N[(alpha * alpha + -1.0), $MachinePrecision] / N[(alpha - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(1.0 + N[(alpha + N[Power[beta, -1.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(alpha / beta), $MachinePrecision]), $MachinePrecision] - N[(N[(1.0 + alpha), $MachinePrecision] * N[(N[(2.0 * alpha + 4.0), $MachinePrecision] / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / beta), $MachinePrecision] / N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
    
    \begin{array}{l}
    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
    \\
    \begin{array}{l}
    t_0 := 2 + \left(\alpha + \beta\right)\\
    \mathbf{if}\;\beta \leq 2.55 \cdot 10^{+90}:\\
    \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot t\_0\right) \cdot t\_0}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{\frac{\left(\left(1 + \left(\alpha + {\beta}^{-1}\right)\right) + \frac{\alpha}{\beta}\right) - \left(1 + \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)}\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if beta < 2.5499999999999998e90

      1. Initial program 99.9%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Add Preprocessing
      3. Step-by-step derivation
        1. Applied rewrites (99.9% accurate)

          \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]
        2. Applied rewrites (96.3% accurate)

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}} \]
        3. Step-by-step derivation
          1. lift-+.f64N/A

            \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\alpha + 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
          2. flip-+N/A

            \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\frac{\alpha \cdot \alpha - 1 \cdot 1}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
          3. lower-/.f64N/A

            \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\frac{\alpha \cdot \alpha - 1 \cdot 1}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
          4. metadata-evalN/A

            \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\alpha \cdot \alpha - \color{blue}{1}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
          5. sub-negN/A

            \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\color{blue}{\alpha \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
          6. metadata-evalN/A

            \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\alpha \cdot \alpha + \color{blue}{-1}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
          7. lower-fma.f64N/A

            \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\color{blue}{\mathsf{fma}\left(\alpha, \alpha, -1\right)}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
          8. lower--.f64 (83.8% accurate)

            \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\color{blue}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
        4. Applied rewrites (83.8% accurate)

          \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]

        if 2.5499999999999998e90 < beta

        1. Initial program 75.8%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Add Preprocessing
        3. Step-by-step derivation
          1. Applied rewrites (69.7% accurate)

            \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]
          2. Taylor expanded in beta around inf

            \[\leadsto \frac{\color{blue}{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}}{3 + \left(\beta + \alpha\right)} \]
          3. Step-by-step derivation
            1. lower-/.f64N/A

              \[\leadsto \frac{\color{blue}{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}}{3 + \left(\beta + \alpha\right)} \]
            2. lower--.f64N/A

              \[\leadsto \frac{\frac{\color{blue}{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            3. associate-+r+N/A

              \[\leadsto \frac{\frac{\left(1 + \color{blue}{\left(\left(\alpha + \frac{1}{\beta}\right) + \frac{\alpha}{\beta}\right)}\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            4. associate-+r+N/A

              \[\leadsto \frac{\frac{\color{blue}{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \frac{\alpha}{\beta}\right)} - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            5. lower-+.f64N/A

              \[\leadsto \frac{\frac{\color{blue}{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \frac{\alpha}{\beta}\right)} - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            6. lower-+.f64N/A

              \[\leadsto \frac{\frac{\left(\color{blue}{\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right)} + \frac{\alpha}{\beta}\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            7. lower-+.f64N/A

              \[\leadsto \frac{\frac{\left(\left(1 + \color{blue}{\left(\alpha + \frac{1}{\beta}\right)}\right) + \frac{\alpha}{\beta}\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            8. lower-/.f64N/A

              \[\leadsto \frac{\frac{\left(\left(1 + \left(\alpha + \color{blue}{\frac{1}{\beta}}\right)\right) + \frac{\alpha}{\beta}\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            9. lower-/.f64N/A

              \[\leadsto \frac{\frac{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \color{blue}{\frac{\alpha}{\beta}}\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            10. associate-/l*N/A

              \[\leadsto \frac{\frac{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \frac{\alpha}{\beta}\right) - \color{blue}{\left(1 + \alpha\right) \cdot \frac{4 + 2 \cdot \alpha}{\beta}}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            11. lower-*.f64N/A

              \[\leadsto \frac{\frac{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \frac{\alpha}{\beta}\right) - \color{blue}{\left(1 + \alpha\right) \cdot \frac{4 + 2 \cdot \alpha}{\beta}}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            12. lower-+.f64N/A

              \[\leadsto \frac{\frac{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \frac{\alpha}{\beta}\right) - \color{blue}{\left(1 + \alpha\right)} \cdot \frac{4 + 2 \cdot \alpha}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            13. lower-/.f64N/A

              \[\leadsto \frac{\frac{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \frac{\alpha}{\beta}\right) - \left(1 + \alpha\right) \cdot \color{blue}{\frac{4 + 2 \cdot \alpha}{\beta}}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            14. +-commutativeN/A

              \[\leadsto \frac{\frac{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \frac{\alpha}{\beta}\right) - \left(1 + \alpha\right) \cdot \frac{\color{blue}{2 \cdot \alpha + 4}}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
            15. lower-fma.f64 (89.9% accurate)

              \[\leadsto \frac{\frac{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \frac{\alpha}{\beta}\right) - \left(1 + \alpha\right) \cdot \frac{\color{blue}{\mathsf{fma}\left(2, \alpha, 4\right)}}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
          4. Applied rewrites (89.9% accurate)

            \[\leadsto \frac{\color{blue}{\frac{\left(\left(1 + \left(\alpha + \frac{1}{\beta}\right)\right) + \frac{\alpha}{\beta}\right) - \left(1 + \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}}{3 + \left(\beta + \alpha\right)} \]
        4. Recombined 2 regimes into one program.
        5. Final simplification (85.4% accurate)

          \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 2.55 \cdot 10^{+90}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\left(\left(1 + \left(\alpha + {\beta}^{-1}\right)\right) + \frac{\alpha}{\beta}\right) - \left(1 + \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}{3 + \left(\beta + \alpha\right)}\\ \end{array} \]
        6. Add Preprocessing

        Alternative 3: 99.2% accurate, 0.6× speedup?

        \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\alpha \leq 7 \cdot 10^{+49}:\\ \;\;\;\;\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} - \frac{-1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 1\right) + 2}\\ \end{array} \end{array} \]
        NOTE: alpha and beta should be sorted in increasing order before calling this function.
        (FPCore (alpha beta)
         :precision binary64
         (if (<= alpha 7e+49)
           (/
            (*
             (+ (fma beta alpha (+ beta alpha)) 1.0)
             (pow (+ (+ beta alpha) 2.0) -2.0))
            (+ 3.0 (+ beta alpha)))
           (/
            (/
             (-
              (+ (+ 1.0 alpha) (- (/ alpha beta) (/ -1.0 beta)))
              (* (+ 1.0 alpha) (/ (+ 2.0 alpha) beta)))
             (+ (+ alpha beta) 2.0))
            (+ (+ (+ alpha beta) 1.0) 2.0))))
        /* Precondition: callers must sort the two arguments ascending first. */
        assert(alpha < beta);
        /*
         * Herbie-rewritten evaluation (Alternative 3) of
         *   ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)^2 * (alpha+beta+3))
         * split into two regimes at alpha = 7e49. Operation order is
         * accuracy-critical — do not reassociate or simplify.
         */
        double code(double alpha, double beta) {
        	double tmp;
        	if (alpha <= 7e+49) {
        		/* Moderate alpha: fma for the numerator, pow(x, -2.0) for
        		 * the squared denominator factor. */
        		tmp = ((fma(beta, alpha, (beta + alpha)) + 1.0) * pow(((beta + alpha) + 2.0), -2.0)) / (3.0 + (beta + alpha));
        	} else {
        		/* Huge alpha: expansion dividing through by beta (see the
        		 * derivation's Taylor step above). */
        		tmp = ((((1.0 + alpha) + ((alpha / beta) - (-1.0 / beta))) - ((1.0 + alpha) * ((2.0 + alpha) / beta))) / ((alpha + beta) + 2.0)) / (((alpha + beta) + 1.0) + 2.0);
        	}
        	return tmp;
        }
        
        # Precondition: arguments must be sorted ascending before calling code().
        alpha, beta = sort([alpha, beta])
        # Herbie-rewritten evaluation (Alternative 3) of
        #   ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)^2 * (alpha+beta+3))
        # split into two regimes at alpha = 7e49. The explicit Float64(...)
        # wrapping and operation order are accuracy-critical — do not simplify.
        function code(alpha, beta)
        	tmp = 0.0
        	if (alpha <= 7e+49)
        		# Moderate alpha: fma numerator times (beta+alpha+2)^-2.
        		tmp = Float64(Float64(Float64(fma(beta, alpha, Float64(beta + alpha)) + 1.0) * (Float64(Float64(beta + alpha) + 2.0) ^ -2.0)) / Float64(3.0 + Float64(beta + alpha)));
        	else
        		# Huge alpha: expansion dividing through by beta.
        		tmp = Float64(Float64(Float64(Float64(Float64(1.0 + alpha) + Float64(Float64(alpha / beta) - Float64(-1.0 / beta))) - Float64(Float64(1.0 + alpha) * Float64(Float64(2.0 + alpha) / beta))) / Float64(Float64(alpha + beta) + 2.0)) / Float64(Float64(Float64(alpha + beta) + 1.0) + 2.0));
        	end
        	return tmp
        end
        
        NOTE: alpha and beta should be sorted in increasing order before calling this function.
        (* Herbie-rewritten evaluation (Alternative 3), split at alpha = 7*10^49.
           Inputs must be sorted ascending; the N[..., $MachinePrecision] nesting
           fixes the operation order, which is accuracy-critical — do not simplify. *)
        code[alpha_, beta_] := If[LessEqual[alpha, 7e+49], N[(N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * N[Power[N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision], -2.0], $MachinePrecision]), $MachinePrecision] / N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(1.0 + alpha), $MachinePrecision] + N[(N[(alpha / beta), $MachinePrecision] - N[(-1.0 / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(1.0 + alpha), $MachinePrecision] * N[(N[(2.0 + alpha), $MachinePrecision] / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + 1.0), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]]
        
        \begin{array}{l}
        [alpha, beta] = \mathsf{sort}([alpha, beta])\\
        \\
        \begin{array}{l}
        \mathbf{if}\;\alpha \leq 7 \cdot 10^{+49}:\\
        \;\;\;\;\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} - \frac{-1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 1\right) + 2}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if alpha < 6.9999999999999995e49

          1. Initial program 99.9%

            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          2. Add Preprocessing
          3. Step-by-step derivation
            1. Applied rewrites (99.9% accurate)

              \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]

            if 6.9999999999999995e49 < alpha

            1. Initial program 74.1%

              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            2. Add Preprocessing
            3. Step-by-step derivation
              1. lift-/.f64N/A

                \[\leadsto \frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. clear-numN/A

                \[\leadsto \frac{\frac{\color{blue}{\frac{1}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. lower-/.f64N/A

                \[\leadsto \frac{\frac{\color{blue}{\frac{1}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              4. lower-/.f64 (74.0% accurate)

                \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              5. lift-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\frac{\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              6. +-commutativeN/A

                \[\leadsto \frac{\frac{\frac{1}{\frac{\color{blue}{\left(\beta + \alpha\right)} + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              7. lower-+.f64 (74.0% accurate)

                \[\leadsto \frac{\frac{\frac{1}{\frac{\color{blue}{\left(\beta + \alpha\right)} + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              8. lift-*.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + \color{blue}{2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              9. metadata-eval (74.0% accurate)

                \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + \color{blue}{2}}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              10. lift-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              11. +-commutativeN/A

                \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              12. lift-*.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\left(\color{blue}{\beta \cdot \alpha} + \left(\alpha + \beta\right)\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              13. lower-fma.f64 (74.0% accurate)

                \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\color{blue}{\mathsf{fma}\left(\beta, \alpha, \alpha + \beta\right)} + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              14. lift-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \color{blue}{\alpha + \beta}\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              15. +-commutativeN/A

                \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \color{blue}{\beta + \alpha}\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              16. lower-+.f64 (74.0% accurate)

                \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \color{blue}{\beta + \alpha}\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            4. Applied rewrites (74.0% accurate)

              \[\leadsto \frac{\frac{\color{blue}{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            5. Taylor expanded in alpha around 0

              \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\alpha \cdot \left(\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)\right) + \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            6. Step-by-step derivation
              1. *-commutativeN/A

                \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\left(\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)\right) \cdot \alpha} + \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. lower-fma.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. lower--.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\color{blue}{\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}, \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              4. lower-/.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\color{blue}{\frac{1}{1 + \beta}} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              5. lower-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{\color{blue}{1 + \beta}} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              6. +-commutativeN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \color{blue}{\left(\frac{\beta}{1 + \beta} + 2 \cdot \frac{1}{1 + \beta}\right)}, \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              7. lower-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \color{blue}{\left(\frac{\beta}{1 + \beta} + 2 \cdot \frac{1}{1 + \beta}\right)}, \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              8. lower-/.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\color{blue}{\frac{\beta}{1 + \beta}} + 2 \cdot \frac{1}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              9. lower-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{\color{blue}{1 + \beta}} + 2 \cdot \frac{1}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              10. associate-*r/N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \color{blue}{\frac{2 \cdot 1}{1 + \beta}}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              11. metadata-evalN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{\color{blue}{2}}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              12. lower-/.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \color{blue}{\frac{2}{1 + \beta}}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              13. lower-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{\color{blue}{1 + \beta}}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              14. +-commutativeN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \color{blue}{\frac{\beta}{1 + \beta} + 2 \cdot \frac{1}{1 + \beta}}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            7. Applied rewrites (60.9% accurate)

              \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            8. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
              2. +-commutativeN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
              3. lift-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{1 + \color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
              4. lift-*.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right)} \]
              5. metadata-evalN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
              6. associate-+r+N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
              7. +-commutativeN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 1\right)} + 2} \]
              8. metadata-evalN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{\left(2 - 1\right)}\right) + 2} \]
              9. associate--l+N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\left(\alpha + \beta\right) + 2\right) - 1\right)} + 2} \]
              10. metadata-evalN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) - 1\right) + 2} \]
              11. lift-*.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) - 1\right) + 2} \]
              12. lift-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} - 1\right) + 2} \]
              13. lower-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1\right) + 2}} \]
              14. lift-+.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} - 1\right) + 2} \]
              15. lift-*.f64N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) - 1\right) + 2} \]
              16. metadata-evalN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2}\right) - 1\right) + 2} \]
              17. associate--l+N/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + \left(2 - 1\right)\right)} + 2} \]
              18. metadata-evalN/A

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{1}\right) + 2} \]
              19. lower-+.f64 (60.9)

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 1\right)} + 2} \]
            9. Applied rewrites (60.9%)

              \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 1\right) + 2}} \]
            10. Taylor expanded in beta around inf

              \[\leadsto \frac{\frac{\color{blue}{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
            11. Step-by-step derivation
              1. lower--.f64N/A

                \[\leadsto \frac{\frac{\color{blue}{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              2. associate-+r+N/A

                \[\leadsto \frac{\frac{\color{blue}{\left(\left(1 + \alpha\right) + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)} - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              3. lower-+.f64N/A

                \[\leadsto \frac{\frac{\color{blue}{\left(\left(1 + \alpha\right) + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)} - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              4. lower-+.f64N/A

                \[\leadsto \frac{\frac{\left(\color{blue}{\left(1 + \alpha\right)} + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              5. +-commutativeN/A

                \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \color{blue}{\left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)}\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              6. lower-+.f64N/A

                \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \color{blue}{\left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)}\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              7. lower-/.f64N/A

                \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\color{blue}{\frac{\alpha}{\beta}} + \frac{1}{\beta}\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              8. lower-/.f64N/A

                \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \color{blue}{\frac{1}{\beta}}\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              9. associate-/l*N/A

                \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \color{blue}{\left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              10. lower-*.f64N/A

                \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \color{blue}{\left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              11. lower-+.f64N/A

                \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \color{blue}{\left(1 + \alpha\right)} \cdot \frac{2 + \alpha}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              12. lower-/.f64N/A

                \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \color{blue}{\frac{2 + \alpha}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              13. lower-+.f64 (22.8)

                \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{\color{blue}{2 + \alpha}}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
            12. Applied rewrites (22.8%)

              \[\leadsto \frac{\frac{\color{blue}{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
          4. Recombined 2 regimes into one program.
          5. Final simplification (81.6%)

            \[\leadsto \begin{array}{l} \mathbf{if}\;\alpha \leq 7 \cdot 10^{+49}:\\ \;\;\;\;\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} - \frac{-1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 1\right) + 2}\\ \end{array} \]
          6. Add Preprocessing

          Alternative 4: 99.7% accurate, 0.8× speedup?

          \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\alpha + \beta\right)\\ \mathbf{if}\;\beta \leq 2.55 \cdot 10^{+90}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot t\_0\right) \cdot t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} - \frac{-1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 1\right) + 2}\\ \end{array} \end{array} \]
          NOTE: alpha and beta should be sorted in increasing order before calling this function.
          ; Regime-split rewrite of the jcobi/3 term
          ; ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)^2 * (alpha+beta+3)).
          ; For beta <= 2.55e90 an fma-based numerator is used; for larger beta a
          ; series-expanded (Taylor in beta around inf) form is used instead.
          ; NOTE(review): assumes inputs are pre-sorted so alpha <= beta.
          (FPCore (alpha beta)
           :precision binary64
           (let* ((t_0 (+ 2.0 (+ alpha beta))))
             (if (<= beta 2.55e+90)
               (/
                (fma (+ alpha 1.0) beta (/ (fma alpha alpha -1.0) (- alpha 1.0)))
                (* (* (+ 3.0 (+ alpha beta)) t_0) t_0))
               (/
                (/
                 (-
                  (+ (+ 1.0 alpha) (- (/ alpha beta) (/ -1.0 beta)))
                  (* (+ 1.0 alpha) (/ (+ 2.0 alpha) beta)))
                 (+ (+ alpha beta) 2.0))
                (+ (+ (+ alpha beta) 1.0) 2.0)))))
          assert(alpha < beta);
          /* Regime-split rewrite of the jcobi/3 term
             ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)^2 * (alpha+beta+3)).
             For beta <= 2.55e90 an fma-based numerator is used; for larger beta
             a series-expanded (Taylor in beta around inf) form is used instead.
             NOTE(review): assumes alpha < beta on entry (see assert in report). */
          double code(double alpha, double beta) {
          	/* Shared denominator subterm (alpha + beta) + 2. */
          	double t_0 = 2.0 + (alpha + beta);
          	double tmp;
          	if (beta <= 2.55e+90) {
          		/* fma(alpha, alpha, -1)/(alpha - 1) is alpha + 1 rewritten as
          		   (alpha^2 - 1)/(alpha - 1) (flip-+ step in the derivation). */
          		tmp = fma((alpha + 1.0), beta, (fma(alpha, alpha, -1.0) / (alpha - 1.0))) / (((3.0 + (alpha + beta)) * t_0) * t_0);
          	} else {
          		/* Series-expanded numerator used for very large beta. */
          		tmp = ((((1.0 + alpha) + ((alpha / beta) - (-1.0 / beta))) - ((1.0 + alpha) * ((2.0 + alpha) / beta))) / ((alpha + beta) + 2.0)) / (((alpha + beta) + 1.0) + 2.0);
          	}
          	return tmp;
          }
          
          alpha, beta = sort([alpha, beta])
          # Regime-split rewrite of the jcobi/3 term
          # ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)^2 * (alpha+beta+3)).
          # For beta <= 2.55e90 an fma-based numerator is used; for larger beta a
          # series-expanded (Taylor in beta around inf) form is used instead.
          # NOTE(review): callers must pre-sort the inputs so alpha <= beta.
          function code(alpha, beta)
          	# Shared denominator subterm (alpha + beta) + 2.
          	t_0 = Float64(2.0 + Float64(alpha + beta))
          	tmp = 0.0
          	if (beta <= 2.55e+90)
          		# fma(alpha, alpha, -1)/(alpha - 1) is alpha + 1 rewritten as
          		# (alpha^2 - 1)/(alpha - 1) (flip-+ step in the derivation).
          		tmp = Float64(fma(Float64(alpha + 1.0), beta, Float64(fma(alpha, alpha, -1.0) / Float64(alpha - 1.0))) / Float64(Float64(Float64(3.0 + Float64(alpha + beta)) * t_0) * t_0));
          	else
          		# Series-expanded numerator used for very large beta.
          		tmp = Float64(Float64(Float64(Float64(Float64(1.0 + alpha) + Float64(Float64(alpha / beta) - Float64(-1.0 / beta))) - Float64(Float64(1.0 + alpha) * Float64(Float64(2.0 + alpha) / beta))) / Float64(Float64(alpha + beta) + 2.0)) / Float64(Float64(Float64(alpha + beta) + 1.0) + 2.0));
          	end
          	return tmp
          end
          
          NOTE: alpha and beta should be sorted in increasing order before calling this function.
          (* Regime-split rewrite of the jcobi/3 term: for beta <= 2.55e90 the
             numerator uses the (alpha^2 - 1)/(alpha - 1) form of alpha + 1,
             otherwise the Taylor-in-beta form is used. NOTE(review): assumes
             inputs are pre-sorted so alpha <= beta. *)
          code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 2.55e+90], N[(N[(N[(alpha + 1.0), $MachinePrecision] * beta + N[(N[(alpha * alpha + -1.0), $MachinePrecision] / N[(alpha - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(1.0 + alpha), $MachinePrecision] + N[(N[(alpha / beta), $MachinePrecision] - N[(-1.0 / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(1.0 + alpha), $MachinePrecision] * N[(N[(2.0 + alpha), $MachinePrecision] / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + 1.0), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]]]
          
          \begin{array}{l}
          [alpha, beta] = \mathsf{sort}([alpha, beta])\\
          \\
          \begin{array}{l}
          t_0 := 2 + \left(\alpha + \beta\right)\\
          \mathbf{if}\;\beta \leq 2.55 \cdot 10^{+90}:\\
          \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot t\_0\right) \cdot t\_0}\\
          
          \mathbf{else}:\\
          \;\;\;\;\frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} - \frac{-1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 1\right) + 2}\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 2 regimes
          2. if beta < 2.5499999999999998e90

            1. Initial program 99.9%

              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            2. Add Preprocessing
            3. Step-by-step derivation
              1. Applied rewrites (99.9%)

                \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]
              2. Applied rewrites (96.3%)

                \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}} \]
              3. Step-by-step derivation
                1. lift-+.f64N/A

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\alpha + 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                2. flip-+N/A

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\frac{\alpha \cdot \alpha - 1 \cdot 1}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                3. lower-/.f64N/A

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\frac{\alpha \cdot \alpha - 1 \cdot 1}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                4. metadata-evalN/A

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\alpha \cdot \alpha - \color{blue}{1}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                5. sub-negN/A

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\color{blue}{\alpha \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                6. metadata-evalN/A

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\alpha \cdot \alpha + \color{blue}{-1}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                7. lower-fma.f64N/A

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\color{blue}{\mathsf{fma}\left(\alpha, \alpha, -1\right)}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                8. lower--.f64 (83.8)

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\color{blue}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
              4. Applied rewrites (83.8%)

                \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]

              if 2.5499999999999998e90 < beta

              1. Initial program 75.8%

                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Add Preprocessing
              3. Step-by-step derivation
                1. lift-/.f64N/A

                  \[\leadsto \frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. clear-numN/A

                  \[\leadsto \frac{\frac{\color{blue}{\frac{1}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                3. lower-/.f64N/A

                  \[\leadsto \frac{\frac{\color{blue}{\frac{1}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. lower-/.f64 (75.8)

                  \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. lift-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                6. +-commutativeN/A

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\color{blue}{\left(\beta + \alpha\right)} + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                7. lower-+.f64 (75.8)

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\color{blue}{\left(\beta + \alpha\right)} + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                8. lift-*.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + \color{blue}{2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                9. metadata-eval (75.8)

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + \color{blue}{2}}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                10. lift-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                11. +-commutativeN/A

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                12. lift-*.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\left(\color{blue}{\beta \cdot \alpha} + \left(\alpha + \beta\right)\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                13. lower-fma.f64 (75.8)

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\color{blue}{\mathsf{fma}\left(\beta, \alpha, \alpha + \beta\right)} + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                14. lift-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \color{blue}{\alpha + \beta}\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                15. +-commutativeN/A

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \color{blue}{\beta + \alpha}\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                16. lower-+.f64 (75.8)

                  \[\leadsto \frac{\frac{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \color{blue}{\beta + \alpha}\right) + 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              4. Applied rewrites (75.8%)

                \[\leadsto \frac{\frac{\color{blue}{\frac{1}{\frac{\left(\beta + \alpha\right) + 2}{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              5. Taylor expanded in alpha around 0

                \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\alpha \cdot \left(\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)\right) + \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              6. Step-by-step derivation
                1. *-commutativeN/A

                  \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\left(\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)\right) \cdot \alpha} + \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. lower-fma.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                3. lower--.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\color{blue}{\frac{1}{1 + \beta} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}, \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. lower-/.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\color{blue}{\frac{1}{1 + \beta}} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. lower-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{\color{blue}{1 + \beta}} - \left(2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                6. +-commutativeN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \color{blue}{\left(\frac{\beta}{1 + \beta} + 2 \cdot \frac{1}{1 + \beta}\right)}, \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                7. lower-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \color{blue}{\left(\frac{\beta}{1 + \beta} + 2 \cdot \frac{1}{1 + \beta}\right)}, \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                8. lower-/.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\color{blue}{\frac{\beta}{1 + \beta}} + 2 \cdot \frac{1}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                9. lower-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{\color{blue}{1 + \beta}} + 2 \cdot \frac{1}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                10. associate-*r/N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \color{blue}{\frac{2 \cdot 1}{1 + \beta}}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                11. metadata-evalN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{\color{blue}{2}}{1 + \beta}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                12. lower-/.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \color{blue}{\frac{2}{1 + \beta}}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                13. lower-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{\color{blue}{1 + \beta}}\right), \alpha, 2 \cdot \frac{1}{1 + \beta} + \frac{\beta}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                14. +-commutativeN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \color{blue}{\frac{\beta}{1 + \beta} + 2 \cdot \frac{1}{1 + \beta}}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              7. Applied rewrites (85.8%)

                \[\leadsto \frac{\frac{\frac{1}{\color{blue}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              8. Step-by-step derivation
                1. lift-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                2. +-commutativeN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                3. lift-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{1 + \color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                4. lift-*.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right)} \]
                5. metadata-evalN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
                6. associate-+r+N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                7. +-commutativeN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 1\right)} + 2} \]
                8. metadata-evalN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{\left(2 - 1\right)}\right) + 2} \]
                9. associate--l+N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\left(\alpha + \beta\right) + 2\right) - 1\right)} + 2} \]
                10. metadata-evalN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) - 1\right) + 2} \]
                11. lift-*.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) - 1\right) + 2} \]
                12. lift-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} - 1\right) + 2} \]
                13. lower-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1\right) + 2}} \]
                14. lift-+.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} - 1\right) + 2} \]
                15. lift-*.f64N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) - 1\right) + 2} \]
                16. metadata-evalN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2}\right) - 1\right) + 2} \]
                17. associate--l+N/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + \left(2 - 1\right)\right)} + 2} \]
                18. metadata-evalN/A

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{1}\right) + 2} \]
                19. lower-+.f6485.8

                  \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 1\right)} + 2} \]
              9. Applied rewrites85.8%

                \[\leadsto \frac{\frac{\frac{1}{\mathsf{fma}\left(\frac{1}{1 + \beta} - \left(\frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right), \alpha, \frac{\beta}{1 + \beta} + \frac{2}{1 + \beta}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 1\right) + 2}} \]
              10. Taylor expanded in beta around inf

                \[\leadsto \frac{\frac{\color{blue}{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              11. Step-by-step derivation
                1. lower--.f64N/A

                  \[\leadsto \frac{\frac{\color{blue}{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                2. associate-+r+N/A

                  \[\leadsto \frac{\frac{\color{blue}{\left(\left(1 + \alpha\right) + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)} - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                3. lower-+.f64N/A

                  \[\leadsto \frac{\frac{\color{blue}{\left(\left(1 + \alpha\right) + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)} - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                4. lower-+.f64N/A

                  \[\leadsto \frac{\frac{\left(\color{blue}{\left(1 + \alpha\right)} + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                5. +-commutativeN/A

                  \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \color{blue}{\left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)}\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                6. lower-+.f64N/A

                  \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \color{blue}{\left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)}\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                7. lower-/.f64N/A

                  \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\color{blue}{\frac{\alpha}{\beta}} + \frac{1}{\beta}\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                8. lower-/.f64N/A

                  \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \color{blue}{\frac{1}{\beta}}\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(2 + \alpha\right)}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                9. associate-/l*N/A

                  \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \color{blue}{\left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                10. lower-*.f64N/A

                  \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \color{blue}{\left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                11. lower-+.f64N/A

                  \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \color{blue}{\left(1 + \alpha\right)} \cdot \frac{2 + \alpha}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                12. lower-/.f64N/A

                  \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \color{blue}{\frac{2 + \alpha}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
                13. lower-+.f6490.0

                  \[\leadsto \frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{\color{blue}{2 + \alpha}}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
              12. Applied rewrites90.0%

                \[\leadsto \frac{\frac{\color{blue}{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} + \frac{1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 1\right) + 2} \]
            4. Recombined 2 regimes into one program.
            5. Final simplification85.4%

              \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 2.55 \cdot 10^{+90}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\left(\left(1 + \alpha\right) + \left(\frac{\alpha}{\beta} - \frac{-1}{\beta}\right)\right) - \left(1 + \alpha\right) \cdot \frac{2 + \alpha}{\beta}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 1\right) + 2}\\ \end{array} \]
            6. Add Preprocessing

            Alternative 5: 99.5% accurate, 1.1× speedup?

            \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 3 + \left(\alpha + \beta\right)\\ t_1 := 2 + \left(\alpha + \beta\right)\\ \mathbf{if}\;\beta \leq 4.4 \cdot 10^{+94}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}\right)}{\left(t\_0 \cdot t\_1\right) \cdot t\_1}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{t\_1}}{t\_0}\\ \end{array} \end{array} \]
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            ;; Herbie "Alternative 5" (99.5% accurate): regime split on beta.
            ;; Assumes the caller has sorted the inputs so alpha <= beta (see NOTE above).
            ;; t_0 = 3 + (alpha + beta), t_1 = 2 + (alpha + beta).
            (FPCore (alpha beta)
             :precision binary64
             (let* ((t_0 (+ 3.0 (+ alpha beta))) (t_1 (+ 2.0 (+ alpha beta))))
               (if (<= beta 4.4e+94)
                 (/
                  (fma (+ alpha 1.0) beta (/ (fma alpha alpha -1.0) (- alpha 1.0)))
                  (* (* t_0 t_1) t_1))
                 (/ (/ (+ alpha 1.0) t_1) t_0))))
            assert(alpha < beta);
            double code(double alpha, double beta) {
            	double t_0 = 3.0 + (alpha + beta);
            	double t_1 = 2.0 + (alpha + beta);
            	double tmp;
            	if (beta <= 4.4e+94) {
            		tmp = fma((alpha + 1.0), beta, (fma(alpha, alpha, -1.0) / (alpha - 1.0))) / ((t_0 * t_1) * t_1);
            	} else {
            		tmp = ((alpha + 1.0) / t_1) / t_0;
            	}
            	return tmp;
            }
            
            alpha, beta = sort([alpha, beta])
            function code(alpha, beta)
            	# Herbie "Alternative 5". Inputs assumed pre-sorted so alpha < beta
            	# (see the NOTE in the surrounding report).
            	s = Float64(alpha + beta)
            	t_0 = Float64(3.0 + s)
            	t_1 = Float64(2.0 + s)
            	if beta <= 4.4e+94
            		# Accurate regime: fused numerator over the factored denominator t_0*t_1^2.
            		numer = fma(Float64(alpha + 1.0), beta, Float64(fma(alpha, alpha, -1.0) / Float64(alpha - 1.0)))
            		return Float64(numer / Float64(Float64(t_0 * t_1) * t_1))
            	else
            		# Very large beta: the quotient reduces to (alpha+1)/t_1/t_0.
            		return Float64(Float64(Float64(alpha + 1.0) / t_1) / t_0)
            	end
            end
            
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            (* Herbie "Alternative 5": regime split on beta; assumes alpha <= beta on entry (inputs pre-sorted). *)
            code[alpha_, beta_] := Block[{t$95$0 = N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 4.4e+94], N[(N[(N[(alpha + 1.0), $MachinePrecision] * beta + N[(N[(alpha * alpha + -1.0), $MachinePrecision] / N[(alpha - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(t$95$0 * t$95$1), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / t$95$1), $MachinePrecision] / t$95$0), $MachinePrecision]]]]
            
            \begin{array}{l}
            [alpha, beta] = \mathsf{sort}([alpha, beta])\\
            \\
            \begin{array}{l}
            t_0 := 3 + \left(\alpha + \beta\right)\\
            t_1 := 2 + \left(\alpha + \beta\right)\\
            \mathbf{if}\;\beta \leq 4.4 \cdot 10^{+94}:\\
            \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}\right)}{\left(t\_0 \cdot t\_1\right) \cdot t\_1}\\
            
            \mathbf{else}:\\
            \;\;\;\;\frac{\frac{\alpha + 1}{t\_1}}{t\_0}\\
            
            
            \end{array}
            \end{array}
            
            Derivation
            1. Split input into 2 regimes
            2. if beta < 4.40000000000000024e94

              1. Initial program 99.9%

                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Add Preprocessing
              3. Step-by-step derivation
                1. Applied rewrites99.9%

                  \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]
                2. Applied rewrites96.3%

                  \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}} \]
                3. Step-by-step derivation
                  1. lift-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\alpha + 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                  2. flip-+N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\frac{\alpha \cdot \alpha - 1 \cdot 1}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                  3. lower-/.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\frac{\alpha \cdot \alpha - 1 \cdot 1}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                  4. metadata-evalN/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\alpha \cdot \alpha - \color{blue}{1}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                  5. sub-negN/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\color{blue}{\alpha \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                  6. metadata-evalN/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\alpha \cdot \alpha + \color{blue}{-1}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                  7. lower-fma.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\color{blue}{\mathsf{fma}\left(\alpha, \alpha, -1\right)}}{\alpha - 1}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                  8. lower--.f6483.8

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\color{blue}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                4. Applied rewrites83.8%

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \color{blue}{\frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\alpha - 1}}\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]

                if 4.40000000000000024e94 < beta

                1. Initial program 75.8%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Taylor expanded in beta around -inf

                  \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. Step-by-step derivation
                  1. mul-1-negN/A

                    \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. lower-neg.f64N/A

                    \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  3. sub-negN/A

                    \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. mul-1-negN/A

                    \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} + \left(\mathsf{neg}\left(1\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. distribute-neg-inN/A

                    \[\leadsto \frac{\frac{-\color{blue}{\left(\mathsf{neg}\left(\left(\alpha + 1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  6. +-commutativeN/A

                    \[\leadsto \frac{\frac{-\left(\mathsf{neg}\left(\color{blue}{\left(1 + \alpha\right)}\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  7. distribute-neg-inN/A

                    \[\leadsto \frac{\frac{-\color{blue}{\left(\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  8. metadata-evalN/A

                    \[\leadsto \frac{\frac{-\left(\color{blue}{-1} + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  9. unsub-negN/A

                    \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  10. lower--.f6490.4

                    \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. Applied rewrites90.4%

                  \[\leadsto \frac{\frac{\color{blue}{-\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                6. Step-by-step derivation
                  1. Applied rewrites90.4%

                    \[\leadsto \color{blue}{\frac{\frac{\alpha + 1}{2 + \left(\alpha + \beta\right)}}{3 + \left(\alpha + \beta\right)}} \]
                7. Recombined 2 regimes into one program.
                8. Add Preprocessing

                Alternative 6: 99.5% accurate, 1.3× speedup?

                \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\alpha + \beta\right)\\ \mathbf{if}\;\beta \leq 4.7 \cdot 10^{+94}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\mathsf{fma}\left(\mathsf{fma}\left(2, \alpha, \beta\right) + 5, \beta, \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right) \cdot t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{3 + \left(\alpha + \beta\right)}\\ \end{array} \end{array} \]
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                ;; Herbie "Alternative 6" (99.5% accurate): regime split on beta.
                ;; Assumes the caller has sorted the inputs so alpha <= beta (see NOTE above).
                ;; t_0 = 2 + (alpha + beta).
                (FPCore (alpha beta)
                 :precision binary64
                 (let* ((t_0 (+ 2.0 (+ alpha beta))))
                   (if (<= beta 4.7e+94)
                     (/
                      (fma (+ alpha 1.0) beta (+ alpha 1.0))
                      (*
                       (fma (+ (fma 2.0 alpha beta) 5.0) beta (* (+ 2.0 alpha) (+ 3.0 alpha)))
                       t_0))
                     (/ (/ (+ alpha 1.0) t_0) (+ 3.0 (+ alpha beta))))))
                assert(alpha < beta);
                double code(double alpha, double beta) {
                	double t_0 = 2.0 + (alpha + beta);
                	double tmp;
                	if (beta <= 4.7e+94) {
                		tmp = fma((alpha + 1.0), beta, (alpha + 1.0)) / (fma((fma(2.0, alpha, beta) + 5.0), beta, ((2.0 + alpha) * (3.0 + alpha))) * t_0);
                	} else {
                		tmp = ((alpha + 1.0) / t_0) / (3.0 + (alpha + beta));
                	}
                	return tmp;
                }
                
                alpha, beta = sort([alpha, beta])
                function code(alpha, beta)
                	# Herbie "Alternative 6". Inputs assumed pre-sorted so alpha < beta
                	# (see the NOTE in the surrounding report).
                	t_0 = Float64(2.0 + Float64(alpha + beta))
                	if beta <= 4.7e+94
                		# Accurate regime: fused numerator and fused Taylor-expanded denominator.
                		a1 = Float64(alpha + 1.0)
                		denom = Float64(fma(Float64(fma(2.0, alpha, beta) + 5.0), beta, Float64(Float64(2.0 + alpha) * Float64(3.0 + alpha))) * t_0)
                		return Float64(fma(a1, beta, a1) / denom)
                	else
                		# Very large beta: the quotient reduces to (alpha+1)/t_0/(3+alpha+beta).
                		return Float64(Float64(Float64(alpha + 1.0) / t_0) / Float64(3.0 + Float64(alpha + beta)))
                	end
                end
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                (* Herbie "Alternative 6": regime split on beta; assumes alpha <= beta on entry (inputs pre-sorted). *)
                code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 4.7e+94], N[(N[(N[(alpha + 1.0), $MachinePrecision] * beta + N[(alpha + 1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(N[(2.0 * alpha + beta), $MachinePrecision] + 5.0), $MachinePrecision] * beta + N[(N[(2.0 + alpha), $MachinePrecision] * N[(3.0 + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
                
                \begin{array}{l}
                [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                \\
                \begin{array}{l}
                t_0 := 2 + \left(\alpha + \beta\right)\\
                \mathbf{if}\;\beta \leq 4.7 \cdot 10^{+94}:\\
                \;\;\;\;\frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\mathsf{fma}\left(\mathsf{fma}\left(2, \alpha, \beta\right) + 5, \beta, \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right) \cdot t\_0}\\
                
                \mathbf{else}:\\
                \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{3 + \left(\alpha + \beta\right)}\\
                
                
                \end{array}
                \end{array}
                
                Derivation
                1. Split input into 2 regimes
                2. if beta < 4.70000000000000017e94

                  1. Initial program 99.9%

                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. Add Preprocessing
                  3. Step-by-step derivation
                    1. Applied rewrites99.9%

                      \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]
                    2. Applied rewrites96.3%

                      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}} \]
                    3. Taylor expanded in beta around 0

                      \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\color{blue}{\left(\beta \cdot \left(5 + \left(\beta + 2 \cdot \alpha\right)\right) + \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right)} \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                    4. Step-by-step derivation
                      1. *-commutativeN/A

                        \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\left(\color{blue}{\left(5 + \left(\beta + 2 \cdot \alpha\right)\right) \cdot \beta} + \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                      2. lower-fma.f64N/A

                        \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\color{blue}{\mathsf{fma}\left(5 + \left(\beta + 2 \cdot \alpha\right), \beta, \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right)} \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                      3. +-commutativeN/A

                        \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\mathsf{fma}\left(\color{blue}{\left(\beta + 2 \cdot \alpha\right) + 5}, \beta, \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                      4. lower-+.f64N/A

                        \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\mathsf{fma}\left(\color{blue}{\left(\beta + 2 \cdot \alpha\right) + 5}, \beta, \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                      5. +-commutativeN/A

                        \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\mathsf{fma}\left(\color{blue}{\left(2 \cdot \alpha + \beta\right)} + 5, \beta, \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                      6. lower-fma.f64N/A

                        \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(2, \alpha, \beta\right)} + 5, \beta, \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                      7. lower-*.f64N/A

                        \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\mathsf{fma}\left(\mathsf{fma}\left(2, \alpha, \beta\right) + 5, \beta, \color{blue}{\left(2 + \alpha\right) \cdot \left(3 + \alpha\right)}\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                      8. lower-+.f64N/A

                        \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\mathsf{fma}\left(\mathsf{fma}\left(2, \alpha, \beta\right) + 5, \beta, \color{blue}{\left(2 + \alpha\right)} \cdot \left(3 + \alpha\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                      9. lower-+.f64 (accuracy: 96.3)

                        \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\mathsf{fma}\left(\mathsf{fma}\left(2, \alpha, \beta\right) + 5, \beta, \left(2 + \alpha\right) \cdot \color{blue}{\left(3 + \alpha\right)}\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                    5. Applied rewrites (accuracy: 96.3%)

                      \[\leadsto \frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(2, \alpha, \beta\right) + 5, \beta, \left(2 + \alpha\right) \cdot \left(3 + \alpha\right)\right)} \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]

                    if 4.70000000000000017e94 < beta

                    1. Initial program 75.8%

                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. Add Preprocessing
                    3. Taylor expanded in beta around -inf

                      \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    4. Step-by-step derivation
                      1. mul-1-negN/A

                        \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      2. lower-neg.f64N/A

                        \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      3. sub-negN/A

                        \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      4. mul-1-negN/A

                        \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} + \left(\mathsf{neg}\left(1\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      5. distribute-neg-inN/A

                        \[\leadsto \frac{\frac{-\color{blue}{\left(\mathsf{neg}\left(\left(\alpha + 1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      6. +-commutativeN/A

                        \[\leadsto \frac{\frac{-\left(\mathsf{neg}\left(\color{blue}{\left(1 + \alpha\right)}\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      7. distribute-neg-inN/A

                        \[\leadsto \frac{\frac{-\color{blue}{\left(\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      8. metadata-evalN/A

                        \[\leadsto \frac{\frac{-\left(\color{blue}{-1} + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      9. unsub-negN/A

                        \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      10. lower--.f64 (accuracy: 90.4)

                        \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    5. Applied rewrites (accuracy: 90.4%)

                      \[\leadsto \frac{\frac{\color{blue}{-\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    6. Step-by-step derivation
                      1. Applied rewrites90.4%

                        \[\leadsto \color{blue}{\frac{\frac{\alpha + 1}{2 + \left(\alpha + \beta\right)}}{3 + \left(\alpha + \beta\right)}} \]
                    7. Recombined 2 regimes into one program.
                    8. Add Preprocessing

                    Alternative 7: 99.5% accurate, 1.4× speedup?

                    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\beta + \alpha\right) + 2\\ \mathbf{if}\;\beta \leq 4.7 \cdot 10^{+94}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{\left(\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0\right) \cdot t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{2 + \left(\alpha + \beta\right)}}{3 + \left(\alpha + \beta\right)}\\ \end{array} \end{array} \]
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    ; Herbie Alternative 7 for jcobi/3 (binary64).
                    ; Inputs are assumed pre-sorted so that alpha <= beta (see NOTE above).
                    ; Regime split at beta = 4.7e94: the common regime uses an fma-based
                    ; numerator; the huge-beta regime uses a simplified quotient form.
                    (FPCore (alpha beta)
                     :precision binary64
                     (let* ((t_0 (+ (+ beta alpha) 2.0)))
                       (if (<= beta 4.7e+94)
                         (/
                          (+ (fma beta alpha (+ beta alpha)) 1.0)
                          (* (* (+ 3.0 (+ beta alpha)) t_0) t_0))
                         (/ (/ (+ alpha 1.0) (+ 2.0 (+ alpha beta))) (+ 3.0 (+ alpha beta))))))
                    assert(alpha < beta);
                    double code(double alpha, double beta) {
                    	double t_0 = (beta + alpha) + 2.0;
                    	double tmp;
                    	if (beta <= 4.7e+94) {
                    		tmp = (fma(beta, alpha, (beta + alpha)) + 1.0) / (((3.0 + (beta + alpha)) * t_0) * t_0);
                    	} else {
                    		tmp = ((alpha + 1.0) / (2.0 + (alpha + beta))) / (3.0 + (alpha + beta));
                    	}
                    	return tmp;
                    }
                    
                    alpha, beta = sort([alpha, beta])
                    # Herbie Alternative 7 (binary64); assumes alpha <= beta (inputs sorted).
                    function code(alpha, beta)
                    	s = Float64(beta + alpha)
                    	t0 = Float64(s + 2.0)
                    	if beta <= 4.7e+94
                    		numer = Float64(fma(beta, alpha, s) + 1.0)
                    		denom = Float64(Float64(Float64(3.0 + s) * t0) * t0)
                    		return Float64(numer / denom)
                    	else
                    		u = Float64(alpha + beta)
                    		return Float64(Float64(Float64(alpha + 1.0) / Float64(2.0 + u)) / Float64(3.0 + u))
                    	end
                    end
                    
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    (* Herbie Alternative 7; assumes alpha <= beta (inputs sorted, see NOTE).
                       NOTE(review): the FPCore's fma is rendered here as a plain beta*alpha + ...,
                       so this translation may round differently from the fma-based C/Julia code. *)
                    code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 4.7e+94], N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(N[(N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
                    
                    \begin{array}{l}
                    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                    \\
                    \begin{array}{l}
                    t_0 := \left(\beta + \alpha\right) + 2\\
                    \mathbf{if}\;\beta \leq 4.7 \cdot 10^{+94}:\\
                    \;\;\;\;\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{\left(\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0\right) \cdot t\_0}\\
                    
                    \mathbf{else}:\\
                    \;\;\;\;\frac{\frac{\alpha + 1}{2 + \left(\alpha + \beta\right)}}{3 + \left(\alpha + \beta\right)}\\
                    
                    
                    \end{array}
                    \end{array}
                    
                    Derivation
                    1. Split input into 2 regimes
                    2. if beta < 4.70000000000000017e94

                      1. Initial program 99.9%

                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      2. Add Preprocessing
                      3. Step-by-step derivation
                        1. lift-/.f64N/A

                          \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                        2. lift-/.f64N/A

                          \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        3. associate-/l/N/A

                          \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                        4. lift-/.f64N/A

                          \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
                        5. associate-/l/N/A

                          \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                        6. lower-/.f64N/A

                          \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                      4. Applied rewrites96.3%

                        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{\left(\left(3 + \left(\beta + \alpha\right)\right) \cdot \left(\left(\beta + \alpha\right) + 2\right)\right) \cdot \left(\left(\beta + \alpha\right) + 2\right)}} \]

                      if 4.70000000000000017e94 < beta

                      1. Initial program 75.8%

                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      2. Add Preprocessing
                      3. Taylor expanded in beta around -inf

                        \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      4. Step-by-step derivation
                        1. mul-1-negN/A

                          \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        2. lower-neg.f64N/A

                          \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        3. sub-negN/A

                          \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        4. mul-1-negN/A

                          \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} + \left(\mathsf{neg}\left(1\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        5. distribute-neg-inN/A

                          \[\leadsto \frac{\frac{-\color{blue}{\left(\mathsf{neg}\left(\left(\alpha + 1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        6. +-commutativeN/A

                          \[\leadsto \frac{\frac{-\left(\mathsf{neg}\left(\color{blue}{\left(1 + \alpha\right)}\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        7. distribute-neg-inN/A

                          \[\leadsto \frac{\frac{-\color{blue}{\left(\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        8. metadata-evalN/A

                          \[\leadsto \frac{\frac{-\left(\color{blue}{-1} + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        9. unsub-negN/A

                          \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        10. lower--.f6490.4

                          \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      5. Applied rewrites90.4%

                        \[\leadsto \frac{\frac{\color{blue}{-\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      6. Step-by-step derivation
                        1. Applied rewrites90.4%

                          \[\leadsto \color{blue}{\frac{\frac{\alpha + 1}{2 + \left(\alpha + \beta\right)}}{3 + \left(\alpha + \beta\right)}} \]
                      7. Recombined 2 regimes into one program.
                      8. Add Preprocessing

                      Alternative 8: 98.5% accurate, 1.7× speedup?

                      \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 3 + \left(\alpha + \beta\right)\\ t_1 := 2 + \left(\alpha + \beta\right)\\ \mathbf{if}\;\beta \leq 2.05 \cdot 10^{+17}:\\ \;\;\;\;\frac{1 + \beta}{\left(t\_0 \cdot t\_1\right) \cdot t\_1}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{t\_1}}{t\_0}\\ \end{array} \end{array} \]
                      NOTE: alpha and beta should be sorted in increasing order before calling this function.
                      ; Herbie Alternative 8 for jcobi/3 (binary64).
                      ; Inputs assumed pre-sorted so alpha <= beta; regime split at beta = 2.05e17.
                      (FPCore (alpha beta)
                       :precision binary64
                       (let* ((t_0 (+ 3.0 (+ alpha beta))) (t_1 (+ 2.0 (+ alpha beta))))
                         (if (<= beta 2.05e+17)
                           (/ (+ 1.0 beta) (* (* t_0 t_1) t_1))
                           (/ (/ (+ alpha 1.0) t_1) t_0))))
                      assert(alpha < beta);
                      /* Herbie Alternative 8 (binary64). Caller must pass alpha <= beta.
                       * Regime split at beta = 2.05e17. */
                      double code(double alpha, double beta) {
                      	double sum = alpha + beta;
                      	double t_0 = 3.0 + sum;
                      	double t_1 = 2.0 + sum;
                      	double out;
                      	if (beta <= 2.05e+17) {
                      		out = (1.0 + beta) / ((t_0 * t_1) * t_1);
                      	} else {
                      		out = ((alpha + 1.0) / t_1) / t_0;
                      	}
                      	return out;
                      }
                      
                      NOTE: alpha and beta should be sorted in increasing order before calling this function.
                      ! Herbie Alternative 8 (binary64); assumes alpha <= beta (inputs sorted).
                      real(8) function code(alpha, beta)
                          real(8), intent (in) :: alpha
                          real(8), intent (in) :: beta
                          real(8) :: s
                          real(8) :: t_0
                          real(8) :: t_1
                          s = alpha + beta
                          t_0 = 3.0d0 + s
                          t_1 = 2.0d0 + s
                          ! Regime split at beta = 2.05e17
                          if (beta <= 2.05d+17) then
                              code = (1.0d0 + beta) / ((t_0 * t_1) * t_1)
                          else
                              code = ((alpha + 1.0d0) / t_1) / t_0
                          end if
                      end function
                      
                      assert alpha < beta;
                      /** Herbie Alternative 8 (binary64); caller must pass alpha <= beta.
                       *  Regime split at beta = 2.05e17. */
                      public static double code(double alpha, double beta) {
                      	final double sum = alpha + beta;
                      	final double t_0 = 3.0 + sum;
                      	final double t_1 = 2.0 + sum;
                      	if (beta <= 2.05e+17) {
                      		return (1.0 + beta) / ((t_0 * t_1) * t_1);
                      	}
                      	return ((alpha + 1.0) / t_1) / t_0;
                      }
                      
                      [alpha, beta] = sort([alpha, beta])
                      def code(alpha, beta):
                      	"""Herbie Alternative 8 (binary64); assumes alpha <= beta (inputs sorted).

                      	Regime split at beta = 2.05e17.
                      	"""
                      	total = alpha + beta
                      	t_0 = 3.0 + total
                      	t_1 = 2.0 + total
                      	if beta <= 2.05e+17:
                      		return (1.0 + beta) / ((t_0 * t_1) * t_1)
                      	return ((alpha + 1.0) / t_1) / t_0
                      
                      alpha, beta = sort([alpha, beta])
                      # Herbie Alternative 8 (binary64); assumes alpha <= beta (inputs sorted).
                      function code(alpha, beta)
                      	s = Float64(alpha + beta)
                      	t_0 = Float64(3.0 + s)
                      	t_1 = Float64(2.0 + s)
                      	# Regime split at beta = 2.05e17
                      	if beta <= 2.05e+17
                      		return Float64(Float64(1.0 + beta) / Float64(Float64(t_0 * t_1) * t_1))
                      	else
                      		return Float64(Float64(Float64(alpha + 1.0) / t_1) / t_0)
                      	end
                      end
                      
                      alpha, beta = num2cell(sort([alpha, beta])){:}
                      % Herbie Alternative 8 (binary64); assumes alpha <= beta (inputs sorted).
                      function tmp_2 = code(alpha, beta)
                      	s = alpha + beta;
                      	t_0 = 3.0 + s;
                      	t_1 = 2.0 + s;
                      	% Regime split at beta = 2.05e17
                      	if (beta <= 2.05e+17)
                      		tmp_2 = (1.0 + beta) / ((t_0 * t_1) * t_1);
                      	else
                      		tmp_2 = ((alpha + 1.0) / t_1) / t_0;
                      	end
                      end
                      
                      NOTE: alpha and beta should be sorted in increasing order before calling this function.
                      (* Herbie Alternative 8; assumes alpha <= beta (inputs sorted, see NOTE).
                         Regime split at beta = 2.05e17. *)
                      code[alpha_, beta_] := Block[{t$95$0 = N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 2.05e+17], N[(N[(1.0 + beta), $MachinePrecision] / N[(N[(t$95$0 * t$95$1), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / t$95$1), $MachinePrecision] / t$95$0), $MachinePrecision]]]]
                      
                      \begin{array}{l}
                      [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                      \\
                      \begin{array}{l}
                      t_0 := 3 + \left(\alpha + \beta\right)\\
                      t_1 := 2 + \left(\alpha + \beta\right)\\
                      \mathbf{if}\;\beta \leq 2.05 \cdot 10^{+17}:\\
                      \;\;\;\;\frac{1 + \beta}{\left(t\_0 \cdot t\_1\right) \cdot t\_1}\\
                      
                      \mathbf{else}:\\
                      \;\;\;\;\frac{\frac{\alpha + 1}{t\_1}}{t\_0}\\
                      
                      
                      \end{array}
                      \end{array}
                      
                      Derivation
                      1. Split input into 2 regimes
                      2. if beta < 2.05e17

                        1. Initial program 99.9%

                          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        2. Add Preprocessing
                        3. Step-by-step derivation
                          1. Applied rewrites99.9%

                            \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]
                          2. Applied rewrites95.9%

                            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha + 1, \beta, \alpha + 1\right)}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}} \]
                          3. Taylor expanded in alpha around 0

                            \[\leadsto \frac{\color{blue}{1 + \beta}}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                          4. Step-by-step derivation
                            1. lower-+.f6486.9

                              \[\leadsto \frac{\color{blue}{1 + \beta}}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]
                          5. Applied rewrites86.9%

                            \[\leadsto \frac{\color{blue}{1 + \beta}}{\left(\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \]

                          if 2.05e17 < beta

                          1. Initial program 81.2%

                            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                          2. Add Preprocessing
                          3. Taylor expanded in beta around -inf

                            \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                          4. Step-by-step derivation
                            1. mul-1-negN/A

                              \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            2. lower-neg.f64N/A

                              \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            3. sub-negN/A

                              \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            4. mul-1-negN/A

                              \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} + \left(\mathsf{neg}\left(1\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            5. distribute-neg-inN/A

                              \[\leadsto \frac{\frac{-\color{blue}{\left(\mathsf{neg}\left(\left(\alpha + 1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            6. +-commutativeN/A

                              \[\leadsto \frac{\frac{-\left(\mathsf{neg}\left(\color{blue}{\left(1 + \alpha\right)}\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            7. distribute-neg-inN/A

                              \[\leadsto \frac{\frac{-\color{blue}{\left(\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            8. metadata-evalN/A

                              \[\leadsto \frac{\frac{-\left(\color{blue}{-1} + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            9. unsub-negN/A

                              \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            10. lower--.f6488.2

                              \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                          5. Applied rewrites88.2%

                            \[\leadsto \frac{\frac{\color{blue}{-\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                          6. Step-by-step derivation
                            1. Applied rewrites88.2%

                              \[\leadsto \color{blue}{\frac{\frac{\alpha + 1}{2 + \left(\alpha + \beta\right)}}{3 + \left(\alpha + \beta\right)}} \]
                          7. Recombined 2 regimes into one program.
                          8. Add Preprocessing

                          Alternative 9: 62.3% accurate, 2.2× speedup?

                          \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 6.6 \cdot 10^{+91}:\\ \;\;\;\;\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{3 + \left(\beta + \alpha\right)}\\ \end{array} \end{array} \]
                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                          ; Herbie Alternative 9 for jcobi/3 (binary64).
                          ; Inputs assumed pre-sorted so alpha <= beta; regime split at beta = 6.6e91.
                          (FPCore (alpha beta)
                           :precision binary64
                           (if (<= beta 6.6e+91)
                             (/ (+ alpha 1.0) (* (+ 3.0 (+ alpha beta)) (+ 2.0 (+ alpha beta))))
                             (/ (/ (+ 1.0 alpha) beta) (+ 3.0 (+ beta alpha)))))
                          assert(alpha < beta);
                          /* Herbie Alternative 9 (binary64). Caller must pass alpha <= beta.
                           * Regime split at beta = 6.6e91. */
                          double code(double alpha, double beta) {
                          	double out;
                          	if (beta <= 6.6e+91) {
                          		double sum = alpha + beta;
                          		out = (alpha + 1.0) / ((3.0 + sum) * (2.0 + sum));
                          	} else {
                          		out = ((1.0 + alpha) / beta) / (3.0 + (beta + alpha));
                          	}
                          	return out;
                          }
                          
                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                          ! Herbie Alternative 9 (binary64); assumes alpha <= beta (inputs sorted).
                          real(8) function code(alpha, beta)
                              real(8), intent (in) :: alpha
                              real(8), intent (in) :: beta
                              real(8) :: s
                              ! Regime split at beta = 6.6e91
                              if (beta <= 6.6d+91) then
                                  s = alpha + beta
                                  code = (alpha + 1.0d0) / ((3.0d0 + s) * (2.0d0 + s))
                              else
                                  code = ((1.0d0 + alpha) / beta) / (3.0d0 + (beta + alpha))
                              end if
                          end function
                          
                          assert alpha < beta;
                          /** Herbie Alternative 9 (binary64); caller must pass alpha <= beta.
                           *  Regime split at beta = 6.6e91. */
                          public static double code(double alpha, double beta) {
                          	if (beta <= 6.6e+91) {
                          		final double sum = alpha + beta;
                          		return (alpha + 1.0) / ((3.0 + sum) * (2.0 + sum));
                          	}
                          	return ((1.0 + alpha) / beta) / (3.0 + (beta + alpha));
                          }
                          
                          [alpha, beta] = sort([alpha, beta])
                          def code(alpha, beta):
                          	"""Herbie Alternative 9 (binary64); assumes alpha <= beta (inputs sorted).

                          	Regime split at beta = 6.6e91.
                          	"""
                          	if beta <= 6.6e+91:
                          		s = alpha + beta
                          		return (alpha + 1.0) / ((3.0 + s) * (2.0 + s))
                          	return ((1.0 + alpha) / beta) / (3.0 + (beta + alpha))
                          
                          alpha, beta = sort([alpha, beta])
                          # Herbie Alternative 9 (binary64); assumes alpha <= beta (inputs sorted).
                          function code(alpha, beta)
                          	# Regime split at beta = 6.6e91
                          	if beta <= 6.6e+91
                          		s = Float64(alpha + beta)
                          		return Float64(Float64(alpha + 1.0) / Float64(Float64(3.0 + s) * Float64(2.0 + s)))
                          	else
                          		return Float64(Float64(Float64(1.0 + alpha) / beta) / Float64(3.0 + Float64(beta + alpha)))
                          	end
                          end
                          
                          alpha, beta = num2cell(sort([alpha, beta])){:}
                          % Herbie Alternative 9 (binary64); assumes alpha <= beta (inputs sorted).
                          function tmp_2 = code(alpha, beta)
                          	% Regime split at beta = 6.6e91
                          	if (beta <= 6.6e+91)
                          		s = alpha + beta;
                          		tmp_2 = (alpha + 1.0) / ((3.0 + s) * (2.0 + s));
                          	else
                          		tmp_2 = ((1.0 + alpha) / beta) / (3.0 + (beta + alpha));
                          	end
                          end
                          
                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                          code[alpha_, beta_] := If[LessEqual[beta, 6.6e+91], N[(N[(alpha + 1.0), $MachinePrecision] / N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
                          
                          \begin{array}{l}
                          [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                          \\
                          \begin{array}{l}
                          \mathbf{if}\;\beta \leq 6.6 \cdot 10^{+91}:\\
                          \;\;\;\;\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}\\
                          
                          \mathbf{else}:\\
                          \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{3 + \left(\beta + \alpha\right)}\\
                          
                          
                          \end{array}
                          \end{array}
                          
                          Derivation
                          1. Split input into 2 regimes
                          2. if beta < 6.60000000000000034e91

                            1. Initial program 99.9%

                              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            2. Add Preprocessing
                            3. Taylor expanded in beta around -inf

                              \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            4. Step-by-step derivation
                              1. mul-1-negN/A

                                \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              2. lower-neg.f64N/A

                                \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              3. sub-negN/A

                                \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              4. mul-1-negN/A

                                \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} + \left(\mathsf{neg}\left(1\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              5. distribute-neg-inN/A

                                \[\leadsto \frac{\frac{-\color{blue}{\left(\mathsf{neg}\left(\left(\alpha + 1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              6. +-commutativeN/A

                                \[\leadsto \frac{\frac{-\left(\mathsf{neg}\left(\color{blue}{\left(1 + \alpha\right)}\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              7. distribute-neg-inN/A

                                \[\leadsto \frac{\frac{-\color{blue}{\left(\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              8. metadata-evalN/A

                                \[\leadsto \frac{\frac{-\left(\color{blue}{-1} + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              9. unsub-negN/A

                                \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                               10. lower--.f64 (22.7%)

                                \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                             5. Applied rewrites (22.7%)

                              \[\leadsto \frac{\frac{\color{blue}{-\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            6. Step-by-step derivation
                              1. lift-/.f64N/A

                                \[\leadsto \color{blue}{\frac{\frac{-\left(-1 - \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                              2. lift-/.f64N/A

                                \[\leadsto \frac{\color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              3. associate-/l/N/A

                                \[\leadsto \color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                              4. lower-/.f64N/A

                                \[\leadsto \color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                            7. Applied rewrites34.2%

                              \[\leadsto \color{blue}{\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}} \]

                            if 6.60000000000000034e91 < beta

                            1. Initial program 75.8%

                              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            2. Add Preprocessing
                            3. Step-by-step derivation
                              1. Applied rewrites69.7%

                                \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]
                              2. Taylor expanded in beta around inf

                                \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{3 + \left(\beta + \alpha\right)} \]
                              3. Step-by-step derivation
                                1. lower-/.f64N/A

                                  \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{3 + \left(\beta + \alpha\right)} \]
                                2. lower-+.f6490.2

                                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
                              4. Applied rewrites90.2%

                                \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{3 + \left(\beta + \alpha\right)} \]
                            4. Recombined 2 regimes into one program.
                            5. Add Preprocessing

                            Alternative 10: 62.3% accurate, 2.2× speedup?

                            \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 2 \cdot 10^{+90}:\\ \;\;\;\;\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \beta\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{3 + \left(\beta + \alpha\right)}\\ \end{array} \end{array} \]
                            NOTE: alpha and beta should be sorted in increasing order before calling this function.
                             ;; Herbie alternative 10: regime split at beta = 2e90 (alpha, beta sorted ascending).
                             (FPCore (alpha beta)
                              :precision binary64
                              (if (<= beta 2e+90)
                                (/ (+ alpha 1.0) (* (+ 3.0 (+ alpha beta)) (+ 2.0 beta)))
                                (/ (/ (+ 1.0 alpha) beta) (+ 3.0 (+ beta alpha)))))
                            assert(alpha < beta);
                            double code(double alpha, double beta) {
                            	double tmp;
                            	if (beta <= 2e+90) {
                            		tmp = (alpha + 1.0) / ((3.0 + (alpha + beta)) * (2.0 + beta));
                            	} else {
                            		tmp = ((1.0 + alpha) / beta) / (3.0 + (beta + alpha));
                            	}
                            	return tmp;
                            }
                            
                            NOTE: alpha and beta should be sorted in increasing order before calling this function.
                            real(8) function code(alpha, beta)
                                real(8), intent (in) :: alpha
                                real(8), intent (in) :: beta
                                real(8) :: tmp
                                if (beta <= 2d+90) then
                                    tmp = (alpha + 1.0d0) / ((3.0d0 + (alpha + beta)) * (2.0d0 + beta))
                                else
                                    tmp = ((1.0d0 + alpha) / beta) / (3.0d0 + (beta + alpha))
                                end if
                                code = tmp
                            end function
                            
                            assert alpha < beta;
                            public static double code(double alpha, double beta) {
                            	double tmp;
                            	if (beta <= 2e+90) {
                            		tmp = (alpha + 1.0) / ((3.0 + (alpha + beta)) * (2.0 + beta));
                            	} else {
                            		tmp = ((1.0 + alpha) / beta) / (3.0 + (beta + alpha));
                            	}
                            	return tmp;
                            }
                            
                            [alpha, beta] = sort([alpha, beta])
                            def code(alpha, beta):
                            	tmp = 0
                            	if beta <= 2e+90:
                            		tmp = (alpha + 1.0) / ((3.0 + (alpha + beta)) * (2.0 + beta))
                            	else:
                            		tmp = ((1.0 + alpha) / beta) / (3.0 + (beta + alpha))
                            	return tmp
                            
                            alpha, beta = sort([alpha, beta])
                            function code(alpha, beta)
                            	tmp = 0.0
                            	if (beta <= 2e+90)
                            		tmp = Float64(Float64(alpha + 1.0) / Float64(Float64(3.0 + Float64(alpha + beta)) * Float64(2.0 + beta)));
                            	else
                            		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / Float64(3.0 + Float64(beta + alpha)));
                            	end
                            	return tmp
                            end
                            
                            alpha, beta = num2cell(sort([alpha, beta])){:}
                            function tmp_2 = code(alpha, beta)
                            	tmp = 0.0;
                            	if (beta <= 2e+90)
                            		tmp = (alpha + 1.0) / ((3.0 + (alpha + beta)) * (2.0 + beta));
                            	else
                            		tmp = ((1.0 + alpha) / beta) / (3.0 + (beta + alpha));
                            	end
                            	tmp_2 = tmp;
                            end
                            
                            NOTE: alpha and beta should be sorted in increasing order before calling this function.
                            code[alpha_, beta_] := If[LessEqual[beta, 2e+90], N[(N[(alpha + 1.0), $MachinePrecision] / N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * N[(2.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
                            
                            \begin{array}{l}
                            [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                            \\
                            \begin{array}{l}
                            \mathbf{if}\;\beta \leq 2 \cdot 10^{+90}:\\
                            \;\;\;\;\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \beta\right)}\\
                            
                            \mathbf{else}:\\
                            \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{3 + \left(\beta + \alpha\right)}\\
                            
                            
                            \end{array}
                            \end{array}
                            
                            Derivation
                            1. Split input into 2 regimes
                            2. if beta < 1.99999999999999993e90

                              1. Initial program 99.9%

                                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              2. Add Preprocessing
                              3. Taylor expanded in beta around -inf

                                \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              4. Step-by-step derivation
                                1. mul-1-negN/A

                                  \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                2. lower-neg.f64N/A

                                  \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                3. sub-negN/A

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                4. mul-1-negN/A

                                  \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} + \left(\mathsf{neg}\left(1\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                5. distribute-neg-inN/A

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(\mathsf{neg}\left(\left(\alpha + 1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                6. +-commutativeN/A

                                  \[\leadsto \frac{\frac{-\left(\mathsf{neg}\left(\color{blue}{\left(1 + \alpha\right)}\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                7. distribute-neg-inN/A

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                8. metadata-evalN/A

                                  \[\leadsto \frac{\frac{-\left(\color{blue}{-1} + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                9. unsub-negN/A

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                10. lower--.f6422.7

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              5. Applied rewrites22.7%

                                \[\leadsto \frac{\frac{\color{blue}{-\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              6. Step-by-step derivation
                                1. lift-/.f64N/A

                                  \[\leadsto \color{blue}{\frac{\frac{-\left(-1 - \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                                2. lift-/.f64N/A

                                  \[\leadsto \frac{\color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                3. associate-/l/N/A

                                  \[\leadsto \color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                                4. lower-/.f64N/A

                                  \[\leadsto \color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                              7. Applied rewrites34.2%

                                \[\leadsto \color{blue}{\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}} \]
                              8. Taylor expanded in alpha around 0

                                \[\leadsto \frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]
                              9. Step-by-step derivation
                                1. lower-+.f6421.9

                                  \[\leadsto \frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]
                              10. Applied rewrites21.9%

                                \[\leadsto \frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]

                              if 1.99999999999999993e90 < beta

                              1. Initial program 75.8%

                                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              2. Add Preprocessing
                              3. Step-by-step derivation
                                1. Applied rewrites69.7%

                                  \[\leadsto \color{blue}{\frac{\left(\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1\right) \cdot {\left(\left(\beta + \alpha\right) + 2\right)}^{-2}}{3 + \left(\beta + \alpha\right)}} \]
                                2. Taylor expanded in beta around inf

                                  \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{3 + \left(\beta + \alpha\right)} \]
                                3. Step-by-step derivation
                                  1. lower-/.f64N/A

                                    \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{3 + \left(\beta + \alpha\right)} \]
                                  2. lower-+.f6490.2

                                    \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\beta}}{3 + \left(\beta + \alpha\right)} \]
                                4. Applied rewrites90.2%

                                  \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{3 + \left(\beta + \alpha\right)} \]
                              4. Recombined 2 regimes into one program.
                              5. Add Preprocessing

                              Alternative 11: 62.4% accurate, 2.2× speedup?

                              \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{\frac{\alpha + 1}{2 + \left(\alpha + \beta\right)}}{3 + \left(\alpha + \beta\right)} \end{array} \]
                              NOTE: alpha and beta should be sorted in increasing order before calling this function.
                               ;; Herbie alternative 11: single-regime cascaded divisions (alpha, beta sorted ascending).
                               (FPCore (alpha beta)
                                :precision binary64
                                (/ (/ (+ alpha 1.0) (+ 2.0 (+ alpha beta))) (+ 3.0 (+ alpha beta))))
                              assert(alpha < beta);
                              double code(double alpha, double beta) {
                              	return ((alpha + 1.0) / (2.0 + (alpha + beta))) / (3.0 + (alpha + beta));
                              }
                              
                              NOTE: alpha and beta should be sorted in increasing order before calling this function.
                              real(8) function code(alpha, beta)
                                  real(8), intent (in) :: alpha
                                  real(8), intent (in) :: beta
                                  code = ((alpha + 1.0d0) / (2.0d0 + (alpha + beta))) / (3.0d0 + (alpha + beta))
                              end function
                              
                              assert alpha < beta;
                              public static double code(double alpha, double beta) {
                              	return ((alpha + 1.0) / (2.0 + (alpha + beta))) / (3.0 + (alpha + beta));
                              }
                              
                              [alpha, beta] = sort([alpha, beta])
                              def code(alpha, beta):
                              	return ((alpha + 1.0) / (2.0 + (alpha + beta))) / (3.0 + (alpha + beta))
                              
                              alpha, beta = sort([alpha, beta])
                              function code(alpha, beta)
                              	return Float64(Float64(Float64(alpha + 1.0) / Float64(2.0 + Float64(alpha + beta))) / Float64(3.0 + Float64(alpha + beta)))
                              end
                              
                              alpha, beta = num2cell(sort([alpha, beta])){:}
                              function tmp = code(alpha, beta)
                              	tmp = ((alpha + 1.0) / (2.0 + (alpha + beta))) / (3.0 + (alpha + beta));
                              end
                              
                              NOTE: alpha and beta should be sorted in increasing order before calling this function.
                              code[alpha_, beta_] := N[(N[(N[(alpha + 1.0), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
                              
                              \begin{array}{l}
                              [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                              \\
                              \frac{\frac{\alpha + 1}{2 + \left(\alpha + \beta\right)}}{3 + \left(\alpha + \beta\right)}
                              \end{array}
                              
                              Derivation
                              1. Initial program 93.7%

                                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              2. Add Preprocessing
                              3. Taylor expanded in beta around -inf

                                \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              4. Step-by-step derivation
                                1. mul-1-negN/A

                                  \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                2. lower-neg.f64N/A

                                  \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                3. sub-negN/A

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                4. mul-1-negN/A

                                  \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} + \left(\mathsf{neg}\left(1\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                5. distribute-neg-inN/A

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(\mathsf{neg}\left(\left(\alpha + 1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                6. +-commutativeN/A

                                  \[\leadsto \frac{\frac{-\left(\mathsf{neg}\left(\color{blue}{\left(1 + \alpha\right)}\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                7. distribute-neg-inN/A

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                8. metadata-evalN/A

                                  \[\leadsto \frac{\frac{-\left(\color{blue}{-1} + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                9. unsub-negN/A

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                10. lower--.f6439.9

                                  \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              5. Applied rewrites39.9%

                                \[\leadsto \frac{\frac{\color{blue}{-\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              6. Step-by-step derivation
                                1. Applied rewrites39.9%

                                  \[\leadsto \color{blue}{\frac{\frac{\alpha + 1}{2 + \left(\alpha + \beta\right)}}{3 + \left(\alpha + \beta\right)}} \]
                                2. Add Preprocessing

                                Alternative 12: 62.3% accurate, 2.4× speedup?

                                \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 6.6 \cdot 10^{+91}:\\ \;\;\;\;\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \beta\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \end{array} \]
                                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                 ;; Herbie alternative 12: regime split at beta = 6.6e91 (alpha, beta sorted ascending);
                                 ;; the far regime divides by beta twice to avoid overflow in the denominator product.
                                 (FPCore (alpha beta)
                                  :precision binary64
                                  (if (<= beta 6.6e+91)
                                    (/ (+ alpha 1.0) (* (+ 3.0 (+ alpha beta)) (+ 2.0 beta)))
                                    (/ (/ (+ alpha 1.0) beta) beta)))
                                assert(alpha < beta);
                                double code(double alpha, double beta) {
                                	double tmp;
                                	if (beta <= 6.6e+91) {
                                		tmp = (alpha + 1.0) / ((3.0 + (alpha + beta)) * (2.0 + beta));
                                	} else {
                                		tmp = ((alpha + 1.0) / beta) / beta;
                                	}
                                	return tmp;
                                }
                                
                                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                real(8) function code(alpha, beta)
                                    real(8), intent (in) :: alpha
                                    real(8), intent (in) :: beta
                                    real(8) :: tmp
                                    if (beta <= 6.6d+91) then
                                        tmp = (alpha + 1.0d0) / ((3.0d0 + (alpha + beta)) * (2.0d0 + beta))
                                    else
                                        tmp = ((alpha + 1.0d0) / beta) / beta
                                    end if
                                    code = tmp
                                end function
                                
                                assert alpha < beta;
                                public static double code(double alpha, double beta) {
                                	double tmp;
                                	if (beta <= 6.6e+91) {
                                		tmp = (alpha + 1.0) / ((3.0 + (alpha + beta)) * (2.0 + beta));
                                	} else {
                                		tmp = ((alpha + 1.0) / beta) / beta;
                                	}
                                	return tmp;
                                }
                                
                                [alpha, beta] = sort([alpha, beta])
                                def code(alpha, beta):
                                	tmp = 0
                                	if beta <= 6.6e+91:
                                		tmp = (alpha + 1.0) / ((3.0 + (alpha + beta)) * (2.0 + beta))
                                	else:
                                		tmp = ((alpha + 1.0) / beta) / beta
                                	return tmp
                                
                                alpha, beta = sort([alpha, beta])
                                function code(alpha, beta)
                                	tmp = 0.0
                                	if (beta <= 6.6e+91)
                                		tmp = Float64(Float64(alpha + 1.0) / Float64(Float64(3.0 + Float64(alpha + beta)) * Float64(2.0 + beta)));
                                	else
                                		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / beta);
                                	end
                                	return tmp
                                end
                                
                                alpha, beta = num2cell(sort([alpha, beta])){:}
                                function tmp_2 = code(alpha, beta)
                                	tmp = 0.0;
                                	if (beta <= 6.6e+91)
                                		tmp = (alpha + 1.0) / ((3.0 + (alpha + beta)) * (2.0 + beta));
                                	else
                                		tmp = ((alpha + 1.0) / beta) / beta;
                                	end
                                	tmp_2 = tmp;
                                end
                                
                                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                code[alpha_, beta_] := If[LessEqual[beta, 6.6e+91], N[(N[(alpha + 1.0), $MachinePrecision] / N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * N[(2.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
                                
                                \begin{array}{l}
                                [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                \\
                                \begin{array}{l}
                                \mathbf{if}\;\beta \leq 6.6 \cdot 10^{+91}:\\
                                \;\;\;\;\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \beta\right)}\\
                                
                                \mathbf{else}:\\
                                \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\
                                
                                
                                \end{array}
                                \end{array}
                                
                                Derivation
                                1. Split input into 2 regimes
                                2. if beta < 6.60000000000000034e91

                                  1. Initial program 99.9%

                                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                  2. Add Preprocessing
                                  3. Taylor expanded in beta around -inf

                                    \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                  4. Step-by-step derivation
                                    1. mul-1-negN/A

                                      \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    2. lower-neg.f64N/A

                                      \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    3. sub-negN/A

                                      \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    4. mul-1-negN/A

                                      \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} + \left(\mathsf{neg}\left(1\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    5. distribute-neg-inN/A

                                      \[\leadsto \frac{\frac{-\color{blue}{\left(\mathsf{neg}\left(\left(\alpha + 1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    6. +-commutativeN/A

                                      \[\leadsto \frac{\frac{-\left(\mathsf{neg}\left(\color{blue}{\left(1 + \alpha\right)}\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    7. distribute-neg-inN/A

                                      \[\leadsto \frac{\frac{-\color{blue}{\left(\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    8. metadata-evalN/A

                                      \[\leadsto \frac{\frac{-\left(\color{blue}{-1} + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    9. unsub-negN/A

                                      \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    10. lower--.f64 — 22.7%

                                      \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                  5. Applied rewrites — 22.7%

                                    \[\leadsto \frac{\frac{\color{blue}{-\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                  6. Step-by-step derivation
                                    1. lift-/.f64N/A

                                      \[\leadsto \color{blue}{\frac{\frac{-\left(-1 - \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                                    2. lift-/.f64N/A

                                      \[\leadsto \frac{\color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    3. associate-/l/N/A

                                      \[\leadsto \color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                                    4. lower-/.f64N/A

                                      \[\leadsto \color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                                  7. Applied rewrites — 34.2%

                                    \[\leadsto \color{blue}{\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}} \]
                                  8. Taylor expanded in alpha around 0

                                    \[\leadsto \frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]
                                  9. Step-by-step derivation
                                    1. lower-+.f64 — 21.9%

                                      \[\leadsto \frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]
                                  10. Applied rewrites21.9%

                                    \[\leadsto \frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]

                                  if 6.60000000000000034e91 < beta

                                  1. Initial program 75.8%

                                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                  2. Add Preprocessing
                                  3. Taylor expanded in beta around inf

                                    \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                  4. Step-by-step derivation
                                    1. lower-/.f64N/A

                                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                    2. lower-+.f64N/A

                                      \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                    3. unpow2N/A

                                      \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                    4. lower-*.f64 — 88.5%

                                      \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                  5. Applied rewrites88.5%

                                    \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                  6. Step-by-step derivation
                                    1. Applied rewrites90.1%

                                      \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
                                  7. Recombined 2 regimes into one program.
                                  8. Add Preprocessing

                                  Alternative 13: 62.3% accurate, 2.6× speedup?

                                  \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 2 \cdot 10^{+90}:\\ \;\;\;\;\frac{\alpha + 1}{\left(2 + \beta\right) \cdot \left(3 + \beta\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \end{array} \]
                                  NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                   (FPCore (alpha beta)
                                    :precision binary64
                                    ; Regime split: for moderate beta use the factored denominator (one
                                    ; division); for huge beta divide by beta twice so beta*beta cannot overflow.
                                    (if (<= beta 2e+90)
                                      (/ (+ alpha 1.0) (* (+ 2.0 beta) (+ 3.0 beta)))
                                      (/ (/ (+ alpha 1.0) beta) beta)))
                                  assert(alpha < beta);
                                  double code(double alpha, double beta) {
                                  	double tmp;
                                  	if (beta <= 2e+90) {
                                  		tmp = (alpha + 1.0) / ((2.0 + beta) * (3.0 + beta));
                                  	} else {
                                  		tmp = ((alpha + 1.0) / beta) / beta;
                                  	}
                                  	return tmp;
                                  }
                                  
                                  NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                  real(8) function code(alpha, beta)
                                      real(8), intent (in) :: alpha
                                      real(8), intent (in) :: beta
                                      real(8) :: tmp
                                      if (beta <= 2d+90) then
                                          tmp = (alpha + 1.0d0) / ((2.0d0 + beta) * (3.0d0 + beta))
                                      else
                                          tmp = ((alpha + 1.0d0) / beta) / beta
                                      end if
                                      code = tmp
                                  end function
                                  
                                  assert alpha < beta;
                                  public static double code(double alpha, double beta) {
                                  	double tmp;
                                  	if (beta <= 2e+90) {
                                  		tmp = (alpha + 1.0) / ((2.0 + beta) * (3.0 + beta));
                                  	} else {
                                  		tmp = ((alpha + 1.0) / beta) / beta;
                                  	}
                                  	return tmp;
                                  }
                                  
                                  [alpha, beta] = sort([alpha, beta])
                                  def code(alpha, beta):
                                  	tmp = 0
                                  	if beta <= 2e+90:
                                  		tmp = (alpha + 1.0) / ((2.0 + beta) * (3.0 + beta))
                                  	else:
                                  		tmp = ((alpha + 1.0) / beta) / beta
                                  	return tmp
                                  
                                  alpha, beta = sort([alpha, beta])
                                  function code(alpha, beta)
                                  	tmp = 0.0
                                  	if (beta <= 2e+90)
                                  		tmp = Float64(Float64(alpha + 1.0) / Float64(Float64(2.0 + beta) * Float64(3.0 + beta)));
                                  	else
                                  		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / beta);
                                  	end
                                  	return tmp
                                  end
                                  
                                  alpha, beta = num2cell(sort([alpha, beta])){:}
                                  function tmp_2 = code(alpha, beta)
                                  	tmp = 0.0;
                                  	if (beta <= 2e+90)
                                  		tmp = (alpha + 1.0) / ((2.0 + beta) * (3.0 + beta));
                                  	else
                                  		tmp = ((alpha + 1.0) / beta) / beta;
                                  	end
                                  	tmp_2 = tmp;
                                  end
                                  
                                  NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                  code[alpha_, beta_] := If[LessEqual[beta, 2e+90], N[(N[(alpha + 1.0), $MachinePrecision] / N[(N[(2.0 + beta), $MachinePrecision] * N[(3.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
                                  
                                  \begin{array}{l}
                                  [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                  \\
                                  \begin{array}{l}
                                  \mathbf{if}\;\beta \leq 2 \cdot 10^{+90}:\\
                                  \;\;\;\;\frac{\alpha + 1}{\left(2 + \beta\right) \cdot \left(3 + \beta\right)}\\
                                  
                                  \mathbf{else}:\\
                                  \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\
                                  
                                  
                                  \end{array}
                                  \end{array}
                                  
                                  Derivation
                                  1. Split input into 2 regimes
                                  2. if beta < 1.99999999999999993e90

                                    1. Initial program 99.9%

                                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    2. Add Preprocessing
                                    3. Taylor expanded in beta around -inf

                                      \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    4. Step-by-step derivation
                                      1. mul-1-negN/A

                                        \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      2. lower-neg.f64N/A

                                        \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      3. sub-negN/A

                                        \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha + \left(\mathsf{neg}\left(1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      4. mul-1-negN/A

                                        \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} + \left(\mathsf{neg}\left(1\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      5. distribute-neg-inN/A

                                        \[\leadsto \frac{\frac{-\color{blue}{\left(\mathsf{neg}\left(\left(\alpha + 1\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      6. +-commutativeN/A

                                        \[\leadsto \frac{\frac{-\left(\mathsf{neg}\left(\color{blue}{\left(1 + \alpha\right)}\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      7. distribute-neg-inN/A

                                        \[\leadsto \frac{\frac{-\color{blue}{\left(\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      8. metadata-evalN/A

                                        \[\leadsto \frac{\frac{-\left(\color{blue}{-1} + \left(\mathsf{neg}\left(\alpha\right)\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      9. unsub-negN/A

                                        \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      10. lower--.f6422.7

                                        \[\leadsto \frac{\frac{-\color{blue}{\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    5. Applied rewrites22.7%

                                      \[\leadsto \frac{\frac{\color{blue}{-\left(-1 - \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    6. Step-by-step derivation
                                      1. lift-/.f64N/A

                                        \[\leadsto \color{blue}{\frac{\frac{-\left(-1 - \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                                      2. lift-/.f64N/A

                                        \[\leadsto \frac{\color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      3. associate-/l/N/A

                                        \[\leadsto \color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                                      4. lower-/.f64N/A

                                        \[\leadsto \color{blue}{\frac{-\left(-1 - \alpha\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                                    7. Applied rewrites34.2%

                                      \[\leadsto \color{blue}{\frac{\alpha + 1}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)}} \]
                                    8. Taylor expanded in alpha around 0

                                      \[\leadsto \frac{\alpha + 1}{\color{blue}{\left(2 + \beta\right) \cdot \left(3 + \beta\right)}} \]
                                    9. Step-by-step derivation
                                      1. lower-*.f64N/A

                                        \[\leadsto \frac{\alpha + 1}{\color{blue}{\left(2 + \beta\right) \cdot \left(3 + \beta\right)}} \]
                                      2. lower-+.f64N/A

                                        \[\leadsto \frac{\alpha + 1}{\color{blue}{\left(2 + \beta\right)} \cdot \left(3 + \beta\right)} \]
                                      3. lower-+.f6421.6

                                        \[\leadsto \frac{\alpha + 1}{\left(2 + \beta\right) \cdot \color{blue}{\left(3 + \beta\right)}} \]
                                    10. Applied rewrites21.6%

                                      \[\leadsto \frac{\alpha + 1}{\color{blue}{\left(2 + \beta\right) \cdot \left(3 + \beta\right)}} \]

                                    if 1.99999999999999993e90 < beta

                                    1. Initial program 75.8%

                                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    2. Add Preprocessing
                                    3. Taylor expanded in beta around inf

                                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                    4. Step-by-step derivation
                                      1. lower-/.f64N/A

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      2. lower-+.f64N/A

                                        \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                      3. unpow2N/A

                                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      4. lower-*.f6488.5

                                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                    5. Applied rewrites88.5%

                                      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                    6. Step-by-step derivation
                                      1. Applied rewrites90.1%

                                        \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
                                    7. Recombined 2 regimes into one program.
                                    8. Add Preprocessing

                                    Alternative 14: 55.3% accurate, 2.9× speedup?

                                    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 5 \cdot 10^{+154}:\\ \;\;\;\;\frac{1 + \alpha}{\beta \cdot \beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
                                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                     (FPCore (alpha beta)
                                      :precision binary64
                                      ; For beta up to 5e154, beta*beta stays finite; beyond that,
                                      ; divide by beta twice (and drop the +1, which is negligible there).
                                      (if (<= beta 5e+154) (/ (+ 1.0 alpha) (* beta beta)) (/ (/ alpha beta) beta)))
                                    assert(alpha < beta);
                                    double code(double alpha, double beta) {
                                    	double tmp;
                                    	if (beta <= 5e+154) {
                                    		tmp = (1.0 + alpha) / (beta * beta);
                                    	} else {
                                    		tmp = (alpha / beta) / beta;
                                    	}
                                    	return tmp;
                                    }
                                    
                                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                    real(8) function code(alpha, beta)
                                        real(8), intent (in) :: alpha
                                        real(8), intent (in) :: beta
                                        real(8) :: tmp
                                        if (beta <= 5d+154) then
                                            tmp = (1.0d0 + alpha) / (beta * beta)
                                        else
                                            tmp = (alpha / beta) / beta
                                        end if
                                        code = tmp
                                    end function
                                    
                                    assert alpha < beta;
                                    public static double code(double alpha, double beta) {
                                    	double tmp;
                                    	if (beta <= 5e+154) {
                                    		tmp = (1.0 + alpha) / (beta * beta);
                                    	} else {
                                    		tmp = (alpha / beta) / beta;
                                    	}
                                    	return tmp;
                                    }
                                    
                                    [alpha, beta] = sort([alpha, beta])
                                    def code(alpha, beta):
                                    	tmp = 0
                                    	if beta <= 5e+154:
                                    		tmp = (1.0 + alpha) / (beta * beta)
                                    	else:
                                    		tmp = (alpha / beta) / beta
                                    	return tmp
                                    
                                    alpha, beta = sort([alpha, beta])
                                    function code(alpha, beta)
                                    	tmp = 0.0
                                    	if (beta <= 5e+154)
                                    		tmp = Float64(Float64(1.0 + alpha) / Float64(beta * beta));
                                    	else
                                    		tmp = Float64(Float64(alpha / beta) / beta);
                                    	end
                                    	return tmp
                                    end
                                    
                                    alpha, beta = num2cell(sort([alpha, beta])){:}
                                    function tmp_2 = code(alpha, beta)
                                    	tmp = 0.0;
                                    	if (beta <= 5e+154)
                                    		tmp = (1.0 + alpha) / (beta * beta);
                                    	else
                                    		tmp = (alpha / beta) / beta;
                                    	end
                                    	tmp_2 = tmp;
                                    end
                                    
                                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                    code[alpha_, beta_] := If[LessEqual[beta, 5e+154], N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]
                                    
                                    \begin{array}{l}
                                    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                    \\
                                    \begin{array}{l}
                                    \mathbf{if}\;\beta \leq 5 \cdot 10^{+154}:\\
                                    \;\;\;\;\frac{1 + \alpha}{\beta \cdot \beta}\\
                                    
                                    \mathbf{else}:\\
                                    \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\
                                    
                                    
                                    \end{array}
                                    \end{array}
                                    
                                    Derivation
                                    1. Split input into 2 regimes
                                    2. if beta < 5.00000000000000004e154

                                      1. Initial program 99.9%

                                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      2. Add Preprocessing
                                      3. Taylor expanded in beta around inf

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      4. Step-by-step derivation
                                        1. lower-/.f64 (accuracy: N/A)

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                        2. lower-+.f64N/A

                                          \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                        3. unpow2N/A

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                        4. lower-*.f64 (accuracy: 17.8%)

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      5. Applied rewrites (accuracy: 17.8%)

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]

                                      if 5.00000000000000004e154 < beta

                                      1. Initial program 68.5%

                                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      2. Add Preprocessing
                                      3. Taylor expanded in beta around inf

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      4. Step-by-step derivation
                                        1. lower-/.f64N/A

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                        2. lower-+.f64N/A

                                          \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                        3. unpow2N/A

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                        4. lower-*.f6487.0

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      5. Applied rewrites87.0%

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                      6. Taylor expanded in alpha around inf

                                        \[\leadsto \frac{\alpha}{\color{blue}{{\beta}^{2}}} \]
                                      7. Step-by-step derivation
                                        1. Applied rewrites87.0%

                                          \[\leadsto \frac{\alpha}{\color{blue}{\beta \cdot \beta}} \]
                                        2. Step-by-step derivation
                                          1. Applied rewrites89.0%

                                            \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
                                        3. Recombined 2 regimes into one program.
                                        4. Add Preprocessing

                                        Alternative 15: 55.7% accurate, 3.2× speedup?

                                        \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{\frac{\alpha + 1}{\beta}}{\beta} \end{array} \]
                                        NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                        (FPCore (alpha beta) :precision binary64 (/ (/ (+ alpha 1.0) beta) beta))
                                        assert(alpha < beta);
                                        double code(double alpha, double beta) {
                                        	return ((alpha + 1.0) / beta) / beta;
                                        }
                                        
                                        NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                        real(8) function code(alpha, beta)
                                            real(8), intent (in) :: alpha
                                            real(8), intent (in) :: beta
                                            code = ((alpha + 1.0d0) / beta) / beta
                                        end function
                                        
                                        assert alpha < beta;
                                        public static double code(double alpha, double beta) {
                                        	return ((alpha + 1.0) / beta) / beta;
                                        }
                                        
                                        [alpha, beta] = sort([alpha, beta])
                                        def code(alpha, beta):
                                        	return ((alpha + 1.0) / beta) / beta
                                        
                                        alpha, beta = sort([alpha, beta])
                                        function code(alpha, beta)
                                        	return Float64(Float64(Float64(alpha + 1.0) / beta) / beta)
                                        end
                                        
                                        alpha, beta = num2cell(sort([alpha, beta])){:}
                                        function tmp = code(alpha, beta)
                                        	tmp = ((alpha + 1.0) / beta) / beta;
                                        end
                                        
                                        NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                        code[alpha_, beta_] := N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]
                                        
                                        \begin{array}{l}
                                        [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                        \\
                                        \frac{\frac{\alpha + 1}{\beta}}{\beta}
                                        \end{array}
                                        
                                        Derivation
                                        1. Initial program 93.7%

                                          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                        2. Add Preprocessing
                                        3. Taylor expanded in beta around inf

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                        4. Step-by-step derivation
                                          1. lower-/.f64N/A

                                            \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                          2. lower-+.f64N/A

                                            \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                          3. unpow2N/A

                                            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                          4. lower-*.f6431.3

                                            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                        5. Applied rewrites31.3%

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                        6. Step-by-step derivation
                                          1. Applied rewrites31.7%

                                            \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
                                          2. Add Preprocessing

                                          Alternative 16: 52.9% accurate, 4.2× speedup?

                                          \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{1 + \alpha}{\beta \cdot \beta} \end{array} \]
                                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                          (FPCore (alpha beta) :precision binary64 (/ (+ 1.0 alpha) (* beta beta)))
                                          assert(alpha < beta);
                                          double code(double alpha, double beta) {
                                          	return (1.0 + alpha) / (beta * beta);
                                          }
                                          
                                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                          real(8) function code(alpha, beta)
                                              real(8), intent (in) :: alpha
                                              real(8), intent (in) :: beta
                                              code = (1.0d0 + alpha) / (beta * beta)
                                          end function
                                          
                                          assert alpha < beta;
                                          public static double code(double alpha, double beta) {
                                          	return (1.0 + alpha) / (beta * beta);
                                          }
                                          
                                          [alpha, beta] = sort([alpha, beta])
                                          def code(alpha, beta):
                                          	return (1.0 + alpha) / (beta * beta)
                                          
                                          alpha, beta = sort([alpha, beta])
                                          function code(alpha, beta)
                                          	return Float64(Float64(1.0 + alpha) / Float64(beta * beta))
                                          end
                                          
                                          alpha, beta = num2cell(sort([alpha, beta])){:}
                                          function tmp = code(alpha, beta)
                                          	tmp = (1.0 + alpha) / (beta * beta);
                                          end
                                          
                                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                          code[alpha_, beta_] := N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
                                          
                                          \begin{array}{l}
                                          [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                          \\
                                          \frac{1 + \alpha}{\beta \cdot \beta}
                                          \end{array}
                                          
                                          Derivation
                                          1. Initial program 93.7%

                                            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                          2. Add Preprocessing
                                          3. Taylor expanded in beta around inf

                                            \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                          4. Step-by-step derivation
                                            1. lower-/.f64N/A

                                              \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                            2. lower-+.f64N/A

                                              \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                            3. unpow2N/A

                                              \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                            4. lower-*.f6431.3

                                              \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                          5. Applied rewrites31.3%

                                            \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                          6. Add Preprocessing

                                          Alternative 17: 50.3% accurate, 4.9× speedup?

                                          \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{1}{\beta \cdot \beta} \end{array} \]
                                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                          (FPCore (alpha beta) :precision binary64 (/ 1.0 (* beta beta)))
                                          assert(alpha < beta);
                                          double code(double alpha, double beta) {
                                          	return 1.0 / (beta * beta);
                                          }
                                          
                                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                          real(8) function code(alpha, beta)
                                              real(8), intent (in) :: alpha
                                              real(8), intent (in) :: beta
                                              code = 1.0d0 / (beta * beta)
                                          end function
                                          
                                          assert alpha < beta;
                                          public static double code(double alpha, double beta) {
                                          	return 1.0 / (beta * beta);
                                          }
                                          
                                          [alpha, beta] = sort([alpha, beta])
                                          def code(alpha, beta):
                                          	return 1.0 / (beta * beta)
                                          
                                          alpha, beta = sort([alpha, beta])
                                          function code(alpha, beta)
                                          	return Float64(1.0 / Float64(beta * beta))
                                          end
                                          
                                          alpha, beta = num2cell(sort([alpha, beta])){:}
                                          function tmp = code(alpha, beta)
                                          	tmp = 1.0 / (beta * beta);
                                          end
                                          
                                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                          code[alpha_, beta_] := N[(1.0 / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
                                          
                                          \begin{array}{l}
                                          [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                          \\
                                          \frac{1}{\beta \cdot \beta}
                                          \end{array}
                                          
                                          Derivation
                                          1. Initial program 93.7%

                                            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                          2. Add Preprocessing
                                          3. Taylor expanded in beta around inf

                                            \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                          4. Step-by-step derivation
                                            1. lower-/.f64N/A

                                              \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                            2. lower-+.f64N/A

                                              \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                            3. unpow2N/A

                                              \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                            4. lower-*.f6431.3

                                              \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                          5. Applied rewrites31.3%

                                            \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                          6. Taylor expanded in alpha around 0

                                            \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]
                                          7. Step-by-step derivation
                                            1. Applied rewrites30.3%

                                              \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]
                                            2. Add Preprocessing

                                            Alternative 18: 32.2% accurate, 4.9× speedup?

                                            \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{\alpha}{\beta \cdot \beta} \end{array} \]
                                            NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                            (FPCore (alpha beta) :precision binary64 (/ alpha (* beta beta)))
                                            assert(alpha < beta);
                                            double code(double alpha, double beta) {
                                            	return alpha / (beta * beta);
                                            }
                                            
                                            NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                            real(8) function code(alpha, beta)
                                                real(8), intent (in) :: alpha
                                                real(8), intent (in) :: beta
                                                code = alpha / (beta * beta)
                                            end function
                                            
                                            assert alpha < beta;
                                            public static double code(double alpha, double beta) {
                                            	return alpha / (beta * beta);
                                            }
                                            
                                            [alpha, beta] = sort([alpha, beta])
                                            def code(alpha, beta):
                                            	return alpha / (beta * beta)
                                            
                                            alpha, beta = sort([alpha, beta])
                                            function code(alpha, beta)
                                            	return Float64(alpha / Float64(beta * beta))
                                            end
                                            
                                            alpha, beta = num2cell(sort([alpha, beta])){:}
                                            function tmp = code(alpha, beta)
                                            	tmp = alpha / (beta * beta);
                                            end
                                            
                                            NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                            code[alpha_, beta_] := N[(alpha / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
                                            
                                            \begin{array}{l}
                                            [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                            \\
                                            \frac{\alpha}{\beta \cdot \beta}
                                            \end{array}
                                            
                                            Derivation
                                            1. Initial program 93.7%

                                              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                            2. Add Preprocessing
                                            3. Taylor expanded in beta around inf

                                              \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                            4. Step-by-step derivation
                                              1. lower-/.f64N/A

                                                \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                              2. lower-+.f64N/A

                                                \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                              3. unpow2N/A

                                                \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                              4. lower-*.f6431.3

                                                \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                            5. Applied rewrites31.3%

                                              \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                            6. Taylor expanded in alpha around inf

                                              \[\leadsto \frac{\alpha}{\color{blue}{{\beta}^{2}}} \]
                                            7. Step-by-step derivation
                                              1. Applied rewrites20.1%

                                                \[\leadsto \frac{\alpha}{\color{blue}{\beta \cdot \beta}} \]
                                              2. Add Preprocessing

                                              Reproduce

                                              ?
                                              herbie shell --seed 2024307 
                                              (FPCore (alpha beta)
                                                :name "Octave 3.8, jcobi/3"
                                                :precision binary64
                                                :pre (and (> alpha -1.0) (> beta -1.0))
                                                (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))