Octave 3.8, jcobi/3

Percentage Accurate: 94.4% → 99.6%
Time: 14.8s
Alternatives: 22
Speedup: 3.1×

Specification

?
\[\alpha > -1 \land \beta > -1\]
\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1} \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* Herbie's initial program for jcobi/3 in binary64.
 * Computes (((alpha+beta) + beta*alpha + 1) / d / d) / (d + 1) with
 * d = (alpha + beta) + 2.  Operation order is preserved exactly; the
 * named intermediates round identically to the original nested form.
 * Spec precondition (report header): alpha > -1 and beta > -1. */
double code(double alpha, double beta) {
	double denom = (alpha + beta) + (2.0 * 1.0);
	double numer = ((alpha + beta) + (beta * alpha)) + 1.0;
	return ((numer / denom) / denom) / (denom + 1.0);
}
real(8) function code(alpha, beta)
    ! Herbie's initial program for jcobi/3, double precision.
    ! Computes ((num / t_0) / t_0) / (t_0 + 1) where
    ! num = (alpha + beta) + beta*alpha + 1 and t_0 = (alpha + beta) + 2.
    ! The 2.0d0 * 1.0d0 mirrors the "2 * 1" in the FPCore specification.
    ! Spec precondition (report header): alpha > -1 and beta > -1.
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
/**
 * Herbie's initial program for jcobi/3 in binary64.
 * Computes (((alpha+beta) + beta*alpha + 1) / d / d) / (d + 1) with
 * d = (alpha + beta) + 2.  Named intermediates replace the nested
 * expression; the floating-point operation order is unchanged.
 * Spec precondition (report header): alpha > -1 and beta > -1.
 */
public static double code(double alpha, double beta) {
	double denom = (alpha + beta) + (2.0 * 1.0);
	double numer = ((alpha + beta) + (beta * alpha)) + 1.0;
	return ((numer / denom) / denom) / (denom + 1.0);
}
def code(alpha, beta):
	"""Herbie's initial program for jcobi/3.

	Computes (((alpha+beta) + beta*alpha + 1) / d / d) / (d + 1)
	with d = (alpha + beta) + 2.  The named intermediates keep the
	exact floating-point operation order of the original nested form.
	Spec precondition (report header): alpha > -1 and beta > -1.
	"""
	denom = (alpha + beta) + (2.0 * 1.0)
	numer = ((alpha + beta) + (beta * alpha)) + 1.0
	return ((numer / denom) / denom) / (denom + 1.0)
# Herbie's initial program for jcobi/3.  Every intermediate is wrapped in
# Float64(...) so each operation rounds to binary64 regardless of argument
# type.  Computes ((num / t_0) / t_0) / (t_0 + 1) with t_0 = (alpha+beta)+2
# and num = (alpha+beta) + beta*alpha + 1.
# Spec precondition (report header): alpha > -1 and beta > -1.
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
% Herbie's initial program for jcobi/3 (Octave/MATLAB translation).
% Computes ((num / t_0) / t_0) / (t_0 + 1) with t_0 = (alpha+beta) + 2
% and num = (alpha+beta) + beta*alpha + 1.
% Spec precondition (report header): alpha > -1 and beta > -1.
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
(* Herbie's initial program for jcobi/3.  Each intermediate is forced through
   N[..., $MachinePrecision] so every operation rounds at machine precision,
   mirroring the binary64 evaluation of the other translations. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 22 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 94.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1} \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
double code(double alpha, double beta) {
	/* Herbie's initial program (94.4% accurate, 1.0x speedup baseline).
	 * t_0 = (alpha + beta) + 2; result =
	 * (((alpha+beta) + beta*alpha + 1) / t_0 / t_0) / (t_0 + 1).
	 * Spec precondition (report header): alpha > -1 and beta > -1. */
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
real(8) function code(alpha, beta)
    ! Herbie's initial program (baseline listing), double precision.
    ! t_0 = (alpha + beta) + 2; code =
    ! (((alpha+beta) + beta*alpha + 1) / t_0 / t_0) / (t_0 + 1).
    ! Spec precondition (report header): alpha > -1 and beta > -1.
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
public static double code(double alpha, double beta) {
	// Herbie's initial program (baseline listing).
	// t_0 = (alpha + beta) + 2; result =
	// (((alpha+beta) + beta*alpha + 1) / t_0 / t_0) / (t_0 + 1).
	// Spec precondition (report header): alpha > -1 and beta > -1.
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
def code(alpha, beta):
	"""Herbie's initial program (baseline listing).

	t_0 = (alpha + beta) + 2; returns
	(((alpha+beta) + beta*alpha + 1) / t_0 / t_0) / (t_0 + 1).
	Spec precondition (report header): alpha > -1 and beta > -1.
	"""
	t_0 = (alpha + beta) + (2.0 * 1.0)
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
# Herbie's initial program (baseline listing).  Float64(...) wrapping forces
# binary64 rounding of each intermediate.  t_0 = (alpha+beta) + 2; result is
# (((alpha+beta) + beta*alpha + 1) / t_0 / t_0) / (t_0 + 1).
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
% Herbie's initial program (baseline listing, Octave/MATLAB).
% t_0 = (alpha + beta) + 2; tmp =
% (((alpha+beta) + beta*alpha + 1) / t_0 / t_0) / (t_0 + 1).
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
(* Herbie's initial program (baseline listing); N[..., $MachinePrecision]
   rounds each intermediate at machine precision. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}

Alternative 1: 99.6% accurate, 0.6× speedup?

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 10^{+119}:\\ \;\;\;\;{\left(\alpha + \left(\beta + 2\right)\right)}^{-2} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{\alpha + \left(\beta + 3\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (if (<= beta 1e+119)
   (*
    (pow (+ alpha (+ beta 2.0)) -2.0)
    (/ (+ (fma alpha beta (+ beta alpha)) 1.0) (+ alpha (+ beta 3.0))))
   (/ (/ (+ alpha 1.0) beta) beta)))
assert(alpha < beta);
double code(double alpha, double beta) {
	double tmp;
	if (beta <= 1e+119) {
		tmp = pow((alpha + (beta + 2.0)), -2.0) * ((fma(alpha, beta, (beta + alpha)) + 1.0) / (alpha + (beta + 3.0)));
	} else {
		tmp = ((alpha + 1.0) / beta) / beta;
	}
	return tmp;
}
alpha, beta = sort([alpha, beta])
# Herbie alternative 1 (99.6% accurate).  Inputs must be pre-sorted so
# alpha <= beta (see the NOTE above).  For beta <= 1e119 uses the fma-based
# rewrite (alpha+beta+2)^-2 * (fma(alpha,beta,beta+alpha)+1)/(alpha+beta+3);
# for huge beta falls back to the Taylor limit (alpha+1)/beta^2, computed as
# two divisions so beta*beta cannot overflow.
function code(alpha, beta)
	tmp = 0.0
	if (beta <= 1e+119)
		tmp = Float64((Float64(alpha + Float64(beta + 2.0)) ^ -2.0) * Float64(Float64(fma(alpha, beta, Float64(beta + alpha)) + 1.0) / Float64(alpha + Float64(beta + 3.0))));
	else
		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / beta);
	end
	return tmp
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(* Herbie alternative 1.  Note: the fma(alpha, beta, beta+alpha) of the FPCore
   version is rendered here as alpha * beta + (beta + alpha), each step rounded
   via N[..., $MachinePrecision].  Inputs must be pre-sorted so alpha <= beta. *)
code[alpha_, beta_] := If[LessEqual[beta, 1e+119], N[(N[Power[N[(alpha + N[(beta + 2.0), $MachinePrecision]), $MachinePrecision], -2.0], $MachinePrecision] * N[(N[(N[(alpha * beta + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 10^{+119}:\\
\;\;\;\;{\left(\alpha + \left(\beta + 2\right)\right)}^{-2} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{\alpha + \left(\beta + 3\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 9.99999999999999944e118

    1. Initial program 98.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-/.f64N/A

        \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      2. lift-/.f64N/A

        \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. associate-/l/N/A

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
      4. lift-/.f64N/A

        \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
      5. clear-numN/A

        \[\leadsto \frac{\color{blue}{\frac{1}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
      6. associate-/r/N/A

        \[\leadsto \frac{\color{blue}{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \left(\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1\right)}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
      7. *-commutativeN/A

        \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \left(\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1\right)}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
      8. times-fracN/A

        \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      9. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
    4. Applied rewrites97.2%

      \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)}} \]
    5. Step-by-step derivation
      1. lift-/.f64N/A

        \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2}} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      2. div-invN/A

        \[\leadsto \color{blue}{\left(\frac{1}{\left(\alpha + \beta\right) + 2} \cdot \frac{1}{\left(\alpha + \beta\right) + 2}\right)} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      3. lift-/.f64N/A

        \[\leadsto \left(\color{blue}{\frac{1}{\left(\alpha + \beta\right) + 2}} \cdot \frac{1}{\left(\alpha + \beta\right) + 2}\right) \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      4. inv-powN/A

        \[\leadsto \left(\color{blue}{{\left(\left(\alpha + \beta\right) + 2\right)}^{-1}} \cdot \frac{1}{\left(\alpha + \beta\right) + 2}\right) \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      5. inv-powN/A

        \[\leadsto \left({\left(\left(\alpha + \beta\right) + 2\right)}^{-1} \cdot \color{blue}{{\left(\left(\alpha + \beta\right) + 2\right)}^{-1}}\right) \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      6. pow-prod-upN/A

        \[\leadsto \color{blue}{{\left(\left(\alpha + \beta\right) + 2\right)}^{\left(-1 + -1\right)}} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      7. lower-pow.f64N/A

        \[\leadsto \color{blue}{{\left(\left(\alpha + \beta\right) + 2\right)}^{\left(-1 + -1\right)}} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      8. lift-+.f64N/A

        \[\leadsto {\color{blue}{\left(\left(\alpha + \beta\right) + 2\right)}}^{\left(-1 + -1\right)} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      9. lift-+.f64N/A

        \[\leadsto {\left(\color{blue}{\left(\alpha + \beta\right)} + 2\right)}^{\left(-1 + -1\right)} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      10. associate-+l+N/A

        \[\leadsto {\color{blue}{\left(\alpha + \left(\beta + 2\right)\right)}}^{\left(-1 + -1\right)} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      11. lower-+.f64N/A

        \[\leadsto {\color{blue}{\left(\alpha + \left(\beta + 2\right)\right)}}^{\left(-1 + -1\right)} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      12. lower-+.f64N/A

        \[\leadsto {\left(\alpha + \color{blue}{\left(\beta + 2\right)}\right)}^{\left(-1 + -1\right)} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
      13. metadata-eval97.3

        \[\leadsto {\left(\alpha + \left(\beta + 2\right)\right)}^{\color{blue}{-2}} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]
    6. Applied rewrites97.3%

      \[\leadsto \color{blue}{{\left(\alpha + \left(\beta + 2\right)\right)}^{-2}} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)} \]

    if 9.99999999999999944e118 < beta

    1. Initial program 68.0%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Add Preprocessing
    3. Taylor expanded in beta around inf

      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
    4. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
      2. lower-+.f64N/A

        \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
      3. unpow2N/A

        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
      4. lower-*.f6479.5

        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
    5. Applied rewrites79.5%

      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
    6. Step-by-step derivation
      1. Applied rewrites82.2%

        \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
    7. Recombined 2 regimes into one program.
    8. Final simplification94.8%

      \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 10^{+119}:\\ \;\;\;\;{\left(\alpha + \left(\beta + 2\right)\right)}^{-2} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{\alpha + \left(\beta + 3\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \]
    9. Add Preprocessing

    Alternative 2: 99.5% accurate, 0.9× speedup?

    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 5 \cdot 10^{+107}:\\ \;\;\;\;\frac{\frac{1}{t\_0}}{t\_0} \cdot \frac{\frac{\left(1 - \alpha \cdot \alpha\right) \cdot \left(\beta + 1\right)}{1 - \alpha}}{\alpha + \left(\beta + 3\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \end{array} \]
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    (FPCore (alpha beta)
     :precision binary64
     (let* ((t_0 (+ 2.0 (+ beta alpha))))
       (if (<= beta 5e+107)
         (*
          (/ (/ 1.0 t_0) t_0)
          (/
           (/ (* (- 1.0 (* alpha alpha)) (+ beta 1.0)) (- 1.0 alpha))
           (+ alpha (+ beta 3.0))))
         (/ (/ (+ alpha 1.0) beta) beta))))
    assert(alpha < beta);
    /* Herbie alternative 2 (99.5% accurate).  Callers must pass the inputs
     * pre-sorted so alpha <= beta (see the assert above this listing).
     * Regime split at beta = 5e107:
     *   - beta <= 5e107: (1/d)/d * (((1 - alpha^2)(beta+1)) / (1 - alpha))
     *                    / (alpha + (beta+3)), with d = 2 + (beta + alpha)
     *   - otherwise:     the large-beta Taylor limit (alpha+1)/beta^2,
     *                    evaluated as two divisions to avoid overflow.
     * Branch order and every operation match the original exactly. */
    double code(double alpha, double beta) {
    	double denom = 2.0 + (beta + alpha);
    	if (beta <= 5e+107) {
    		double scale = (1.0 / denom) / denom;
    		double frac = ((1.0 - (alpha * alpha)) * (beta + 1.0)) / (1.0 - alpha);
    		return scale * (frac / (alpha + (beta + 3.0)));
    	}
    	return ((alpha + 1.0) / beta) / beta;
    }
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    real(8) function code(alpha, beta)
        ! Herbie alternative 2 (99.5% accurate).  Inputs must be pre-sorted
        ! so alpha <= beta (see the NOTE above this listing).
        ! Regime split at beta = 5d107:
        !   beta <= 5d107: (1/t_0)/t_0 * ((1 - alpha**2)*(beta+1)/(1-alpha))
        !                  / (alpha + (beta+3)), t_0 = 2 + (beta + alpha)
        !   otherwise:     Taylor limit (alpha+1)/beta**2, written as two
        !                  divisions so beta*beta cannot overflow.
        real(8), intent (in) :: alpha
        real(8), intent (in) :: beta
        real(8) :: t_0
        real(8) :: tmp
        t_0 = 2.0d0 + (beta + alpha)
        if (beta <= 5d+107) then
            tmp = ((1.0d0 / t_0) / t_0) * ((((1.0d0 - (alpha * alpha)) * (beta + 1.0d0)) / (1.0d0 - alpha)) / (alpha + (beta + 3.0d0)))
        else
            tmp = ((alpha + 1.0d0) / beta) / beta
        end if
        code = tmp
    end function
    
    assert alpha < beta;
    /**
     * Herbie alternative 2 (99.5% accurate).  Callers must pass the inputs
     * pre-sorted so alpha <= beta (see the assert above this listing).
     * Regime split at beta = 5e107: the small/moderate regime computes
     * (1/d)/d * (((1 - alpha^2)(beta+1)) / (1 - alpha)) / (alpha + (beta+3))
     * with d = 2 + (beta + alpha); the huge-beta regime uses the Taylor
     * limit (alpha+1)/beta^2 as two divisions.  Branch order and operation
     * order match the original listing exactly.
     */
    public static double code(double alpha, double beta) {
    	double denom = 2.0 + (beta + alpha);
    	if (beta <= 5e+107) {
    		double scale = (1.0 / denom) / denom;
    		double frac = ((1.0 - (alpha * alpha)) * (beta + 1.0)) / (1.0 - alpha);
    		return scale * (frac / (alpha + (beta + 3.0)));
    	}
    	return ((alpha + 1.0) / beta) / beta;
    }
    
    [alpha, beta] = sort([alpha, beta])
    def code(alpha, beta):
    	"""Herbie alternative 2 (99.5% accurate).

    	Inputs must be pre-sorted so alpha <= beta (see the sort note above).
    	For beta <= 5e107 computes (1/d)/d * (((1 - alpha^2)(beta+1)) /
    	(1 - alpha)) / (alpha + (beta+3)) with d = 2 + (beta + alpha);
    	for huge beta uses the Taylor limit (alpha+1)/beta^2 as two
    	divisions.  Operation order matches the original listing exactly.
    	"""
    	denom = 2.0 + (beta + alpha)
    	if beta <= 5e+107:
    		scale = (1.0 / denom) / denom
    		frac = ((1.0 - (alpha * alpha)) * (beta + 1.0)) / (1.0 - alpha)
    		return scale * (frac / (alpha + (beta + 3.0)))
    	return ((alpha + 1.0) / beta) / beta
    
    alpha, beta = sort([alpha, beta])
    # Herbie alternative 2 (99.5% accurate).  Inputs must be pre-sorted so
    # alpha <= beta (see the sort line above).  For beta <= 5e107 computes
    # (1/t_0)/t_0 * ((1 - alpha^2)*(beta+1)/(1-alpha)) / (alpha + (beta+3))
    # with t_0 = 2 + (beta + alpha); for huge beta falls back to the Taylor
    # limit (alpha+1)/beta^2 as two divisions so beta*beta cannot overflow.
    function code(alpha, beta)
    	t_0 = Float64(2.0 + Float64(beta + alpha))
    	tmp = 0.0
    	if (beta <= 5e+107)
    		tmp = Float64(Float64(Float64(1.0 / t_0) / t_0) * Float64(Float64(Float64(Float64(1.0 - Float64(alpha * alpha)) * Float64(beta + 1.0)) / Float64(1.0 - alpha)) / Float64(alpha + Float64(beta + 3.0))));
    	else
    		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / beta);
    	end
    	return tmp
    end
    
    alpha, beta = num2cell(sort([alpha, beta])){:}
    % Herbie alternative 2 (99.5% accurate, Octave/MATLAB).  Inputs must be
    % pre-sorted so alpha <= beta (see the sort/num2cell line above).
    % For beta <= 5e107: (1/t_0)/t_0 * ((1 - alpha^2)*(beta+1)/(1-alpha))
    % / (alpha + (beta+3)), t_0 = 2 + (beta + alpha); for huge beta the
    % Taylor limit (alpha+1)/beta^2 as two divisions.
    function tmp_2 = code(alpha, beta)
    	t_0 = 2.0 + (beta + alpha);
    	tmp = 0.0;
    	if (beta <= 5e+107)
    		tmp = ((1.0 / t_0) / t_0) * ((((1.0 - (alpha * alpha)) * (beta + 1.0)) / (1.0 - alpha)) / (alpha + (beta + 3.0)));
    	else
    		tmp = ((alpha + 1.0) / beta) / beta;
    	end
    	tmp_2 = tmp;
    end
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    (* Herbie alternative 2; each intermediate rounded via
       N[..., $MachinePrecision].  Inputs must be pre-sorted so alpha <= beta. *)
    code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 5e+107], N[(N[(N[(1.0 / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] * N[(N[(N[(N[(1.0 - N[(alpha * alpha), $MachinePrecision]), $MachinePrecision] * N[(beta + 1.0), $MachinePrecision]), $MachinePrecision] / N[(1.0 - alpha), $MachinePrecision]), $MachinePrecision] / N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]]
    
    \begin{array}{l}
    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
    \\
    \begin{array}{l}
    t_0 := 2 + \left(\beta + \alpha\right)\\
    \mathbf{if}\;\beta \leq 5 \cdot 10^{+107}:\\
    \;\;\;\;\frac{\frac{1}{t\_0}}{t\_0} \cdot \frac{\frac{\left(1 - \alpha \cdot \alpha\right) \cdot \left(\beta + 1\right)}{1 - \alpha}}{\alpha + \left(\beta + 3\right)}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if beta < 5.0000000000000002e107

      1. Initial program 99.3%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Add Preprocessing
      3. Step-by-step derivation
        1. lift-/.f64N/A

          \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
        2. lift-/.f64N/A

          \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        3. associate-/l/N/A

          \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
        4. lift-/.f64N/A

          \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
        5. clear-numN/A

          \[\leadsto \frac{\color{blue}{\frac{1}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
        6. associate-/r/N/A

          \[\leadsto \frac{\color{blue}{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \left(\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1\right)}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
        7. *-commutativeN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \left(\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1\right)}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
        8. times-fracN/A

          \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
        9. lower-*.f64N/A

          \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      4. Applied rewrites97.7%

        \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)}} \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}}{\alpha + \left(\beta + 3\right)} \]
        2. lift-fma.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\left(\alpha \cdot \beta + \left(\alpha + \beta\right)\right)} + 1}{\alpha + \left(\beta + 3\right)} \]
        3. lift-+.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\left(\alpha \cdot \beta + \color{blue}{\left(\alpha + \beta\right)}\right) + 1}{\alpha + \left(\beta + 3\right)} \]
        4. associate-+r+N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\left(\left(\alpha \cdot \beta + \alpha\right) + \beta\right)} + 1}{\alpha + \left(\beta + 3\right)} \]
        5. associate-+l+N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\left(\alpha \cdot \beta + \alpha\right) + \left(\beta + 1\right)}}{\alpha + \left(\beta + 3\right)} \]
        6. *-commutativeN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\left(\color{blue}{\beta \cdot \alpha} + \alpha\right) + \left(\beta + 1\right)}{\alpha + \left(\beta + 3\right)} \]
        7. distribute-lft1-inN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\left(\beta + 1\right) \cdot \alpha} + \left(\beta + 1\right)}{\alpha + \left(\beta + 3\right)} \]
        8. +-commutativeN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\left(1 + \beta\right)} \cdot \alpha + \left(\beta + 1\right)}{\alpha + \left(\beta + 3\right)} \]
        9. lift-+.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\left(1 + \beta\right)} \cdot \alpha + \left(\beta + 1\right)}{\alpha + \left(\beta + 3\right)} \]
        10. +-commutativeN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\left(1 + \beta\right) \cdot \alpha + \color{blue}{\left(1 + \beta\right)}}{\alpha + \left(\beta + 3\right)} \]
        11. lift-+.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\left(1 + \beta\right) \cdot \alpha + \color{blue}{\left(1 + \beta\right)}}{\alpha + \left(\beta + 3\right)} \]
        12. *-rgt-identityN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\left(1 + \beta\right) \cdot \alpha + \color{blue}{\left(1 + \beta\right) \cdot 1}}{\alpha + \left(\beta + 3\right)} \]
        13. distribute-lft-inN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\left(1 + \beta\right) \cdot \left(\alpha + 1\right)}}{\alpha + \left(\beta + 3\right)} \]
        14. +-commutativeN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\left(1 + \beta\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\alpha + \left(\beta + 3\right)} \]
        15. lift-+.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\left(1 + \beta\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\alpha + \left(\beta + 3\right)} \]
        16. *-commutativeN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}}{\alpha + \left(\beta + 3\right)} \]
        17. lift-+.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\left(1 + \alpha\right)} \cdot \left(1 + \beta\right)}{\alpha + \left(\beta + 3\right)} \]
        18. flip-+N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\frac{1 \cdot 1 - \alpha \cdot \alpha}{1 - \alpha}} \cdot \left(1 + \beta\right)}{\alpha + \left(\beta + 3\right)} \]
        19. associate-*l/N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\frac{\left(1 \cdot 1 - \alpha \cdot \alpha\right) \cdot \left(1 + \beta\right)}{1 - \alpha}}}{\alpha + \left(\beta + 3\right)} \]
        20. lower-/.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\frac{\left(1 \cdot 1 - \alpha \cdot \alpha\right) \cdot \left(1 + \beta\right)}{1 - \alpha}}}{\alpha + \left(\beta + 3\right)} \]
        21. lower-*.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\frac{\color{blue}{\left(1 \cdot 1 - \alpha \cdot \alpha\right) \cdot \left(1 + \beta\right)}}{1 - \alpha}}{\alpha + \left(\beta + 3\right)} \]
        22. metadata-evalN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\frac{\left(\color{blue}{1} - \alpha \cdot \alpha\right) \cdot \left(1 + \beta\right)}{1 - \alpha}}{\alpha + \left(\beta + 3\right)} \]
        23. lower--.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\frac{\color{blue}{\left(1 - \alpha \cdot \alpha\right)} \cdot \left(1 + \beta\right)}{1 - \alpha}}{\alpha + \left(\beta + 3\right)} \]
        24. lower-*.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\frac{\left(1 - \color{blue}{\alpha \cdot \alpha}\right) \cdot \left(1 + \beta\right)}{1 - \alpha}}{\alpha + \left(\beta + 3\right)} \]
        25. lift-+.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\frac{\left(1 - \alpha \cdot \alpha\right) \cdot \color{blue}{\left(1 + \beta\right)}}{1 - \alpha}}{\alpha + \left(\beta + 3\right)} \]
        26. +-commutativeN/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\frac{\left(1 - \alpha \cdot \alpha\right) \cdot \color{blue}{\left(\beta + 1\right)}}{1 - \alpha}}{\alpha + \left(\beta + 3\right)} \]
        27. lower-+.f64N/A

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\frac{\left(1 - \alpha \cdot \alpha\right) \cdot \color{blue}{\left(\beta + 1\right)}}{1 - \alpha}}{\alpha + \left(\beta + 3\right)} \]
        28. lower--.f6482.3

          \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\frac{\left(1 - \alpha \cdot \alpha\right) \cdot \left(\beta + 1\right)}{\color{blue}{1 - \alpha}}}{\alpha + \left(\beta + 3\right)} \]
      6. Applied rewrites82.3%

        \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\color{blue}{\frac{\left(1 - \alpha \cdot \alpha\right) \cdot \left(\beta + 1\right)}{1 - \alpha}}}{\alpha + \left(\beta + 3\right)} \]

      if 5.0000000000000002e107 < beta

      1. Initial program 67.2%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Add Preprocessing
      3. Taylor expanded in beta around inf

        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
      4. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
        2. lower-+.f64N/A

          \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
        3. unpow2N/A

          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
        4. lower-*.f6478.2

          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
      5. Applied rewrites78.2%

        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
      6. Step-by-step derivation
        1. Applied rewrites80.8%

          \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
      7. Recombined 2 regimes into one program.
      8. Final simplification82.1%

        \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 5 \cdot 10^{+107}:\\ \;\;\;\;\frac{\frac{1}{2 + \left(\beta + \alpha\right)}}{2 + \left(\beta + \alpha\right)} \cdot \frac{\frac{\left(1 - \alpha \cdot \alpha\right) \cdot \left(\beta + 1\right)}{1 - \alpha}}{\alpha + \left(\beta + 3\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \]
      9. Add Preprocessing

      Alternative 3: 99.5% accurate, 1.1× speedup?

      \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 2 \cdot 10^{+111}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{\alpha + \left(\beta + 3\right)} \cdot \frac{\frac{1}{t\_0}}{t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \end{array} \]
      NOTE: alpha and beta should be sorted in increasing order before calling this function.
      ; Alternative 3 (binary64): two regimes split at beta = 2e111.
      ; Main regime: fma fuses alpha*beta + (beta + alpha) in a single rounding;
      ; large-beta regime uses the limit form (alpha + 1) / beta^2 as two divisions.
      (FPCore (alpha beta)
       :precision binary64
       (let* ((t_0 (+ 2.0 (+ beta alpha))))
         (if (<= beta 2e+111)
           (*
            (/ (+ (fma alpha beta (+ beta alpha)) 1.0) (+ alpha (+ beta 3.0)))
            (/ (/ 1.0 t_0) t_0))
           (/ (/ (+ alpha 1.0) beta) beta))))
      assert(alpha < beta);
      double code(double alpha, double beta) {
      	double t_0 = 2.0 + (beta + alpha);
      	double tmp;
      	if (beta <= 2e+111) {
      		tmp = ((fma(alpha, beta, (beta + alpha)) + 1.0) / (alpha + (beta + 3.0))) * ((1.0 / t_0) / t_0);
      	} else {
      		tmp = ((alpha + 1.0) / beta) / beta;
      	}
      	return tmp;
      }
      
      alpha, beta = sort([alpha, beta])
      function code(alpha, beta)
      	# Accuracy-rewritten jcobi ratio; regime split at beta = 2e111.
      	# Shared factor (alpha + beta) + 2, evaluated as 2 + (beta + alpha).
      	shifted = Float64(2.0 + Float64(beta + alpha))
      	if beta <= 2e+111
      		# Main regime: fma fuses alpha*beta + (beta + alpha) in one rounding.
      		numer = Float64(fma(alpha, beta, Float64(beta + alpha)) + 1.0)
      		ratio = Float64(numer / Float64(alpha + Float64(beta + 3.0)))
      		return Float64(ratio * Float64(Float64(1.0 / shifted) / shifted))
      	else
      		# Large-beta regime: (alpha + 1) / beta^2 as two divisions.
      		return Float64(Float64(Float64(alpha + 1.0) / beta) / beta)
      	end
      end
      
      NOTE: alpha and beta should be sorted in increasing order before calling this function.
      (* Alternative 3: regime split at beta = 2e111. NOTE(review): the FPCore fma
         appears here as plain alpha * beta + (beta + alpha), so the fused rounding
         of the other language targets is not reproduced in this translation. *)
      code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 2e+111], N[(N[(N[(N[(alpha * beta + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]]
      
      \begin{array}{l}
      [alpha, beta] = \mathsf{sort}([alpha, beta])\\
      \\
      \begin{array}{l}
      t_0 := 2 + \left(\beta + \alpha\right)\\
      \mathbf{if}\;\beta \leq 2 \cdot 10^{+111}:\\
      \;\;\;\;\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{\alpha + \left(\beta + 3\right)} \cdot \frac{\frac{1}{t\_0}}{t\_0}\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if beta < 1.99999999999999991e111

        1. Initial program 99.3%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Add Preprocessing
        3. Step-by-step derivation
          1. lift-/.f64N/A

            \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
          2. lift-/.f64N/A

            \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          3. associate-/l/N/A

            \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
          4. lift-/.f64N/A

            \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
          5. clear-numN/A

            \[\leadsto \frac{\color{blue}{\frac{1}{\frac{\left(\alpha + \beta\right) + 2 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
          6. associate-/r/N/A

            \[\leadsto \frac{\color{blue}{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \left(\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1\right)}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
          7. *-commutativeN/A

            \[\leadsto \frac{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \left(\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1\right)}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
          8. times-fracN/A

            \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
          9. lower-*.f64N/A

            \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
        4. Applied rewrites97.7%

          \[\leadsto \color{blue}{\frac{\frac{1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2} \cdot \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\alpha + \left(\beta + 3\right)}} \]

        if 1.99999999999999991e111 < beta

        1. Initial program 67.2%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Add Preprocessing
        3. Taylor expanded in beta around inf

          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
        4. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
          2. lower-+.f64N/A

            \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
          3. unpow2N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
          4. lower-*.f6478.2

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
        5. Applied rewrites78.2%

          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
        6. Step-by-step derivation
          1. Applied rewrites80.8%

            \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
        7. Recombined 2 regimes into one program.
        8. Final simplification94.7%

          \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 2 \cdot 10^{+111}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{\alpha + \left(\beta + 3\right)} \cdot \frac{\frac{1}{2 + \left(\beta + \alpha\right)}}{2 + \left(\beta + \alpha\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \]
        9. Add Preprocessing

        Alternative 4: 99.5% accurate, 1.3× speedup?

        \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 2.5 \cdot 10^{+111}:\\ \;\;\;\;\frac{\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{t\_0}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \end{array} \]
        NOTE: alpha and beta should be sorted in increasing order before calling this function.
        ; Alternative 4 (binary64): two regimes split at beta = 2.5e111.
        ; Main regime: fma fuses alpha*beta + (beta + alpha); the denominator is
        ; regrouped as (alpha + (beta + 3)) * t_0 with t_0 = 2 + (beta + alpha).
        (FPCore (alpha beta)
         :precision binary64
         (let* ((t_0 (+ 2.0 (+ beta alpha))))
           (if (<= beta 2.5e+111)
             (/
              (/ (+ (fma alpha beta (+ beta alpha)) 1.0) t_0)
              (* (+ alpha (+ beta 3.0)) t_0))
             (/ (/ (+ alpha 1.0) beta) beta))))
        assert(alpha < beta);
        double code(double alpha, double beta) {
        	double t_0 = 2.0 + (beta + alpha);
        	double tmp;
        	if (beta <= 2.5e+111) {
        		tmp = ((fma(alpha, beta, (beta + alpha)) + 1.0) / t_0) / ((alpha + (beta + 3.0)) * t_0);
        	} else {
        		tmp = ((alpha + 1.0) / beta) / beta;
        	}
        	return tmp;
        }
        
        alpha, beta = sort([alpha, beta])
        function code(alpha, beta)
        	# Accuracy-rewritten jcobi ratio; regime split at beta = 2.5e111.
        	# Shared factor (alpha + beta) + 2, evaluated as 2 + (beta + alpha).
        	shifted = Float64(2.0 + Float64(beta + alpha))
        	if beta <= 2.5e+111
        		# Main regime: fma fuses alpha*beta + (beta + alpha) in one rounding.
        		numer = Float64(fma(alpha, beta, Float64(beta + alpha)) + 1.0)
        		return Float64(Float64(numer / shifted) / Float64(Float64(alpha + Float64(beta + 3.0)) * shifted))
        	else
        		# Large-beta regime: (alpha + 1) / beta^2 as two divisions.
        		return Float64(Float64(Float64(alpha + 1.0) / beta) / beta)
        	end
        end
        
        NOTE: alpha and beta should be sorted in increasing order before calling this function.
        (* Alternative 4: regime split at beta = 2.5e111. NOTE(review): the FPCore fma
           appears here as plain alpha * beta + (beta + alpha), so the fused rounding
           of the other language targets is not reproduced in this translation. *)
        code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 2.5e+111], N[(N[(N[(N[(alpha * beta + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]]
        
        \begin{array}{l}
        [alpha, beta] = \mathsf{sort}([alpha, beta])\\
        \\
        \begin{array}{l}
        t_0 := 2 + \left(\beta + \alpha\right)\\
        \mathbf{if}\;\beta \leq 2.5 \cdot 10^{+111}:\\
        \;\;\;\;\frac{\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{t\_0}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot t\_0}\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if beta < 2.4999999999999998e111

          1. Initial program 99.3%

            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          2. Add Preprocessing
          3. Step-by-step derivation
            1. lift-/.f64N/A

              \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
            2. lift-/.f64N/A

              \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            3. associate-/l/N/A

              \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
            4. lower-/.f64N/A

              \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
            5. lift-+.f64N/A

              \[\leadsto \frac{\frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
            6. +-commutativeN/A

              \[\leadsto \frac{\frac{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
            7. lift-*.f64N/A

              \[\leadsto \frac{\frac{\left(\color{blue}{\beta \cdot \alpha} + \left(\alpha + \beta\right)\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
            8. *-commutativeN/A

              \[\leadsto \frac{\frac{\left(\color{blue}{\alpha \cdot \beta} + \left(\alpha + \beta\right)\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
            9. lower-fma.f64N/A

              \[\leadsto \frac{\frac{\color{blue}{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right)} + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
            10. lift-*.f64N/A

              \[\leadsto \frac{\frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
            11. metadata-evalN/A

              \[\leadsto \frac{\frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \beta\right) + \color{blue}{2}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
            12. *-commutativeN/A

              \[\leadsto \frac{\frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \beta\right) + 2}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
          4. Applied rewrites96.9%

            \[\leadsto \color{blue}{\frac{\frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\alpha + \left(\beta + 3\right)\right)}} \]

          if 2.4999999999999998e111 < beta

          1. Initial program 67.2%

            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          2. Add Preprocessing
          3. Taylor expanded in beta around inf

            \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
          4. Step-by-step derivation
            1. lower-/.f64N/A

              \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
            2. lower-+.f64N/A

              \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
            3. unpow2N/A

              \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
            4. lower-*.f6478.2

              \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
          5. Applied rewrites78.2%

            \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
          6. Step-by-step derivation
            1. Applied rewrites80.8%

              \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
          7. Recombined 2 regimes into one program.
          8. Final simplification94.1%

            \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 2.5 \cdot 10^{+111}:\\ \;\;\;\;\frac{\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{2 + \left(\beta + \alpha\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(2 + \left(\beta + \alpha\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \]
          9. Add Preprocessing

          Alternative 5: 99.5% accurate, 1.3× speedup?

          \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := -2 - \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 2.5 \cdot 10^{+111}:\\ \;\;\;\;\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{t\_0}}{\left(\left(\beta + \alpha\right) + 3\right) \cdot t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \end{array} \]
          NOTE: alpha and beta should be sorted in increasing order before calling this function.
          ; Alternative 5 (binary64): two regimes split at beta = 2.5e111.
          ; Main regime factors the numerator as (beta + 1) * (alpha + 1); the shared
          ; divisor t_0 is negated (-2 - (beta + alpha)) and its sign cancels since
          ; t_0 appears in both divisions.
          (FPCore (alpha beta)
           :precision binary64
           (let* ((t_0 (- -2.0 (+ beta alpha))))
             (if (<= beta 2.5e+111)
               (/ (/ (* (+ beta 1.0) (+ alpha 1.0)) t_0) (* (+ (+ beta alpha) 3.0) t_0))
               (/ (/ (+ alpha 1.0) beta) beta))))
          assert(alpha < beta);
          /* Accuracy-rewritten jcobi ratio; regime split at beta = 2.5e111.
             Floating-point operation order is identical to the generated form. */
          double code(double alpha, double beta) {
          	/* Negated shared factor -(2 + (beta + alpha)); its sign cancels across the two divisions. */
          	double neg_shift = -2.0 - (beta + alpha);
          	if (beta <= 2.5e+111) {
          		/* Main regime: factored numerator (beta + 1) * (alpha + 1). */
          		return (((beta + 1.0) * (alpha + 1.0)) / neg_shift) / (((beta + alpha) + 3.0) * neg_shift);
          	}
          	/* Large-beta regime: (alpha + 1) / beta^2 as two divisions. */
          	return ((alpha + 1.0) / beta) / beta;
          }
          
          NOTE: alpha and beta should be sorted in increasing order before calling this function.
          real(8) function code(alpha, beta)
              ! Accuracy-rewritten jcobi ratio; regime split at beta = 2.5e111.
              ! Floating-point operation order is identical to the generated form.
              real(8), intent (in) :: alpha
              real(8), intent (in) :: beta
              real(8) :: neg_shift
              ! Negated shared factor -(2 + (beta + alpha)); its sign cancels across the two divisions.
              neg_shift = (-2.0d0) - (beta + alpha)
              if (beta <= 2.5d+111) then
                  ! Main regime: factored numerator (beta + 1) * (alpha + 1).
                  code = (((beta + 1.0d0) * (alpha + 1.0d0)) / neg_shift) / (((beta + alpha) + 3.0d0) * neg_shift)
              else
                  ! Large-beta regime: (alpha + 1) / beta**2 as two divisions.
                  code = ((alpha + 1.0d0) / beta) / beta
              end if
          end function
          
          assert alpha < beta;
          /**
           * Accuracy-rewritten jcobi ratio; regime split at beta = 2.5e111.
           * Floating-point operation order is identical to the generated form.
           */
          public static double code(double alpha, double beta) {
          	// Negated shared factor -(2 + (beta + alpha)); its sign cancels across the two divisions.
          	double negShift = -2.0 - (beta + alpha);
          	if (beta <= 2.5e+111) {
          		// Main regime: factored numerator (beta + 1) * (alpha + 1).
          		return (((beta + 1.0) * (alpha + 1.0)) / negShift) / (((beta + alpha) + 3.0) * negShift);
          	}
          	// Large-beta regime: (alpha + 1) / beta^2 as two divisions.
          	return ((alpha + 1.0) / beta) / beta;
          }
          
          [alpha, beta] = sort([alpha, beta])
          def code(alpha, beta):
          	"""Accuracy-rewritten jcobi ratio; regime split at beta = 2.5e111.

          	Floating-point operation order is identical to the generated form.
          	"""
          	# Negated shared factor -(2 + (beta + alpha)); its sign cancels across the two divisions.
          	neg_shift = -2.0 - (beta + alpha)
          	if beta <= 2.5e+111:
          		# Main regime: factored numerator (beta + 1) * (alpha + 1).
          		return (((beta + 1.0) * (alpha + 1.0)) / neg_shift) / (((beta + alpha) + 3.0) * neg_shift)
          	# Large-beta regime: (alpha + 1) / beta**2 as two divisions.
          	return ((alpha + 1.0) / beta) / beta
          
          alpha, beta = sort([alpha, beta])
          function code(alpha, beta)
          	# Accuracy-rewritten jcobi ratio; regime split at beta = 2.5e111.
          	# Negated shared factor -(2 + (beta + alpha)); its sign cancels across the two divisions.
          	neg_shift = Float64(-2.0 - Float64(beta + alpha))
          	if beta <= 2.5e+111
          		# Main regime: factored numerator (beta + 1) * (alpha + 1).
          		numer = Float64(Float64(beta + 1.0) * Float64(alpha + 1.0))
          		return Float64(Float64(numer / neg_shift) / Float64(Float64(Float64(beta + alpha) + 3.0) * neg_shift))
          	else
          		# Large-beta regime: (alpha + 1) / beta^2 as two divisions.
          		return Float64(Float64(Float64(alpha + 1.0) / beta) / beta)
          	end
          end
          
          alpha, beta = num2cell(sort([alpha, beta])){:}
          function tmp_2 = code(alpha, beta)
          	% Accuracy-rewritten jcobi ratio; regime split at beta = 2.5e111.
          	% Negated shared factor -(2 + (beta + alpha)); its sign cancels across the two divisions.
          	neg_shift = -2.0 - (beta + alpha);
          	if (beta <= 2.5e+111)
          		% Main regime: factored numerator (beta + 1) * (alpha + 1).
          		tmp_2 = (((beta + 1.0) * (alpha + 1.0)) / neg_shift) / (((beta + alpha) + 3.0) * neg_shift);
          	else
          		% Large-beta regime: (alpha + 1) / beta^2 as two divisions.
          		tmp_2 = ((alpha + 1.0) / beta) / beta;
          	end
          end
          
          NOTE: alpha and beta should be sorted in increasing order before calling this function.
          (* Alternative 5: regime split at beta = 2.5e111; numerator factored as
             (beta + 1) * (alpha + 1), divided twice by the negated shared factor
             t$95$0 = -2 - (beta + alpha) so the sign cancels. *)
          code[alpha_, beta_] := Block[{t$95$0 = N[(-2.0 - N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 2.5e+111], N[(N[(N[(N[(beta + 1.0), $MachinePrecision] * N[(alpha + 1.0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(N[(N[(beta + alpha), $MachinePrecision] + 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]]
          
          \begin{array}{l}
          [alpha, beta] = \mathsf{sort}([alpha, beta])\\
          \\
          \begin{array}{l}
          t_0 := -2 - \left(\beta + \alpha\right)\\
          \mathbf{if}\;\beta \leq 2.5 \cdot 10^{+111}:\\
          \;\;\;\;\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{t\_0}}{\left(\left(\beta + \alpha\right) + 3\right) \cdot t\_0}\\
          
          \mathbf{else}:\\
          \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 2 regimes
          2. if beta < 2.4999999999999998e111

            1. Initial program 99.3%

              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            2. Add Preprocessing
            3. Taylor expanded in alpha around 0

              \[\leadsto \frac{\frac{\frac{\color{blue}{1 + \left(\beta + \alpha \cdot \left(1 + \beta\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            4. Step-by-step derivation
              1. distribute-rgt-inN/A

                \[\leadsto \frac{\frac{\frac{1 + \left(\beta + \color{blue}{\left(1 \cdot \alpha + \beta \cdot \alpha\right)}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. *-lft-identityN/A

                \[\leadsto \frac{\frac{\frac{1 + \left(\beta + \left(\color{blue}{\alpha} + \beta \cdot \alpha\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. associate-+r+N/A

                \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\left(\left(\beta + \alpha\right) + \beta \cdot \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              4. +-commutativeN/A

                \[\leadsto \frac{\frac{\frac{1 + \left(\color{blue}{\left(\alpha + \beta\right)} + \beta \cdot \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              5. associate-+r+N/A

                \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\left(\alpha + \left(\beta + \beta \cdot \alpha\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              6. *-rgt-identityN/A

                \[\leadsto \frac{\frac{\frac{1 + \left(\alpha + \left(\color{blue}{\beta \cdot 1} + \beta \cdot \alpha\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              7. distribute-lft-inN/A

                \[\leadsto \frac{\frac{\frac{1 + \left(\alpha + \color{blue}{\beta \cdot \left(1 + \alpha\right)}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              8. associate-+r+N/A

                \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \alpha\right) + \beta \cdot \left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              9. distribute-rgt1-inN/A

                \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\beta + 1\right) \cdot \left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              10. +-commutativeN/A

                \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \beta\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              11. lower-*.f64N/A

                \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              12. lower-+.f64N/A

                \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \beta\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              13. lower-+.f6499.3

                \[\leadsto \frac{\frac{\frac{\left(1 + \beta\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            5. Applied rewrites99.3%

              \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            6. Step-by-step derivation
              1. lift-/.f64N/A

                \[\leadsto \color{blue}{\frac{\frac{\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
              2. lift-/.f64N/A

                \[\leadsto \frac{\color{blue}{\frac{\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. lift-*.f64N/A

                \[\leadsto \frac{\frac{\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              4. metadata-evalN/A

                \[\leadsto \frac{\frac{\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + \color{blue}{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              5. frac-2negN/A

                \[\leadsto \frac{\color{blue}{\frac{\mathsf{neg}\left(\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}\right)}{\mathsf{neg}\left(\left(\left(\alpha + \beta\right) + 2\right)\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              6. associate-/l/N/A

                \[\leadsto \color{blue}{\frac{\mathsf{neg}\left(\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\mathsf{neg}\left(\left(\left(\alpha + \beta\right) + 2\right)\right)\right)}} \]
              7. lower-/.f64N/A

                \[\leadsto \color{blue}{\frac{\mathsf{neg}\left(\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\mathsf{neg}\left(\left(\left(\alpha + \beta\right) + 2\right)\right)\right)}} \]
            7. Applied rewrites96.9%

              \[\leadsto \color{blue}{\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{-2 + \left(-\left(\alpha + \beta\right)\right)}}{\left(\left(\alpha + \beta\right) + 3\right) \cdot \left(-2 + \left(-\left(\alpha + \beta\right)\right)\right)}} \]

            if 2.4999999999999998e111 < beta

            1. Initial program 67.2%

              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            2. Add Preprocessing
            3. Taylor expanded in beta around inf

              \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
            4. Step-by-step derivation
              1. lower-/.f64N/A

                \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
              2. lower-+.f64N/A

                \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
              3. unpow2N/A

                \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
              4. lower-*.f6478.2

                \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
            5. Applied rewrites78.2%

              \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
            6. Step-by-step derivation
              1. Applied rewrites80.8%

                \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
            7. Recombined 2 regimes into one program.
            8. Final simplification94.1%

              \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 2.5 \cdot 10^{+111}:\\ \;\;\;\;\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{-2 - \left(\beta + \alpha\right)}}{\left(\left(\beta + \alpha\right) + 3\right) \cdot \left(-2 - \left(\beta + \alpha\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \]
            9. Add Preprocessing

            Alternative 6: 99.5% accurate, 1.3× speedup?

            \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \alpha + \left(\beta + 2\right)\\ \mathbf{if}\;\beta \leq 2.5 \cdot 10^{+111}:\\ \;\;\;\;\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\beta + \alpha\right) + 3}}{t\_0 \cdot t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \end{array} \]
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            ;; Herbie alternative 6: regime split on beta at 2.5e111.
            ;; Below the split: factored numerator (beta+1)(alpha+1) over (beta+alpha+3) * t_0^2,
            ;; with t_0 = alpha + beta + 2. Above it, the expression reduces to (alpha+1)/beta^2.
            (FPCore (alpha beta)
             :precision binary64
             (let* ((t_0 (+ alpha (+ beta 2.0))))
               (if (<= beta 2.5e+111)
                 (/ (/ (* (+ beta 1.0) (+ alpha 1.0)) (+ (+ beta alpha) 3.0)) (* t_0 t_0))
                 (/ (/ (+ alpha 1.0) beta) beta))))
            assert(alpha < beta);
            /* Alternative 6: two-regime rewrite of
             * ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)^2 * (alpha+beta+3)).
             * Expression grouping is kept exactly as derived; only the control
             * flow and local naming differ from the generated form. */
            double code(double alpha, double beta) {
            	/* alpha + beta + 2, accumulated right-to-left as in the derivation. */
            	double shifted = alpha + (beta + 2.0);
            	if (beta <= 2.5e+111) {
            		/* Factored numerator (beta+1)(alpha+1) over (beta+alpha+3) * shifted^2. */
            		return (((beta + 1.0) * (alpha + 1.0)) / ((beta + alpha) + 3.0)) / (shifted * shifted);
            	}
            	/* Huge-beta regime: series limit (alpha+1)/beta^2, via two divisions. */
            	return ((alpha + 1.0) / beta) / beta;
            }
            
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            ! Alternative 6: two-regime rewrite of
            ! ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)**2 * (alpha+beta+3)).
            real(8) function code(alpha, beta)
                real(8), intent (in) :: alpha
                real(8), intent (in) :: beta
                real(8) :: t_0
                real(8) :: tmp
                ! t_0 = alpha + beta + 2, accumulated right-to-left.
                t_0 = alpha + (beta + 2.0d0)
                if (beta <= 2.5d+111) then
                    ! Factored numerator (beta+1)*(alpha+1) over (beta+alpha+3) * t_0**2.
                    tmp = (((beta + 1.0d0) * (alpha + 1.0d0)) / ((beta + alpha) + 3.0d0)) / (t_0 * t_0)
                else
                    ! Huge-beta regime: reduces to (alpha+1)/beta**2.
                    tmp = ((alpha + 1.0d0) / beta) / beta
                end if
                code = tmp
            end function
            
            assert alpha < beta;
            /**
             * Alternative 6: two-regime rewrite of
             * ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)^2 * (alpha+beta+3)).
             * Expression grouping matches the derivation exactly; only control flow
             * and local naming differ from the generated form.
             */
            public static double code(double alpha, double beta) {
            	// alpha + beta + 2, accumulated right-to-left as in the derivation.
            	double shifted = alpha + (beta + 2.0);
            	// Huge-beta regime: series limit (alpha+1)/beta^2, via two divisions.
            	if (beta > 2.5e+111) {
            		return ((alpha + 1.0) / beta) / beta;
            	}
            	// Factored numerator (beta+1)(alpha+1) over (beta+alpha+3) * shifted^2.
            	return (((beta + 1.0) * (alpha + 1.0)) / ((beta + alpha) + 3.0)) / (shifted * shifted);
            }
            
            [alpha, beta] = sort([alpha, beta])
            def code(alpha, beta):
            	"""Alternative 6: two-regime rewrite of
            	((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)**2 * (alpha+beta+3)).

            	Expression grouping matches the derivation exactly; only the
            	control flow and local naming differ from the generated form.
            	"""
            	# alpha + beta + 2, accumulated right-to-left as in the derivation.
            	shifted = alpha + (beta + 2.0)
            	if beta <= 2.5e+111:
            		# Factored numerator (beta+1)(alpha+1) over (beta+alpha+3) * shifted^2.
            		numer = ((beta + 1.0) * (alpha + 1.0)) / ((beta + alpha) + 3.0)
            		return numer / (shifted * shifted)
            	# Huge-beta regime: series limit (alpha+1)/beta^2, via two divisions.
            	return ((alpha + 1.0) / beta) / beta
            
            alpha, beta = sort([alpha, beta])
            # Alternative 6: two-regime rewrite; each Float64() marks an explicit
            # binary64 rounding, so the statement/grouping order must not change.
            function code(alpha, beta)
            	# t_0 = alpha + beta + 2, accumulated right-to-left.
            	t_0 = Float64(alpha + Float64(beta + 2.0))
            	tmp = 0.0
            	if (beta <= 2.5e+111)
            		# Factored numerator (beta+1)*(alpha+1) over (beta+alpha+3) * t_0^2.
            		tmp = Float64(Float64(Float64(Float64(beta + 1.0) * Float64(alpha + 1.0)) / Float64(Float64(beta + alpha) + 3.0)) / Float64(t_0 * t_0));
            	else
            		# Huge-beta regime: reduces to (alpha+1)/beta^2.
            		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / beta);
            	end
            	return tmp
            end
            
            alpha, beta = num2cell(sort([alpha, beta])){:}
            % Alternative 6: two-regime rewrite of
            % ((alpha+beta) + beta*alpha + 1) / ((alpha+beta+2)^2 * (alpha+beta+3)).
            function tmp_2 = code(alpha, beta)
            	% t_0 = alpha + beta + 2, accumulated right-to-left.
            	t_0 = alpha + (beta + 2.0);
            	tmp = 0.0;
            	if (beta <= 2.5e+111)
            		% Factored numerator (beta+1)*(alpha+1) over (beta+alpha+3) * t_0^2.
            		tmp = (((beta + 1.0) * (alpha + 1.0)) / ((beta + alpha) + 3.0)) / (t_0 * t_0);
            	else
            		% Huge-beta regime: reduces to (alpha+1)/beta^2.
            		tmp = ((alpha + 1.0) / beta) / beta;
            	end
            	tmp_2 = tmp;
            end
            
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            (* Alternative 6: factored form for beta <= 2.5e111, (alpha+1)/beta^2 otherwise; each N[...] marks an explicit machine-precision rounding. *)
            code[alpha_, beta_] := Block[{t$95$0 = N[(alpha + N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 2.5e+111], N[(N[(N[(N[(beta + 1.0), $MachinePrecision] * N[(alpha + 1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 3.0), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]]
            
            \begin{array}{l}
            [alpha, beta] = \mathsf{sort}([alpha, beta])\\
            \\
            \begin{array}{l}
            t_0 := \alpha + \left(\beta + 2\right)\\
            \mathbf{if}\;\beta \leq 2.5 \cdot 10^{+111}:\\
            \;\;\;\;\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\beta + \alpha\right) + 3}}{t\_0 \cdot t\_0}\\
            
            \mathbf{else}:\\
            \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\
            
            
            \end{array}
            \end{array}
            
            Derivation
            1. Split input into 2 regimes
            2. if beta < 2.4999999999999998e111

              1. Initial program 99.3%

                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Add Preprocessing
              3. Step-by-step derivation
                1. lift-/.f64N/A

                  \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                2. lift-/.f64N/A

                  \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                3. lift-/.f64N/A

                  \[\leadsto \frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. associate-/l/N/A

                  \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. associate-/l/N/A

                  \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)}} \]
                6. lower-/.f64N/A

                  \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)}} \]
                7. lift-+.f64N/A

                  \[\leadsto \frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                8. +-commutativeN/A

                  \[\leadsto \frac{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                9. lift-*.f64N/A

                  \[\leadsto \frac{\left(\color{blue}{\beta \cdot \alpha} + \left(\alpha + \beta\right)\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                10. *-commutativeN/A

                  \[\leadsto \frac{\left(\color{blue}{\alpha \cdot \beta} + \left(\alpha + \beta\right)\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                11. lower-fma.f64N/A

                  \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
              4. Applied rewrites90.4%

                \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)}} \]
              5. Applied rewrites96.9%

                \[\leadsto \color{blue}{\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\alpha + \beta\right) + 3}}{\left(\alpha + \left(\beta + 2\right)\right) \cdot \left(\alpha + \left(\beta + 2\right)\right)}} \]

              if 2.4999999999999998e111 < beta

              1. Initial program 67.2%

                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Add Preprocessing
              3. Taylor expanded in beta around inf

                \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
              4. Step-by-step derivation
                1. lower-/.f64N/A

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                2. lower-+.f64N/A

                  \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                3. unpow2N/A

                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                4. lower-*.f64 (78.2%)

                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
              5. Applied rewrites78.2%

                \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
              6. Step-by-step derivation
                1. Applied rewrites80.8%

                  \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
              7. Recombined 2 regimes into one program.
              8. Final simplification94.1%

                \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 2.5 \cdot 10^{+111}:\\ \;\;\;\;\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\beta + \alpha\right) + 3}}{\left(\alpha + \left(\beta + 2\right)\right) \cdot \left(\alpha + \left(\beta + 2\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \]
              9. Add Preprocessing

              Alternative 7: 99.5% accurate, 1.4× speedup?

              \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\beta + \alpha\right)\\ t_1 := \alpha + \left(\beta + 2\right)\\ \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{t\_1 \cdot \left(t\_1 \cdot \left(\left(\beta + \alpha\right) + 3\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{t\_0 + 1}\\ \end{array} \end{array} \]
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              ;; Herbie alternative 7: regime split on beta at 7.8e100.
              ;; Below the split: fma-based numerator over t_1^2 * (beta+alpha+3);
              ;; above it: successive divisions by t_0 = beta+alpha+2 and t_0+1.
              (FPCore (alpha beta)
               :precision binary64
               (let* ((t_0 (+ 2.0 (+ beta alpha))) (t_1 (+ alpha (+ beta 2.0))))
                 (if (<= beta 7.8e+100)
                   (/
                    (+ (fma alpha beta (+ beta alpha)) 1.0)
                    (* t_1 (* t_1 (+ (+ beta alpha) 3.0))))
                   (/ (/ (+ alpha 1.0) t_0) (+ t_0 1.0)))))
              assert(alpha < beta);
              double code(double alpha, double beta) {
              	double t_0 = 2.0 + (beta + alpha);
              	double t_1 = alpha + (beta + 2.0);
              	double tmp;
              	if (beta <= 7.8e+100) {
              		tmp = (fma(alpha, beta, (beta + alpha)) + 1.0) / (t_1 * (t_1 * ((beta + alpha) + 3.0)));
              	} else {
              		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0);
              	}
              	return tmp;
              }
              
              alpha, beta = sort([alpha, beta])
              # Alternative 7: two-regime rewrite; each Float64() marks an explicit
              # binary64 rounding, so the statement/grouping order must not change.
              function code(alpha, beta)
              	# Two associations of alpha + beta + 2, one per regime.
              	t_0 = Float64(2.0 + Float64(beta + alpha))
              	t_1 = Float64(alpha + Float64(beta + 2.0))
              	tmp = 0.0
              	if (beta <= 7.8e+100)
              		# fma computes alpha*beta + (beta+alpha) with a single rounding.
              		tmp = Float64(Float64(fma(alpha, beta, Float64(beta + alpha)) + 1.0) / Float64(t_1 * Float64(t_1 * Float64(Float64(beta + alpha) + 3.0))));
              	else
              		# Huge-beta regime: (alpha+1) / (t_0 * (t_0 + 1)) via two divisions.
              		tmp = Float64(Float64(Float64(alpha + 1.0) / t_0) / Float64(t_0 + 1.0));
              	end
              	return tmp
              end
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              (* Alternative 7: fma-based form for beta <= 7.8e100, successive divisions otherwise; each N[...] marks an explicit machine-precision rounding. *)
              code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(alpha + N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 7.8e+100], N[(N[(N[(alpha * beta + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(t$95$1 * N[(t$95$1 * N[(N[(beta + alpha), $MachinePrecision] + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]]]
              
              \begin{array}{l}
              [alpha, beta] = \mathsf{sort}([alpha, beta])\\
              \\
              \begin{array}{l}
              t_0 := 2 + \left(\beta + \alpha\right)\\
              t_1 := \alpha + \left(\beta + 2\right)\\
              \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\
              \;\;\;\;\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{t\_1 \cdot \left(t\_1 \cdot \left(\left(\beta + \alpha\right) + 3\right)\right)}\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{t\_0 + 1}\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if beta < 7.8e100

                1. Initial program 99.3%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Step-by-step derivation
                  1. lift-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                  2. lift-/.f64N/A

                    \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  3. lift-/.f64N/A

                    \[\leadsto \frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. associate-/l/N/A

                    \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. associate-/l/N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)}} \]
                  6. lower-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)}} \]
                  7. lift-+.f64N/A

                    \[\leadsto \frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  8. +-commutativeN/A

                    \[\leadsto \frac{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  9. lift-*.f64N/A

                    \[\leadsto \frac{\left(\color{blue}{\beta \cdot \alpha} + \left(\alpha + \beta\right)\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  10. *-commutativeN/A

                    \[\leadsto \frac{\left(\color{blue}{\alpha \cdot \beta} + \left(\alpha + \beta\right)\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  11. lower-fma.f64N/A

                    \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                4. Applied rewrites91.2%

                  \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)}} \]
                5. Step-by-step derivation
                  1. lift-*.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)}} \]
                  2. lift-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\alpha + \left(\beta + 3\right)\right)} \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  3. lift-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \color{blue}{\left(\beta + 3\right)}\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  4. associate-+r+N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\left(\alpha + \beta\right) + 3\right)} \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  5. lift-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\color{blue}{\left(\alpha + \beta\right)} + 3\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  6. metadata-evalN/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\left(\alpha + \beta\right) + \color{blue}{\left(2 + 1\right)}\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  7. associate-+l+N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\left(\left(\alpha + \beta\right) + 2\right) + 1\right)} \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  8. metadata-evalN/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  9. lift-*.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  10. lift-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  11. lift-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)} \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  12. lift-*.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \color{blue}{\left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)}} \]
                  13. associate-*r*N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
                  14. *-commutativeN/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)}} \]
                  15. lower-*.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)}} \]
                  16. lift-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\left(\alpha + \beta\right) + 2\right)} \cdot \left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  17. lift-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2\right) \cdot \left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  18. associate-+l+N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\alpha + \left(\beta + 2\right)\right)} \cdot \left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  19. lower-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\alpha + \left(\beta + 2\right)\right)} \cdot \left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  20. lower-+.f64N/A

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \color{blue}{\left(\beta + 2\right)}\right) \cdot \left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  21. lower-*.f64 (91.2%)

                    \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \left(\beta + 2\right)\right) \cdot \color{blue}{\left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)}} \]
                6. Applied rewrites91.2%

                  \[\leadsto \frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\color{blue}{\left(\alpha + \left(\beta + 2\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 3\right) \cdot \left(\alpha + \left(\beta + 2\right)\right)\right)}} \]

                if 7.8e100 < beta

                1. Initial program 69.2%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Taylor expanded in beta around inf

                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. Step-by-step derivation
                  1. lower-+.f64 (80.0%)

                    \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. Applied rewrites80.0%

                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. Recombined 2 regimes into one program.
              4. Final simplification89.1%

                \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{\left(\alpha + \left(\beta + 2\right)\right) \cdot \left(\left(\alpha + \left(\beta + 2\right)\right) \cdot \left(\left(\beta + \alpha\right) + 3\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{2 + \left(\beta + \alpha\right)}}{\left(2 + \left(\beta + \alpha\right)\right) + 1}\\ \end{array} \]
              5. Add Preprocessing

              Alternative 8: 99.5% accurate, 1.4× speedup?

              \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{t\_0 \cdot \left(\left(\alpha + \left(\beta + 3\right)\right) \cdot t\_0\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{t\_0 + 1}\\ \end{array} \end{array} \]
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              ;; Herbie alternative 8: like alternative 7 but reuses the single
              ;; sum t_0 = 2 + (beta + alpha) in both regimes.
              (FPCore (alpha beta)
               :precision binary64
               (let* ((t_0 (+ 2.0 (+ beta alpha))))
                 (if (<= beta 7.8e+100)
                   (/
                    (+ (fma alpha beta (+ beta alpha)) 1.0)
                    (* t_0 (* (+ alpha (+ beta 3.0)) t_0)))
                   (/ (/ (+ alpha 1.0) t_0) (+ t_0 1.0)))))
              assert(alpha < beta);
              double code(double alpha, double beta) {
              	double t_0 = 2.0 + (beta + alpha);
              	double tmp;
              	if (beta <= 7.8e+100) {
              		tmp = (fma(alpha, beta, (beta + alpha)) + 1.0) / (t_0 * ((alpha + (beta + 3.0)) * t_0));
              	} else {
              		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0);
              	}
              	return tmp;
              }
              
              alpha, beta = sort([alpha, beta])
              # Alternative 8: two-regime rewrite sharing one sum t_0; each Float64()
              # marks an explicit binary64 rounding, so grouping must not change.
              function code(alpha, beta)
              	# t_0 = 2 + (beta + alpha), shared by both regimes.
              	t_0 = Float64(2.0 + Float64(beta + alpha))
              	tmp = 0.0
              	if (beta <= 7.8e+100)
              		# fma computes alpha*beta + (beta+alpha) with a single rounding.
              		tmp = Float64(Float64(fma(alpha, beta, Float64(beta + alpha)) + 1.0) / Float64(t_0 * Float64(Float64(alpha + Float64(beta + 3.0)) * t_0)));
              	else
              		# Huge-beta regime: (alpha+1) / (t_0 * (t_0 + 1)) via two divisions.
              		tmp = Float64(Float64(Float64(alpha + 1.0) / t_0) / Float64(t_0 + 1.0));
              	end
              	return tmp
              end
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              (* Alternative 8: fma-based form for beta <= 7.8e100 with a single shared sum t$95$0; each N[...] marks an explicit machine-precision rounding. *)
              code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 7.8e+100], N[(N[(N[(alpha * beta + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(t$95$0 * N[(N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]]
              
              \begin{array}{l}
              [alpha, beta] = \mathsf{sort}([alpha, beta])\\
              \\
              \begin{array}{l}
              t_0 := 2 + \left(\beta + \alpha\right)\\
              \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\
              \;\;\;\;\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{t\_0 \cdot \left(\left(\alpha + \left(\beta + 3\right)\right) \cdot t\_0\right)}\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{t\_0 + 1}\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if beta < 7.8e100

                1. Initial program 99.3%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Step-by-step derivation
                  1. lift-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                  2. lift-/.f64N/A

                    \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  3. associate-/l/N/A

                    \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                  4. lift-/.f64N/A

                    \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} \]
                  5. associate-/l/N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                  6. lower-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                 4. Applied rewrites (91.2% accuracy)

                  \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\alpha + \left(\beta + 3\right)\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]

                if 7.8e100 < beta

                1. Initial program 69.2%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Taylor expanded in beta around inf

                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. Step-by-step derivation
                   1. lower-+.f64 (80.0% accuracy)

                    \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. Applied rewrites80.0%

                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. Recombined 2 regimes into one program.
               4. Final simplification (89.1% accuracy)

                \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(2 + \left(\beta + \alpha\right)\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{2 + \left(\beta + \alpha\right)}}{\left(2 + \left(\beta + \alpha\right)\right) + 1}\\ \end{array} \]
              5. Add Preprocessing

              Alternative 9: 99.5% accurate, 1.5× speedup?

              \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\beta + \alpha\right)\\ t_1 := -2 - \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{t\_1 \cdot \left(\left(\alpha + \left(\beta + 3\right)\right) \cdot t\_1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{t\_0 + 1}\\ \end{array} \end{array} \]
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
               ;; Alternative 9: factored numerator (beta+1)*(alpha+1) over the negated
               ;; denominator t_1; regime split at beta = 7.8e100. Requires alpha <= beta.
               (FPCore (alpha beta)
                :precision binary64
                (let* ((t_0 (+ 2.0 (+ beta alpha))) (t_1 (- -2.0 (+ beta alpha))))
                  (if (<= beta 7.8e+100)
                    (/ (* (+ beta 1.0) (+ alpha 1.0)) (* t_1 (* (+ alpha (+ beta 3.0)) t_1)))
                    (/ (/ (+ alpha 1.0) t_0) (+ t_0 1.0)))))
              assert(alpha < beta);
              double code(double alpha, double beta) {
              	double t_0 = 2.0 + (beta + alpha);
              	double t_1 = -2.0 - (beta + alpha);
              	double tmp;
              	if (beta <= 7.8e+100) {
              		tmp = ((beta + 1.0) * (alpha + 1.0)) / (t_1 * ((alpha + (beta + 3.0)) * t_1));
              	} else {
              		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0);
              	}
              	return tmp;
              }
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
               ! Alternative 9 jcobi kernel: two regimes split on beta (see derivation).
               ! Caller must pass inputs sorted so that alpha <= beta.
               real(8) function code(alpha, beta)
                   real(8), intent (in) :: alpha
                   real(8), intent (in) :: beta
                   real(8) :: t_0
                   real(8) :: t_1
                   real(8) :: tmp
                   t_0 = 2.0d0 + (beta + alpha)
                   t_1 = (-2.0d0) - (beta + alpha)
                   if (beta <= 7.8d+100) then
                       ! Moderate beta: factored numerator over the negated denominator t_1.
                       tmp = ((beta + 1.0d0) * (alpha + 1.0d0)) / (t_1 * ((alpha + (beta + 3.0d0)) * t_1))
                   else
                       ! Huge beta: Taylor-limit form (numerator reduces to alpha + 1).
                       tmp = ((alpha + 1.0d0) / t_0) / (t_0 + 1.0d0)
                   end if
                   code = tmp
               end function
              
              assert alpha < beta;
               /**
                * Alternative 9 jcobi kernel: two regimes split on beta (see derivation).
                * Caller must pass inputs sorted so that alpha <= beta.
                */
               public static double code(double alpha, double beta) {
               	double t_0 = 2.0 + (beta + alpha);
               	double t_1 = -2.0 - (beta + alpha);
               	double tmp;
               	if (beta <= 7.8e+100) {
               		// Moderate beta: factored numerator (beta+1)*(alpha+1) over t_1-based denominator.
               		tmp = ((beta + 1.0) * (alpha + 1.0)) / (t_1 * ((alpha + (beta + 3.0)) * t_1));
               	} else {
               		// Huge beta: Taylor-limit form (numerator reduces to alpha + 1).
               		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0);
               	}
               	return tmp;
               }
              
              [alpha, beta] = sort([alpha, beta])
              def code(alpha, beta):
              	t_0 = 2.0 + (beta + alpha)
              	t_1 = -2.0 - (beta + alpha)
              	tmp = 0
              	if beta <= 7.8e+100:
              		tmp = ((beta + 1.0) * (alpha + 1.0)) / (t_1 * ((alpha + (beta + 3.0)) * t_1))
              	else:
              		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0)
              	return tmp
              
              alpha, beta = sort([alpha, beta])
               # Alternative 9 jcobi kernel: two regimes split on beta (see derivation).
               # NOTE(review): caller must sort inputs so alpha <= beta before calling.
               function code(alpha, beta)
               	t_0 = Float64(2.0 + Float64(beta + alpha))
               	t_1 = Float64(-2.0 - Float64(beta + alpha))
               	tmp = 0.0
               	if (beta <= 7.8e+100)
               		# Moderate beta: factored numerator over the negated denominator t_1.
               		tmp = Float64(Float64(Float64(beta + 1.0) * Float64(alpha + 1.0)) / Float64(t_1 * Float64(Float64(alpha + Float64(beta + 3.0)) * t_1)));
               	else
               		# Huge beta: Taylor-limit form (numerator reduces to alpha + 1).
               		tmp = Float64(Float64(Float64(alpha + 1.0) / t_0) / Float64(t_0 + 1.0));
               	end
               	return tmp
               end
              
              alpha, beta = num2cell(sort([alpha, beta])){:}
               % Alternative 9 jcobi kernel: two regimes split on beta (see derivation).
               % NOTE(review): caller must sort inputs so alpha <= beta before calling.
               function tmp_2 = code(alpha, beta)
               	t_0 = 2.0 + (beta + alpha);
               	t_1 = -2.0 - (beta + alpha);
               	tmp = 0.0;
               	if (beta <= 7.8e+100)
               		% Moderate beta: factored numerator over the negated denominator t_1.
               		tmp = ((beta + 1.0) * (alpha + 1.0)) / (t_1 * ((alpha + (beta + 3.0)) * t_1));
               	else
               		% Huge beta: Taylor-limit form (numerator reduces to alpha + 1).
               		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0);
               	end
               	tmp_2 = tmp;
               end
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(-2.0 - N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 7.8e+100], N[(N[(N[(beta + 1.0), $MachinePrecision] * N[(alpha + 1.0), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 * N[(N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]]]
              
              \begin{array}{l}
              [alpha, beta] = \mathsf{sort}([alpha, beta])\\
              \\
              \begin{array}{l}
              t_0 := 2 + \left(\beta + \alpha\right)\\
              t_1 := -2 - \left(\beta + \alpha\right)\\
              \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\
              \;\;\;\;\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{t\_1 \cdot \left(\left(\alpha + \left(\beta + 3\right)\right) \cdot t\_1\right)}\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{t\_0 + 1}\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if beta < 7.8e100

                1. Initial program 99.3%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Taylor expanded in alpha around 0

                  \[\leadsto \frac{\frac{\frac{\color{blue}{1 + \left(\beta + \alpha \cdot \left(1 + \beta\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. Step-by-step derivation
                  1. distribute-rgt-inN/A

                    \[\leadsto \frac{\frac{\frac{1 + \left(\beta + \color{blue}{\left(1 \cdot \alpha + \beta \cdot \alpha\right)}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. *-lft-identityN/A

                    \[\leadsto \frac{\frac{\frac{1 + \left(\beta + \left(\color{blue}{\alpha} + \beta \cdot \alpha\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  3. associate-+r+N/A

                    \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\left(\left(\beta + \alpha\right) + \beta \cdot \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. +-commutativeN/A

                    \[\leadsto \frac{\frac{\frac{1 + \left(\color{blue}{\left(\alpha + \beta\right)} + \beta \cdot \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. associate-+r+N/A

                    \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\left(\alpha + \left(\beta + \beta \cdot \alpha\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  6. *-rgt-identityN/A

                    \[\leadsto \frac{\frac{\frac{1 + \left(\alpha + \left(\color{blue}{\beta \cdot 1} + \beta \cdot \alpha\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  7. distribute-lft-inN/A

                    \[\leadsto \frac{\frac{\frac{1 + \left(\alpha + \color{blue}{\beta \cdot \left(1 + \alpha\right)}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  8. associate-+r+N/A

                    \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \alpha\right) + \beta \cdot \left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  9. distribute-rgt1-inN/A

                    \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\beta + 1\right) \cdot \left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  10. +-commutativeN/A

                    \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \beta\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  11. lower-*.f64N/A

                    \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  12. lower-+.f64N/A

                    \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \beta\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                   13. lower-+.f64 (99.3% accuracy)

                    \[\leadsto \frac{\frac{\frac{\left(1 + \beta\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. Applied rewrites99.3%

                  \[\leadsto \frac{\frac{\frac{\color{blue}{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                6. Step-by-step derivation
                  1. lift-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\frac{\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                  2. lift-/.f64N/A

                    \[\leadsto \frac{\color{blue}{\frac{\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  3. lift-*.f64N/A

                    \[\leadsto \frac{\frac{\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. metadata-evalN/A

                    \[\leadsto \frac{\frac{\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + \color{blue}{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. frac-2negN/A

                    \[\leadsto \frac{\color{blue}{\frac{\mathsf{neg}\left(\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}\right)}{\mathsf{neg}\left(\left(\left(\alpha + \beta\right) + 2\right)\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  6. associate-/l/N/A

                    \[\leadsto \color{blue}{\frac{\mathsf{neg}\left(\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\mathsf{neg}\left(\left(\left(\alpha + \beta\right) + 2\right)\right)\right)}} \]
                  7. lower-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\mathsf{neg}\left(\frac{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}\right)}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\mathsf{neg}\left(\left(\left(\alpha + \beta\right) + 2\right)\right)\right)}} \]
                7. Applied rewrites97.4%

                  \[\leadsto \color{blue}{\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{-2 + \left(-\left(\alpha + \beta\right)\right)}}{\left(\left(\alpha + \beta\right) + 3\right) \cdot \left(-2 + \left(-\left(\alpha + \beta\right)\right)\right)}} \]
                8. Step-by-step derivation
                  1. lift-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)}}{\left(\left(\alpha + \beta\right) + 3\right) \cdot \left(-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)\right)}} \]
                  2. lift-/.f64N/A

                    \[\leadsto \frac{\color{blue}{\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)}}}{\left(\left(\alpha + \beta\right) + 3\right) \cdot \left(-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)\right)} \]
                  3. frac-2negN/A

                    \[\leadsto \frac{\color{blue}{\frac{\mathsf{neg}\left(\left(\beta + 1\right) \cdot \left(\alpha + 1\right)\right)}{\mathsf{neg}\left(\left(-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)\right)\right)}}}{\left(\left(\alpha + \beta\right) + 3\right) \cdot \left(-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)\right)} \]
                  4. associate-/l/N/A

                    \[\leadsto \color{blue}{\frac{\mathsf{neg}\left(\left(\beta + 1\right) \cdot \left(\alpha + 1\right)\right)}{\left(\left(\left(\alpha + \beta\right) + 3\right) \cdot \left(-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)\right)\right) \cdot \left(\mathsf{neg}\left(\left(-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)\right)\right)\right)}} \]
                  5. lower-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\mathsf{neg}\left(\left(\beta + 1\right) \cdot \left(\alpha + 1\right)\right)}{\left(\left(\left(\alpha + \beta\right) + 3\right) \cdot \left(-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)\right)\right) \cdot \left(\mathsf{neg}\left(\left(-2 + \left(\mathsf{neg}\left(\left(\alpha + \beta\right)\right)\right)\right)\right)\right)}} \]
                9. Applied rewrites91.2%

                  \[\leadsto \color{blue}{\frac{-\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}{\left(\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(-2 - \left(\alpha + \beta\right)\right)\right) \cdot \left(-\left(-2 - \left(\alpha + \beta\right)\right)\right)}} \]

                if 7.8e100 < beta

                1. Initial program 69.2%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Taylor expanded in beta around inf

                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. Step-by-step derivation
                  1. lower-+.f6480.0

                    \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. Applied rewrites80.0%

                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. Recombined 2 regimes into one program.
               4. Final simplification (89.1% accuracy)

                \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(-2 - \left(\beta + \alpha\right)\right) \cdot \left(\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(-2 - \left(\beta + \alpha\right)\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{2 + \left(\beta + \alpha\right)}}{\left(2 + \left(\beta + \alpha\right)\right) + 1}\\ \end{array} \]
              5. Add Preprocessing

              Alternative 10: 99.5% accurate, 1.5× speedup?

              \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(t\_0 \cdot t\_0\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{t\_0 + 1}\\ \end{array} \end{array} \]
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
               ;; Alternative 10: factored numerator (beta+1)*(alpha+1) over the squared
               ;; denominator t_0 * t_0; regime split at beta = 7.8e100. Requires alpha <= beta.
               (FPCore (alpha beta)
                :precision binary64
                (let* ((t_0 (+ 2.0 (+ beta alpha))))
                  (if (<= beta 7.8e+100)
                    (/ (* (+ beta 1.0) (+ alpha 1.0)) (* (+ alpha (+ beta 3.0)) (* t_0 t_0)))
                    (/ (/ (+ alpha 1.0) t_0) (+ t_0 1.0)))))
              assert(alpha < beta);
              double code(double alpha, double beta) {
              	double t_0 = 2.0 + (beta + alpha);
              	double tmp;
              	if (beta <= 7.8e+100) {
              		tmp = ((beta + 1.0) * (alpha + 1.0)) / ((alpha + (beta + 3.0)) * (t_0 * t_0));
              	} else {
              		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0);
              	}
              	return tmp;
              }
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
               ! Alternative 10 jcobi kernel: factored numerator over the squared
               ! denominator t_0 * t_0. Caller must pass inputs sorted so alpha <= beta.
               real(8) function code(alpha, beta)
                   real(8), intent (in) :: alpha
                   real(8), intent (in) :: beta
                   real(8) :: t_0
                   real(8) :: tmp
                   t_0 = 2.0d0 + (beta + alpha)
                   if (beta <= 7.8d+100) then
                       ! Moderate beta: (beta+1)(alpha+1) / ((alpha+beta+3) * t_0**2).
                       tmp = ((beta + 1.0d0) * (alpha + 1.0d0)) / ((alpha + (beta + 3.0d0)) * (t_0 * t_0))
                   else
                       ! Huge beta: Taylor-limit form (numerator reduces to alpha + 1).
                       tmp = ((alpha + 1.0d0) / t_0) / (t_0 + 1.0d0)
                   end if
                   code = tmp
               end function
              
              assert alpha < beta;
              public static double code(double alpha, double beta) {
              	double t_0 = 2.0 + (beta + alpha);
              	double tmp;
              	if (beta <= 7.8e+100) {
              		tmp = ((beta + 1.0) * (alpha + 1.0)) / ((alpha + (beta + 3.0)) * (t_0 * t_0));
              	} else {
              		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0);
              	}
              	return tmp;
              }
              
              [alpha, beta] = sort([alpha, beta])
              def code(alpha, beta):
              	t_0 = 2.0 + (beta + alpha)
              	tmp = 0
              	if beta <= 7.8e+100:
              		tmp = ((beta + 1.0) * (alpha + 1.0)) / ((alpha + (beta + 3.0)) * (t_0 * t_0))
              	else:
              		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0)
              	return tmp
              
              alpha, beta = sort([alpha, beta])
               # Alternative 10 jcobi kernel: factored numerator over the squared denominator
               # t_0 * t_0. NOTE(review): caller must sort inputs so alpha <= beta.
               function code(alpha, beta)
               	t_0 = Float64(2.0 + Float64(beta + alpha))
               	tmp = 0.0
               	if (beta <= 7.8e+100)
               		# Moderate beta: (beta+1)(alpha+1) / ((alpha+beta+3) * t_0^2).
               		tmp = Float64(Float64(Float64(beta + 1.0) * Float64(alpha + 1.0)) / Float64(Float64(alpha + Float64(beta + 3.0)) * Float64(t_0 * t_0)));
               	else
               		# Huge beta: Taylor-limit form (numerator reduces to alpha + 1).
               		tmp = Float64(Float64(Float64(alpha + 1.0) / t_0) / Float64(t_0 + 1.0));
               	end
               	return tmp
               end
              
              alpha, beta = num2cell(sort([alpha, beta])){:}
               % Alternative 10 jcobi kernel: factored numerator over the squared denominator
               % t_0 * t_0. NOTE(review): caller must sort inputs so alpha <= beta.
               function tmp_2 = code(alpha, beta)
               	t_0 = 2.0 + (beta + alpha);
               	tmp = 0.0;
               	if (beta <= 7.8e+100)
               		% Moderate beta: (beta+1)(alpha+1) / ((alpha+beta+3) * t_0^2).
               		tmp = ((beta + 1.0) * (alpha + 1.0)) / ((alpha + (beta + 3.0)) * (t_0 * t_0));
               	else
               		% Huge beta: Taylor-limit form (numerator reduces to alpha + 1).
               		tmp = ((alpha + 1.0) / t_0) / (t_0 + 1.0);
               	end
               	tmp_2 = tmp;
               end
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 7.8e+100], N[(N[(N[(beta + 1.0), $MachinePrecision] * N[(alpha + 1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision] * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]]
              
              \begin{array}{l}
              [alpha, beta] = \mathsf{sort}([alpha, beta])\\
              \\
              \begin{array}{l}
              t_0 := 2 + \left(\beta + \alpha\right)\\
              \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\
              \;\;\;\;\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(t\_0 \cdot t\_0\right)}\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{t\_0 + 1}\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if beta < 7.8e100

                1. Initial program 99.3%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Step-by-step derivation
                  1. lift-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                  2. lift-/.f64N/A

                    \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  3. lift-/.f64N/A

                    \[\leadsto \frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. associate-/l/N/A

                    \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. associate-/l/N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)}} \]
                  6. lower-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)}} \]
                  7. lift-+.f64N/A

                    \[\leadsto \frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  8. +-commutativeN/A

                    \[\leadsto \frac{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  9. lift-*.f64N/A

                    \[\leadsto \frac{\left(\color{blue}{\beta \cdot \alpha} + \left(\alpha + \beta\right)\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  10. *-commutativeN/A

                    \[\leadsto \frac{\left(\color{blue}{\alpha \cdot \beta} + \left(\alpha + \beta\right)\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  11. lower-fma.f64N/A

                    \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                4. Applied rewrites91.2%

                  \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)}} \]
                5. Step-by-step derivation
                  1. lift-+.f64N/A

                    \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  2. lift-fma.f64N/A

                    \[\leadsto \frac{\color{blue}{\left(\alpha \cdot \beta + \left(\alpha + \beta\right)\right)} + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  3. lift-+.f64N/A

                    \[\leadsto \frac{\left(\alpha \cdot \beta + \color{blue}{\left(\alpha + \beta\right)}\right) + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  4. associate-+r+N/A

                    \[\leadsto \frac{\color{blue}{\left(\left(\alpha \cdot \beta + \alpha\right) + \beta\right)} + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  5. associate-+l+N/A

                    \[\leadsto \frac{\color{blue}{\left(\alpha \cdot \beta + \alpha\right) + \left(\beta + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  6. *-commutativeN/A

                    \[\leadsto \frac{\left(\color{blue}{\beta \cdot \alpha} + \alpha\right) + \left(\beta + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  7. distribute-lft1-inN/A

                    \[\leadsto \frac{\color{blue}{\left(\beta + 1\right) \cdot \alpha} + \left(\beta + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  8. +-commutativeN/A

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right)} \cdot \alpha + \left(\beta + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  9. lift-+.f64N/A

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right)} \cdot \alpha + \left(\beta + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  10. +-commutativeN/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \alpha + \color{blue}{\left(1 + \beta\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  11. lift-+.f64N/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \alpha + \color{blue}{\left(1 + \beta\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  12. *-rgt-identityN/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \alpha + \color{blue}{\left(1 + \beta\right) \cdot 1}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  13. distribute-lft-inN/A

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right) \cdot \left(\alpha + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  14. +-commutativeN/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  15. lift-+.f64N/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  16. lift-*.f6491.2

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  17. lift-+.f64N/A

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  18. +-commutativeN/A

                    \[\leadsto \frac{\color{blue}{\left(\beta + 1\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  19. lower-+.f6491.2

                    \[\leadsto \frac{\color{blue}{\left(\beta + 1\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  20. lift-+.f64N/A

                    \[\leadsto \frac{\left(\beta + 1\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  21. +-commutativeN/A

                    \[\leadsto \frac{\left(\beta + 1\right) \cdot \color{blue}{\left(\alpha + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  22. lower-+.f6491.2

                    \[\leadsto \frac{\left(\beta + 1\right) \cdot \color{blue}{\left(\alpha + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                6. Applied rewrites91.2%

                  \[\leadsto \frac{\color{blue}{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]

                if 7.8e100 < beta

                1. Initial program 69.2%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Taylor expanded in beta around inf

                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. Step-by-step derivation
                  1. lower-+.f6480.0

                    \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. Applied rewrites80.0%

                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. Recombined 2 regimes into one program.
              4. Final simplification89.1%

                \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(2 + \left(\beta + \alpha\right)\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{2 + \left(\beta + \alpha\right)}}{\left(2 + \left(\beta + \alpha\right)\right) + 1}\\ \end{array} \]
              5. Add Preprocessing

              Alternative 11: 99.5% accurate, 1.5× speedup?

              \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := 2 + \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(t\_0 \cdot t\_0\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\ \end{array} \end{array} \]
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
               ;; Alternative 11: two-regime rewrite of the jcobi/3 ratio.
               ;; For beta <= 7.8e100 uses the factored form (beta+1)(alpha+1) over
               ;; (alpha+beta+3)*(alpha+beta+2)^2; for huge beta uses a reduced form
               ;; in which beta dominates the first denominator factor.
               ;; NOTE(review): callers are expected to pass alpha <= beta (sorted).
               (FPCore (alpha beta)
                :precision binary64
                (let* ((t_0 (+ 2.0 (+ beta alpha))))
                  (if (<= beta 7.8e+100)
                    (/ (* (+ beta 1.0) (+ alpha 1.0)) (* (+ alpha (+ beta 3.0)) (* t_0 t_0)))
                    (/ (/ (+ alpha 1.0) beta) (+ (+ beta alpha) 3.0)))))
              assert(alpha < beta);
              double code(double alpha, double beta) {
              	double t_0 = 2.0 + (beta + alpha);
              	double tmp;
              	if (beta <= 7.8e+100) {
              		tmp = ((beta + 1.0) * (alpha + 1.0)) / ((alpha + (beta + 3.0)) * (t_0 * t_0));
              	} else {
              		tmp = ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0);
              	}
              	return tmp;
              }
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              real(8) function code(alpha, beta)
                  real(8), intent (in) :: alpha
                  real(8), intent (in) :: beta
                  real(8) :: t_0
                  real(8) :: tmp
                  t_0 = 2.0d0 + (beta + alpha)
                  if (beta <= 7.8d+100) then
                      tmp = ((beta + 1.0d0) * (alpha + 1.0d0)) / ((alpha + (beta + 3.0d0)) * (t_0 * t_0))
                  else
                      tmp = ((alpha + 1.0d0) / beta) / ((beta + alpha) + 3.0d0)
                  end if
                  code = tmp
              end function
              
              assert alpha < beta;
              public static double code(double alpha, double beta) {
              	double t_0 = 2.0 + (beta + alpha);
              	double tmp;
              	if (beta <= 7.8e+100) {
              		tmp = ((beta + 1.0) * (alpha + 1.0)) / ((alpha + (beta + 3.0)) * (t_0 * t_0));
              	} else {
              		tmp = ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0);
              	}
              	return tmp;
              }
              
              [alpha, beta] = sort([alpha, beta])
              def code(alpha, beta):
              	t_0 = 2.0 + (beta + alpha)
              	tmp = 0
              	if beta <= 7.8e+100:
              		tmp = ((beta + 1.0) * (alpha + 1.0)) / ((alpha + (beta + 3.0)) * (t_0 * t_0))
              	else:
              		tmp = ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0)
              	return tmp
              
              alpha, beta = sort([alpha, beta])
              function code(alpha, beta)
              	t_0 = Float64(2.0 + Float64(beta + alpha))
              	tmp = 0.0
              	if (beta <= 7.8e+100)
              		tmp = Float64(Float64(Float64(beta + 1.0) * Float64(alpha + 1.0)) / Float64(Float64(alpha + Float64(beta + 3.0)) * Float64(t_0 * t_0)));
              	else
              		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / Float64(Float64(beta + alpha) + 3.0));
              	end
              	return tmp
              end
              
              alpha, beta = num2cell(sort([alpha, beta])){:}
              function tmp_2 = code(alpha, beta)
              	t_0 = 2.0 + (beta + alpha);
              	tmp = 0.0;
              	if (beta <= 7.8e+100)
              		tmp = ((beta + 1.0) * (alpha + 1.0)) / ((alpha + (beta + 3.0)) * (t_0 * t_0));
              	else
              		tmp = ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0);
              	end
              	tmp_2 = tmp;
              end
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
               (* Two-regime evaluation of the jcobi/3 ratio at machine precision;
                  assumes the caller sorted the inputs so alpha <= beta. *)
               code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 7.8e+100], N[(N[(N[(beta + 1.0), $MachinePrecision] * N[(alpha + 1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision] * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 3.0), $MachinePrecision]), $MachinePrecision]]]
              
              \begin{array}{l}
              [alpha, beta] = \mathsf{sort}([alpha, beta])\\
              \\
              \begin{array}{l}
              t_0 := 2 + \left(\beta + \alpha\right)\\
              \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\
              \;\;\;\;\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(t\_0 \cdot t\_0\right)}\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if beta < 7.8e100

                1. Initial program 99.3%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Step-by-step derivation
                  1. lift-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                  2. lift-/.f64N/A

                    \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  3. lift-/.f64N/A

                    \[\leadsto \frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. associate-/l/N/A

                    \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. associate-/l/N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)}} \]
                  6. lower-/.f64N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)}} \]
                  7. lift-+.f64N/A

                    \[\leadsto \frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  8. +-commutativeN/A

                    \[\leadsto \frac{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  9. lift-*.f64N/A

                    \[\leadsto \frac{\left(\color{blue}{\beta \cdot \alpha} + \left(\alpha + \beta\right)\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  10. *-commutativeN/A

                    \[\leadsto \frac{\left(\color{blue}{\alpha \cdot \beta} + \left(\alpha + \beta\right)\right) + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                  11. lower-fma.f64N/A

                    \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right)} + 1}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)\right)} \]
                4. Applied rewrites91.2%

                  \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)}} \]
                5. Step-by-step derivation
                  1. lift-+.f64N/A

                    \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right) + 1}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  2. lift-fma.f64N/A

                    \[\leadsto \frac{\color{blue}{\left(\alpha \cdot \beta + \left(\alpha + \beta\right)\right)} + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  3. lift-+.f64N/A

                    \[\leadsto \frac{\left(\alpha \cdot \beta + \color{blue}{\left(\alpha + \beta\right)}\right) + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  4. associate-+r+N/A

                    \[\leadsto \frac{\color{blue}{\left(\left(\alpha \cdot \beta + \alpha\right) + \beta\right)} + 1}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  5. associate-+l+N/A

                    \[\leadsto \frac{\color{blue}{\left(\alpha \cdot \beta + \alpha\right) + \left(\beta + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  6. *-commutativeN/A

                    \[\leadsto \frac{\left(\color{blue}{\beta \cdot \alpha} + \alpha\right) + \left(\beta + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  7. distribute-lft1-inN/A

                    \[\leadsto \frac{\color{blue}{\left(\beta + 1\right) \cdot \alpha} + \left(\beta + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  8. +-commutativeN/A

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right)} \cdot \alpha + \left(\beta + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  9. lift-+.f64N/A

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right)} \cdot \alpha + \left(\beta + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  10. +-commutativeN/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \alpha + \color{blue}{\left(1 + \beta\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  11. lift-+.f64N/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \alpha + \color{blue}{\left(1 + \beta\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  12. *-rgt-identityN/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \alpha + \color{blue}{\left(1 + \beta\right) \cdot 1}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  13. distribute-lft-inN/A

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right) \cdot \left(\alpha + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  14. +-commutativeN/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  15. lift-+.f64N/A

                    \[\leadsto \frac{\left(1 + \beta\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  16. lift-*.f6491.2

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right) \cdot \left(1 + \alpha\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  17. lift-+.f64N/A

                    \[\leadsto \frac{\color{blue}{\left(1 + \beta\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  18. +-commutativeN/A

                    \[\leadsto \frac{\color{blue}{\left(\beta + 1\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  19. lower-+.f6491.2

                    \[\leadsto \frac{\color{blue}{\left(\beta + 1\right)} \cdot \left(1 + \alpha\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  20. lift-+.f64N/A

                    \[\leadsto \frac{\left(\beta + 1\right) \cdot \color{blue}{\left(1 + \alpha\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  21. +-commutativeN/A

                    \[\leadsto \frac{\left(\beta + 1\right) \cdot \color{blue}{\left(\alpha + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                  22. lower-+.f6491.2

                    \[\leadsto \frac{\left(\beta + 1\right) \cdot \color{blue}{\left(\alpha + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]
                6. Applied rewrites91.2%

                  \[\leadsto \frac{\color{blue}{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)\right)} \]

                if 7.8e100 < beta

                1. Initial program 69.2%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Taylor expanded in beta around inf

                  \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. Step-by-step derivation
                  1. lower-/.f64N/A

                    \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. lower-+.f6479.4

                    \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. Applied rewrites79.4%

                  \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                6. Step-by-step derivation
                  1. lift-+.f64N/A

                    \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                  2. lift-+.f64N/A

                    \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                  3. lift-*.f64N/A

                    \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                  4. metadata-evalN/A

                    \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\left(\left(\alpha + \beta\right) + \color{blue}{2}\right) + 1} \]
                  5. associate-+l+N/A

                    \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\left(\alpha + \beta\right) + \left(2 + 1\right)}} \]
                  6. metadata-evalN/A

                    \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\left(\alpha + \beta\right) + \color{blue}{3}} \]
                  7. lower-+.f64N/A

                    \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\left(\alpha + \beta\right) + 3}} \]
                7. Applied rewrites79.4%

                  \[\leadsto \color{blue}{\frac{\frac{\alpha + 1}{\beta}}{\left(\alpha + \beta\right) + 3}} \]
              3. Recombined 2 regimes into one program.
              4. Final simplification89.0%

                \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 7.8 \cdot 10^{+100}:\\ \;\;\;\;\frac{\left(\beta + 1\right) \cdot \left(\alpha + 1\right)}{\left(\alpha + \left(\beta + 3\right)\right) \cdot \left(\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(2 + \left(\beta + \alpha\right)\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\ \end{array} \]
              5. Add Preprocessing

              Alternative 12: 98.8% accurate, 1.7× speedup?

              \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 1.3 \cdot 10^{+41}:\\ \;\;\;\;\frac{\frac{\beta + 1}{\mathsf{fma}\left(\beta, \beta + 4, 4\right)}}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\ \end{array} \end{array} \]
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
               ;; Alternative 12: two-regime rewrite of the jcobi/3 ratio.
               ;; For beta <= 1.3e41 the numerator uses beta only (alpha dropped by a
               ;; Taylor expansion around alpha = 0) with an fma-compressed denominator
               ;; beta*(beta+4)+4 = (beta+2)^2; for huge beta a reduced form is used.
               ;; NOTE(review): callers are expected to pass alpha <= beta (sorted).
               (FPCore (alpha beta)
                :precision binary64
                (if (<= beta 1.3e+41)
                  (/
                   (/ (+ beta 1.0) (fma beta (+ beta 4.0) 4.0))
                   (+ 2.0 (+ (+ beta alpha) 1.0)))
                  (/ (/ (+ alpha 1.0) beta) (+ (+ beta alpha) 3.0))))
              assert(alpha < beta);
              double code(double alpha, double beta) {
              	double tmp;
              	if (beta <= 1.3e+41) {
              		tmp = ((beta + 1.0) / fma(beta, (beta + 4.0), 4.0)) / (2.0 + ((beta + alpha) + 1.0));
              	} else {
              		tmp = ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0);
              	}
              	return tmp;
              }
              
              alpha, beta = sort([alpha, beta])
              function code(alpha, beta)
              	tmp = 0.0
              	if (beta <= 1.3e+41)
              		tmp = Float64(Float64(Float64(beta + 1.0) / fma(beta, Float64(beta + 4.0), 4.0)) / Float64(2.0 + Float64(Float64(beta + alpha) + 1.0)));
              	else
              		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / Float64(Float64(beta + alpha) + 3.0));
              	end
              	return tmp
              end
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
               (* Two-regime evaluation of the jcobi/3 ratio (Alternative 12) at machine
                  precision; assumes the caller sorted the inputs so alpha <= beta. *)
               code[alpha_, beta_] := If[LessEqual[beta, 1.3e+41], N[(N[(N[(beta + 1.0), $MachinePrecision] / N[(beta * N[(beta + 4.0), $MachinePrecision] + 4.0), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(N[(beta + alpha), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 3.0), $MachinePrecision]), $MachinePrecision]]
              
              \begin{array}{l}
              [alpha, beta] = \mathsf{sort}([alpha, beta])\\
              \\
              \begin{array}{l}
              \mathbf{if}\;\beta \leq 1.3 \cdot 10^{+41}:\\
              \;\;\;\;\frac{\frac{\beta + 1}{\mathsf{fma}\left(\beta, \beta + 4, 4\right)}}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if beta < 1.3e41

                1. Initial program 99.8%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Taylor expanded in alpha around 0

                  \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                4. Step-by-step derivation
                  1. lower-/.f64N/A

                    \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. lower-+.f64N/A

                    \[\leadsto \frac{\frac{\color{blue}{1 + \beta}}{{\left(2 + \beta\right)}^{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  3. unpow2N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. lower-*.f64N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. +-commutativeN/A

                    \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  6. lower-+.f64N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  7. +-commutativeN/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  8. lower-+.f6470.5

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                5. Applied rewrites70.5%

                  \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                6. Step-by-step derivation
                  1. lift-+.f64N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                  2. lift-+.f64N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                  3. lift-+.f64N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right) + 1} \]
                  4. lift-*.f64N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                  5. +-commutativeN/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                  6. lift-+.f64N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right)} \]
                  7. metadata-evalN/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
                  8. associate-+r+N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                  9. lower-+.f64N/A

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                  10. lower-+.f6470.5

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right)} + 2} \]
                7. Applied rewrites70.5%

                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                8. Taylor expanded in beta around 0

                  \[\leadsto \frac{\frac{1 + \beta}{4 + \color{blue}{\beta \cdot \left(4 + \beta\right)}}}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]
                9. Step-by-step derivation
                  1. Applied rewrites70.5%

                    \[\leadsto \frac{\frac{1 + \beta}{\mathsf{fma}\left(\beta, \color{blue}{\beta + 4}, 4\right)}}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]

                  if 1.3e41 < beta

                  1. Initial program 77.4%

                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. Add Preprocessing
                  3. Taylor expanded in beta around inf

                    \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. Step-by-step derivation
                    1. lower-/.f64N/A

                      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. lower-+.f6473.6

                      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. Applied rewrites73.6%

                    \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  6. Step-by-step derivation
                    1. lift-+.f64N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                    2. lift-+.f64N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                    3. lift-*.f64N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                    4. metadata-evalN/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\left(\left(\alpha + \beta\right) + \color{blue}{2}\right) + 1} \]
                    5. associate-+l+N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\alpha + \beta\right) + \left(2 + 1\right)}} \]
                    6. metadata-evalN/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\left(\alpha + \beta\right) + \color{blue}{3}} \]
                    7. lower-+.f64N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\alpha + \beta\right) + 3}} \]
                  7. Applied rewrites73.6%

                    \[\leadsto \color{blue}{\frac{\frac{\alpha + 1}{\beta}}{\left(\alpha + \beta\right) + 3}} \]
                10. Recombined 2 regimes into one program.
                11. Final simplification71.4%

                  \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 1.3 \cdot 10^{+41}:\\ \;\;\;\;\frac{\frac{\beta + 1}{\mathsf{fma}\left(\beta, \beta + 4, 4\right)}}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\ \end{array} \]
                12. Add Preprocessing

                Alternative 13: 97.8% accurate, 2.1× speedup?

                \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 14:\\ \;\;\;\;\frac{\alpha + 1}{\left(\left(\alpha + 2\right) \cdot \left(\alpha + 2\right)\right) \cdot \left(\alpha + 3\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\ \end{array} \end{array} \]
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                (FPCore (alpha beta)
                 :precision binary64
                 (if (<= beta 14.0)
                   (/ (+ alpha 1.0) (* (* (+ alpha 2.0) (+ alpha 2.0)) (+ alpha 3.0)))
                   (/ (/ (+ alpha 1.0) beta) (+ (+ beta alpha) 3.0))))
                assert(alpha < beta);
                /* Piecewise approximation; callers must pass alpha <= beta (pre-sorted). */
                double code(double alpha, double beta) {
                	if (beta <= 14.0) {
                		/* Small beta: series in beta leaves a rational function of alpha only. */
                		return (alpha + 1.0) / (((alpha + 2.0) * (alpha + 2.0)) * (alpha + 3.0));
                	}
                	/* Large beta: asymptotic form. */
                	return ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0);
                }
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                ! Piecewise approximation; callers must pass alpha <= beta (pre-sorted).
                real(8) function code(alpha, beta)
                    real(8), intent (in) :: alpha
                    real(8), intent (in) :: beta
                    if (beta <= 14.0d0) then
                        ! Small beta: rational function of alpha only.
                        code = (alpha + 1.0d0) / (((alpha + 2.0d0) * (alpha + 2.0d0)) * (alpha + 3.0d0))
                    else
                        ! Large beta: asymptotic form.
                        code = ((alpha + 1.0d0) / beta) / ((beta + alpha) + 3.0d0)
                    end if
                end function
                
                assert alpha < beta;
                /** Piecewise approximation; callers must pass alpha <= beta (pre-sorted). */
                public static double code(double alpha, double beta) {
                	if (beta <= 14.0) {
                		// Small beta: rational function of alpha only.
                		return (alpha + 1.0) / (((alpha + 2.0) * (alpha + 2.0)) * (alpha + 3.0));
                	}
                	// Large beta: asymptotic form.
                	return ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0);
                }
                
                [alpha, beta] = sort([alpha, beta])
                def code(alpha, beta):
                	"""Piecewise approximation; assumes alpha <= beta (inputs pre-sorted)."""
                	if beta <= 14.0:
                		# Small beta: rational function of alpha only.
                		return (alpha + 1.0) / (((alpha + 2.0) * (alpha + 2.0)) * (alpha + 3.0))
                	# Large beta: asymptotic form.
                	return ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0)
                
                alpha, beta = sort([alpha, beta])
                # Piecewise approximation; assumes alpha <= beta (inputs pre-sorted).
                function code(alpha, beta)
                	if beta <= 14.0
                		# Small beta: rational function of alpha only.
                		return Float64(Float64(alpha + 1.0) / Float64(Float64(Float64(alpha + 2.0) * Float64(alpha + 2.0)) * Float64(alpha + 3.0)))
                	end
                	# Large beta: asymptotic form.
                	return Float64(Float64(Float64(alpha + 1.0) / beta) / Float64(Float64(beta + alpha) + 3.0))
                end
                
                alpha, beta = num2cell(sort([alpha, beta])){:}
                % Piecewise approximation; callers must pass alpha <= beta (pre-sorted).
                function tmp_2 = code(alpha, beta)
                	if (beta <= 14.0)
                		% Small beta: rational function of alpha only.
                		tmp_2 = (alpha + 1.0) / (((alpha + 2.0) * (alpha + 2.0)) * (alpha + 3.0));
                	else
                		% Large beta: asymptotic form.
                		tmp_2 = ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0);
                	end
                end
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                code[alpha_, beta_] := If[LessEqual[beta, 14.0], N[(N[(alpha + 1.0), $MachinePrecision] / N[(N[(N[(alpha + 2.0), $MachinePrecision] * N[(alpha + 2.0), $MachinePrecision]), $MachinePrecision] * N[(alpha + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 3.0), $MachinePrecision]), $MachinePrecision]]
                
                \begin{array}{l}
                [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                \\
                \begin{array}{l}
                \mathbf{if}\;\beta \leq 14:\\
                \;\;\;\;\frac{\alpha + 1}{\left(\left(\alpha + 2\right) \cdot \left(\alpha + 2\right)\right) \cdot \left(\alpha + 3\right)}\\
                
                \mathbf{else}:\\
                \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\
                
                
                \end{array}
                \end{array}
                
                Derivation
                1. Split input into 2 regimes
                2. if beta < 14

                  1. Initial program 99.8%

                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. Add Preprocessing
                  3. Taylor expanded in beta around 0

                    \[\leadsto \color{blue}{\frac{1 + \alpha}{{\left(2 + \alpha\right)}^{2} \cdot \left(3 + \alpha\right)}} \]
                  4. Step-by-step derivation
                    1. lower-/.f64N/A

                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\left(2 + \alpha\right)}^{2} \cdot \left(3 + \alpha\right)}} \]
                    2. lower-+.f64N/A

                      \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\left(2 + \alpha\right)}^{2} \cdot \left(3 + \alpha\right)} \]
                    3. lower-*.f64N/A

                      \[\leadsto \frac{1 + \alpha}{\color{blue}{{\left(2 + \alpha\right)}^{2} \cdot \left(3 + \alpha\right)}} \]
                    4. unpow2N/A

                      \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(\left(2 + \alpha\right) \cdot \left(2 + \alpha\right)\right)} \cdot \left(3 + \alpha\right)} \]
                    5. lower-*.f64N/A

                      \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(\left(2 + \alpha\right) \cdot \left(2 + \alpha\right)\right)} \cdot \left(3 + \alpha\right)} \]
                    6. lower-+.f64N/A

                      \[\leadsto \frac{1 + \alpha}{\left(\color{blue}{\left(2 + \alpha\right)} \cdot \left(2 + \alpha\right)\right) \cdot \left(3 + \alpha\right)} \]
                    7. lower-+.f64N/A

                      \[\leadsto \frac{1 + \alpha}{\left(\left(2 + \alpha\right) \cdot \color{blue}{\left(2 + \alpha\right)}\right) \cdot \left(3 + \alpha\right)} \]
                    8. lower-+.f6493.1

                      \[\leadsto \frac{1 + \alpha}{\left(\left(2 + \alpha\right) \cdot \left(2 + \alpha\right)\right) \cdot \color{blue}{\left(3 + \alpha\right)}} \]
                  5. Applied rewrites93.1%

                    \[\leadsto \color{blue}{\frac{1 + \alpha}{\left(\left(2 + \alpha\right) \cdot \left(2 + \alpha\right)\right) \cdot \left(3 + \alpha\right)}} \]

                  if 14 < beta

                  1. Initial program 78.9%

                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. Add Preprocessing
                  3. Taylor expanded in beta around inf

                    \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. Step-by-step derivation
                    1. lower-/.f64N/A

                      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. lower-+.f6472.8

                      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. Applied rewrites72.8%

                    \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  6. Step-by-step derivation
                    1. lift-+.f64N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                    2. lift-+.f64N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                    3. lift-*.f64N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                    4. metadata-evalN/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\left(\left(\alpha + \beta\right) + \color{blue}{2}\right) + 1} \]
                    5. associate-+l+N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\alpha + \beta\right) + \left(2 + 1\right)}} \]
                    6. metadata-evalN/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\left(\alpha + \beta\right) + \color{blue}{3}} \]
                    7. lower-+.f64N/A

                      \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\alpha + \beta\right) + 3}} \]
                  7. Applied rewrites72.8%

                    \[\leadsto \color{blue}{\frac{\frac{\alpha + 1}{\beta}}{\left(\alpha + \beta\right) + 3}} \]
                3. Recombined 2 regimes into one program.
                4. Final simplification87.2%

                  \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 14:\\ \;\;\;\;\frac{\alpha + 1}{\left(\left(\alpha + 2\right) \cdot \left(\alpha + 2\right)\right) \cdot \left(\alpha + 3\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\ \end{array} \]
                5. Add Preprocessing

                Alternative 14: 98.0% accurate, 2.2× speedup?

                \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 1.75:\\ \;\;\;\;\frac{\mathsf{fma}\left(\beta \cdot \beta, -0.0625, 0.25\right)}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\ \end{array} \end{array} \]
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                (FPCore (alpha beta)
                 :precision binary64
                 (if (<= beta 1.75)
                   (/ (fma (* beta beta) -0.0625 0.25) (+ 2.0 (+ (+ beta alpha) 1.0)))
                   (/ (/ (+ alpha 1.0) beta) (+ (+ beta alpha) 3.0))))
                assert(alpha < beta);
                double code(double alpha, double beta) {
                	double tmp;
                	if (beta <= 1.75) {
                		tmp = fma((beta * beta), -0.0625, 0.25) / (2.0 + ((beta + alpha) + 1.0));
                	} else {
                		tmp = ((alpha + 1.0) / beta) / ((beta + alpha) + 3.0);
                	}
                	return tmp;
                }
                
                alpha, beta = sort([alpha, beta])
                # Piecewise approximation; assumes alpha <= beta (inputs pre-sorted).
                function code(alpha, beta)
                	if beta <= 1.75
                		# Small beta: quadratic Taylor numerator 1/4 - beta^2/16 via one fma.
                		numer = fma(Float64(beta * beta), -0.0625, 0.25)
                		return Float64(numer / Float64(2.0 + Float64(Float64(beta + alpha) + 1.0)))
                	end
                	# Large beta: asymptotic form.
                	return Float64(Float64(Float64(alpha + 1.0) / beta) / Float64(Float64(beta + alpha) + 3.0))
                end
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                code[alpha_, beta_] := If[LessEqual[beta, 1.75], N[(N[(N[(beta * beta), $MachinePrecision] * -0.0625 + 0.25), $MachinePrecision] / N[(2.0 + N[(N[(beta + alpha), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 3.0), $MachinePrecision]), $MachinePrecision]]
                
                \begin{array}{l}
                [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                \\
                \begin{array}{l}
                \mathbf{if}\;\beta \leq 1.75:\\
                \;\;\;\;\frac{\mathsf{fma}\left(\beta \cdot \beta, -0.0625, 0.25\right)}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\
                
                \mathbf{else}:\\
                \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\
                
                
                \end{array}
                \end{array}
                
                Derivation
                1. Split input into 2 regimes
                2. if beta < 1.75

                  1. Initial program 99.8%

                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. Add Preprocessing
                  3. Taylor expanded in alpha around 0

                    \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. Step-by-step derivation
                    1. lower-/.f64N/A

                      \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. lower-+.f64N/A

                      \[\leadsto \frac{\frac{\color{blue}{1 + \beta}}{{\left(2 + \beta\right)}^{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    3. unpow2N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    4. lower-*.f64N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    5. +-commutativeN/A

                      \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    6. lower-+.f64N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    7. +-commutativeN/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    8. lower-+.f6470.7

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. Applied rewrites70.7%

                    \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  6. Step-by-step derivation
                    1. lift-+.f64N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                    2. lift-+.f64N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                    3. lift-+.f64N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right) + 1} \]
                    4. lift-*.f64N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                    5. +-commutativeN/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                    6. lift-+.f64N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right)} \]
                    7. metadata-evalN/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
                    8. associate-+r+N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                    9. lower-+.f64N/A

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                    10. lower-+.f6470.7

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right)} + 2} \]
                  7. Applied rewrites70.7%

                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                  8. Taylor expanded in beta around 0

                    \[\leadsto \frac{\frac{1}{4} + \color{blue}{\frac{-1}{16} \cdot {\beta}^{2}}}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]
                  9. Step-by-step derivation
                    1. Applied rewrites70.7%

                      \[\leadsto \frac{\mathsf{fma}\left(\beta \cdot \beta, \color{blue}{-0.0625}, 0.25\right)}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]

                    if 1.75 < beta

                    1. Initial program 78.9%

                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. Add Preprocessing
                    3. Taylor expanded in beta around inf

                      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    4. Step-by-step derivation
                      1. lower-/.f64N/A

                        \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      2. lower-+.f6472.8

                        \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    5. Applied rewrites72.8%

                      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    6. Step-by-step derivation
                      1. lift-+.f64N/A

                        \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                      2. lift-+.f64N/A

                        \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                      3. lift-*.f64N/A

                        \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                      4. metadata-evalN/A

                        \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\left(\left(\alpha + \beta\right) + \color{blue}{2}\right) + 1} \]
                      5. associate-+l+N/A

                        \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\alpha + \beta\right) + \left(2 + 1\right)}} \]
                      6. metadata-evalN/A

                        \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\left(\alpha + \beta\right) + \color{blue}{3}} \]
                      7. lower-+.f64N/A

                        \[\leadsto \frac{\frac{\mathsf{Rewrite=>}\left(lower-+.f64, \left(\alpha + 1\right)\right)}{\beta}}{\color{blue}{\left(\alpha + \beta\right) + 3}} \]
                    7. Applied rewrites72.8%

                      \[\leadsto \color{blue}{\frac{\frac{\alpha + 1}{\beta}}{\left(\alpha + \beta\right) + 3}} \]
                  10. Recombined 2 regimes into one program.
                  11. Final simplification71.3%

                    \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 1.75:\\ \;\;\;\;\frac{\mathsf{fma}\left(\beta \cdot \beta, -0.0625, 0.25\right)}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\left(\beta + \alpha\right) + 3}\\ \end{array} \]
                  12. Add Preprocessing

                  Alternative 15: 97.9% accurate, 2.2× speedup?

                  \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 1.75:\\ \;\;\;\;\frac{\mathsf{fma}\left(\beta \cdot \beta, -0.0625, 0.25\right)}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta + 3}\\ \end{array} \end{array} \]
                  NOTE: alpha and beta should be sorted in increasing order before calling this function.
                  (FPCore (alpha beta)
                   :precision binary64
                   (if (<= beta 1.75)
                     (/ (fma (* beta beta) -0.0625 0.25) (+ 2.0 (+ (+ beta alpha) 1.0)))
                     (/ (/ (+ alpha 1.0) beta) (+ beta 3.0))))
                  assert(alpha < beta);
                  double code(double alpha, double beta) {
                  	double tmp;
                  	if (beta <= 1.75) {
                  		tmp = fma((beta * beta), -0.0625, 0.25) / (2.0 + ((beta + alpha) + 1.0));
                  	} else {
                  		tmp = ((alpha + 1.0) / beta) / (beta + 3.0);
                  	}
                  	return tmp;
                  }
                  
                  alpha, beta = sort([alpha, beta])
                  function code(alpha, beta)
                  	tmp = 0.0
                  	if (beta <= 1.75)
                  		tmp = Float64(fma(Float64(beta * beta), -0.0625, 0.25) / Float64(2.0 + Float64(Float64(beta + alpha) + 1.0)));
                  	else
                  		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / Float64(beta + 3.0));
                  	end
                  	return tmp
                  end
                  
                  NOTE: alpha and beta should be sorted in increasing order before calling this function.
                  code[alpha_, beta_] := If[LessEqual[beta, 1.75], N[(N[(N[(beta * beta), $MachinePrecision] * -0.0625 + 0.25), $MachinePrecision] / N[(2.0 + N[(N[(beta + alpha), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]]
                  
                  \begin{array}{l}
                  [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                  \\
                  \begin{array}{l}
                  \mathbf{if}\;\beta \leq 1.75:\\
                  \;\;\;\;\frac{\mathsf{fma}\left(\beta \cdot \beta, -0.0625, 0.25\right)}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\
                  
                  \mathbf{else}:\\
                  \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta + 3}\\
                  
                  
                  \end{array}
                  \end{array}
                  
                  Derivation
                  1. Split input into 2 regimes
                  2. if beta < 1.75

                    1. Initial program 99.8%

                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. Add Preprocessing
                    3. Taylor expanded in alpha around 0

                      \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    4. Step-by-step derivation
                      1. lower-/.f64N/A

                        \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      2. lower-+.f64N/A

                        \[\leadsto \frac{\frac{\color{blue}{1 + \beta}}{{\left(2 + \beta\right)}^{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      3. unpow2N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      4. lower-*.f64N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      5. +-commutativeN/A

                        \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      6. lower-+.f64N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      7. +-commutativeN/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      8. lower-+.f64 — 70.7

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    5. Applied rewrites — 70.7%

                      \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    6. Step-by-step derivation
                      1. lift-+.f64N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                      2. lift-+.f64N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                      3. lift-+.f64N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right) + 1} \]
                      4. lift-*.f64N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                      5. +-commutativeN/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                      6. lift-+.f64N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right)} \]
                      7. metadata-evalN/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
                      8. associate-+r+N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                      9. lower-+.f64N/A

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                      10. lower-+.f64 — 70.7

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right)} + 2} \]
                    7. Applied rewrites — 70.7%

                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                    8. Taylor expanded in beta around 0

                      \[\leadsto \frac{\frac{1}{4} + \color{blue}{\frac{-1}{16} \cdot {\beta}^{2}}}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]
                    9. Step-by-step derivation
                      1. Applied rewrites — 70.7%

                        \[\leadsto \frac{\mathsf{fma}\left(\beta \cdot \beta, \color{blue}{-0.0625}, 0.25\right)}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]

                      if 1.75 < beta

                      1. Initial program 78.9%

                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      2. Add Preprocessing
                      3. Taylor expanded in beta around inf

                        \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      4. Step-by-step derivation
                        1. lower-/.f64N/A

                          \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        2. lower-+.f64 — 72.8

                          \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      5. Applied rewrites — 72.8%

                        \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      6. Taylor expanded in alpha around 0

                        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{3 + \beta}} \]
                      7. Step-by-step derivation
                        1. +-commutativeN/A

                          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta + 3}} \]
                        2. lower-+.f64 — 72.5

                          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta + 3}} \]
                      8. Applied rewrites — 72.5%

                        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta + 3}} \]
                    10. Recombined 2 regimes into one program.
                    11. Final simplification — 71.2%

                      \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 1.75:\\ \;\;\;\;\frac{\mathsf{fma}\left(\beta \cdot \beta, -0.0625, 0.25\right)}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta + 3}\\ \end{array} \]
                    12. Add Preprocessing

                    Alternative 16: 97.3% accurate, 2.4× speedup?

                    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 6.6:\\ \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{elif}\;\beta \leq 1.35 \cdot 10^{+154}:\\ \;\;\;\;\frac{\alpha + 1}{\beta \cdot \beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    (FPCore (alpha beta)
                     :precision binary64
                     (if (<= beta 6.6)
                       (/ 0.25 (+ 2.0 (+ (+ beta alpha) 1.0)))
                       (if (<= beta 1.35e+154)
                         (/ (+ alpha 1.0) (* beta beta))
                         (/ (/ alpha beta) beta))))
                    assert(alpha < beta);
                    double code(double alpha, double beta) {
                    	double tmp;
                    	if (beta <= 6.6) {
                    		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                    	} else if (beta <= 1.35e+154) {
                    		tmp = (alpha + 1.0) / (beta * beta);
                    	} else {
                    		tmp = (alpha / beta) / beta;
                    	}
                    	return tmp;
                    }
                    
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    real(8) function code(alpha, beta)
                        real(8), intent (in) :: alpha
                        real(8), intent (in) :: beta
                        real(8) :: tmp
                        if (beta <= 6.6d0) then
                            tmp = 0.25d0 / (2.0d0 + ((beta + alpha) + 1.0d0))
                        else if (beta <= 1.35d+154) then
                            tmp = (alpha + 1.0d0) / (beta * beta)
                        else
                            tmp = (alpha / beta) / beta
                        end if
                        code = tmp
                    end function
                    
                    assert alpha < beta;
                    public static double code(double alpha, double beta) {
                    	double tmp;
                    	if (beta <= 6.6) {
                    		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                    	} else if (beta <= 1.35e+154) {
                    		tmp = (alpha + 1.0) / (beta * beta);
                    	} else {
                    		tmp = (alpha / beta) / beta;
                    	}
                    	return tmp;
                    }
                    
                    [alpha, beta] = sort([alpha, beta])
                    def code(alpha, beta):
                    	tmp = 0
                    	if beta <= 6.6:
                    		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0))
                    	elif beta <= 1.35e+154:
                    		tmp = (alpha + 1.0) / (beta * beta)
                    	else:
                    		tmp = (alpha / beta) / beta
                    	return tmp
                    
                    alpha, beta = sort([alpha, beta])
                    function code(alpha, beta)
                    	tmp = 0.0
                    	if (beta <= 6.6)
                    		tmp = Float64(0.25 / Float64(2.0 + Float64(Float64(beta + alpha) + 1.0)));
                    	elseif (beta <= 1.35e+154)
                    		tmp = Float64(Float64(alpha + 1.0) / Float64(beta * beta));
                    	else
                    		tmp = Float64(Float64(alpha / beta) / beta);
                    	end
                    	return tmp
                    end
                    
                    alpha, beta = num2cell(sort([alpha, beta])){:}
                    function tmp_2 = code(alpha, beta)
                    	tmp = 0.0;
                    	if (beta <= 6.6)
                    		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                    	elseif (beta <= 1.35e+154)
                    		tmp = (alpha + 1.0) / (beta * beta);
                    	else
                    		tmp = (alpha / beta) / beta;
                    	end
                    	tmp_2 = tmp;
                    end
                    
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    code[alpha_, beta_] := If[LessEqual[beta, 6.6], N[(0.25 / N[(2.0 + N[(N[(beta + alpha), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[beta, 1.35e+154], N[(N[(alpha + 1.0), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]]
                    
                    \begin{array}{l}
                    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                    \\
                    \begin{array}{l}
                    \mathbf{if}\;\beta \leq 6.6:\\
                    \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\
                    
                    \mathbf{elif}\;\beta \leq 1.35 \cdot 10^{+154}:\\
                    \;\;\;\;\frac{\alpha + 1}{\beta \cdot \beta}\\
                    
                    \mathbf{else}:\\
                    \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\
                    
                    
                    \end{array}
                    \end{array}
                    
                    Derivation
                    1. Split input into 3 regimes
                    2. if beta < 6.5999999999999996

                      1. Initial program 99.8%

                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      2. Add Preprocessing
                      3. Taylor expanded in alpha around 0

                        \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      4. Step-by-step derivation
                        1. lower-/.f64N/A

                          \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        2. lower-+.f64N/A

                          \[\leadsto \frac{\frac{\color{blue}{1 + \beta}}{{\left(2 + \beta\right)}^{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        3. unpow2N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        4. lower-*.f64N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        5. +-commutativeN/A

                          \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        6. lower-+.f64N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        7. +-commutativeN/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        8. lower-+.f64 — 70.7

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      5. Applied rewrites — 70.7%

                        \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      6. Step-by-step derivation
                        1. lift-+.f64N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                        2. lift-+.f64N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                        3. lift-+.f64N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right) + 1} \]
                        4. lift-*.f64N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                        5. +-commutativeN/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                        6. lift-+.f64N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right)} \]
                        7. metadata-evalN/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
                        8. associate-+r+N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                        9. lower-+.f64N/A

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                        10. lower-+.f64 — 70.7

                          \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right)} + 2} \]
                      7. Applied rewrites — 70.7%

                        \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                      8. Taylor expanded in beta around 0

                        \[\leadsto \frac{\frac{1}{4}}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]
                      9. Step-by-step derivation
                        1. Applied rewrites — 70.6%

                          \[\leadsto \frac{0.25}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]

                        if 6.5999999999999996 < beta < 1.35000000000000003e154

                        1. Initial program 80.6%

                          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        2. Add Preprocessing
                        3. Taylor expanded in beta around inf

                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                        4. Step-by-step derivation
                          1. lower-/.f64N/A

                            \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                          2. lower-+.f64N/A

                            \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                          3. unpow2N/A

                            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                          4. lower-*.f64 — 54.2

                            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                        5. Applied rewrites — 54.2%

                          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]

                        if 1.35000000000000003e154 < beta

                        1. Initial program 76.8%

                          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        2. Add Preprocessing
                        3. Taylor expanded in beta around inf

                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                        4. Step-by-step derivation
                          1. lower-/.f64N/A

                            \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                          2. lower-+.f64N/A

                            \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                          3. unpow2N/A

                            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                          4. lower-*.f64 — 91.0

                            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                        5. Applied rewrites — 91.0%

                          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                        6. Taylor expanded in alpha around inf

                          \[\leadsto \frac{\alpha}{\color{blue}{{\beta}^{2}}} \]
                        7. Step-by-step derivation
                          1. Applied rewrites — 91.0%

                            \[\leadsto \frac{\alpha}{\color{blue}{\beta \cdot \beta}} \]
                          2. Step-by-step derivation
                            1. Applied rewrites — 91.7%

                              \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
                          3. Recombined 3 regimes into one program.
                          4. Final simplification — 70.8%

                            \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 6.6:\\ \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{elif}\;\beta \leq 1.35 \cdot 10^{+154}:\\ \;\;\;\;\frac{\alpha + 1}{\beta \cdot \beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\ \end{array} \]
                          5. Add Preprocessing

                          Alternative 17: 97.8% accurate, 2.4× speedup?

                          \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 4.5:\\ \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta + 3}\\ \end{array} \end{array} \]
                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                          (FPCore (alpha beta)
                           :precision binary64
                           (if (<= beta 4.5)
                             (/ 0.25 (+ 2.0 (+ (+ beta alpha) 1.0)))
                             (/ (/ (+ alpha 1.0) beta) (+ beta 3.0))))
                          assert(alpha < beta);
                          double code(double alpha, double beta) {
                          	double tmp;
                          	if (beta <= 4.5) {
                          		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                          	} else {
                          		tmp = ((alpha + 1.0) / beta) / (beta + 3.0);
                          	}
                          	return tmp;
                          }
                          
                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                          real(8) function code(alpha, beta)
                              real(8), intent (in) :: alpha
                              real(8), intent (in) :: beta
                              real(8) :: tmp
                              if (beta <= 4.5d0) then
                                  tmp = 0.25d0 / (2.0d0 + ((beta + alpha) + 1.0d0))
                              else
                                  tmp = ((alpha + 1.0d0) / beta) / (beta + 3.0d0)
                              end if
                              code = tmp
                          end function
                          
                          assert alpha < beta;
                          public static double code(double alpha, double beta) {
                          	double tmp;
                          	if (beta <= 4.5) {
                          		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                          	} else {
                          		tmp = ((alpha + 1.0) / beta) / (beta + 3.0);
                          	}
                          	return tmp;
                          }
                          
                          [alpha, beta] = sort([alpha, beta])
                          def code(alpha, beta):
                          	tmp = 0
                          	if beta <= 4.5:
                          		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0))
                          	else:
                          		tmp = ((alpha + 1.0) / beta) / (beta + 3.0)
                          	return tmp
                          
                          alpha, beta = sort([alpha, beta])
                          function code(alpha, beta)
                          	tmp = 0.0
                          	if (beta <= 4.5)
                          		tmp = Float64(0.25 / Float64(2.0 + Float64(Float64(beta + alpha) + 1.0)));
                          	else
                          		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / Float64(beta + 3.0));
                          	end
                          	return tmp
                          end
                          
                          alpha, beta = num2cell(sort([alpha, beta])){:}
                          function tmp_2 = code(alpha, beta)
                          	tmp = 0.0;
                          	if (beta <= 4.5)
                          		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                          	else
                          		tmp = ((alpha + 1.0) / beta) / (beta + 3.0);
                          	end
                          	tmp_2 = tmp;
                          end
                          
                          NOTE: alpha and beta should be sorted in increasing order before calling this function.
                          code[alpha_, beta_] := If[LessEqual[beta, 4.5], N[(0.25 / N[(2.0 + N[(N[(beta + alpha), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]]
                          
                          \begin{array}{l}
                          [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                          \\
                          \begin{array}{l}
                          \mathbf{if}\;\beta \leq 4.5:\\
                          \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\
                          
                          \mathbf{else}:\\
                          \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta + 3}\\
                          
                          
                          \end{array}
                          \end{array}
                          
                          Derivation
                          1. Split input into 2 regimes
                          2. if beta < 4.5

                            1. Initial program 99.8%

                              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            2. Add Preprocessing
                            3. Taylor expanded in alpha around 0

                              \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            4. Step-by-step derivation
                              1. lower-/.f64N/A

                                \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              2. lower-+.f64N/A

                                \[\leadsto \frac{\frac{\color{blue}{1 + \beta}}{{\left(2 + \beta\right)}^{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              3. unpow2N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              4. lower-*.f64N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              5. +-commutativeN/A

                                \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              6. lower-+.f64N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              7. +-commutativeN/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              8. lower-+.f64 — 70.7%

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            5. Applied rewrites — 70.7%

                              \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            6. Step-by-step derivation
                              1. lift-+.f64N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                              2. lift-+.f64N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                              3. lift-+.f64N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right) + 1} \]
                              4. lift-*.f64N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                              5. +-commutativeN/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                              6. lift-+.f64N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right)} \]
                              7. metadata-evalN/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
                              8. associate-+r+N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                              9. lower-+.f64N/A

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                              10. lower-+.f64 — 70.7%

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right)} + 2} \]
                            7. Applied rewrites — 70.7%

                              \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                            8. Taylor expanded in beta around 0

                              \[\leadsto \frac{\frac{1}{4}}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]
                            9. Step-by-step derivation
                              1. Applied rewrites70.6%

                                \[\leadsto \frac{0.25}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]

                              if 4.5 < beta

                              1. Initial program 78.9%

                                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              2. Add Preprocessing
                              3. Taylor expanded in beta around inf

                                \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              4. Step-by-step derivation
                                1. lower-/.f64N/A

                                  \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                2. lower-+.f64 — 72.8%

                                  \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              5. Applied rewrites — 72.8%

                                \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              6. Taylor expanded in alpha around 0

                                \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{3 + \beta}} \]
                              7. Step-by-step derivation
                                1. +-commutativeN/A

                                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta + 3}} \]
                                2. lower-+.f64 — 72.5%

                                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta + 3}} \]
                              8. Applied rewrites — 72.5%

                                \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta + 3}} \]
                            10. Recombined 2 regimes into one program.
                            11. Final simplification — 71.1%

                              \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 4.5:\\ \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta + 3}\\ \end{array} \]
                            12. Add Preprocessing

                            Alternative 18: 97.8% accurate, 2.6× speedup?

                            \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 6.6:\\ \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \end{array} \]
                            NOTE: alpha and beta should be sorted in increasing order before calling this function.
;; Alternative 18 (97.8% accurate, 2.6x speedup): two-regime rewrite of jcobi/3.
;; beta <= 6.6: constant-numerator Taylor form; else divide (alpha+1) by beta twice.
;; Inputs must be pre-sorted so alpha <= beta before evaluation.
(FPCore (alpha beta)
 :precision binary64
 (if (<= beta 6.6)
   (/ 0.25 (+ 2.0 (+ (+ beta alpha) 1.0)))
   (/ (/ (+ alpha 1.0) beta) beta)))
                            assert(alpha < beta);
                            double code(double alpha, double beta) {
                            	double tmp;
                            	if (beta <= 6.6) {
                            		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                            	} else {
                            		tmp = ((alpha + 1.0) / beta) / beta;
                            	}
                            	return tmp;
                            }
                            
                            NOTE: alpha and beta should be sorted in increasing order before calling this function.
                            real(8) function code(alpha, beta)
                                real(8), intent (in) :: alpha
                                real(8), intent (in) :: beta
                                real(8) :: tmp
                                if (beta <= 6.6d0) then
                                    tmp = 0.25d0 / (2.0d0 + ((beta + alpha) + 1.0d0))
                                else
                                    tmp = ((alpha + 1.0d0) / beta) / beta
                                end if
                                code = tmp
                            end function
                            
                            assert alpha < beta;
                            public static double code(double alpha, double beta) {
                            	double tmp;
                            	if (beta <= 6.6) {
                            		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                            	} else {
                            		tmp = ((alpha + 1.0) / beta) / beta;
                            	}
                            	return tmp;
                            }
                            
                            [alpha, beta] = sort([alpha, beta])
                            def code(alpha, beta):
                            	tmp = 0
                            	if beta <= 6.6:
                            		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0))
                            	else:
                            		tmp = ((alpha + 1.0) / beta) / beta
                            	return tmp
                            
                            alpha, beta = sort([alpha, beta])
                            function code(alpha, beta)
                            	tmp = 0.0
                            	if (beta <= 6.6)
                            		tmp = Float64(0.25 / Float64(2.0 + Float64(Float64(beta + alpha) + 1.0)));
                            	else
                            		tmp = Float64(Float64(Float64(alpha + 1.0) / beta) / beta);
                            	end
                            	return tmp
                            end
                            
                            alpha, beta = num2cell(sort([alpha, beta])){:}
                            function tmp_2 = code(alpha, beta)
                            	tmp = 0.0;
                            	if (beta <= 6.6)
                            		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                            	else
                            		tmp = ((alpha + 1.0) / beta) / beta;
                            	end
                            	tmp_2 = tmp;
                            end
                            
                            NOTE: alpha and beta should be sorted in increasing order before calling this function.
(* Alternative 18: two-regime approximation of the jcobi/3 expression.
   beta <= 6.6 uses the constant-numerator Taylor form; otherwise the
   result is (alpha + 1) divided by beta twice.
   NOTE: inputs are expected pre-sorted so alpha <= beta. *)
code[alpha_, beta_] := If[LessEqual[beta, 6.6], N[(0.25 / N[(2.0 + N[(N[(beta + alpha), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
                            
                            \begin{array}{l}
                            [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                            \\
                            \begin{array}{l}
                            \mathbf{if}\;\beta \leq 6.6:\\
                            \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\
                            
                            \mathbf{else}:\\
                            \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\
                            
                            
                            \end{array}
                            \end{array}
                            
                            Derivation
                            1. Split input into 2 regimes
                            2. if beta < 6.5999999999999996

                              1. Initial program 99.8%

                                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              2. Add Preprocessing
                              3. Taylor expanded in alpha around 0

                                \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              4. Step-by-step derivation
                                1. lower-/.f64N/A

                                  \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                2. lower-+.f64N/A

                                  \[\leadsto \frac{\frac{\color{blue}{1 + \beta}}{{\left(2 + \beta\right)}^{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                3. unpow2N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                4. lower-*.f64N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                5. +-commutativeN/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                6. lower-+.f64N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                7. +-commutativeN/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                8. lower-+.f6470.7

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              5. Applied rewrites70.7%

                                \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              6. Step-by-step derivation
                                1. lift-+.f64N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                                2. lift-+.f64N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                                3. lift-+.f64N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right) + 1} \]
                                4. lift-*.f64N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                                5. +-commutativeN/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                                6. lift-+.f64N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right)} \]
                                7. metadata-evalN/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
                                8. associate-+r+N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                                9. lower-+.f64N/A

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                                10. lower-+.f6470.7

                                  \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right)} + 2} \]
                              7. Applied rewrites70.7%

                                \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                              8. Taylor expanded in beta around 0

                                \[\leadsto \frac{\frac{1}{4}}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]
                              9. Step-by-step derivation
                                1. Applied rewrites70.6%

                                  \[\leadsto \frac{0.25}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]

                                if 6.5999999999999996 < beta

                                1. Initial program 78.9%

                                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                2. Add Preprocessing
                                3. Taylor expanded in beta around inf

                                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                4. Step-by-step derivation
                                  1. lower-/.f64N/A

                                    \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                  2. lower-+.f64N/A

                                    \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                  3. unpow2N/A

                                    \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                  4. lower-*.f6470.9

                                    \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                5. Applied rewrites70.9%

                                  \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                6. Step-by-step derivation
                                  1. Applied rewrites72.5%

                                    \[\leadsto \frac{\frac{\alpha + 1}{\beta}}{\color{blue}{\beta}} \]
                                7. Recombined 2 regimes into one program.
                                8. Final simplification71.1%

                                  \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 6.6:\\ \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha + 1}{\beta}}{\beta}\\ \end{array} \]
                                9. Add Preprocessing

                                Alternative 19: 94.7% accurate, 3.1× speedup?

                                \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 6.6:\\ \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\alpha + 1}{\beta \cdot \beta}\\ \end{array} \end{array} \]
                                NOTE: alpha and beta should be sorted in increasing order before calling this function.
;; Alternative 19 (94.7% accurate, 3.1x speedup): two-regime rewrite of jcobi/3.
;; beta <= 6.6: constant-numerator Taylor form; else (alpha+1)/(beta*beta).
;; Inputs must be pre-sorted so alpha <= beta before evaluation.
(FPCore (alpha beta)
 :precision binary64
 (if (<= beta 6.6)
   (/ 0.25 (+ 2.0 (+ (+ beta alpha) 1.0)))
   (/ (+ alpha 1.0) (* beta beta))))
                                assert(alpha < beta);
                                double code(double alpha, double beta) {
                                	double tmp;
                                	if (beta <= 6.6) {
                                		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                                	} else {
                                		tmp = (alpha + 1.0) / (beta * beta);
                                	}
                                	return tmp;
                                }
                                
                                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                real(8) function code(alpha, beta)
                                    real(8), intent (in) :: alpha
                                    real(8), intent (in) :: beta
                                    real(8) :: tmp
                                    if (beta <= 6.6d0) then
                                        tmp = 0.25d0 / (2.0d0 + ((beta + alpha) + 1.0d0))
                                    else
                                        tmp = (alpha + 1.0d0) / (beta * beta)
                                    end if
                                    code = tmp
                                end function
                                
                                assert alpha < beta;
                                public static double code(double alpha, double beta) {
                                	double tmp;
                                	if (beta <= 6.6) {
                                		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                                	} else {
                                		tmp = (alpha + 1.0) / (beta * beta);
                                	}
                                	return tmp;
                                }
                                
                                [alpha, beta] = sort([alpha, beta])
                                def code(alpha, beta):
                                	tmp = 0
                                	if beta <= 6.6:
                                		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0))
                                	else:
                                		tmp = (alpha + 1.0) / (beta * beta)
                                	return tmp
                                
                                alpha, beta = sort([alpha, beta])
                                function code(alpha, beta)
                                	tmp = 0.0
                                	if (beta <= 6.6)
                                		tmp = Float64(0.25 / Float64(2.0 + Float64(Float64(beta + alpha) + 1.0)));
                                	else
                                		tmp = Float64(Float64(alpha + 1.0) / Float64(beta * beta));
                                	end
                                	return tmp
                                end
                                
                                alpha, beta = num2cell(sort([alpha, beta])){:}
                                function tmp_2 = code(alpha, beta)
                                	tmp = 0.0;
                                	if (beta <= 6.6)
                                		tmp = 0.25 / (2.0 + ((beta + alpha) + 1.0));
                                	else
                                		tmp = (alpha + 1.0) / (beta * beta);
                                	end
                                	tmp_2 = tmp;
                                end
                                
                                NOTE: alpha and beta should be sorted in increasing order before calling this function.
(* Alternative 19: two-regime approximation of the jcobi/3 expression.
   beta <= 6.6 uses the constant-numerator Taylor form; otherwise the
   result is (alpha + 1) / (beta * beta).
   NOTE: inputs are expected pre-sorted so alpha <= beta. *)
code[alpha_, beta_] := If[LessEqual[beta, 6.6], N[(0.25 / N[(2.0 + N[(N[(beta + alpha), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(alpha + 1.0), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision]]
                                
                                \begin{array}{l}
                                [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                \\
                                \begin{array}{l}
                                \mathbf{if}\;\beta \leq 6.6:\\
                                \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\
                                
                                \mathbf{else}:\\
                                \;\;\;\;\frac{\alpha + 1}{\beta \cdot \beta}\\
                                
                                
                                \end{array}
                                \end{array}
                                
                                Derivation
                                1. Split input into 2 regimes
                                2. if beta < 6.5999999999999996

                                  1. Initial program 99.8%

                                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                  2. Add Preprocessing
                                  3. Taylor expanded in alpha around 0

                                    \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                  4. Step-by-step derivation
                                    1. lower-/.f64N/A

                                      \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    2. lower-+.f64N/A

                                      \[\leadsto \frac{\frac{\color{blue}{1 + \beta}}{{\left(2 + \beta\right)}^{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    3. unpow2N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    4. lower-*.f64N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    5. +-commutativeN/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    6. lower-+.f64N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{\left(\beta + 2\right)} \cdot \left(2 + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    7. +-commutativeN/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    8. lower-+.f6470.7

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \color{blue}{\left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                  5. Applied rewrites70.7%

                                    \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                  6. Step-by-step derivation
                                    1. lift-+.f64N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                                    2. lift-+.f64N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
                                    3. lift-+.f64N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right) + 1} \]
                                    4. lift-*.f64N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
                                    5. +-commutativeN/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{1 + \left(\left(\alpha + \beta\right) + 2 \cdot 1\right)}} \]
                                    6. lift-+.f64N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right)} \]
                                    7. metadata-evalN/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{1 + \left(\left(\alpha + \beta\right) + \color{blue}{2}\right)} \]
                                    8. associate-+r+N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                                    9. lower-+.f64N/A

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                                    10. lower-+.f6470.7

                                      \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right)} + 2} \]
                                  7. Applied rewrites70.7%

                                    \[\leadsto \frac{\frac{1 + \beta}{\left(\beta + 2\right) \cdot \left(\beta + 2\right)}}{\color{blue}{\left(1 + \left(\alpha + \beta\right)\right) + 2}} \]
                                  8. Taylor expanded in beta around 0

                                    \[\leadsto \frac{\frac{1}{4}}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]
                                  9. Step-by-step derivation
                                    1. Applied rewrites70.6%

                                      \[\leadsto \frac{0.25}{\left(1 + \left(\alpha + \beta\right)\right) + 2} \]

                                    if 6.5999999999999996 < beta

                                    1. Initial program 78.9%

                                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    2. Add Preprocessing
                                    3. Taylor expanded in beta around inf

                                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                    4. Step-by-step derivation
                                      1. lower-/.f64N/A

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      2. lower-+.f64N/A

                                        \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                      3. unpow2N/A

                                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      4. lower-*.f6470.9

                                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                    5. Applied rewrites70.9%

                                      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                  10. Recombined 2 regimes into one program.
                                  11. Final simplification70.7%

                                    \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 6.6:\\ \;\;\;\;\frac{0.25}{2 + \left(\left(\beta + \alpha\right) + 1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\alpha + 1}{\beta \cdot \beta}\\ \end{array} \]
                                  12. Add Preprocessing

                                  Alternative 20: 51.1% accurate, 3.6× speedup?

                                  \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\alpha \leq 1.25 \cdot 10^{-33}:\\ \;\;\;\;\frac{1}{\beta \cdot \beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\alpha}{\beta \cdot \beta}\\ \end{array} \end{array} \]
                                  NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                  (FPCore (alpha beta)
                                   :precision binary64
                                   (if (<= alpha 1.25e-33) (/ 1.0 (* beta beta)) (/ alpha (* beta beta))))
                                  assert(alpha < beta);
                                  double code(double alpha, double beta) {
                                  	double tmp;
                                  	if (alpha <= 1.25e-33) {
                                  		tmp = 1.0 / (beta * beta);
                                  	} else {
                                  		tmp = alpha / (beta * beta);
                                  	}
                                  	return tmp;
                                  }
                                  
                                  NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                  real(8) function code(alpha, beta)
                                      real(8), intent (in) :: alpha
                                      real(8), intent (in) :: beta
                                      real(8) :: tmp
                                      if (alpha <= 1.25d-33) then
                                          tmp = 1.0d0 / (beta * beta)
                                      else
                                          tmp = alpha / (beta * beta)
                                      end if
                                      code = tmp
                                  end function
                                  
                                  assert alpha < beta;
                                  public static double code(double alpha, double beta) {
                                  	double tmp;
                                  	if (alpha <= 1.25e-33) {
                                  		tmp = 1.0 / (beta * beta);
                                  	} else {
                                  		tmp = alpha / (beta * beta);
                                  	}
                                  	return tmp;
                                  }
                                  
                                  [alpha, beta] = sort([alpha, beta])
                                  def code(alpha, beta):
                                  	tmp = 0
                                  	if alpha <= 1.25e-33:
                                  		tmp = 1.0 / (beta * beta)
                                  	else:
                                  		tmp = alpha / (beta * beta)
                                  	return tmp
                                  
                                  alpha, beta = sort([alpha, beta])
                                  function code(alpha, beta)
                                  	tmp = 0.0
                                  	if (alpha <= 1.25e-33)
                                  		tmp = Float64(1.0 / Float64(beta * beta));
                                  	else
                                  		tmp = Float64(alpha / Float64(beta * beta));
                                  	end
                                  	return tmp
                                  end
                                  
                                  alpha, beta = num2cell(sort([alpha, beta])){:}
                                  function tmp_2 = code(alpha, beta)
                                  	tmp = 0.0;
                                  	if (alpha <= 1.25e-33)
                                  		tmp = 1.0 / (beta * beta);
                                  	else
                                  		tmp = alpha / (beta * beta);
                                  	end
                                  	tmp_2 = tmp;
                                  end
                                  
                                  NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                  code[alpha_, beta_] := If[LessEqual[alpha, 1.25e-33], N[(1.0 / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(alpha / N[(beta * beta), $MachinePrecision]), $MachinePrecision]]
                                  
                                  \begin{array}{l}
                                  [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                  \\
                                  \begin{array}{l}
                                  \mathbf{if}\;\alpha \leq 1.25 \cdot 10^{-33}:\\
                                  \;\;\;\;\frac{1}{\beta \cdot \beta}\\
                                  
                                  \mathbf{else}:\\
                                  \;\;\;\;\frac{\alpha}{\beta \cdot \beta}\\
                                  
                                  
                                  \end{array}
                                  \end{array}
                                  
                                  Derivation
                                  1. Split input into 2 regimes
                                  2. if alpha < 1.25000000000000007e-33

                                    1. Initial program 99.9%

                                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    2. Add Preprocessing
                                    3. Taylor expanded in beta around inf

                                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                    4. Step-by-step derivation
                                      1. lower-/.f64N/A

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      2. lower-+.f64N/A

                                        \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                      3. unpow2N/A

                                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      4. lower-*.f6429.8

                                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                    5. Applied rewrites29.8%

                                      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                    6. Taylor expanded in alpha around 0

                                      \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]
                                    7. Step-by-step derivation
                                      1. Applied rewrites29.3%

                                        \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]

                                      if 1.25000000000000007e-33 < alpha

                                      1. Initial program 82.4%

                                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      2. Add Preprocessing
                                      3. Taylor expanded in beta around inf

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      4. Step-by-step derivation
                                        1. lower-/.f64N/A

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                        2. lower-+.f64N/A

                                          \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                        3. unpow2N/A

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                        4. lower-*.f6411.6

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      5. Applied rewrites11.6%

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                      6. Taylor expanded in alpha around inf

                                        \[\leadsto \frac{\alpha}{\color{blue}{{\beta}^{2}}} \]
                                      7. Step-by-step derivation
                                        1. Applied rewrites11.6%

                                          \[\leadsto \frac{\alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      8. Recombined 2 regimes into one program.
                                      9. Add Preprocessing

                                      Alternative 21: 52.6% accurate, 4.2× speedup?

                                      \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{\alpha + 1}{\beta \cdot \beta} \end{array} \]
                                      NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                      (FPCore (alpha beta) :precision binary64 (/ (+ alpha 1.0) (* beta beta)))
                                      assert(alpha < beta);
                                      double code(double alpha, double beta) {
                                      	return (alpha + 1.0) / (beta * beta);
                                      }
                                      
                                      NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                      real(8) function code(alpha, beta)
                                          real(8), intent (in) :: alpha
                                          real(8), intent (in) :: beta
                                          code = (alpha + 1.0d0) / (beta * beta)
                                      end function
                                      
                                      assert alpha < beta;
                                      public static double code(double alpha, double beta) {
                                      	return (alpha + 1.0) / (beta * beta);
                                      }
                                      
                                      [alpha, beta] = sort([alpha, beta])
                                      def code(alpha, beta):
                                      	return (alpha + 1.0) / (beta * beta)
                                      
                                      alpha, beta = sort([alpha, beta])
                                      function code(alpha, beta)
                                      	return Float64(Float64(alpha + 1.0) / Float64(beta * beta))
                                      end
                                      
                                      alpha, beta = num2cell(sort([alpha, beta])){:}
                                      function tmp = code(alpha, beta)
                                      	tmp = (alpha + 1.0) / (beta * beta);
                                      end
                                      
                                      NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                      code[alpha_, beta_] := N[(N[(alpha + 1.0), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
                                      
                                      \begin{array}{l}
                                      [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                      \\
                                      \frac{\alpha + 1}{\beta \cdot \beta}
                                      \end{array}
                                      
                                      Derivation
                                      1. Initial program 93.7%

                                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      2. Add Preprocessing
                                      3. Taylor expanded in beta around inf

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      4. Step-by-step derivation
                                        1. lower-/.f64N/A

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                        2. lower-+.f64N/A

                                          \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                        3. unpow2N/A

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                        4. lower-*.f6423.3

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      5. Applied rewrites23.3%

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                      6. Final simplification23.3%

                                        \[\leadsto \frac{\alpha + 1}{\beta \cdot \beta} \]
                                      7. Add Preprocessing

                                      Alternative 22: 31.4% accurate, 4.9× speedup?

                                      \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{\alpha}{\beta \cdot \beta} \end{array} \]
                                      NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                      (FPCore (alpha beta) :precision binary64 (/ alpha (* beta beta)))
                                      assert(alpha < beta);
                                      double code(double alpha, double beta) {
                                      	return alpha / (beta * beta);
                                      }
                                      
                                      NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                      real(8) function code(alpha, beta)
                                          real(8), intent (in) :: alpha
                                          real(8), intent (in) :: beta
                                          code = alpha / (beta * beta)
                                      end function
                                      
                                      assert alpha < beta;
                                      public static double code(double alpha, double beta) {
                                      	return alpha / (beta * beta);
                                      }
                                      
                                      [alpha, beta] = sort([alpha, beta])
                                      def code(alpha, beta):
                                      	return alpha / (beta * beta)
                                      
                                      alpha, beta = sort([alpha, beta])
                                      function code(alpha, beta)
                                      	return Float64(alpha / Float64(beta * beta))
                                      end
                                      
                                      alpha, beta = num2cell(sort([alpha, beta])){:}
                                      function tmp = code(alpha, beta)
                                      	tmp = alpha / (beta * beta);
                                      end
                                      
                                      NOTE: alpha and beta should be sorted in increasing order before calling this function.
                                      code[alpha_, beta_] := N[(alpha / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
                                      
                                      \begin{array}{l}
                                      [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                                      \\
                                      \frac{\alpha}{\beta \cdot \beta}
                                      \end{array}
                                      
                                      Derivation
                                      1. Initial program 93.7%

                                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      2. Add Preprocessing
                                      3. Taylor expanded in beta around inf

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      4. Step-by-step derivation
                                        1. lower-/.f64N/A

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                        2. lower-+.f64N/A

                                          \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                        3. unpow2N/A

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                        4. lower-*.f6423.3

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      5. Applied rewrites23.3%

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                      6. Taylor expanded in alpha around inf

                                        \[\leadsto \frac{\alpha}{\color{blue}{{\beta}^{2}}} \]
                                      7. Step-by-step derivation
                                        1. Applied rewrites15.0%

                                          \[\leadsto \frac{\alpha}{\color{blue}{\beta \cdot \beta}} \]
                                        2. Add Preprocessing

                                        Reproduce

                                        ?
                                        herbie shell --seed 2024225 
                                        (FPCore (alpha beta)
                                          :name "Octave 3.8, jcobi/3"
                                          :precision binary64
                                          :pre (and (> alpha -1.0) (> beta -1.0))
                                          (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))