Octave 3.8, jcobi/3

Percentage Accurate: 94.3% → 99.5%
Time: 3.5s
Alternatives: 13
Speedup: 3.5×

Specification

\[\alpha > -1 \land \beta > -1\]
\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1} \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
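    ! The wrappers below mimic C99 fmax/fmin semantics for NaN inputs:
    ! x /= x is true only when x is NaN, so each merge() falls back to the
    ! other argument when one operand is NaN and to max/min otherwise.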
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
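
A note on why the rewrites below help: evaluated directly, the specification forms the product beta * alpha, which overflows binary64 when both inputs are huge even though the final result is a small, representable number. A minimal Python sketch of the failure mode (an editorial illustration, not part of the Herbie output; the helper name spec is made up):

def spec(alpha, beta):
    # Direct transcription of the specification above.
    t_0 = (alpha + beta) + (2.0 * 1.0)
    return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)

# beta * alpha overflows to inf, and the inf propagates through the divisions.
print(spec(1e120, 1e200))             # inf
# The true value is about (1 + alpha) / beta**2, which is representable.
print((1.0 + 1e120) / 1e200 / 1e200)  # ~1e-280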

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (the variable is chosen in the title); the vertical axis shows accuracy, where higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 13 alternatives:

Table: Alternative, Accuracy, Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 94.3% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1} \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}

Alternative 1: 99.5% accurate, 0.9× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \mathbf{if}\;\beta \leq 3.6 \cdot 10^{+159}:\\ \;\;\;\;\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (if (<= beta 3.6e+159)
     (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))
     (/ (/ alpha beta) beta))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double tmp;
	if (beta <= 3.6e+159) {
		tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
	} else {
		tmp = (alpha / beta) / beta;
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    if (beta <= 3.6d+159) then
        tmp = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
    else
        tmp = (alpha / beta) / beta
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double tmp;
	if (beta <= 3.6e+159) {
		tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
	} else {
		tmp = (alpha / beta) / beta;
	}
	return tmp;
}
[alpha, beta] = sort([alpha, beta])
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	tmp = 0
	if beta <= 3.6e+159:
		tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
	else:
		tmp = (alpha / beta) / beta
	return tmp
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	tmp = 0.0
	if (beta <= 3.6e+159)
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0));
	else
		tmp = Float64(Float64(alpha / beta) / beta);
	end
	return tmp
end
alpha, beta = num2cell(sort([alpha, beta])){:}
function tmp_2 = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = 0.0;
	if (beta <= 3.6e+159)
		tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
	else
		tmp = (alpha / beta) / beta;
	end
	tmp_2 = tmp;
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 3.6e+159], N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\mathbf{if}\;\beta \leq 3.6 \cdot 10^{+159}:\\
\;\;\;\;\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\


\end{array}
\end{array}
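
As the note above says, Herbie's preprocessing step sorts the two arguments (so that alpha is the smaller one) before the branchy body runs. A minimal Python sketch of a caller-side wrapper (an editorial illustration, not part of the Herbie output; the name code_sorted is made up, and the body mirrors the generated Python above):

def code_sorted(a, b):
    # Preprocessing: sort so that alpha is the smaller argument.
    alpha, beta = sorted((a, b))
    t_0 = (alpha + beta) + (2.0 * 1.0)
    if beta <= 3.6e+159:
        return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
    # For huge beta the rewritten branch never forms the overflowing product.
    return (alpha / beta) / beta

# The original program returns inf for these inputs; the rewrite returns ~1e-280.
print(code_sorted(1e200, 1e120))
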
Derivation
  1. Split input into 2 regimes
  2. if beta < 3.60000000000000037e159

    1. Initial program 99.8%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]

    if 3.60000000000000037e159 < beta

    1. Initial program 80.8%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf (a worked expansion is given after this derivation)

      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
    3. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
      2. lower-+.f64N/A

        \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
      3. unpow2N/A

        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
      4. lower-*.f64 (91.4%)

        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
    4. Applied rewrites (91.4%)

      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
    5. Step-by-step derivation
      1. lift-+.f64N/A

        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
      2. lift-*.f64N/A

        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
      3. lift-/.f64N/A

        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
      4. associate-/r*N/A

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
      5. div-add-revN/A

        \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
      6. lower-/.f64N/A

        \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
      7. div-add-revN/A

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
      8. lower-/.f64N/A

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
      9. lift-+.f64 (99.1%)

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
    6. Applied rewrites (99.1%)

      \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
    7. Taylor expanded in alpha around inf

      \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
    8. Step-by-step derivation
      1. Applied rewrites (99.0%)

        \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
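
For reference, the "Taylor expanded in beta around inf" step above can be checked by hand (this worked expansion is an editorial addition, not part of the Herbie output). The numerator of the specification factors, and writing the denominator as (alpha + beta + 2)² · (alpha + beta + 3), the leading terms give

\[\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1 = \left(1 + \alpha\right) \cdot \left(1 + \beta\right), \qquad \frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{\left(\alpha + \beta + 2\right)^{2} \cdot \left(\alpha + \beta + 3\right)} \to \frac{\left(1 + \alpha\right) \cdot \beta}{{\beta}^{3}} = \frac{1 + \alpha}{{\beta}^{2}} \quad \text{as } \beta \to \infty \]

The later "Taylor expanded in alpha around inf" step additionally drops the 1 against alpha, giving the final alpha / beta / beta branch.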

    Alternative 2: 98.8% accurate, 1.5× speedup

    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\\ \mathbf{if}\;\beta \leq 8.2 \cdot 10^{+15}:\\ \;\;\;\;\frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}{t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{t\_0}\\ \end{array} \end{array} \]
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    (FPCore (alpha beta)
     :precision binary64
     (let* ((t_0 (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))
       (if (<= beta 8.2e+15)
         (/ (/ (+ 1.0 beta) (* (+ 2.0 beta) (+ 2.0 beta))) t_0)
         (/ (/ (+ 1.0 alpha) beta) t_0))))
    assert(alpha < beta);
    double code(double alpha, double beta) {
    	double t_0 = ((alpha + beta) + (2.0 * 1.0)) + 1.0;
    	double tmp;
    	if (beta <= 8.2e+15) {
    		tmp = ((1.0 + beta) / ((2.0 + beta) * (2.0 + beta))) / t_0;
    	} else {
    		tmp = ((1.0 + alpha) / beta) / t_0;
    	}
    	return tmp;
    }
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    module fmin_fmax_functions
        implicit none
        private
        public fmax
        public fmin
    
        interface fmax
            module procedure fmax88
            module procedure fmax44
            module procedure fmax84
            module procedure fmax48
        end interface
        interface fmin
            module procedure fmin88
            module procedure fmin44
            module procedure fmin84
            module procedure fmin48
        end interface
    contains
        real(8) function fmax88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
        end function
        real(4) function fmax44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
        end function
        real(8) function fmax84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
        end function
        real(8) function fmax48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
        end function
        real(8) function fmin88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
        end function
        real(4) function fmin44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
        end function
        real(8) function fmin84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
        end function
        real(8) function fmin48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
        end function
    end module
    
    real(8) function code(alpha, beta)
    use fmin_fmax_functions
        real(8), intent (in) :: alpha
        real(8), intent (in) :: beta
        real(8) :: t_0
        real(8) :: tmp
        t_0 = ((alpha + beta) + (2.0d0 * 1.0d0)) + 1.0d0
        if (beta <= 8.2d+15) then
            tmp = ((1.0d0 + beta) / ((2.0d0 + beta) * (2.0d0 + beta))) / t_0
        else
            tmp = ((1.0d0 + alpha) / beta) / t_0
        end if
        code = tmp
    end function
    
    assert alpha < beta;
    public static double code(double alpha, double beta) {
    	double t_0 = ((alpha + beta) + (2.0 * 1.0)) + 1.0;
    	double tmp;
    	if (beta <= 8.2e+15) {
    		tmp = ((1.0 + beta) / ((2.0 + beta) * (2.0 + beta))) / t_0;
    	} else {
    		tmp = ((1.0 + alpha) / beta) / t_0;
    	}
    	return tmp;
    }
    
    [alpha, beta] = sort([alpha, beta])
    def code(alpha, beta):
    	t_0 = ((alpha + beta) + (2.0 * 1.0)) + 1.0
    	tmp = 0
    	if beta <= 8.2e+15:
    		tmp = ((1.0 + beta) / ((2.0 + beta) * (2.0 + beta))) / t_0
    	else:
    		tmp = ((1.0 + alpha) / beta) / t_0
    	return tmp
    
    alpha, beta = sort([alpha, beta])
    function code(alpha, beta)
    	t_0 = Float64(Float64(Float64(alpha + beta) + Float64(2.0 * 1.0)) + 1.0)
    	tmp = 0.0
    	if (beta <= 8.2e+15)
    		tmp = Float64(Float64(Float64(1.0 + beta) / Float64(Float64(2.0 + beta) * Float64(2.0 + beta))) / t_0);
    	else
    		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / t_0);
    	end
    	return tmp
    end
    
    alpha, beta = num2cell(sort([alpha, beta])){:}
    function tmp_2 = code(alpha, beta)
    	t_0 = ((alpha + beta) + (2.0 * 1.0)) + 1.0;
    	tmp = 0.0;
    	if (beta <= 8.2e+15)
    		tmp = ((1.0 + beta) / ((2.0 + beta) * (2.0 + beta))) / t_0;
    	else
    		tmp = ((1.0 + alpha) / beta) / t_0;
    	end
    	tmp_2 = tmp;
    end
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    code[alpha_, beta_] := Block[{t$95$0 = N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]}, If[LessEqual[beta, 8.2e+15], N[(N[(N[(1.0 + beta), $MachinePrecision] / N[(N[(2.0 + beta), $MachinePrecision] * N[(2.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / t$95$0), $MachinePrecision]]]
    
    \begin{array}{l}
    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
    \\
    \begin{array}{l}
    t_0 := \left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\\
    \mathbf{if}\;\beta \leq 8.2 \cdot 10^{+15}:\\
    \;\;\;\;\frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}{t\_0}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{t\_0}\\
    
    
    \end{array}
    \end{array}
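
    The first branch is the "Taylor expanded in alpha around 0" form from the derivation below: using the factored numerator (1 + alpha)(1 + beta) noted earlier, setting alpha = 0 everywhere except the final t_0 + 1 factor gives (an editorial worked step, not part of the Herbie output)

    \[\frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{\left(\alpha + \beta + 2\right)^{2}} \Bigg|_{\alpha = 0} = \frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)} \]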
    
    Derivation
    1. Split input into 2 regimes
    2. if beta < 8.2e15

      1. Initial program 99.9%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in alpha around 0

        \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. lower-+.f64N/A

          \[\leadsto \frac{\frac{1 + \beta}{{\color{blue}{\left(2 + \beta\right)}}^{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        3. unpow2N/A

          \[\leadsto \frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        4. lower-*.f64N/A

          \[\leadsto \frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        5. lower-+.f64N/A

          \[\leadsto \frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        6. lower-+.f64 (98.7%)

          \[\leadsto \frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. Applied rewrites (98.7%)

        \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]

      if 8.2e15 < beta

      1. Initial program 89.4%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in beta around inf

        \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \frac{\frac{1 + \alpha}{\color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. lower-+.f64 (98.9%)

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. Applied rewrites (98.9%)

        \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Recombined 2 regimes into one program.
    4. Add Preprocessing

    Alternative 3: 98.4% accurate, 2.0× speedup

    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 8.2 \cdot 10^{+15}:\\ \;\;\;\;\frac{1 + \beta}{\mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, 7 + \beta, 16\right), 12\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\ \end{array} \end{array} \]
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    (FPCore (alpha beta)
     :precision binary64
     (if (<= beta 8.2e+15)
       (/ (+ 1.0 beta) (fma beta (fma beta (+ 7.0 beta) 16.0) 12.0))
       (/ (/ (+ 1.0 alpha) beta) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0))))
    assert(alpha < beta);
    double code(double alpha, double beta) {
    	double tmp;
    	if (beta <= 8.2e+15) {
    		tmp = (1.0 + beta) / fma(beta, fma(beta, (7.0 + beta), 16.0), 12.0);
    	} else {
    		tmp = ((1.0 + alpha) / beta) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
    	}
    	return tmp;
    }
    
    alpha, beta = sort([alpha, beta])
    function code(alpha, beta)
    	tmp = 0.0
    	if (beta <= 8.2e+15)
    		tmp = Float64(Float64(1.0 + beta) / fma(beta, fma(beta, Float64(7.0 + beta), 16.0), 12.0));
    	else
    		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / Float64(Float64(Float64(alpha + beta) + Float64(2.0 * 1.0)) + 1.0));
    	end
    	return tmp
    end
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    code[alpha_, beta_] := If[LessEqual[beta, 8.2e+15], N[(N[(1.0 + beta), $MachinePrecision] / N[(beta * N[(beta * N[(7.0 + beta), $MachinePrecision] + 16.0), $MachinePrecision] + 12.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
    
    \begin{array}{l}
    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
    \\
    \begin{array}{l}
    \mathbf{if}\;\beta \leq 8.2 \cdot 10^{+15}:\\
    \;\;\;\;\frac{1 + \beta}{\mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, 7 + \beta, 16\right), 12\right)}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\
    
    
    \end{array}
    \end{array}
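
    The nested fma calls in the first branch are a Horner-style evaluation of the expanded denominator from the derivation below; as a worked check (an editorial addition, not part of the Herbie output), in exact arithmetic

    \[\left(2 + \beta\right) \cdot \left(2 + \beta\right) \cdot \left(3 + \beta\right) = {\beta}^{3} + 7 \cdot {\beta}^{2} + 16 \cdot \beta + 12 = \beta \cdot \left(\beta \cdot \left(7 + \beta\right) + 16\right) + 12 \]

    and each fma evaluates one of the two inner multiply-adds with a single rounding.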
    
    Derivation
    1. Split input into 2 regimes
    2. if beta < 8.2e15

      1. Initial program 99.9%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in alpha around 0

        \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
      3. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
        2. lower-+.f64N/A

          \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
        3. *-commutativeN/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
        4. lower-*.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
        5. lower-+.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
        6. unpow2N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
        7. lower-*.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
        8. lower-+.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
        9. lower-+.f64 (97.8%)

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
      4. Applied rewrites (97.8%)

        \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
      5. Taylor expanded in beta around 0

        \[\leadsto \frac{1 + \beta}{12 + \color{blue}{\beta \cdot \left(16 + \beta \cdot \left(7 + \beta\right)\right)}} \]
      6. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \frac{1 + \beta}{\beta \cdot \left(16 + \beta \cdot \left(7 + \beta\right)\right) + 12} \]
        2. lower-fma.f64N/A

          \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, 16 + \color{blue}{\beta \cdot \left(7 + \beta\right)}, 12\right)} \]
        3. +-commutativeN/A

          \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, \beta \cdot \left(7 + \beta\right) + 16, 12\right)} \]
        4. lower-fma.f64N/A

          \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, 7 + \color{blue}{\beta}, 16\right), 12\right)} \]
        5. lower-+.f64 (97.8%)

          \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, 7 + \beta, 16\right), 12\right)} \]
      7. Applied rewrites (97.8%)

        \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, \color{blue}{\mathsf{fma}\left(\beta, 7 + \beta, 16\right)}, 12\right)} \]

      if 8.2e15 < beta

      1. Initial program 89.4%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in beta around inf

        \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \frac{\frac{1 + \alpha}{\color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. lower-+.f64 (98.9%)

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. Applied rewrites (98.9%)

        \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Recombined 2 regimes into one program.
    4. Add Preprocessing

    Alternative 4: 98.4% accurate, 2.1× speedup

    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 8.2 \cdot 10^{+15}:\\ \;\;\;\;\frac{1 + \beta}{\mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, 7 + \beta, 16\right), 12\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    (FPCore (alpha beta)
     :precision binary64
     (if (<= beta 8.2e+15)
       (/ (+ 1.0 beta) (fma beta (fma beta (+ 7.0 beta) 16.0) 12.0))
       (/ (/ (+ 1.0 alpha) beta) beta)))
    assert(alpha < beta);
    double code(double alpha, double beta) {
    	double tmp;
    	if (beta <= 8.2e+15) {
    		tmp = (1.0 + beta) / fma(beta, fma(beta, (7.0 + beta), 16.0), 12.0);
    	} else {
    		tmp = ((1.0 + alpha) / beta) / beta;
    	}
    	return tmp;
    }
    
    alpha, beta = sort([alpha, beta])
    function code(alpha, beta)
    	tmp = 0.0
    	if (beta <= 8.2e+15)
    		tmp = Float64(Float64(1.0 + beta) / fma(beta, fma(beta, Float64(7.0 + beta), 16.0), 12.0));
    	else
    		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
    	end
    	return tmp
    end
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    code[alpha_, beta_] := If[LessEqual[beta, 8.2e+15], N[(N[(1.0 + beta), $MachinePrecision] / N[(beta * N[(beta * N[(7.0 + beta), $MachinePrecision] + 16.0), $MachinePrecision] + 12.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
    
    \begin{array}{l}
    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
    \\
    \begin{array}{l}
    \mathbf{if}\;\beta \leq 8.2 \cdot 10^{+15}:\\
    \;\;\;\;\frac{1 + \beta}{\mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, 7 + \beta, 16\right), 12\right)}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if beta < 8.2e15

      1. Initial program 99.9%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in alpha around 0

        \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
      3. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
        2. lower-+.f64N/A

          \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
        3. *-commutativeN/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
        4. lower-*.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
        5. lower-+.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
        6. unpow2N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
        7. lower-*.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
        8. lower-+.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
        9. lower-+.f64 (97.8%)

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
      4. Applied rewrites (97.8%)

        \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
      5. Taylor expanded in beta around 0

        \[\leadsto \frac{1 + \beta}{12 + \color{blue}{\beta \cdot \left(16 + \beta \cdot \left(7 + \beta\right)\right)}} \]
      6. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \frac{1 + \beta}{\beta \cdot \left(16 + \beta \cdot \left(7 + \beta\right)\right) + 12} \]
        2. lower-fma.f64N/A

          \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, 16 + \color{blue}{\beta \cdot \left(7 + \beta\right)}, 12\right)} \]
        3. +-commutativeN/A

          \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, \beta \cdot \left(7 + \beta\right) + 16, 12\right)} \]
        4. lower-fma.f64N/A

          \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, 7 + \color{blue}{\beta}, 16\right), 12\right)} \]
        5. lower-+.f64 (97.8%)

          \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, 7 + \beta, 16\right), 12\right)} \]
      7. Applied rewrites (97.8%)

        \[\leadsto \frac{1 + \beta}{\mathsf{fma}\left(\beta, \color{blue}{\mathsf{fma}\left(\beta, 7 + \beta, 16\right)}, 12\right)} \]

      if 8.2e15 < beta

      1. Initial program 89.4%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in beta around inf

        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
      3. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
        2. lower-+.f64N/A

          \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
        3. unpow2N/A

          \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
        4. lower-*.f64 (93.6%)

          \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
      4. Applied rewrites (93.6%)

        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
        2. lift-*.f64N/A

          \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
        3. lift-/.f64N/A

          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
        4. associate-/r*N/A

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
        5. div-add-revN/A

          \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
        6. lower-/.f64N/A

          \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
        7. div-add-revN/A

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
        8. lower-/.f64N/A

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
        9. lift-+.f64 (98.9%)

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
      6. Applied rewrites (98.9%)

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
    3. Recombined 2 regimes into one program.
    4. Add Preprocessing

    Alternative 5: 97.5% accurate, 2.4× speedup

    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 2.2:\\ \;\;\;\;\mathsf{fma}\left(\beta, \beta \cdot \left(0.024691358024691357 \cdot \beta - 0.011574074074074073\right) - 0.027777777777777776, 0.08333333333333333\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    (FPCore (alpha beta)
     :precision binary64
     (if (<= beta 2.2)
       (fma
        beta
        (-
         (* beta (- (* 0.024691358024691357 beta) 0.011574074074074073))
         0.027777777777777776)
        0.08333333333333333)
       (/ (/ (+ 1.0 alpha) beta) beta)))
    assert(alpha < beta);
    double code(double alpha, double beta) {
    	double tmp;
    	if (beta <= 2.2) {
    		tmp = fma(beta, ((beta * ((0.024691358024691357 * beta) - 0.011574074074074073)) - 0.027777777777777776), 0.08333333333333333);
    	} else {
    		tmp = ((1.0 + alpha) / beta) / beta;
    	}
    	return tmp;
    }
    
    alpha, beta = sort([alpha, beta])
    function code(alpha, beta)
    	tmp = 0.0
    	if (beta <= 2.2)
    		tmp = fma(beta, Float64(Float64(beta * Float64(Float64(0.024691358024691357 * beta) - 0.011574074074074073)) - 0.027777777777777776), 0.08333333333333333);
    	else
    		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
    	end
    	return tmp
    end
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    code[alpha_, beta_] := If[LessEqual[beta, 2.2], N[(beta * N[(N[(beta * N[(N[(0.024691358024691357 * beta), $MachinePrecision] - 0.011574074074074073), $MachinePrecision]), $MachinePrecision] - 0.027777777777777776), $MachinePrecision] + 0.08333333333333333), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
    
    \begin{array}{l}
    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
    \\
    \begin{array}{l}
    \mathbf{if}\;\beta \leq 2.2:\\
    \;\;\;\;\mathsf{fma}\left(\beta, \beta \cdot \left(0.024691358024691357 \cdot \beta - 0.011574074074074073\right) - 0.027777777777777776, 0.08333333333333333\right)\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
    
    
    \end{array}
    \end{array}
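
    The decimal constants in the first branch are the binary64 values of the Taylor coefficients 2/81, 5/432, 1/36, and 1/12 that appear as fractions in the derivation below. A quick check (an editorial sketch, not part of the Herbie output), in Python:

    # IEEE 754 division is correctly rounded, so these quotients are the same
    # binary64 constants that appear in the generated code above.
    for num, den in ((2, 81), (5, 432), (1, 36), (1, 12)):
        print(num / den)
    # Expected output:
    # 0.024691358024691357
    # 0.011574074074074073
    # 0.027777777777777776
    # 0.08333333333333333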
    
    Derivation
    1. Split input into 2 regimes
    2. if beta < 2.2000000000000002

      1. Initial program 99.9%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in alpha around 0

        \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
      3. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
        2. lower-+.f64N/A

          \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
        3. *-commutativeN/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
        4. lower-*.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
        5. lower-+.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
        6. unpow2N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
        7. lower-*.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
        8. lower-+.f64N/A

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
        9. lower-+.f64 (97.9%)

          \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
      4. Applied rewrites (97.9%)

        \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
      5. Taylor expanded in beta around 0

        \[\leadsto \frac{1}{12} + \color{blue}{\beta \cdot \left(\beta \cdot \left(\frac{2}{81} \cdot \beta - \frac{5}{432}\right) - \frac{1}{36}\right)} \]
      6. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \beta \cdot \left(\beta \cdot \left(\frac{2}{81} \cdot \beta - \frac{5}{432}\right) - \frac{1}{36}\right) + \frac{1}{12} \]
        2. lower-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(\beta, \beta \cdot \left(\frac{2}{81} \cdot \beta - \frac{5}{432}\right) - \color{blue}{\frac{1}{36}}, \frac{1}{12}\right) \]
        3. lower--.f64N/A

          \[\leadsto \mathsf{fma}\left(\beta, \beta \cdot \left(\frac{2}{81} \cdot \beta - \frac{5}{432}\right) - \frac{1}{36}, \frac{1}{12}\right) \]
        4. lower-*.f64N/A

          \[\leadsto \mathsf{fma}\left(\beta, \beta \cdot \left(\frac{2}{81} \cdot \beta - \frac{5}{432}\right) - \frac{1}{36}, \frac{1}{12}\right) \]
        5. lower--.f64N/A

          \[\leadsto \mathsf{fma}\left(\beta, \beta \cdot \left(\frac{2}{81} \cdot \beta - \frac{5}{432}\right) - \frac{1}{36}, \frac{1}{12}\right) \]
        6. lower-*.f64 (97.5%)

          \[\leadsto \mathsf{fma}\left(\beta, \beta \cdot \left(0.024691358024691357 \cdot \beta - 0.011574074074074073\right) - 0.027777777777777776, 0.08333333333333333\right) \]
      7. Applied rewrites (97.5%)

        \[\leadsto \mathsf{fma}\left(\beta, \color{blue}{\beta \cdot \left(0.024691358024691357 \cdot \beta - 0.011574074074074073\right) - 0.027777777777777776}, 0.08333333333333333\right) \]

      if 2.2000000000000002 < beta

      1. Initial program 89.8%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in beta around inf

        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
      3. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
        2. lower-+.f64N/A

          \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
        3. unpow2N/A

          \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
        4. lower-*.f64 (92.1%)

          \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
      4. Applied rewrites (92.1%)

        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
        2. lift-*.f64N/A

          \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
        3. lift-/.f64N/A

          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
        4. associate-/r*N/A

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
        5. div-add-revN/A

          \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
        6. lower-/.f64N/A

          \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
        7. div-add-revN/A

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
        8. lower-/.f64N/A

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
        9. lift-+.f64 (97.2%)

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
      6. Applied rewrites (97.2%)

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
    3. Recombined 2 regimes into one program.
    4. Add Preprocessing

    Alternative 6: 97.3% accurate, 2.6× speedup

    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 7.1:\\ \;\;\;\;\frac{0.25}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    (FPCore (alpha beta)
     :precision binary64
     (if (<= beta 7.1)
       (/ 0.25 (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0))
       (/ (/ (+ 1.0 alpha) beta) beta)))
    assert(alpha < beta);
    double code(double alpha, double beta) {
    	double tmp;
    	if (beta <= 7.1) {
    		tmp = 0.25 / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
    	} else {
    		tmp = ((1.0 + alpha) / beta) / beta;
    	}
    	return tmp;
    }
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    module fmin_fmax_functions
        implicit none
        private
        public fmax
        public fmin
    
        interface fmax
            module procedure fmax88
            module procedure fmax44
            module procedure fmax84
            module procedure fmax48
        end interface
        interface fmin
            module procedure fmin88
            module procedure fmin44
            module procedure fmin84
            module procedure fmin48
        end interface
    contains
        real(8) function fmax88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
        end function
        real(4) function fmax44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
        end function
        real(8) function fmax84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
        end function
        real(8) function fmax48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
        end function
        real(8) function fmin88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
        end function
        real(4) function fmin44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
        end function
        real(8) function fmin84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
        end function
        real(8) function fmin48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
        end function
    end module
    
    real(8) function code(alpha, beta)
    use fmin_fmax_functions
        real(8), intent (in) :: alpha
        real(8), intent (in) :: beta
        real(8) :: tmp
        if (beta <= 7.1d0) then
            tmp = 0.25d0 / (((alpha + beta) + (2.0d0 * 1.0d0)) + 1.0d0)
        else
            tmp = ((1.0d0 + alpha) / beta) / beta
        end if
        code = tmp
    end function
    
    assert alpha < beta;
    public static double code(double alpha, double beta) {
    	double tmp;
    	if (beta <= 7.1) {
    		tmp = 0.25 / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
    	} else {
    		tmp = ((1.0 + alpha) / beta) / beta;
    	}
    	return tmp;
    }
    
    [alpha, beta] = sort([alpha, beta])
    def code(alpha, beta):
    	tmp = 0
    	if beta <= 7.1:
    		tmp = 0.25 / (((alpha + beta) + (2.0 * 1.0)) + 1.0)
    	else:
    		tmp = ((1.0 + alpha) / beta) / beta
    	return tmp
    
    alpha, beta = sort([alpha, beta])
    function code(alpha, beta)
    	tmp = 0.0
    	if (beta <= 7.1)
    		tmp = Float64(0.25 / Float64(Float64(Float64(alpha + beta) + Float64(2.0 * 1.0)) + 1.0));
    	else
    		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
    	end
    	return tmp
    end
    
    alpha, beta = num2cell(sort([alpha, beta])){:}
    function tmp_2 = code(alpha, beta)
    	tmp = 0.0;
    	if (beta <= 7.1)
    		tmp = 0.25 / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
    	else
    		tmp = ((1.0 + alpha) / beta) / beta;
    	end
    	tmp_2 = tmp;
    end
    
    NOTE: alpha and beta should be sorted in increasing order before calling this function.
    code[alpha_, beta_] := If[LessEqual[beta, 7.1], N[(0.25 / N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
    
    \begin{array}{l}
    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
    \\
    \begin{array}{l}
    \mathbf{if}\;\beta \leq 7.1:\\
    \;\;\;\;\frac{0.25}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
    
    
    \end{array}
    \end{array}
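
    In the first branch, the constant 0.25 is the value of the low-beta regime's leading term at beta = 0 (an editorial worked step, not part of the Herbie output): the derivation below first reduces that regime to (1 + beta) / ((2 + beta)·(2 + beta)), and

    \[\frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)} \Bigg|_{\beta = 0} = \frac{1}{4} = 0.25 \]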
    
    Derivation
    1. Split input into 2 regimes
    2. if beta < 7.0999999999999996

      1. Initial program 99.9%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in alpha around 0

        \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \frac{\frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. lower-+.f64N/A

          \[\leadsto \frac{\frac{1 + \beta}{{\color{blue}{\left(2 + \beta\right)}}^{2}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        3. unpow2N/A

          \[\leadsto \frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        4. lower-*.f64N/A

          \[\leadsto \frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        5. lower-+.f64N/A

          \[\leadsto \frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        6. lower-+.f64 (98.9%)

          \[\leadsto \frac{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. Applied rewrites (98.9%)

        \[\leadsto \frac{\color{blue}{\frac{1 + \beta}{\left(2 + \beta\right) \cdot \left(2 + \beta\right)}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. Taylor expanded in beta around 0

        \[\leadsto \frac{\frac{1}{4}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. Step-by-step derivation
        1. Applied rewrites (97.8%)

          \[\leadsto \frac{0.25}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]

        if 7.0999999999999996 < beta

        1. Initial program 89.8%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Taylor expanded in beta around inf

          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
        3. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
          2. lower-+.f64N/A

            \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
          3. unpow2N/A

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          4. lower-*.f64 92.2%

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
        4. Applied rewrites 92.2%

          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
        5. Step-by-step derivation
          1. lift-+.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
          2. lift-*.f64N/A

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          3. lift-/.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
          4. associate-/r*N/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
          5. div-add-revN/A

            \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
          6. lower-/.f64N/A

            \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
          7. div-add-revN/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
          8. lower-/.f64N/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
          9. lift-+.f64 97.3%

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
        6. Applied rewrites 97.3%

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
    3. Recombined 2 regimes into one program.
    4. Add Preprocessing
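
    The expansions used in steps 2 and 5 can be checked by hand, starting from the factorization of the numerator:

    \[\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1 = \left(1 + \alpha\right) \cdot \left(1 + \beta\right) \]

    Setting alpha = 0 collapses the first two quotients to the highlighted term of step 2, and its beta = 0 limit is the constant in the low-beta branch:

    \[\frac{1 + \beta}{{\left(2 + \beta\right)}^{2}} \;\to\; \frac{1}{4} \quad \left(\beta \to 0\right) \]

    For large beta the whole expression behaves like the other branch:

    \[\frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{{\left(\alpha + \beta + 2\right)}^{2} \cdot \left(\alpha + \beta + 3\right)} \;\approx\; \frac{1 + \alpha}{{\beta}^{2}} = \frac{\frac{1 + \alpha}{\beta}}{\beta} \quad \left(\beta \to \infty\right) \]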

      Alternative 7: 97.2% accurate, 3.3× speedup?

      \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 1.65:\\ \;\;\;\;\mathsf{fma}\left(\beta, -0.011574074074074073 \cdot \beta - 0.027777777777777776, 0.08333333333333333\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
      NOTE: alpha and beta should be sorted in increasing order before calling this function.
      (FPCore (alpha beta)
       :precision binary64
       (if (<= beta 1.65)
         (fma
          beta
          (- (* -0.011574074074074073 beta) 0.027777777777777776)
          0.08333333333333333)
         (/ (/ (+ 1.0 alpha) beta) beta)))
      assert(alpha < beta);
      double code(double alpha, double beta) {
      	double tmp;
      	if (beta <= 1.65) {
      		tmp = fma(beta, ((-0.011574074074074073 * beta) - 0.027777777777777776), 0.08333333333333333);
      	} else {
      		tmp = ((1.0 + alpha) / beta) / beta;
      	}
      	return tmp;
      }
      
      alpha, beta = sort([alpha, beta])
      function code(alpha, beta)
      	tmp = 0.0
      	if (beta <= 1.65)
      		tmp = fma(beta, Float64(Float64(-0.011574074074074073 * beta) - 0.027777777777777776), 0.08333333333333333);
      	else
      		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
      	end
      	return tmp
      end
      
      NOTE: alpha and beta should be sorted in increasing order before calling this function.
      code[alpha_, beta_] := If[LessEqual[beta, 1.65], N[(beta * N[(N[(-0.011574074074074073 * beta), $MachinePrecision] - 0.027777777777777776), $MachinePrecision] + 0.08333333333333333), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
      
      \begin{array}{l}
      [alpha, beta] = \mathsf{sort}([alpha, beta])\\
      \\
      \begin{array}{l}
      \mathbf{if}\;\beta \leq 1.65:\\
      \;\;\;\;\mathsf{fma}\left(\beta, -0.011574074074074073 \cdot \beta - 0.027777777777777776, 0.08333333333333333\right)\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if beta < 1.6499999999999999

        1. Initial program 99.9%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Taylor expanded in alpha around 0

          \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
        3. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
          2. lower-+.f64N/A

            \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
          3. *-commutativeN/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
          4. lower-*.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
          5. lower-+.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
          6. unpow2N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
          7. lower-*.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
          8. lower-+.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
          9. lower-+.f64 97.9%

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
        4. Applied rewrites 97.9%

          \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
        5. Taylor expanded in beta around 0

          \[\leadsto \frac{1}{12} + \color{blue}{\beta \cdot \left(\frac{-5}{432} \cdot \beta - \frac{1}{36}\right)} \]
        6. Step-by-step derivation
          1. +-commutativeN/A

            \[\leadsto \beta \cdot \left(\frac{-5}{432} \cdot \beta - \frac{1}{36}\right) + \frac{1}{12} \]
          2. lower-fma.f64N/A

            \[\leadsto \mathsf{fma}\left(\beta, \frac{-5}{432} \cdot \beta - \color{blue}{\frac{1}{36}}, \frac{1}{12}\right) \]
          3. lower--.f64N/A

            \[\leadsto \mathsf{fma}\left(\beta, \frac{-5}{432} \cdot \beta - \frac{1}{36}, \frac{1}{12}\right) \]
          4. lower-*.f64 97.3%

            \[\leadsto \mathsf{fma}\left(\beta, -0.011574074074074073 \cdot \beta - 0.027777777777777776, 0.08333333333333333\right) \]
        7. Applied rewrites 97.3%

          \[\leadsto \mathsf{fma}\left(\beta, \color{blue}{-0.011574074074074073 \cdot \beta - 0.027777777777777776}, 0.08333333333333333\right) \]

        if 1.6499999999999999 < beta

        1. Initial program 89.8%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Taylor expanded in beta around inf

          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
        3. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
          2. lower-+.f64N/A

            \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
          3. unpow2N/A

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          4. lower-*.f64 92.1%

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
        4. Applied rewrites 92.1%

          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
        5. Step-by-step derivation
          1. lift-+.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
          2. lift-*.f64N/A

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          3. lift-/.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
          4. associate-/r*N/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
          5. div-add-revN/A

            \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
          6. lower-/.f64N/A

            \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
          7. div-add-revN/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
          8. lower-/.f64N/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
          9. lift-+.f64 97.2%

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
        6. Applied rewrites 97.2%

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
      3. Recombined 2 regimes into one program.
      4. Add Preprocessing
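
      The fma constants in the low-beta branch are the coefficients of the degree-2 Taylor polynomial of the alpha = 0 form around beta = 0:

      \[\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(2 + \beta\right) \cdot \left(2 + \beta\right)} = \frac{1}{12} - \frac{1}{36} \cdot \beta - \frac{5}{432} \cdot {\beta}^{2} + O\left({\beta}^{3}\right) \]

      Regrouping the polynomial as beta times a linear factor plus a constant is exactly the fma form above, with -5/432 ≈ -0.011574074074074073, 1/36 ≈ 0.027777777777777776 and 1/12 ≈ 0.08333333333333333.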

      Alternative 8: 97.1% accurate, 3.5× speedup?

      \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 2.8:\\ \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
      NOTE: alpha and beta should be sorted in increasing order before calling this function.
      (FPCore (alpha beta)
       :precision binary64
       (if (<= beta 2.8)
         (fma -0.027777777777777776 beta 0.08333333333333333)
         (/ (/ (+ 1.0 alpha) beta) beta)))
      assert(alpha < beta);
      double code(double alpha, double beta) {
      	double tmp;
      	if (beta <= 2.8) {
      		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
      	} else {
      		tmp = ((1.0 + alpha) / beta) / beta;
      	}
      	return tmp;
      }
      
      alpha, beta = sort([alpha, beta])
      function code(alpha, beta)
      	tmp = 0.0
      	if (beta <= 2.8)
      		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
      	else
      		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
      	end
      	return tmp
      end
      
      NOTE: alpha and beta should be sorted in increasing order before calling this function.
      code[alpha_, beta_] := If[LessEqual[beta, 2.8], N[(-0.027777777777777776 * beta + 0.08333333333333333), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
      
      \begin{array}{l}
      [alpha, beta] = \mathsf{sort}([alpha, beta])\\
      \\
      \begin{array}{l}
      \mathbf{if}\;\beta \leq 2.8:\\
      \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if beta < 2.7999999999999998

        1. Initial program 99.9%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Taylor expanded in alpha around 0

          \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
        3. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
          2. lower-+.f64N/A

            \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
          3. *-commutativeN/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
          4. lower-*.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
          5. lower-+.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
          6. unpow2N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
          7. lower-*.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
          8. lower-+.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
          9. lower-+.f64 97.9%

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
        4. Applied rewrites 97.9%

          \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
        5. Taylor expanded in beta around 0

          \[\leadsto \frac{1}{12} + \color{blue}{\frac{-1}{36} \cdot \beta} \]
        6. Step-by-step derivation
          1. +-commutativeN/A

            \[\leadsto \frac{-1}{36} \cdot \beta + \frac{1}{12} \]
          2. lower-fma.f64 97.0%

            \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right) \]
        7. Applied rewrites 97.0%

          \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \color{blue}{\beta}, 0.08333333333333333\right) \]

        if 2.7999999999999998 < beta

        1. Initial program 89.8%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Taylor expanded in beta around inf

          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
        3. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
          2. lower-+.f64N/A

            \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
          3. unpow2N/A

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          4. lower-*.f64 92.2%

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
        4. Applied rewrites 92.2%

          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
        5. Step-by-step derivation
          1. lift-+.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
          2. lift-*.f64N/A

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          3. lift-/.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
          4. associate-/r*N/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
          5. div-add-revN/A

            \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
          6. lower-/.f64N/A

            \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
          7. div-add-revN/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
          8. lower-/.f64N/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
          9. lift-+.f64 97.2%

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
        6. Applied rewrites 97.2%

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
      3. Recombined 2 regimes into one program.
      4. Add Preprocessing
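
      Alternative 8 keeps only the degree-1 term of the same series, so the whole branch is a single fma:

      \[\frac{1}{12} - \frac{1}{36} \cdot \beta = \mathsf{fma}\left(-\frac{1}{36}, \beta, \frac{1}{12}\right) \approx \mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right) \]

      Dropping the quadratic term is presumably what trades the small accuracy loss (97.1% here vs. 97.2% for Alternative 7) for the slightly larger speedup (3.5× vs. 3.3×).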

      Alternative 9: 96.6% accurate, 2.8× speedup?

      \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 2.8:\\ \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\ \mathbf{elif}\;\beta \leq 8.5 \cdot 10^{+153}:\\ \;\;\;\;\frac{1 + \alpha}{\beta \cdot \beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
      NOTE: alpha and beta should be sorted in increasing order before calling this function.
      (FPCore (alpha beta)
       :precision binary64
       (if (<= beta 2.8)
         (fma -0.027777777777777776 beta 0.08333333333333333)
         (if (<= beta 8.5e+153)
           (/ (+ 1.0 alpha) (* beta beta))
           (/ (/ alpha beta) beta))))
      assert(alpha < beta);
      double code(double alpha, double beta) {
      	double tmp;
      	if (beta <= 2.8) {
      		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
      	} else if (beta <= 8.5e+153) {
      		tmp = (1.0 + alpha) / (beta * beta);
      	} else {
      		tmp = (alpha / beta) / beta;
      	}
      	return tmp;
      }
      
      alpha, beta = sort([alpha, beta])
      function code(alpha, beta)
      	tmp = 0.0
      	if (beta <= 2.8)
      		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
      	elseif (beta <= 8.5e+153)
      		tmp = Float64(Float64(1.0 + alpha) / Float64(beta * beta));
      	else
      		tmp = Float64(Float64(alpha / beta) / beta);
      	end
      	return tmp
      end
      
      NOTE: alpha and beta should be sorted in increasing order before calling this function.
      code[alpha_, beta_] := If[LessEqual[beta, 2.8], N[(-0.027777777777777776 * beta + 0.08333333333333333), $MachinePrecision], If[LessEqual[beta, 8.5e+153], N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]]
      
      \begin{array}{l}
      [alpha, beta] = \mathsf{sort}([alpha, beta])\\
      \\
      \begin{array}{l}
      \mathbf{if}\;\beta \leq 2.8:\\
      \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\
      
      \mathbf{elif}\;\beta \leq 8.5 \cdot 10^{+153}:\\
      \;\;\;\;\frac{1 + \alpha}{\beta \cdot \beta}\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\
      
      
      \end{array}
      \end{array}
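
      One way to read the extra split at 8.5·10^153: once beta exceeds roughly √DBL_MAX ≈ 1.34·10^154, the product beta·beta overflows to infinity and (1 + alpha)/(beta·beta) flushes to zero, so the last regime divides twice instead (and, per the derivation below, also drops the +1, which only matters when alpha is itself enormous). The standalone C check below is a sketch of that overflow effect; the sample values are mine.

      #include <stdio.h>

      int main(void) {
          double alpha = 1.0, beta = 1e155;               /* beta * beta overflows to +inf  */
          double fused   = (1.0 + alpha) / (beta * beta); /* becomes 0.0                    */
          double twostep = ((1.0 + alpha) / beta) / beta; /* stays nonzero (subnormal here) */
          printf("fused=%g  twostep=%g\n", fused, twostep);
          return 0;
      }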
      
      Derivation
      1. Split input into 3 regimes
      2. if beta < 2.7999999999999998

        1. Initial program 99.9%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Taylor expanded in alpha around 0

          \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
        3. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
          2. lower-+.f64N/A

            \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
          3. *-commutativeN/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
          4. lower-*.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
          5. lower-+.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
          6. unpow2N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
          7. lower-*.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
          8. lower-+.f64N/A

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
          9. lower-+.f64 97.9%

            \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
        4. Applied rewrites 97.9%

          \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
        5. Taylor expanded in beta around 0

          \[\leadsto \frac{1}{12} + \color{blue}{\frac{-1}{36} \cdot \beta} \]
        6. Step-by-step derivation
          1. +-commutativeN/A

            \[\leadsto \frac{-1}{36} \cdot \beta + \frac{1}{12} \]
          2. lower-fma.f64 97.0%

            \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right) \]
        7. Applied rewrites 97.0%

          \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \color{blue}{\beta}, 0.08333333333333333\right) \]

        if 2.7999999999999998 < beta < 8.49999999999999935e153

        1. Initial program 99.6%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Taylor expanded in beta around inf

          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
        3. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
          2. lower-+.f64N/A

            \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
          3. unpow2N/A

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          4. lower-*.f64 94.9%

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
        4. Applied rewrites 94.9%

          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]

        if 8.49999999999999935e153 < beta

        1. Initial program 81.5%

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Taylor expanded in beta around inf

          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
        3. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
          2. lower-+.f64N/A

            \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
          3. unpow2N/A

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          4. lower-*.f64 89.8%

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
        4. Applied rewrites 89.8%

          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
        5. Step-by-step derivation
          1. lift-+.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
          2. lift-*.f64N/A

            \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          3. lift-/.f64N/A

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
          4. associate-/r*N/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
          5. div-add-revN/A

            \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
          6. lower-/.f64N/A

            \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
          7. div-add-revN/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
          8. lower-/.f64N/A

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
          9. lift-+.f64 99.1%

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
        6. Applied rewrites 99.1%

          \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
        7. Taylor expanded in alpha around inf

          \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
        8. Step-by-step derivation
          1. Applied rewrites 97.6%

            \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
      3. Recombined 3 regimes into one program.
      4. Add Preprocessing
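
      The final form of the last regime follows from expanding the large-beta approximation once more, this time in alpha around infinity:

      \[\frac{1 + \alpha}{{\beta}^{2}} \;\approx\; \frac{\alpha}{{\beta}^{2}} = \frac{\frac{\alpha}{\beta}}{\beta} \quad \left(\left|\alpha\right| \gg 1\right) \]

      Writing it as two divisions also avoids forming beta·beta, which overflows in this regime as noted above.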

        Alternative 10: 93.6% accurate, 3.3× speedup?

        \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 2.8:\\ \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\ \mathbf{elif}\;\beta \leq 8.5 \cdot 10^{+153}:\\ \;\;\;\;\frac{\frac{1}{\beta}}{\beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
        NOTE: alpha and beta should be sorted in increasing order before calling this function.
        (FPCore (alpha beta)
         :precision binary64
         (if (<= beta 2.8)
           (fma -0.027777777777777776 beta 0.08333333333333333)
           (if (<= beta 8.5e+153) (/ (/ 1.0 beta) beta) (/ (/ alpha beta) beta))))
        assert(alpha < beta);
        double code(double alpha, double beta) {
        	double tmp;
        	if (beta <= 2.8) {
        		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
        	} else if (beta <= 8.5e+153) {
        		tmp = (1.0 / beta) / beta;
        	} else {
        		tmp = (alpha / beta) / beta;
        	}
        	return tmp;
        }
        
        alpha, beta = sort([alpha, beta])
        function code(alpha, beta)
        	tmp = 0.0
        	if (beta <= 2.8)
        		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
        	elseif (beta <= 8.5e+153)
        		tmp = Float64(Float64(1.0 / beta) / beta);
        	else
        		tmp = Float64(Float64(alpha / beta) / beta);
        	end
        	return tmp
        end
        
        NOTE: alpha and beta should be sorted in increasing order before calling this function.
        code[alpha_, beta_] := If[LessEqual[beta, 2.8], N[(-0.027777777777777776 * beta + 0.08333333333333333), $MachinePrecision], If[LessEqual[beta, 8.5e+153], N[(N[(1.0 / beta), $MachinePrecision] / beta), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]]
        
        \begin{array}{l}
        [alpha, beta] = \mathsf{sort}([alpha, beta])\\
        \\
        \begin{array}{l}
        \mathbf{if}\;\beta \leq 2.8:\\
        \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\
        
        \mathbf{elif}\;\beta \leq 8.5 \cdot 10^{+153}:\\
        \;\;\;\;\frac{\frac{1}{\beta}}{\beta}\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 3 regimes
        2. if beta < 2.7999999999999998

          1. Initial program 99.9%

            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          2. Taylor expanded in alpha around 0

            \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
          3. Step-by-step derivation
            1. lower-/.f64N/A

              \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
            2. lower-+.f64N/A

              \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
            3. *-commutativeN/A

              \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
            4. lower-*.f64N/A

              \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
            5. lower-+.f64N/A

              \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
            6. unpow2N/A

              \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
            7. lower-*.f64N/A

              \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
            8. lower-+.f64N/A

              \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
            9. lower-+.f64 97.9%

              \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
          4. Applied rewrites 97.9%

            \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
          5. Taylor expanded in beta around 0

            \[\leadsto \frac{1}{12} + \color{blue}{\frac{-1}{36} \cdot \beta} \]
          6. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \frac{-1}{36} \cdot \beta + \frac{1}{12} \]
            2. lower-fma.f64 97.0%

              \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right) \]
          7. Applied rewrites 97.0%

            \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \color{blue}{\beta}, 0.08333333333333333\right) \]

          if 2.7999999999999998 < beta < 8.49999999999999935e153

          1. Initial program 99.6%

            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          2. Taylor expanded in beta around inf

            \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
          3. Step-by-step derivation
            1. lower-/.f64N/A

              \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
            2. lower-+.f64N/A

              \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
            3. unpow2N/A

              \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
            4. lower-*.f64 94.9%

              \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
          4. Applied rewrites 94.9%

            \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
          5. Step-by-step derivation
            1. lift-+.f64N/A

              \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
            2. lift-*.f64N/A

              \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
            3. lift-/.f64N/A

              \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
            4. associate-/r*N/A

              \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
            5. div-add-revN/A

              \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
            6. lower-/.f64N/A

              \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
            7. div-add-revN/A

              \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
            8. lower-/.f64N/A

              \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
            9. lift-+.f64 95.0%

              \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
          6. Applied rewrites 95.0%

            \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
          7. Taylor expanded in alpha around 0

            \[\leadsto \frac{\frac{1}{\beta}}{\beta} \]
          8. Step-by-step derivation
            1. Applied rewrites 83.1%

              \[\leadsto \frac{\frac{1}{\beta}}{\beta} \]

            if 8.49999999999999935e153 < beta

            1. Initial program 81.5%

              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            2. Taylor expanded in beta around inf

              \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
            3. Step-by-step derivation
              1. lower-/.f64N/A

                \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
              2. lower-+.f64N/A

                \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
              3. unpow2N/A

                \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
              4. lower-*.f64 89.8%

                \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
            4. Applied rewrites 89.8%

              \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
            5. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
              2. lift-*.f64N/A

                \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
              3. lift-/.f64N/A

                \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
              4. associate-/r*N/A

                \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
              5. div-add-revN/A

                \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
              6. lower-/.f64N/A

                \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
              7. div-add-revN/A

                \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
              8. lower-/.f64N/A

                \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
              9. lift-+.f64 99.1%

                \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
            6. Applied rewrites 99.1%

              \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
            7. Taylor expanded in alpha around inf

              \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
            8. Step-by-step derivation
              1. Applied rewrites 97.6%

                \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
        3. Recombined 3 regimes into one program.
        4. Add Preprocessing
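
        Compared with Alternative 9, the only change is in the middle regime, where alpha is dropped entirely by a further Taylor expansion around alpha = 0:

        \[\frac{1 + \alpha}{{\beta}^{2}} \;\to\; \frac{1}{{\beta}^{2}} = \frac{\frac{1}{\beta}}{\beta} \quad \left(\alpha \to 0\right) \]

        Ignoring alpha in that regime is presumably what accounts for the drop in overall accuracy from 96.6% (Alternative 9) to 93.6% here.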

            Alternative 11: 91.7% accurate, 4.4× speedup?

            \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 2.8:\\ \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1}{\beta}}{\beta}\\ \end{array} \end{array} \]
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            (FPCore (alpha beta)
             :precision binary64
             (if (<= beta 2.8)
               (fma -0.027777777777777776 beta 0.08333333333333333)
               (/ (/ 1.0 beta) beta)))
            assert(alpha < beta);
            double code(double alpha, double beta) {
            	double tmp;
            	if (beta <= 2.8) {
            		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
            	} else {
            		tmp = (1.0 / beta) / beta;
            	}
            	return tmp;
            }
            
            alpha, beta = sort([alpha, beta])
            function code(alpha, beta)
            	tmp = 0.0
            	if (beta <= 2.8)
            		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
            	else
            		tmp = Float64(Float64(1.0 / beta) / beta);
            	end
            	return tmp
            end
            
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            code[alpha_, beta_] := If[LessEqual[beta, 2.8], N[(-0.027777777777777776 * beta + 0.08333333333333333), $MachinePrecision], N[(N[(1.0 / beta), $MachinePrecision] / beta), $MachinePrecision]]
            
            \begin{array}{l}
            [alpha, beta] = \mathsf{sort}([alpha, beta])\\
            \\
            \begin{array}{l}
            \mathbf{if}\;\beta \leq 2.8:\\
            \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\
            
            \mathbf{else}:\\
            \;\;\;\;\frac{\frac{1}{\beta}}{\beta}\\
            
            
            \end{array}
            \end{array}
            
            Derivation
            1. Split input into 2 regimes
            2. if beta < 2.7999999999999998

              1. Initial program 99.9%

                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Taylor expanded in alpha around 0

                \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
              3. Step-by-step derivation
                1. lower-/.f64N/A

                  \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
                2. lower-+.f64N/A

                  \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
                3. *-commutativeN/A

                  \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
                4. lower-*.f64N/A

                  \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
                5. lower-+.f64N/A

                  \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
                6. unpow2N/A

                  \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
                7. lower-*.f64N/A

                  \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
                8. lower-+.f64N/A

                  \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
                9. lower-+.f64 97.9%

                  \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
              4. Applied rewrites 97.9%

                \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
              5. Taylor expanded in beta around 0

                \[\leadsto \frac{1}{12} + \color{blue}{\frac{-1}{36} \cdot \beta} \]
              6. Step-by-step derivation
                1. +-commutativeN/A

                  \[\leadsto \frac{-1}{36} \cdot \beta + \frac{1}{12} \]
                2. lower-fma.f64 97.0%

                  \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right) \]
              7. Applied rewrites 97.0%

                \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \color{blue}{\beta}, 0.08333333333333333\right) \]

              if 2.7999999999999998 < beta

              1. Initial program 89.8%

                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Taylor expanded in beta around inf

                \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
              3. Step-by-step derivation
                1. lower-/.f64N/A

                  \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                2. lower-+.f64N/A

                  \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                3. unpow2N/A

                  \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                4. lower-*.f64 92.2%

                  \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
              4. Applied rewrites 92.2%

                \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
              5. Step-by-step derivation
                1. lift-+.f64N/A

                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
                2. lift-*.f64N/A

                  \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                3. lift-/.f64N/A

                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                4. associate-/r*N/A

                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
                5. div-add-revN/A

                  \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\beta} \]
                6. lower-/.f64N/A

                  \[\leadsto \frac{\frac{1}{\beta} + \frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
                7. div-add-revN/A

                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
                8. lower-/.f64N/A

                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
                9. lift-+.f64 97.2%

                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
              6. Applied rewrites 97.2%

                \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
              7. Taylor expanded in alpha around 0

                \[\leadsto \frac{\frac{1}{\beta}}{\beta} \]
              8. Step-by-step derivation
                1. Applied rewrites 87.5%

                  \[\leadsto \frac{\frac{1}{\beta}}{\beta} \]
            3. Recombined 2 regimes into one program.
            4. Add Preprocessing

              Alternative 12: 91.3% accurate, 4.5× speedup?

              \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 2.8:\\ \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{1}{\beta \cdot \beta}\\ \end{array} \end{array} \]
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              (FPCore (alpha beta)
               :precision binary64
               (if (<= beta 2.8)
                 (fma -0.027777777777777776 beta 0.08333333333333333)
                 (/ 1.0 (* beta beta))))
              assert(alpha < beta);
              double code(double alpha, double beta) {
              	double tmp;
              	if (beta <= 2.8) {
              		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
              	} else {
              		tmp = 1.0 / (beta * beta);
              	}
              	return tmp;
              }
              
              alpha, beta = sort([alpha, beta])
              function code(alpha, beta)
              	tmp = 0.0
              	if (beta <= 2.8)
              		tmp = fma(-0.027777777777777776, beta, 0.08333333333333333);
              	else
              		tmp = Float64(1.0 / Float64(beta * beta));
              	end
              	return tmp
              end
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              code[alpha_, beta_] := If[LessEqual[beta, 2.8], N[(-0.027777777777777776 * beta + 0.08333333333333333), $MachinePrecision], N[(1.0 / N[(beta * beta), $MachinePrecision]), $MachinePrecision]]
              
              \begin{array}{l}
              [alpha, beta] = \mathsf{sort}([alpha, beta])\\
              \\
              \begin{array}{l}
              \mathbf{if}\;\beta \leq 2.8:\\
              \;\;\;\;\mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right)\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{1}{\beta \cdot \beta}\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if beta < 2.7999999999999998

                1. Initial program 99.9%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Taylor expanded in alpha around 0

                  \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
                3. Step-by-step derivation
                  1. lower-/.f64N/A

                    \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
                  2. lower-+.f64N/A

                    \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
                  3. *-commutativeN/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
                  4. lower-*.f64N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
                  5. lower-+.f64N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
                  6. unpow2N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
                  7. lower-*.f64N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
                  8. lower-+.f64N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
                  9. lower-+.f64 97.9%

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
                4. Applied rewrites 97.9%

                  \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
                5. Taylor expanded in beta around 0

                  \[\leadsto \frac{1}{12} + \color{blue}{\frac{-1}{36} \cdot \beta} \]
                6. Step-by-step derivation
                  1. +-commutativeN/A

                    \[\leadsto \frac{-1}{36} \cdot \beta + \frac{1}{12} \]
                  2. lower-fma.f64 97.0%

                    \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \beta, 0.08333333333333333\right) \]
                7. Applied rewrites 97.0%

                  \[\leadsto \mathsf{fma}\left(-0.027777777777777776, \color{blue}{\beta}, 0.08333333333333333\right) \]

                if 2.7999999999999998 < beta

                1. Initial program 89.8%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Taylor expanded in beta around inf

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                3. Step-by-step derivation
                  1. lower-/.f64N/A

                    \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                  2. lower-+.f64N/A

                    \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                  3. unpow2N/A

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                  4. lower-*.f64 92.2%

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                4. Applied rewrites 92.2%

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                5. Taylor expanded in alpha around 0

                  \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]
                6. Step-by-step derivation
                  1. Applied rewrites 86.7%

                    \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]
              3. Recombined 2 regimes into one program.
              4. Add Preprocessing

                Alternative 13: 45.0% accurate, 50.2× speedup?

                \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ 0.08333333333333333 \end{array} \]
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                (FPCore (alpha beta) :precision binary64 0.08333333333333333)
                assert(alpha < beta);
                double code(double alpha, double beta) {
                	return 0.08333333333333333;
                }
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                module fmin_fmax_functions
                    implicit none
                    private
                    public fmax
                    public fmin
                
                    interface fmax
                        module procedure fmax88
                        module procedure fmax44
                        module procedure fmax84
                        module procedure fmax48
                    end interface
                    interface fmin
                        module procedure fmin88
                        module procedure fmin44
                        module procedure fmin84
                        module procedure fmin48
                    end interface
                contains
                    real(8) function fmax88(x, y) result (res)
                        real(8), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                    end function
                    real(4) function fmax44(x, y) result (res)
                        real(4), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                    end function
                    real(8) function fmax84(x, y) result(res)
                        real(8), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                    end function
                    real(8) function fmax48(x, y) result(res)
                        real(4), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                    end function
                    real(8) function fmin88(x, y) result (res)
                        real(8), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                    end function
                    real(4) function fmin44(x, y) result (res)
                        real(4), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                    end function
                    real(8) function fmin84(x, y) result(res)
                        real(8), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                    end function
                    real(8) function fmin48(x, y) result(res)
                        real(4), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                    end function
                end module
                
                real(8) function code(alpha, beta)
                use fmin_fmax_functions
                    real(8), intent (in) :: alpha
                    real(8), intent (in) :: beta
                    code = 0.08333333333333333d0
                end function
                
                assert alpha < beta;
                public static double code(double alpha, double beta) {
                	return 0.08333333333333333;
                }
                
                alpha, beta = sorted([alpha, beta])
                def code(alpha, beta):
                	return 0.08333333333333333
                
                alpha, beta = sort([alpha, beta])
                function code(alpha, beta)
                	return 0.08333333333333333
                end
                
                [alpha, beta] = num2cell(sort([alpha, beta])){:}
                function tmp = code(alpha, beta)
                	tmp = 0.08333333333333333;
                end
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                code[alpha_, beta_] := 0.08333333333333333
                
                \begin{array}{l}
                [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                \\
                0.08333333333333333
                \end{array}
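
                This constant is just the specification evaluated at alpha = beta = 0:

                \[\frac{\left(\left(0 + 0\right) + 0 \cdot 0\right) + 1}{\left(0 + 0 + 2\right) \cdot \left(0 + 0 + 2\right) \cdot \left(\left(0 + 0 + 2\right) + 1\right)} = \frac{1}{12} \approx 0.08333333333333333 \]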
                
                Derivation
                1. Initial program 94.3%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Taylor expanded in alpha around 0

                  \[\leadsto \color{blue}{\frac{1 + \beta}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
                3. Step-by-step derivation
                  1. lower-/.f64N/A

                    \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2} \cdot \left(3 + \beta\right)}} \]
                  2. lower-+.f64N/A

                    \[\leadsto \frac{1 + \beta}{\color{blue}{{\left(2 + \beta\right)}^{2}} \cdot \left(3 + \beta\right)} \]
                  3. *-commutativeN/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
                  4. lower-*.f64N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \color{blue}{{\left(2 + \beta\right)}^{2}}} \]
                  5. lower-+.f64N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot {\color{blue}{\left(2 + \beta\right)}}^{2}} \]
                  6. unpow2N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
                  7. lower-*.f64N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}\right)} \]
                  8. lower-+.f64N/A

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(\color{blue}{2} + \beta\right)\right)} \]
                  9. lower-+.f64 85.5%

                    \[\leadsto \frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \color{blue}{\beta}\right)\right)} \]
                4. Applied rewrites 85.5%

                  \[\leadsto \color{blue}{\frac{1 + \beta}{\left(3 + \beta\right) \cdot \left(\left(2 + \beta\right) \cdot \left(2 + \beta\right)\right)}} \]
                5. Taylor expanded in beta around 0

                  \[\leadsto \frac{1}{12} \]
                6. Step-by-step derivation
                  1. Applied rewrites 45.0%

                    \[\leadsto 0.08333333333333333 \]
                7. Add Preprocessing

                  Reproduce

                  ?
                  herbie shell --seed 2025117 
                  (FPCore (alpha beta)
                    :name "Octave 3.8, jcobi/3"
                    :precision binary64
                    :pre (and (> alpha -1.0) (> beta -1.0))
                    (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))
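
                  Running the command above starts an interactive Herbie session; pasting in the FPCore that follows should regenerate this report's alternatives, assuming a Herbie build that accepts the seed shown.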