Octave 3.8, jcobi/3

Percentage Accurate: 94.4% → 99.4%
Time: 3.8s
Alternatives: 17
Speedup: 2.6×

Specification

\[\alpha > -1 \land \beta > -1\]
\[\begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t_0}}{t_0}}{t_0 + 1} \end{array} \]
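
In exact arithmetic the numerator factors, which makes the specification easy to sanity-check against the alternatives below (a mathematical identity, not part of Herbie's output):

\[\left(\alpha + \beta\right) + \beta \cdot \alpha + 1 = \left(\alpha + 1\right) \cdot \left(\beta + 1\right), \qquad \text{so the specification equals}\; \frac{\left(\alpha + 1\right)\left(\beta + 1\right)}{\left(\alpha + \beta + 2\right)^2 \left(\alpha + \beta + 3\right)}.\]
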
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
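
The accuracy figure above can be probed informally by comparing the double-precision program against exact rational arithmetic. Below is a minimal Python sketch, assuming uniform random exponents as a stand-in for Herbie's input sampling; it measures relative error rather than Herbie's bits-based percentage-accuracy metric.

import random
from fractions import Fraction

def code(alpha, beta):
    # the double-precision program, as in the C listing above
    t_0 = (alpha + beta) + (2.0 * 1.0)
    return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)

def exact(alpha, beta):
    # the same formula over exact rationals (Fraction(float) is exact)
    a, b = Fraction(alpha), Fraction(beta)
    t_0 = a + b + 2
    return (((a + b) + b * a + 1) / t_0 / t_0) / (t_0 + 1)

worst = 0.0
for _ in range(10_000):
    # sample alpha, beta > -1 across a wide range of magnitudes
    alpha = -1.0 + 2.0 ** random.uniform(-40.0, 60.0)
    beta = -1.0 + 2.0 ** random.uniform(-40.0, 60.0)
    ref = exact(alpha, beta)
    if ref != 0:
        rel = abs((Fraction(code(alpha, beta)) - ref) / ref)
        worst = max(worst, float(rel))
print("worst relative error observed:", worst)
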
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t_0}}{t_0}}{t_0 + 1}
\end{array}

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (named in the plot title); the vertical axis is accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 17 alternatives:

Alternative | Accuracy | Speedup

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 94.4% accurate, 1.0× speedup

\[\begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t_0}}{t_0}}{t_0 + 1} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t_0}}{t_0}}{t_0 + 1}
\end{array}

Alternative 1: 99.4% accurate, 0.7× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ t_1 := t_0 + 1\\ t_2 := \mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 9.5 \cdot 10^{+15}:\\ \;\;\;\;\frac{\frac{\frac{\frac{t_2 \cdot t_2 - 1}{t_2 - 1}}{t_0}}{t_0}}{t_1}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{t_0}}{t_1} \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0)))
        (t_1 (+ t_0 1.0))
        (t_2 (fma beta alpha (+ beta alpha))))
   (if (<= beta 9.5e+15)
     (/ (/ (/ (/ (- (* t_2 t_2) 1.0) (- t_2 1.0)) t_0) t_0) t_1)
     (/ (/ (+ 1.0 alpha) t_0) t_1))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double t_1 = t_0 + 1.0;
	double t_2 = fma(beta, alpha, (beta + alpha));
	double tmp;
	if (beta <= 9.5e+15) {
		tmp = (((((t_2 * t_2) - 1.0) / (t_2 - 1.0)) / t_0) / t_0) / t_1;
	} else {
		tmp = ((1.0 + alpha) / t_0) / t_1;
	}
	return tmp;
}
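
For experimentation, a direct Python transcription of this alternative may be convenient. Note the hedges: math.fma exists only in Python 3.13 and newer (an assumption about your interpreter), and the min/max pair stands in for the sorting preprocessing noted above.

import math  # math.fma requires Python >= 3.13

def code_alt1(alpha, beta):
    # preprocessing: ensure alpha <= beta
    alpha, beta = min(alpha, beta), max(alpha, beta)
    t_0 = (alpha + beta) + (2.0 * 1.0)
    t_1 = t_0 + 1.0
    t_2 = math.fma(beta, alpha, beta + alpha)
    if beta <= 9.5e15:
        return ((((t_2 * t_2) - 1.0) / (t_2 - 1.0)) / t_0 / t_0) / t_1
    else:
        return ((1.0 + alpha) / t_0) / t_1
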
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	t_1 = Float64(t_0 + 1.0)
	t_2 = fma(beta, alpha, Float64(beta + alpha))
	tmp = 0.0
	if (beta <= 9.5e+15)
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(t_2 * t_2) - 1.0) / Float64(t_2 - 1.0)) / t_0) / t_0) / t_1);
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / t_0) / t_1);
	end
	return tmp
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 + 1.0), $MachinePrecision]}, Block[{t$95$2 = N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 9.5e+15], N[(N[(N[(N[(N[(N[(t$95$2 * t$95$2), $MachinePrecision] - 1.0), $MachinePrecision] / N[(t$95$2 - 1.0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision]]]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
t_1 := t_0 + 1\\
t_2 := \mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right)\\
\mathbf{if}\;\beta \leq 9.5 \cdot 10^{+15}:\\
\;\;\;\;\frac{\frac{\frac{\frac{t_2 \cdot t_2 - 1}{t_2 - 1}}{t_0}}{t_0}}{t_1}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{t_0}}{t_1}
\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 9.5e15

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Step-by-step derivation
      1. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\color{blue}{\left(\alpha + \beta\right)} + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. lift-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \color{blue}{\beta \cdot \alpha}\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. flip-+ (N/A)

        \[\leadsto \frac{\frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) \cdot \left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) - 1 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) - 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. lower-/.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) \cdot \left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) - 1 \cdot 1}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) - 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Applied rewrites (99.9%)

      \[\leadsto \frac{\frac{\frac{\color{blue}{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) \cdot \mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) - 1}{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) - 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]

    if 9.5e15 < beta

    1. Initial program 89.5%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-+.f64 (99.0%)

        \[\leadsto \frac{\frac{1 + \color{blue}{\alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites (99.0%)

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
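
The flip-+ step in this derivation rests on the difference-of-squares identity, exact over the reals:

\[x + y = \frac{x \cdot x - y \cdot y}{x - y} \quad (x \neq y), \qquad \text{here with}\; x = \mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right),\; y = 1, \;\text{so}\; \frac{t_2 \cdot t_2 - 1}{t_2 - 1} = t_2 + 1.\]

The two sides round differently in floating point, which is exactly what the rewrite exploits.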

Alternative 2: 99.4% accurate, 0.8× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ t_1 := t_0 + 1\\ \mathbf{if}\;\beta \leq 1.12 \cdot 10^{+16}:\\ \;\;\;\;\frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{t_0}}{t_0}}{t_1}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{t_0}}{t_1} \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))) (t_1 (+ t_0 1.0)))
   (if (<= beta 1.12e+16)
     (/ (/ (/ (* (+ (+ (/ (+ 1.0 alpha) beta) alpha) 1.0) beta) t_0) t_0) t_1)
     (/ (/ (+ 1.0 alpha) t_0) t_1))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double t_1 = t_0 + 1.0;
	double tmp;
	if (beta <= 1.12e+16) {
		tmp = (((((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_0) / t_0) / t_1;
	} else {
		tmp = ((1.0 + alpha) / t_0) / t_1;
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: tmp
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    t_1 = t_0 + 1.0d0
    if (beta <= 1.12d+16) then
        tmp = (((((((1.0d0 + alpha) / beta) + alpha) + 1.0d0) * beta) / t_0) / t_0) / t_1
    else
        tmp = ((1.0d0 + alpha) / t_0) / t_1
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double t_1 = t_0 + 1.0;
	double tmp;
	if (beta <= 1.12e+16) {
		tmp = (((((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_0) / t_0) / t_1;
	} else {
		tmp = ((1.0 + alpha) / t_0) / t_1;
	}
	return tmp;
}
[alpha, beta] = sort([alpha, beta])
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	t_1 = t_0 + 1.0
	tmp = 0
	if beta <= 1.12e+16:
		tmp = (((((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_0) / t_0) / t_1
	else:
		tmp = ((1.0 + alpha) / t_0) / t_1
	return tmp
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	t_1 = Float64(t_0 + 1.0)
	tmp = 0.0
	if (beta <= 1.12e+16)
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_0) / t_0) / t_1);
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / t_0) / t_1);
	end
	return tmp
end
alpha, beta = num2cell(sort([alpha, beta])){:}
function tmp_2 = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	t_1 = t_0 + 1.0;
	tmp = 0.0;
	if (beta <= 1.12e+16)
		tmp = (((((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_0) / t_0) / t_1;
	else
		tmp = ((1.0 + alpha) / t_0) / t_1;
	end
	tmp_2 = tmp;
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 + 1.0), $MachinePrecision]}, If[LessEqual[beta, 1.12e+16], N[(N[(N[(N[(N[(N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] + alpha), $MachinePrecision] + 1.0), $MachinePrecision] * beta), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision]]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
t_1 := t_0 + 1\\
\mathbf{if}\;\beta \leq 1.12 \cdot 10^{+16}:\\
\;\;\;\;\frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{t_0}}{t_0}}{t_1}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{t_0}}{t_1}
\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 1.12e16

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\frac{\frac{\color{blue}{\beta \cdot \left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. *-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) \cdot \color{blue}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) \cdot \color{blue}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. +-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. +-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right) + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right) + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      7. div-add-rev (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      8. lower-/.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      9. lower-+.f64 (99.8%)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites (99.8%)

      \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]

    if 1.12e16 < beta

    1. Initial program 89.5%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-+.f64 (99.0%)

        \[\leadsto \frac{\frac{1 + \color{blue}{\alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites (99.0%)

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
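
Why a single shared else branch suffices for huge β: in the factored form of the specification, the fast branch differs from the else branch only by a factor that tends to 1 (a back-of-the-envelope limit consistent with the Taylor expansion above):

\[\frac{\left(\alpha + 1\right)\left(\beta + 1\right)}{\left(\alpha + \beta + 2\right)^2 \left(\alpha + \beta + 3\right)} = \frac{1 + \alpha}{\left(\alpha + \beta + 2\right)\left(\alpha + \beta + 3\right)} \cdot \frac{\beta + 1}{\alpha + \beta + 2}, \qquad \frac{\beta + 1}{\alpha + \beta + 2} = 1 - \frac{\alpha + 1}{\alpha + \beta + 2} \to 1 \;\text{as}\; \beta \to \infty,\]

so dropping that factor perturbs the result at relative order (α + 1)/β, which for moderate α is comparable to double-precision rounding error once β passes the 1.12e16 threshold.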

Alternative 3: 99.4% accurate, 1.1× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ t_1 := 2 + \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 2 \cdot 10^{+21}:\\ \;\;\;\;\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{t_1}}{t_1 \cdot \left(3 + \left(\beta + \alpha\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{t_0}}{t_0 + 1} \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))) (t_1 (+ 2.0 (+ beta alpha))))
   (if (<= beta 2e+21)
     (/
      (/ (* (+ (+ (/ (+ 1.0 alpha) beta) alpha) 1.0) beta) t_1)
      (* t_1 (+ 3.0 (+ beta alpha))))
     (/ (/ (+ 1.0 alpha) t_0) (+ t_0 1.0)))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double t_1 = 2.0 + (beta + alpha);
	double tmp;
	if (beta <= 2e+21) {
		tmp = ((((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_1) / (t_1 * (3.0 + (beta + alpha)));
	} else {
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: tmp
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    t_1 = 2.0d0 + (beta + alpha)
    if (beta <= 2d+21) then
        tmp = ((((((1.0d0 + alpha) / beta) + alpha) + 1.0d0) * beta) / t_1) / (t_1 * (3.0d0 + (beta + alpha)))
    else
        tmp = ((1.0d0 + alpha) / t_0) / (t_0 + 1.0d0)
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double t_1 = 2.0 + (beta + alpha);
	double tmp;
	if (beta <= 2e+21) {
		tmp = ((((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_1) / (t_1 * (3.0 + (beta + alpha)));
	} else {
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	}
	return tmp;
}
[alpha, beta] = sort([alpha, beta])
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	t_1 = 2.0 + (beta + alpha)
	tmp = 0
	if beta <= 2e+21:
		tmp = ((((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_1) / (t_1 * (3.0 + (beta + alpha)))
	else:
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0)
	return tmp
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	t_1 = Float64(2.0 + Float64(beta + alpha))
	tmp = 0.0
	if (beta <= 2e+21)
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_1) / Float64(t_1 * Float64(3.0 + Float64(beta + alpha))));
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / t_0) / Float64(t_0 + 1.0));
	end
	return tmp
end
alpha, beta = num2cell(sort([alpha, beta])){:}
function tmp_2 = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	t_1 = 2.0 + (beta + alpha);
	tmp = 0.0;
	if (beta <= 2e+21)
		tmp = ((((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / t_1) / (t_1 * (3.0 + (beta + alpha)));
	else
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	end
	tmp_2 = tmp;
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 2e+21], N[(N[(N[(N[(N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] + alpha), $MachinePrecision] + 1.0), $MachinePrecision] * beta), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(t$95$1 * N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
t_1 := 2 + \left(\beta + \alpha\right)\\
\mathbf{if}\;\beta \leq 2 \cdot 10^{+21}:\\
\;\;\;\;\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{t_1}}{t_1 \cdot \left(3 + \left(\beta + \alpha\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{t_0}}{t_0 + 1}
\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 2e21

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\frac{\frac{\color{blue}{\beta \cdot \left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. *-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) \cdot \color{blue}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) \cdot \color{blue}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. +-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. +-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right) + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right) + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      7. div-add-rev (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      8. lower-/.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      9. lower-+.f64 (99.8%)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites (99.8%)

      \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Step-by-step derivation
      1. lift-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      2. lift-/.f64 (N/A)

        \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      4. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
      5. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right) + 1} \]
      6. lift-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
      7. metadata-eval (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{2}\right) + 1} \]
      8. associate-/l/ (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) + 1\right)}} \]
      9. lower-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) + 1\right)}} \]
    6. Applied rewrites (99.8%)

      \[\leadsto \color{blue}{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \left(\beta + \alpha\right)}}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}} \]

    if 2e21 < beta

    1. Initial program 89.4%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-+.f64 (99.0%)

        \[\leadsto \frac{\frac{1 + \color{blue}{\alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites (99.0%)

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
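
The associate-/l/ rewrite used above is the exact identity

\[\frac{a / b}{c} = \frac{a}{b \cdot c},\]

which trades one division for a (typically cheaper) multiplication. That restructuring, rather than any change in the computed values, is what lifts this variant's speedup above Alternatives 1 and 2 at the same 99.4% accuracy.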

Alternative 4: 99.4% accurate, 1.2× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ t_1 := 2 + \left(\beta + \alpha\right)\\ \mathbf{if}\;\beta \leq 1.12 \cdot 10^{+16}:\\ \;\;\;\;\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{t_1 \cdot \left(\left(3 + \left(\beta + \alpha\right)\right) \cdot t_1\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{t_0}}{t_0 + 1} \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))) (t_1 (+ 2.0 (+ beta alpha))))
   (if (<= beta 1.12e+16)
     (/
      (* (+ (+ (/ (+ 1.0 alpha) beta) alpha) 1.0) beta)
      (* t_1 (* (+ 3.0 (+ beta alpha)) t_1)))
     (/ (/ (+ 1.0 alpha) t_0) (+ t_0 1.0)))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double t_1 = 2.0 + (beta + alpha);
	double tmp;
	if (beta <= 1.12e+16) {
		tmp = (((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / (t_1 * ((3.0 + (beta + alpha)) * t_1));
	} else {
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: tmp
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    t_1 = 2.0d0 + (beta + alpha)
    if (beta <= 1.12d+16) then
        tmp = (((((1.0d0 + alpha) / beta) + alpha) + 1.0d0) * beta) / (t_1 * ((3.0d0 + (beta + alpha)) * t_1))
    else
        tmp = ((1.0d0 + alpha) / t_0) / (t_0 + 1.0d0)
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double t_1 = 2.0 + (beta + alpha);
	double tmp;
	if (beta <= 1.12e+16) {
		tmp = (((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / (t_1 * ((3.0 + (beta + alpha)) * t_1));
	} else {
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	}
	return tmp;
}
[alpha, beta] = sort([alpha, beta])
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	t_1 = 2.0 + (beta + alpha)
	tmp = 0
	if beta <= 1.12e+16:
		tmp = (((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / (t_1 * ((3.0 + (beta + alpha)) * t_1))
	else:
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0)
	return tmp
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	t_1 = Float64(2.0 + Float64(beta + alpha))
	tmp = 0.0
	if (beta <= 1.12e+16)
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(1.0 + alpha) / beta) + alpha) + 1.0) * beta) / Float64(t_1 * Float64(Float64(3.0 + Float64(beta + alpha)) * t_1)));
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / t_0) / Float64(t_0 + 1.0));
	end
	return tmp
end
alpha, beta = num2cell(sort([alpha, beta])){:}
function tmp_2 = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	t_1 = 2.0 + (beta + alpha);
	tmp = 0.0;
	if (beta <= 1.12e+16)
		tmp = (((((1.0 + alpha) / beta) + alpha) + 1.0) * beta) / (t_1 * ((3.0 + (beta + alpha)) * t_1));
	else
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	end
	tmp_2 = tmp;
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 1.12e+16], N[(N[(N[(N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] + alpha), $MachinePrecision] + 1.0), $MachinePrecision] * beta), $MachinePrecision] / N[(t$95$1 * N[(N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
t_1 := 2 + \left(\beta + \alpha\right)\\
\mathbf{if}\;\beta \leq 1.12 \cdot 10^{+16}:\\
\;\;\;\;\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{t_1 \cdot \left(\left(3 + \left(\beta + \alpha\right)\right) \cdot t_1\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{t_0}}{t_0 + 1}
\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 1.12e16

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\frac{\frac{\color{blue}{\beta \cdot \left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. *-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) \cdot \color{blue}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) \cdot \color{blue}{\beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. +-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. +-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right) + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right) + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      7. div-add-rev (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      8. lower-/.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      9. lower-+.f64 (99.8%)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites (99.8%)

      \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Step-by-step derivation
      1. lift-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      2. lift-/.f64 (N/A)

        \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      4. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} + 1} \]
      5. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\color{blue}{\left(\alpha + \beta\right)} + 2 \cdot 1\right) + 1} \]
      6. lift-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{2 \cdot 1}\right) + 1} \]
      7. metadata-eval (N/A)

        \[\leadsto \frac{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + \color{blue}{2}\right) + 1} \]
      8. associate-/l/ (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) + 1\right)}} \]
      9. lower-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2\right) + 1\right)}} \]
    6. Applied rewrites (99.8%)

      \[\leadsto \color{blue}{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \left(\beta + \alpha\right)}}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}} \]
    7. Step-by-step derivation
      1. lift-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \left(\beta + \alpha\right)}}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}} \]
      2. lift-/.f64 (N/A)

        \[\leadsto \frac{\color{blue}{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \left(\beta + \alpha\right)}}}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)} \]
      3. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \color{blue}{\left(\beta + \alpha\right)}}}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)} \]
      4. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\color{blue}{2 + \left(\beta + \alpha\right)}}}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)} \]
      5. lift-*.f64N/A

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \left(\beta + \alpha\right)}}{\color{blue}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}} \]
      6. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \left(\beta + \alpha\right)}}{\left(2 + \color{blue}{\left(\beta + \alpha\right)}\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)} \]
      7. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \left(\beta + \alpha\right)}}{\color{blue}{\left(2 + \left(\beta + \alpha\right)\right)} \cdot \left(3 + \left(\beta + \alpha\right)\right)} \]
      8. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \left(\beta + \alpha\right)}}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \color{blue}{\left(\beta + \alpha\right)}\right)} \]
      9. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{2 + \left(\beta + \alpha\right)}}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \color{blue}{\left(3 + \left(\beta + \alpha\right)\right)}} \]
      10. associate-/l/ N/A

        \[\leadsto \color{blue}{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)\right)}} \]
      11. lower-/.f64 N/A

        \[\leadsto \color{blue}{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)\right)}} \]
    8. Applied rewrites 99.8%

      \[\leadsto \color{blue}{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) \cdot \beta}{\left(2 + \left(\beta + \alpha\right)\right) \cdot \left(\left(3 + \left(\beta + \alpha\right)\right) \cdot \left(2 + \left(\beta + \alpha\right)\right)\right)}} \]

    if 1.12e16 < beta

    1. Initial program 89.5%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-+.f64 99.0

        \[\leadsto \frac{\frac{1 + \color{blue}{\alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 99.0%

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
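A useful fact for checking the Taylor steps in these derivations (an editorial note, not part of the generated log): the numerator of the original program factors, so the large-beta limit of the inner quotient can be verified directly:

\[\left(\alpha + \beta\right) + \beta \cdot \alpha + 1 = \left(1 + \alpha\right) \cdot \left(1 + \beta\right), \qquad \frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{\left(\alpha + \beta\right) + 2} \to 1 + \alpha \quad \left(\beta \to \infty\right)\]

Replacing that quotient by 1 + alpha in the large-beta regime therefore drops only terms of order 1/beta.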

Alternative 5: 99.4% accurate, 1.3× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ t_1 := \left(\beta + \alpha\right) + 2\\ \mathbf{if}\;\beta \leq 1.12 \cdot 10^{+16}:\\ \;\;\;\;\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_1}}{t\_1 \cdot \left(3 + \left(\beta + \alpha\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{t\_0}}{t\_0 + 1}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))) (t_1 (+ (+ beta alpha) 2.0)))
   (if (<= beta 1.12e+16)
     (/
      (/ (+ (fma beta alpha (+ beta alpha)) 1.0) t_1)
      (* t_1 (+ 3.0 (+ beta alpha))))
     (/ (/ (+ 1.0 alpha) t_0) (+ t_0 1.0)))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double t_1 = (beta + alpha) + 2.0;
	double tmp;
	if (beta <= 1.12e+16) {
		tmp = ((fma(beta, alpha, (beta + alpha)) + 1.0) / t_1) / (t_1 * (3.0 + (beta + alpha)));
	} else {
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	}
	return tmp;
}
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	t_1 = Float64(Float64(beta + alpha) + 2.0)
	tmp = 0.0
	if (beta <= 1.12e+16)
		tmp = Float64(Float64(Float64(fma(beta, alpha, Float64(beta + alpha)) + 1.0) / t_1) / Float64(t_1 * Float64(3.0 + Float64(beta + alpha))));
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / t_0) / Float64(t_0 + 1.0));
	end
	return tmp
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 1.12e+16], N[(N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(t$95$1 * N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
t_1 := \left(\beta + \alpha\right) + 2\\
\mathbf{if}\;\beta \leq 1.12 \cdot 10^{+16}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_1}}{t\_1 \cdot \left(3 + \left(\beta + \alpha\right)\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{t\_0}}{t\_0 + 1}\\


\end{array}
\end{array}
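A note on the fma in this alternative (ours, not part of the generated derivation): fma evaluates its product and sum with a single rounding,

\[\mathsf{fma}\left(\beta, \alpha, s\right) = \mathrm{fl}\left(\beta \cdot \alpha + s\right),\]

so with s = fl(beta + alpha) the product beta * alpha and the following addition cost one rounding instead of two, which helps the beta <= 1.12e16 branch track the exact numerator (1 + alpha)(1 + beta) - 1 more closely.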
Derivation
  1. Split input into 2 regimes
  2. if beta < 1.12e16

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Step-by-step derivation
      1. lift-/.f64 N/A

        \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
    3. Applied rewrites 99.9%

      \[\leadsto \color{blue}{\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}}{\left(\left(\beta + \alpha\right) + 2\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}} \]

    if 1.12e16 < beta

    1. Initial program 89.5%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-+.f64 99.0

        \[\leadsto \frac{\frac{1 + \color{blue}{\alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 99.0%

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
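As a quick sanity check of this regime split (our addition, not part of the report; the function names and sample inputs are illustrative): the original form loses accuracy when alpha is near -1 and beta is large, because the order-beta terms in the numerator cancel. Exact rational arithmetic from Python's standard fractions module serves as the reference:

from fractions import Fraction

def naive(alpha, beta):
    # original program, evaluated in binary64
    t_0 = (alpha + beta) + 2.0 * 1.0
    return (((alpha + beta) + beta * alpha) + 1.0) / t_0 / t_0 / (t_0 + 1.0)

def rewritten(alpha, beta):
    # Alternative 5's beta > 1.12e16 branch
    t_0 = (alpha + beta) + 2.0 * 1.0
    return ((1.0 + alpha) / t_0) / (t_0 + 1.0)

def exact(alpha, beta):
    # same expression over the rationals (floats convert exactly)
    a, b = Fraction(alpha), Fraction(beta)
    t_0 = a + b + 2
    return (a + b + b * a + 1) / t_0 / t_0 / (t_0 + 1)

alpha, beta = -1.0 + 1e-9, 1e17
reference = exact(alpha, beta)
for f in (naive, rewritten):
    rel_err = abs(Fraction(f(alpha, beta)) - reference) / reference
    print(f.__name__, float(rel_err))  # expect naive to be orders of magnitude worse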

Alternative 6: 98.9% accurate, 1.5× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \mathbf{if}\;\beta \leq 1.85 \cdot 10^{+15}:\\ \;\;\;\;\frac{\frac{1 + \beta}{2 + \beta}}{\left(2 + \beta\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{t\_0}}{t\_0 + 1}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (if (<= beta 1.85e+15)
     (/ (/ (+ 1.0 beta) (+ 2.0 beta)) (* (+ 2.0 beta) (+ 3.0 (+ beta alpha))))
     (/ (/ (+ 1.0 alpha) t_0) (+ t_0 1.0)))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double tmp;
	if (beta <= 1.85e+15) {
		tmp = ((1.0 + beta) / (2.0 + beta)) / ((2.0 + beta) * (3.0 + (beta + alpha)));
	} else {
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    if (beta <= 1.85d+15) then
        tmp = ((1.0d0 + beta) / (2.0d0 + beta)) / ((2.0d0 + beta) * (3.0d0 + (beta + alpha)))
    else
        tmp = ((1.0d0 + alpha) / t_0) / (t_0 + 1.0d0)
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	double tmp;
	if (beta <= 1.85e+15) {
		tmp = ((1.0 + beta) / (2.0 + beta)) / ((2.0 + beta) * (3.0 + (beta + alpha)));
	} else {
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	}
	return tmp;
}
[alpha, beta] = sort([alpha, beta])
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	tmp = 0
	if beta <= 1.85e+15:
		tmp = ((1.0 + beta) / (2.0 + beta)) / ((2.0 + beta) * (3.0 + (beta + alpha)))
	else:
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0)
	return tmp
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	tmp = 0.0
	if (beta <= 1.85e+15)
		tmp = Float64(Float64(Float64(1.0 + beta) / Float64(2.0 + beta)) / Float64(Float64(2.0 + beta) * Float64(3.0 + Float64(beta + alpha))));
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / t_0) / Float64(t_0 + 1.0));
	end
	return tmp
end
alpha, beta = num2cell(sort([alpha, beta])){:}
function tmp_2 = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = 0.0;
	if (beta <= 1.85e+15)
		tmp = ((1.0 + beta) / (2.0 + beta)) / ((2.0 + beta) * (3.0 + (beta + alpha)));
	else
		tmp = ((1.0 + alpha) / t_0) / (t_0 + 1.0);
	end
	tmp_2 = tmp;
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 1.85e+15], N[(N[(N[(1.0 + beta), $MachinePrecision] / N[(2.0 + beta), $MachinePrecision]), $MachinePrecision] / N[(N[(2.0 + beta), $MachinePrecision] * N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\mathbf{if}\;\beta \leq 1.85 \cdot 10^{+15}:\\
\;\;\;\;\frac{\frac{1 + \beta}{2 + \beta}}{\left(2 + \beta\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{t\_0}}{t\_0 + 1}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 1.85e15

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-/.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-+.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2} + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lower-+.f64 97.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \color{blue}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 97.9%

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    6. Step-by-step derivation
      1. lift-+.f64 98.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    7. Applied rewrites 98.9%

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    8. Step-by-step derivation
      1. +-commutative 98.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. metadata-eval 98.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. +-commutative 98.9

        \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\beta}}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. +-commutative 98.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. +-commutative 98.9

        \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\beta}}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. +-commutative N/A

        \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\beta}}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    9. Applied rewrites 98.9%

      \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(2 + \beta\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}} \]

    if 1.85e15 < beta

    1. Initial program 89.6%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-+.f64 99.0

        \[\leadsto \frac{\frac{1 + \color{blue}{\alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 99.0%

      \[\leadsto \frac{\frac{\color{blue}{1 + \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
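The small-alpha branch above follows from the same factorization of the numerator noted earlier: setting alpha = 0 in the inner quotient gives

\[\left.\frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{\left(\alpha + \beta\right) + 2}\right|_{\alpha = 0} = \frac{1 + \beta}{2 + \beta},\]

which is exactly the expression substituted in the beta <= 1.85e15 regime before the two remaining divisions are reassociated into a single multiply.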

Alternative 7: 98.9% accurate, 1.5× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 1.3 \cdot 10^{+17}:\\ \;\;\;\;\frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{3 + \left(\beta + \alpha\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (if (<= beta 1.3e+17)
   (/ (/ (/ (+ 1.0 beta) (+ 2.0 beta)) (+ 2.0 beta)) (+ 3.0 (+ beta alpha)))
   (/ (/ (+ 1.0 alpha) beta) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double tmp;
	if (beta <= 1.3e+17) {
		tmp = (((1.0 + beta) / (2.0 + beta)) / (2.0 + beta)) / (3.0 + (beta + alpha));
	} else {
		tmp = ((1.0 + alpha) / beta) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: tmp
    if (beta <= 1.3d+17) then
        tmp = (((1.0d0 + beta) / (2.0d0 + beta)) / (2.0d0 + beta)) / (3.0d0 + (beta + alpha))
    else
        tmp = ((1.0d0 + alpha) / beta) / (((alpha + beta) + (2.0d0 * 1.0d0)) + 1.0d0)
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double tmp;
	if (beta <= 1.3e+17) {
		tmp = (((1.0 + beta) / (2.0 + beta)) / (2.0 + beta)) / (3.0 + (beta + alpha));
	} else {
		tmp = ((1.0 + alpha) / beta) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
	}
	return tmp;
}
[alpha, beta] = sort([alpha, beta])
def code(alpha, beta):
	tmp = 0
	if beta <= 1.3e+17:
		tmp = (((1.0 + beta) / (2.0 + beta)) / (2.0 + beta)) / (3.0 + (beta + alpha))
	else:
		tmp = ((1.0 + alpha) / beta) / (((alpha + beta) + (2.0 * 1.0)) + 1.0)
	return tmp
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	tmp = 0.0
	if (beta <= 1.3e+17)
		tmp = Float64(Float64(Float64(Float64(1.0 + beta) / Float64(2.0 + beta)) / Float64(2.0 + beta)) / Float64(3.0 + Float64(beta + alpha)));
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / Float64(Float64(Float64(alpha + beta) + Float64(2.0 * 1.0)) + 1.0));
	end
	return tmp
end
alpha, beta = num2cell(sort([alpha, beta])){:}
function tmp_2 = code(alpha, beta)
	tmp = 0.0;
	if (beta <= 1.3e+17)
		tmp = (((1.0 + beta) / (2.0 + beta)) / (2.0 + beta)) / (3.0 + (beta + alpha));
	else
		tmp = ((1.0 + alpha) / beta) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
	end
	tmp_2 = tmp;
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := If[LessEqual[beta, 1.3e+17], N[(N[(N[(N[(1.0 + beta), $MachinePrecision] / N[(2.0 + beta), $MachinePrecision]), $MachinePrecision] / N[(2.0 + beta), $MachinePrecision]), $MachinePrecision] / N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 1.3 \cdot 10^{+17}:\\
\;\;\;\;\frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{3 + \left(\beta + \alpha\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 1.3e17

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-/.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-+.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2} + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lower-+.f64 97.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \color{blue}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 97.9%

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    6. Step-by-step derivation
      1. lift-+.f64 98.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    7. Applied rewrites 98.9%

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    8. Step-by-step derivation
      1. +-commutative 98.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. metadata-eval 98.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. +-commutative 98.9

        \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\beta}}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. +-commutative 98.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. +-commutative 98.9

        \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\beta}}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. +-commutative N/A

        \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\beta}}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    9. Applied rewrites 98.9%

      \[\leadsto \color{blue}{\frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{3 + \left(\beta + \alpha\right)}} \]

    if 1.3e17 < beta

    1. Initial program 89.5%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-/.f64 N/A

        \[\leadsto \frac{\frac{1 + \alpha}{\color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-+.f64 98.9

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 98.9%

      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
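Unlike Alternatives 5 and 6, the large-beta branch here keeps one factor of 1/beta (an editorial note): expanding the first two divisions together gives

\[\frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{\left(\left(\alpha + \beta\right) + 2\right)^{2}} = \frac{1 + \alpha}{\beta} + O\!\left(\frac{1}{\beta^{2}}\right) \quad \left(\beta \to \infty\right),\]

so the numerator becomes (1 + alpha) / beta rather than 1 + alpha.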

Alternative 8: 98.9% accurate, 1.7× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 5 \cdot 10^{+19}:\\ \;\;\;\;\frac{\frac{1 + \beta}{2 + \beta}}{\left(2 + \beta\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (if (<= beta 5e+19)
   (/ (/ (+ 1.0 beta) (+ 2.0 beta)) (* (+ 2.0 beta) (+ 3.0 (+ beta alpha))))
   (/ (/ (+ 1.0 alpha) beta) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double tmp;
	if (beta <= 5e+19) {
		tmp = ((1.0 + beta) / (2.0 + beta)) / ((2.0 + beta) * (3.0 + (beta + alpha)));
	} else {
		tmp = ((1.0 + alpha) / beta) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: tmp
    if (beta <= 5d+19) then
        tmp = ((1.0d0 + beta) / (2.0d0 + beta)) / ((2.0d0 + beta) * (3.0d0 + (beta + alpha)))
    else
        tmp = ((1.0d0 + alpha) / beta) / (((alpha + beta) + (2.0d0 * 1.0d0)) + 1.0d0)
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double tmp;
	if (beta <= 5e+19) {
		tmp = ((1.0 + beta) / (2.0 + beta)) / ((2.0 + beta) * (3.0 + (beta + alpha)));
	} else {
		tmp = ((1.0 + alpha) / beta) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
	}
	return tmp;
}
[alpha, beta] = sort([alpha, beta])
def code(alpha, beta):
	tmp = 0
	if beta <= 5e+19:
		tmp = ((1.0 + beta) / (2.0 + beta)) / ((2.0 + beta) * (3.0 + (beta + alpha)))
	else:
		tmp = ((1.0 + alpha) / beta) / (((alpha + beta) + (2.0 * 1.0)) + 1.0)
	return tmp
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	tmp = 0.0
	if (beta <= 5e+19)
		tmp = Float64(Float64(Float64(1.0 + beta) / Float64(2.0 + beta)) / Float64(Float64(2.0 + beta) * Float64(3.0 + Float64(beta + alpha))));
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / Float64(Float64(Float64(alpha + beta) + Float64(2.0 * 1.0)) + 1.0));
	end
	return tmp
end
alpha, beta = num2cell(sort([alpha, beta])){:}
function tmp_2 = code(alpha, beta)
	tmp = 0.0;
	if (beta <= 5e+19)
		tmp = ((1.0 + beta) / (2.0 + beta)) / ((2.0 + beta) * (3.0 + (beta + alpha)));
	else
		tmp = ((1.0 + alpha) / beta) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
	end
	tmp_2 = tmp;
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := If[LessEqual[beta, 5e+19], N[(N[(N[(1.0 + beta), $MachinePrecision] / N[(2.0 + beta), $MachinePrecision]), $MachinePrecision] / N[(N[(2.0 + beta), $MachinePrecision] * N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 5 \cdot 10^{+19}:\\
\;\;\;\;\frac{\frac{1 + \beta}{2 + \beta}}{\left(2 + \beta\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 5e19

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-/.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-+.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2} + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lower-+.f64 97.8

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \color{blue}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 97.8%

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    6. Step-by-step derivation
      1. lift-+.f64 98.8

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    7. Applied rewrites 98.8%

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    8. Step-by-step derivation
      1. +-commutative 98.8

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. metadata-eval 98.8

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. +-commutative 98.8

        \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\beta}}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. +-commutative 98.8

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. +-commutative 98.8

        \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\beta}}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. +-commutative N/A

        \[\leadsto \frac{\frac{\frac{1 + \color{blue}{\beta}}{2 + \beta}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    9. Applied rewrites 98.8%

      \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(2 + \beta\right) \cdot \left(3 + \left(\beta + \alpha\right)\right)}} \]

    if 5e19 < beta

    1. Initial program 89.4%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-/.f64 N/A

        \[\leadsto \frac{\frac{1 + \alpha}{\color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-+.f64 98.9

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 98.9%

      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 9: 97.5% accurate, 1.8× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} t_0 := \left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\\ \mathbf{if}\;\beta \leq 4.5:\\ \;\;\;\;\frac{\frac{0.5}{2}}{t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{t\_0}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))
   (if (<= beta 4.5) (/ (/ 0.5 2.0) t_0) (/ (/ (+ 1.0 alpha) beta) t_0))))
assert(alpha < beta);
double code(double alpha, double beta) {
	double t_0 = ((alpha + beta) + (2.0 * 1.0)) + 1.0;
	double tmp;
	if (beta <= 4.5) {
		tmp = (0.5 / 2.0) / t_0;
	} else {
		tmp = ((1.0 + alpha) / beta) / t_0;
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    real(8) :: tmp
    t_0 = ((alpha + beta) + (2.0d0 * 1.0d0)) + 1.0d0
    if (beta <= 4.5d0) then
        tmp = (0.5d0 / 2.0d0) / t_0
    else
        tmp = ((1.0d0 + alpha) / beta) / t_0
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double t_0 = ((alpha + beta) + (2.0 * 1.0)) + 1.0;
	double tmp;
	if (beta <= 4.5) {
		tmp = (0.5 / 2.0) / t_0;
	} else {
		tmp = ((1.0 + alpha) / beta) / t_0;
	}
	return tmp;
}
[alpha, beta] = sort([alpha, beta])
def code(alpha, beta):
	t_0 = ((alpha + beta) + (2.0 * 1.0)) + 1.0
	tmp = 0
	if beta <= 4.5:
		tmp = (0.5 / 2.0) / t_0
	else:
		tmp = ((1.0 + alpha) / beta) / t_0
	return tmp
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	t_0 = Float64(Float64(Float64(alpha + beta) + Float64(2.0 * 1.0)) + 1.0)
	tmp = 0.0
	if (beta <= 4.5)
		tmp = Float64(Float64(0.5 / 2.0) / t_0);
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / t_0);
	end
	return tmp
end
alpha, beta = num2cell(sort([alpha, beta])){:}
function tmp_2 = code(alpha, beta)
	t_0 = ((alpha + beta) + (2.0 * 1.0)) + 1.0;
	tmp = 0.0;
	if (beta <= 4.5)
		tmp = (0.5 / 2.0) / t_0;
	else
		tmp = ((1.0 + alpha) / beta) / t_0;
	end
	tmp_2 = tmp;
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]}, If[LessEqual[beta, 4.5], N[(N[(0.5 / 2.0), $MachinePrecision] / t$95$0), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / t$95$0), $MachinePrecision]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\\
\mathbf{if}\;\beta \leq 4.5:\\
\;\;\;\;\frac{\frac{0.5}{2}}{t\_0}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{t\_0}\\


\end{array}
\end{array}
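A spot check of the constant branch (our addition): at alpha = beta = 0 the original expression and the branch agree exactly,

\[\frac{\frac{\frac{\left(0 + 0\right) + 0 \cdot 0 + 1}{0 + 0 + 2}}{0 + 0 + 2}}{\left(0 + 0 + 2\right) + 1} = \frac{1}{12} = \frac{0.5 / 2}{3},\]

so dropping alpha and beta from the first two quotients costs accuracy only gradually as beta grows toward the 4.5 threshold.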
Derivation
  1. Split input into 2 regimes
  2. if beta < 4.5

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-/.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-+.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2} + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lower-+.f64 97.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \color{blue}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 97.9%

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    6. Step-by-step derivation
      1. lift-+.f64 99.0

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    7. Applied rewrites 99.0%

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    8. Taylor expanded in beta around 0

      \[\leadsto \frac{\frac{\frac{1}{2}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    9. Step-by-step derivation
      1. Applied rewrites 96.8%

        \[\leadsto \frac{\frac{0.5}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in beta around 0

        \[\leadsto \frac{\frac{\frac{1}{2}}{2}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. Step-by-step derivation
        1. Applied rewrites 97.9%

          \[\leadsto \frac{\frac{0.5}{2}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]

    if 4.5 < beta

    1. Initial program 90.0%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-/.f64 N/A

        \[\leadsto \frac{\frac{1 + \alpha}{\color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-+.f64 97.2

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 97.2%

      \[\leadsto \frac{\color{blue}{\frac{1 + \alpha}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 10: 97.4% accurate, 2.0× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 6.2:\\ \;\;\;\;\frac{\frac{0.5}{2}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (if (<= beta 6.2)
   (/ (/ 0.5 2.0) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0))
   (/ (/ (+ 1.0 alpha) beta) beta)))
assert(alpha < beta);
double code(double alpha, double beta) {
	double tmp;
	if (beta <= 6.2) {
		tmp = (0.5 / 2.0) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
	} else {
		tmp = ((1.0 + alpha) / beta) / beta;
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: tmp
    if (beta <= 6.2d0) then
        tmp = (0.5d0 / 2.0d0) / (((alpha + beta) + (2.0d0 * 1.0d0)) + 1.0d0)
    else
        tmp = ((1.0d0 + alpha) / beta) / beta
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double tmp;
	if (beta <= 6.2) {
		tmp = (0.5 / 2.0) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
	} else {
		tmp = ((1.0 + alpha) / beta) / beta;
	}
	return tmp;
}
[alpha, beta] = sort([alpha, beta])
def code(alpha, beta):
	tmp = 0
	if beta <= 6.2:
		tmp = (0.5 / 2.0) / (((alpha + beta) + (2.0 * 1.0)) + 1.0)
	else:
		tmp = ((1.0 + alpha) / beta) / beta
	return tmp
alpha, beta = sort([alpha, beta])
function code(alpha, beta)
	tmp = 0.0
	if (beta <= 6.2)
		tmp = Float64(Float64(0.5 / 2.0) / Float64(Float64(Float64(alpha + beta) + Float64(2.0 * 1.0)) + 1.0));
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
	end
	return tmp
end
alpha, beta = num2cell(sort([alpha, beta])){:}
function tmp_2 = code(alpha, beta)
	tmp = 0.0;
	if (beta <= 6.2)
		tmp = (0.5 / 2.0) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
	else
		tmp = ((1.0 + alpha) / beta) / beta;
	end
	tmp_2 = tmp;
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
code[alpha_, beta_] := If[LessEqual[beta, 6.2], N[(N[(0.5 / 2.0), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 6.2:\\
\;\;\;\;\frac{\frac{0.5}{2}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 6.20000000000000018

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. Step-by-step derivation
      1. lower-/.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-+.f64 N/A

        \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2} + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lower-+.f64 97.9

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \color{blue}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites 97.9%

      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Taylor expanded in alpha around 0

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    6. Step-by-step derivation
      1. lift-+.f64 99.0

        \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    7. Applied rewrites 99.0%

      \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    8. Taylor expanded in beta around 0

      \[\leadsto \frac{\frac{\frac{1}{2}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    9. Step-by-step derivation
      1. Applied rewrites 96.8%

        \[\leadsto \frac{\frac{0.5}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Taylor expanded in beta around 0

        \[\leadsto \frac{\frac{\frac{1}{2}}{2}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. Step-by-step derivation
        1. Applied rewrites 97.8%

          \[\leadsto \frac{\frac{0.5}{2}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]

    if 6.20000000000000018 < beta

    1. Initial program 90.0%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Taylor expanded in beta around inf

      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
    3. Step-by-step derivation
      1. lower-/.f64 N/A

        \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
      2. lower-+.f64 N/A

        \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
      3. unpow2 N/A

        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
      4. lower-*.f64 91.5

        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
    4. Applied rewrites 91.5%

      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
    5. Step-by-step derivation
      1. lift-+.f64 N/A

        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
      2. lift-*.f64 N/A

        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
      3. lift-/.f64 N/A

        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
      4. associate-/r* N/A

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
      5. lower-/.f64 N/A

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
      6. lift-/.f64 N/A

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
      7. lift-+.f64 97.1

        \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
    6. Applied rewrites 97.1%

      \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{\beta}}{\beta}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
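A note on the unpow2 and associate-/r* steps above (our reading of the derivation): the limit being implemented is

\[\frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{\left(\left(\alpha + \beta\right) + 2\right)^{2} \cdot \left(\left(\alpha + \beta\right) + 3\right)} = \frac{1 + \alpha}{\beta^{2}} + O\!\left(\frac{1}{\beta^{3}}\right) \quad \left(\beta \to \infty\right),\]

and dividing by beta twice rather than forming beta * beta presumably avoids overflow once beta exceeds roughly 1.3e154, the square root of the largest double.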

Alternative 11: 97.1% accurate, 2.4× speedup

\[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 8.2:\\ \;\;\;\;\frac{\frac{0.5}{2 + \beta}}{3 + \alpha}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
 :precision binary64
 (if (<= beta 8.2)
   (/ (/ 0.5 (+ 2.0 beta)) (+ 3.0 alpha))
   (/ (/ (+ 1.0 alpha) beta) beta)))
assert(alpha < beta);
double code(double alpha, double beta) {
	double tmp;
	if (beta <= 8.2) {
		tmp = (0.5 / (2.0 + beta)) / (3.0 + alpha);
	} else {
		tmp = ((1.0 + alpha) / beta) / beta;
	}
	return tmp;
}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: tmp
    if (beta <= 8.2d0) then
        tmp = (0.5d0 / (2.0d0 + beta)) / (3.0d0 + alpha)
    else
        tmp = ((1.0d0 + alpha) / beta) / beta
    end if
    code = tmp
end function
assert alpha < beta;
public static double code(double alpha, double beta) {
	double tmp;
	if (beta <= 8.2) {
		tmp = (0.5 / (2.0 + beta)) / (3.0 + alpha);
	} else {
		tmp = ((1.0 + alpha) / beta) / beta;
	}
	return tmp;
}
          [alpha, beta] = sort([alpha, beta])
          def code(alpha, beta):
          	tmp = 0
          	if beta <= 8.2:
          		tmp = (0.5 / (2.0 + beta)) / (3.0 + alpha)
          	else:
          		tmp = ((1.0 + alpha) / beta) / beta
          	return tmp
          
          alpha, beta = sort([alpha, beta])
          function code(alpha, beta)
          	tmp = 0.0
          	if (beta <= 8.2)
          		tmp = Float64(Float64(0.5 / Float64(2.0 + beta)) / Float64(3.0 + alpha));
          	else
          		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
          	end
          	return tmp
          end
          
          alpha, beta = num2cell(sort([alpha, beta])){:}
          function tmp_2 = code(alpha, beta)
          	tmp = 0.0;
          	if (beta <= 8.2)
          		tmp = (0.5 / (2.0 + beta)) / (3.0 + alpha);
          	else
          		tmp = ((1.0 + alpha) / beta) / beta;
          	end
          	tmp_2 = tmp;
          end
          
          NOTE: alpha and beta should be sorted in increasing order before calling this function.
          code[alpha_, beta_] := If[LessEqual[beta, 8.2], N[(N[(0.5 / N[(2.0 + beta), $MachinePrecision]), $MachinePrecision] / N[(3.0 + alpha), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
          
          \begin{array}{l}
          [alpha, beta] = \mathsf{sort}([alpha, beta])\\
          \\
          \begin{array}{l}
          \mathbf{if}\;\beta \leq 8.2:\\
          \;\;\;\;\frac{\frac{0.5}{2 + \beta}}{3 + \alpha}\\
          
          \mathbf{else}:\\
          \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 2 regimes
          2. if beta < 8.1999999999999993

            1. Initial program 99.9%

              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            2. Taylor expanded in alpha around 0

              \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            3. Step-by-step derivation
              1. lower-/.f64 (N/A)

                \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. lower-+.f64 (N/A)

                \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2} + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. lower-+.f64 (98.0%)

                \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \color{blue}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            4. Applied rewrites (98.0%)

              \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            5. Taylor expanded in alpha around 0

              \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            6. Step-by-step derivation
              1. lift-+.f64 (99.0%)

                \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            7. Applied rewrites (99.0%)

              \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            8. Taylor expanded in beta around 0

              \[\leadsto \frac{\frac{\frac{1}{2}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            9. Step-by-step derivation
              1. Applied rewrites (96.8%)

                \[\leadsto \frac{\frac{0.5}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Taylor expanded in beta around 0

                \[\leadsto \frac{\frac{\frac{1}{2}}{2 + \beta}}{\color{blue}{3 + \alpha}} \]
              3. Step-by-step derivation
                1. lower-+.f64 (96.9%)

                  \[\leadsto \frac{\frac{0.5}{2 + \beta}}{3 + \color{blue}{\alpha}} \]
              4. Applied rewrites (96.9%)

                \[\leadsto \frac{\frac{0.5}{2 + \beta}}{\color{blue}{3 + \alpha}} \]

              if 8.1999999999999993 < beta

              1. Initial program 90.0%

                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Taylor expanded in beta around inf

                \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
              3. Step-by-step derivation
                1. lower-/.f64 (N/A)

                  \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                2. lower-+.f64 (N/A)

                  \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                3. unpow2 (N/A)

                  \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                4. lower-*.f64 (91.5%)

                  \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
              4. Applied rewrites (91.5%)

                \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
              5. Step-by-step derivation
                1. lift-+.f64 (N/A)

                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
                2. lift-*.f64 (N/A)

                  \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                3. lift-/.f64 (N/A)

                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                4. associate-/r* (N/A)

                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
                5. lower-/.f64 (N/A)

                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
                6. lift-/.f64 (N/A)

                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
                7. lift-+.f64 (97.1%)

                  \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
              6. Applied rewrites (97.1%)

                \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{\beta}}{\beta}} \]
            10. Recombined 2 regimes into one program.
            11. Add Preprocessing
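
            As a quick sanity check (an added illustration, not part of Herbie's output; both function names below are made up), Alternative 11 can be compared against the original double-precision program on a few sample points:

            def code_original(alpha, beta):
                # the double-precision specification from the top of this report
                t_0 = (alpha + beta) + 2.0
                return (((alpha + beta) + beta * alpha + 1.0) / t_0 / t_0) / (t_0 + 1.0)

            def code_alt11(alpha, beta):
                # Alternative 11, including its sort preprocessing
                alpha, beta = sorted((alpha, beta))
                if beta <= 8.2:
                    return (0.5 / (2.0 + beta)) / (3.0 + alpha)
                return ((1.0 + alpha) / beta) / beta

            for a, b in [(1e-8, 0.5), (0.25, 2.0), (3.0, 1e9)]:
                print(a, b, code_original(a, b), code_alt11(a, b))

            Both branches are Taylor approximations, so pointwise agreement is close only in the regimes they were derived for; the 97.1% figure is an average over Herbie's sampled inputs.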

            Alternative 12: 97.0% accurate, 2.6× speedup

            \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 9.5:\\ \;\;\;\;\frac{0.5}{\left(2 + \beta\right) \cdot \left(\left(3 + \alpha\right) + \beta\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            (FPCore (alpha beta)
             :precision binary64
             (if (<= beta 9.5)
               (/ 0.5 (* (+ 2.0 beta) (+ (+ 3.0 alpha) beta)))
               (/ (/ (+ 1.0 alpha) beta) beta)))
            assert(alpha < beta);
            double code(double alpha, double beta) {
            	double tmp;
            	if (beta <= 9.5) {
            		tmp = 0.5 / ((2.0 + beta) * ((3.0 + alpha) + beta));
            	} else {
            		tmp = ((1.0 + alpha) / beta) / beta;
            	}
            	return tmp;
            }
            
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            module fmin_fmax_functions
                implicit none
                private
                public fmax
                public fmin
            
                interface fmax
                    module procedure fmax88
                    module procedure fmax44
                    module procedure fmax84
                    module procedure fmax48
                end interface
                interface fmin
                    module procedure fmin88
                    module procedure fmin44
                    module procedure fmin84
                    module procedure fmin48
                end interface
            contains
                real(8) function fmax88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                real(4) function fmax44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                real(8) function fmax84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                end function
                real(8) function fmax48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                end function
                real(8) function fmin88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                real(4) function fmin44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                real(8) function fmin84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                end function
                real(8) function fmin48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                end function
            end module
            
            real(8) function code(alpha, beta)
            use fmin_fmax_functions
                real(8), intent (in) :: alpha
                real(8), intent (in) :: beta
                real(8) :: tmp
                if (beta <= 9.5d0) then
                    tmp = 0.5d0 / ((2.0d0 + beta) * ((3.0d0 + alpha) + beta))
                else
                    tmp = ((1.0d0 + alpha) / beta) / beta
                end if
                code = tmp
            end function
            
            assert alpha < beta;
            public static double code(double alpha, double beta) {
            	double tmp;
            	if (beta <= 9.5) {
            		tmp = 0.5 / ((2.0 + beta) * ((3.0 + alpha) + beta));
            	} else {
            		tmp = ((1.0 + alpha) / beta) / beta;
            	}
            	return tmp;
            }
            
            [alpha, beta] = sort([alpha, beta])
            def code(alpha, beta):
            	tmp = 0
            	if beta <= 9.5:
            		tmp = 0.5 / ((2.0 + beta) * ((3.0 + alpha) + beta))
            	else:
            		tmp = ((1.0 + alpha) / beta) / beta
            	return tmp
            
            alpha, beta = sort([alpha, beta])
            function code(alpha, beta)
            	tmp = 0.0
            	if (beta <= 9.5)
            		tmp = Float64(0.5 / Float64(Float64(2.0 + beta) * Float64(Float64(3.0 + alpha) + beta)));
            	else
            		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
            	end
            	return tmp
            end
            
            alpha, beta = num2cell(sort([alpha, beta])){:}
            function tmp_2 = code(alpha, beta)
            	tmp = 0.0;
            	if (beta <= 9.5)
            		tmp = 0.5 / ((2.0 + beta) * ((3.0 + alpha) + beta));
            	else
            		tmp = ((1.0 + alpha) / beta) / beta;
            	end
            	tmp_2 = tmp;
            end
            
            NOTE: alpha and beta should be sorted in increasing order before calling this function.
            code[alpha_, beta_] := If[LessEqual[beta, 9.5], N[(0.5 / N[(N[(2.0 + beta), $MachinePrecision] * N[(N[(3.0 + alpha), $MachinePrecision] + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
            
            \begin{array}{l}
            [alpha, beta] = \mathsf{sort}([alpha, beta])\\
            \\
            \begin{array}{l}
            \mathbf{if}\;\beta \leq 9.5:\\
            \;\;\;\;\frac{0.5}{\left(2 + \beta\right) \cdot \left(\left(3 + \alpha\right) + \beta\right)}\\
            
            \mathbf{else}:\\
            \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
            
            
            \end{array}
            \end{array}
            
            Derivation
            1. Split input into 2 regimes
            2. if beta < 9.5

              1. Initial program 99.9%

                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Taylor expanded in alpha around 0

                \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. Step-by-step derivation
                1. lower-/.f64 (N/A)

                  \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. lower-+.f64 (N/A)

                  \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2} + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                3. lower-+.f64 (98.0%)

                  \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \color{blue}{\beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              4. Applied rewrites (98.0%)

                \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              5. Taylor expanded in alpha around 0

                \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              6. Step-by-step derivation
                1. lift-+.f64 (99.0%)

                  \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{2 + \color{blue}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              7. Applied rewrites (99.0%)

                \[\leadsto \frac{\frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              8. Taylor expanded in beta around 0

                \[\leadsto \frac{\frac{\frac{1}{2}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              9. Step-by-step derivation
                1. Applied rewrites (96.8%)

                  \[\leadsto \frac{\frac{0.5}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Step-by-step derivation
                  1. metadata-eval (96.8%)

                    \[\leadsto \frac{\frac{0.5}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. metadata-eval (N/A)

                    \[\leadsto \frac{\frac{\frac{1}{2}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  3. lift-/.f64 (N/A)

                    \[\leadsto \color{blue}{\frac{\frac{\frac{1}{2}}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                  4. lift-/.f64 (N/A)

                    \[\leadsto \frac{\color{blue}{\frac{\frac{1}{2}}{2 + \beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. lift-+.f64 (N/A)

                    \[\leadsto \frac{\frac{\frac{1}{2}}{2 + \beta}}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                3. Applied rewrites (96.8%)

                  \[\leadsto \color{blue}{\frac{0.5}{\left(2 + \beta\right) \cdot \left(\left(3 + \alpha\right) + \beta\right)}} \]

                if 9.5 < beta

                1. Initial program 90.0%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Taylor expanded in beta around inf

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                3. Step-by-step derivation
                  1. lower-/.f64 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                  2. lower-+.f64 (N/A)

                    \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                  3. unpow2 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                  4. lower-*.f64 (91.5%)

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                4. Applied rewrites (91.5%)

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                5. Step-by-step derivation
                  1. lift-+.f64 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
                  2. lift-*.f64 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                  3. lift-/.f64 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                  4. associate-/r* (N/A)

                    \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
                  5. lower-/.f64 (N/A)

                    \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
                  6. lift-/.f64 (N/A)

                    \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
                  7. lift-+.f64 (97.1%)

                    \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
                6. Applied rewrites (97.1%)

                  \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{\beta}}{\beta}} \]
              10. Recombined 2 regimes into one program.
              11. Add Preprocessing
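
              The final rewrite in the small-beta branch is worth spelling out (an editorial note): since (alpha + beta) + 2·1 + 1 equals (3 + alpha) + beta, the two nested divisions combine by the real-arithmetic identity

              \[\frac{\frac{0.5}{2 + \beta}}{\left(3 + \alpha\right) + \beta} = \frac{0.5}{\left(2 + \beta\right) \cdot \left(\left(3 + \alpha\right) + \beta\right)} \]

              This trades one division for a multiplication, which is typically cheaper. The two sides can round differently in binary64, which is why accuracy is re-measured (96.8%) after the rewrite.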

              Alternative 13: 54.9% accurate, 2.9× speedup

              \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\beta \leq 1.4 \cdot 10^{+154}:\\ \;\;\;\;\frac{1 + \alpha}{\beta \cdot \beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              (FPCore (alpha beta)
               :precision binary64
               (if (<= beta 1.4e+154)
                 (/ (+ 1.0 alpha) (* beta beta))
                 (/ (/ alpha beta) beta)))
              assert(alpha < beta);
              double code(double alpha, double beta) {
              	double tmp;
              	if (beta <= 1.4e+154) {
              		tmp = (1.0 + alpha) / (beta * beta);
              	} else {
              		tmp = (alpha / beta) / beta;
              	}
              	return tmp;
              }
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              module fmin_fmax_functions
                  implicit none
                  private
                  public fmax
                  public fmin
              
                  interface fmax
                      module procedure fmax88
                      module procedure fmax44
                      module procedure fmax84
                      module procedure fmax48
                  end interface
                  interface fmin
                      module procedure fmin88
                      module procedure fmin44
                      module procedure fmin84
                      module procedure fmin48
                  end interface
              contains
                  real(8) function fmax88(x, y) result (res)
                      real(8), intent (in) :: x
                      real(8), intent (in) :: y
                      res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                  end function
                  real(4) function fmax44(x, y) result (res)
                      real(4), intent (in) :: x
                      real(4), intent (in) :: y
                      res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                  end function
                  real(8) function fmax84(x, y) result(res)
                      real(8), intent (in) :: x
                      real(4), intent (in) :: y
                      res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                  end function
                  real(8) function fmax48(x, y) result(res)
                      real(4), intent (in) :: x
                      real(8), intent (in) :: y
                      res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                  end function
                  real(8) function fmin88(x, y) result (res)
                      real(8), intent (in) :: x
                      real(8), intent (in) :: y
                      res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                  end function
                  real(4) function fmin44(x, y) result (res)
                      real(4), intent (in) :: x
                      real(4), intent (in) :: y
                      res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                  end function
                  real(8) function fmin84(x, y) result(res)
                      real(8), intent (in) :: x
                      real(4), intent (in) :: y
                      res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                  end function
                  real(8) function fmin48(x, y) result(res)
                      real(4), intent (in) :: x
                      real(8), intent (in) :: y
                      res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                  end function
              end module
              
              real(8) function code(alpha, beta)
              use fmin_fmax_functions
                  real(8), intent (in) :: alpha
                  real(8), intent (in) :: beta
                  real(8) :: tmp
                  if (beta <= 1.4d+154) then
                      tmp = (1.0d0 + alpha) / (beta * beta)
                  else
                      tmp = (alpha / beta) / beta
                  end if
                  code = tmp
              end function
              
              assert alpha < beta;
              public static double code(double alpha, double beta) {
              	double tmp;
              	if (beta <= 1.4e+154) {
              		tmp = (1.0 + alpha) / (beta * beta);
              	} else {
              		tmp = (alpha / beta) / beta;
              	}
              	return tmp;
              }
              
              [alpha, beta] = sort([alpha, beta])
              def code(alpha, beta):
              	tmp = 0
              	if beta <= 1.4e+154:
              		tmp = (1.0 + alpha) / (beta * beta)
              	else:
              		tmp = (alpha / beta) / beta
              	return tmp
              
              alpha, beta = sort([alpha, beta])
              function code(alpha, beta)
              	tmp = 0.0
              	if (beta <= 1.4e+154)
              		tmp = Float64(Float64(1.0 + alpha) / Float64(beta * beta));
              	else
              		tmp = Float64(Float64(alpha / beta) / beta);
              	end
              	return tmp
              end
              
              alpha, beta = num2cell(sort([alpha, beta])){:}
              function tmp_2 = code(alpha, beta)
              	tmp = 0.0;
              	if (beta <= 1.4e+154)
              		tmp = (1.0 + alpha) / (beta * beta);
              	else
              		tmp = (alpha / beta) / beta;
              	end
              	tmp_2 = tmp;
              end
              
              NOTE: alpha and beta should be sorted in increasing order before calling this function.
              code[alpha_, beta_] := If[LessEqual[beta, 1.4e+154], N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]
              
              \begin{array}{l}
              [alpha, beta] = \mathsf{sort}([alpha, beta])\\
              \\
              \begin{array}{l}
              \mathbf{if}\;\beta \leq 1.4 \cdot 10^{+154}:\\
              \;\;\;\;\frac{1 + \alpha}{\beta \cdot \beta}\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if beta < 1.4e154

                1. Initial program 99.8%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Taylor expanded in beta around inf

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                3. Step-by-step derivation
                  1. lower-/.f64 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                  2. lower-+.f64 (N/A)

                    \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                  3. unpow2 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                  4. lower-*.f64 (36.6%)

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                4. Applied rewrites (36.6%)

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]

                if 1.4e154 < beta

                1. Initial program 82.0%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Taylor expanded in beta around inf

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                3. Step-by-step derivation
                  1. lower-/.f64 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                  2. lower-+.f64 (N/A)

                    \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                  3. unpow2 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                  4. lower-*.f64 (88.8%)

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                4. Applied rewrites (88.8%)

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                5. Taylor expanded in alpha around inf

                  \[\leadsto \frac{\alpha}{\color{blue}{\beta} \cdot \beta} \]
                6. Step-by-step derivation
                  1. Applied rewrites (88.8%)

                    \[\leadsto \frac{\alpha}{\color{blue}{\beta} \cdot \beta} \]
                  2. Step-by-step derivation
                    1. lift-/.f64 (N/A)

                      \[\leadsto \frac{\alpha}{\color{blue}{\beta \cdot \beta}} \]
                    2. lift-*.f64 (N/A)

                      \[\leadsto \frac{\alpha}{\beta \cdot \color{blue}{\beta}} \]
                    3. associate-/r* (N/A)

                      \[\leadsto \frac{\frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
                    4. lower-/.f64 (N/A)

                      \[\leadsto \frac{\frac{\alpha}{\beta}}{\color{blue}{\beta}} \]
                    5. lower-/.f64 (97.5%)

                      \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
                  3. Applied rewrites (97.5%)

                    \[\leadsto \color{blue}{\frac{\frac{\alpha}{\beta}}{\beta}} \]
                7. Recombined 2 regimes into one program.
                8. Add Preprocessing
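
                The 1.4e154 split point is easier to read with some context (my gloss, not Herbie's): it sits just above the square root of the largest double, about 1.34e154, beyond which beta * beta overflows to infinity and (1 + alpha) / (beta * beta) flushes to zero. Dividing twice sidesteps the overflow, as this small sketch shows:

                import sys

                print(sys.float_info.max ** 0.5)        # about 1.34e154
                alpha, beta = 1e10, 1.5e154
                print(beta * beta)                      # inf: the square overflows
                print((1.0 + alpha) / (beta * beta))    # 0.0: finite / inf
                print((alpha / beta) / beta)            # tiny but nonzero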

                Alternative 14: 55.4% accurate, 3.2× speedup

                \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{\frac{1 + \alpha}{\beta}}{\beta} \end{array} \]
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                (FPCore (alpha beta) :precision binary64 (/ (/ (+ 1.0 alpha) beta) beta))
                assert(alpha < beta);
                double code(double alpha, double beta) {
                	return ((1.0 + alpha) / beta) / beta;
                }
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                module fmin_fmax_functions
                    implicit none
                    private
                    public fmax
                    public fmin
                
                    interface fmax
                        module procedure fmax88
                        module procedure fmax44
                        module procedure fmax84
                        module procedure fmax48
                    end interface
                    interface fmin
                        module procedure fmin88
                        module procedure fmin44
                        module procedure fmin84
                        module procedure fmin48
                    end interface
                contains
                    real(8) function fmax88(x, y) result (res)
                        real(8), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                    end function
                    real(4) function fmax44(x, y) result (res)
                        real(4), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                    end function
                    real(8) function fmax84(x, y) result(res)
                        real(8), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                    end function
                    real(8) function fmax48(x, y) result(res)
                        real(4), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                    end function
                    real(8) function fmin88(x, y) result (res)
                        real(8), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                    end function
                    real(4) function fmin44(x, y) result (res)
                        real(4), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                    end function
                    real(8) function fmin84(x, y) result(res)
                        real(8), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                    end function
                    real(8) function fmin48(x, y) result(res)
                        real(4), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                    end function
                end module
                
                real(8) function code(alpha, beta)
                use fmin_fmax_functions
                    real(8), intent (in) :: alpha
                    real(8), intent (in) :: beta
                    code = ((1.0d0 + alpha) / beta) / beta
                end function
                
                assert alpha < beta;
                public static double code(double alpha, double beta) {
                	return ((1.0 + alpha) / beta) / beta;
                }
                
                [alpha, beta] = sort([alpha, beta])
                def code(alpha, beta):
                	return ((1.0 + alpha) / beta) / beta
                
                alpha, beta = sort([alpha, beta])
                function code(alpha, beta)
                	return Float64(Float64(Float64(1.0 + alpha) / beta) / beta)
                end
                
                alpha, beta = num2cell(sort([alpha, beta])){:}
                function tmp = code(alpha, beta)
                	tmp = ((1.0 + alpha) / beta) / beta;
                end
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                code[alpha_, beta_] := N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]
                
                \begin{array}{l}
                [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                \\
                \frac{\frac{1 + \alpha}{\beta}}{\beta}
                \end{array}
                
                Derivation
                1. Initial program 94.4%

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Taylor expanded in beta around inf

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                3. Step-by-step derivation
                    1. lower-/.f64 (N/A)

                      \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                    2. lower-+.f64 (N/A)

                      \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                    3. unpow2 (N/A)

                      \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                    4. lower-*.f64 (52.3%)

                    \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                  4. Applied rewrites (52.3%)

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                5. Step-by-step derivation
                    1. lift-+.f64 (N/A)

                      \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta} \cdot \beta} \]
                    2. lift-*.f64 (N/A)

                      \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                    3. lift-/.f64 (N/A)

                      \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                    4. associate-/r* (N/A)

                      \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
                    5. lower-/.f64 (N/A)

                      \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\color{blue}{\beta}} \]
                    6. lift-/.f64 (N/A)

                      \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
                    7. lift-+.f64 (55.4%)

                    \[\leadsto \frac{\frac{1 + \alpha}{\beta}}{\beta} \]
                  6. Applied rewrites (55.4%)

                  \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{\beta}}{\beta}} \]
                7. Add Preprocessing
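
                Dropping the regime split is what costs accuracy here (a worked example added for context): at alpha = beta = 0 the specification evaluates to

                \[\frac{\frac{\frac{1}{2}}{2}}{3} = \frac{1}{12} \approx 0.0833 \]

                while ((1.0 + alpha) / beta) / beta divides by zero and returns +inf in binary64. The single expression is only trustworthy in the large-beta regime it was derived from, hence the 55.4% overall figure.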

                Alternative 15: 51.2% accurate, 3.6× speedup

                \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \begin{array}{l} \mathbf{if}\;\alpha \leq 8 \cdot 10^{+22}:\\ \;\;\;\;\frac{1}{\beta \cdot \beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\alpha}{\beta \cdot \beta}\\ \end{array} \end{array} \]
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                (FPCore (alpha beta)
                 :precision binary64
                 (if (<= alpha 8e+22) (/ 1.0 (* beta beta)) (/ alpha (* beta beta))))
                assert(alpha < beta);
                double code(double alpha, double beta) {
                	double tmp;
                	if (alpha <= 8e+22) {
                		tmp = 1.0 / (beta * beta);
                	} else {
                		tmp = alpha / (beta * beta);
                	}
                	return tmp;
                }
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                module fmin_fmax_functions
                    implicit none
                    private
                    public fmax
                    public fmin
                
                    interface fmax
                        module procedure fmax88
                        module procedure fmax44
                        module procedure fmax84
                        module procedure fmax48
                    end interface
                    interface fmin
                        module procedure fmin88
                        module procedure fmin44
                        module procedure fmin84
                        module procedure fmin48
                    end interface
                contains
                    real(8) function fmax88(x, y) result (res)
                        real(8), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                    end function
                    real(4) function fmax44(x, y) result (res)
                        real(4), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                    end function
                    real(8) function fmax84(x, y) result(res)
                        real(8), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                    end function
                    real(8) function fmax48(x, y) result(res)
                        real(4), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                    end function
                    real(8) function fmin88(x, y) result (res)
                        real(8), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                    end function
                    real(4) function fmin44(x, y) result (res)
                        real(4), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                    end function
                    real(8) function fmin84(x, y) result(res)
                        real(8), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                    end function
                    real(8) function fmin48(x, y) result(res)
                        real(4), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                    end function
                end module
                
                real(8) function code(alpha, beta)
                use fmin_fmax_functions
                    real(8), intent (in) :: alpha
                    real(8), intent (in) :: beta
                    real(8) :: tmp
                    if (alpha <= 8d+22) then
                        tmp = 1.0d0 / (beta * beta)
                    else
                        tmp = alpha / (beta * beta)
                    end if
                    code = tmp
                end function
                
                assert alpha < beta;
                public static double code(double alpha, double beta) {
                	double tmp;
                	if (alpha <= 8e+22) {
                		tmp = 1.0 / (beta * beta);
                	} else {
                		tmp = alpha / (beta * beta);
                	}
                	return tmp;
                }
                
                [alpha, beta] = sort([alpha, beta])
                def code(alpha, beta):
                	tmp = 0
                	if alpha <= 8e+22:
                		tmp = 1.0 / (beta * beta)
                	else:
                		tmp = alpha / (beta * beta)
                	return tmp
                
                alpha, beta = sort([alpha, beta])
                function code(alpha, beta)
                	tmp = 0.0
                	if (alpha <= 8e+22)
                		tmp = Float64(1.0 / Float64(beta * beta));
                	else
                		tmp = Float64(alpha / Float64(beta * beta));
                	end
                	return tmp
                end
                
                alpha, beta = num2cell(sort([alpha, beta])){:}
                function tmp_2 = code(alpha, beta)
                	tmp = 0.0;
                	if (alpha <= 8e+22)
                		tmp = 1.0 / (beta * beta);
                	else
                		tmp = alpha / (beta * beta);
                	end
                	tmp_2 = tmp;
                end
                
                NOTE: alpha and beta should be sorted in increasing order before calling this function.
                code[alpha_, beta_] := If[LessEqual[alpha, 8e+22], N[(1.0 / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(alpha / N[(beta * beta), $MachinePrecision]), $MachinePrecision]]
                
                \begin{array}{l}
                [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                \\
                \begin{array}{l}
                \mathbf{if}\;\alpha \leq 8 \cdot 10^{+22}:\\
                \;\;\;\;\frac{1}{\beta \cdot \beta}\\
                
                \mathbf{else}:\\
                \;\;\;\;\frac{\alpha}{\beta \cdot \beta}\\
                
                
                \end{array}
                \end{array}
                
                Derivation
                1. Split input into 2 regimes
                2. if alpha < 8e22

                  1. Initial program 99.8%

                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. Taylor expanded in beta around inf

                    \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                  3. Step-by-step derivation
                      1. lower-/.f64 (N/A)

                        \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                      2. lower-+.f64 (N/A)

                        \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                      3. unpow2 (N/A)

                        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                      4. lower-*.f64 (50.8%)

                      \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                    4. Applied rewrites (50.8%)

                    \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                  5. Taylor expanded in alpha around 0

                    \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]
                  6. Step-by-step derivation
                      1. Applied rewrites (49.7%)

                      \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]

                    if 8e22 < alpha

                    1. Initial program 41.2%

                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. Taylor expanded in beta around inf

                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                    3. Step-by-step derivation
                      1. lower-/.f64 (N/A)

                        \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                      2. lower-+.f64 (N/A)

                        \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                      3. unpow2 (N/A)

                        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                      4. lower-*.f64 (66.7%)

                        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
                    4. Applied rewrites (66.7%)

                      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                    5. Taylor expanded in alpha around inf

                      \[\leadsto \frac{\alpha}{\color{blue}{\beta} \cdot \beta} \]
                    6. Step-by-step derivation
                      1. Applied rewrites (66.7%)

                        \[\leadsto \frac{\alpha}{\color{blue}{\beta} \cdot \beta} \]
                    7. Recombined 2 regimes into one program.
                    8. Add Preprocessing
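
                    One binary64 effect behind this split (an added note): once alpha reaches 2^53, about 9.0e15, adding 1.0 is absorbed by rounding, so 1 + alpha and alpha are the same float. The 8e22 threshold Herbie found lies far above that point, as a one-line check confirms:

                    print(1.0 + 8e22 == 8e22)   # True: the +1.0 is lost to rounding above 2**53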

                    Alternative 16: 52.3% accurate, 4.2× speedup

                    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{1 + \alpha}{\beta \cdot \beta} \end{array} \]
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    (FPCore (alpha beta) :precision binary64 (/ (+ 1.0 alpha) (* beta beta)))
                    assert(alpha < beta);
                    double code(double alpha, double beta) {
                    	return (1.0 + alpha) / (beta * beta);
                    }
                    
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    module fmin_fmax_functions
                        implicit none
                        private
                        public fmax
                        public fmin
                    
                        interface fmax
                            module procedure fmax88
                            module procedure fmax44
                            module procedure fmax84
                            module procedure fmax48
                        end interface
                        interface fmin
                            module procedure fmin88
                            module procedure fmin44
                            module procedure fmin84
                            module procedure fmin48
                        end interface
                    contains
                        real(8) function fmax88(x, y) result (res)
                            real(8), intent (in) :: x
                            real(8), intent (in) :: y
                            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                        end function
                        real(4) function fmax44(x, y) result (res)
                            real(4), intent (in) :: x
                            real(4), intent (in) :: y
                            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                        end function
                        real(8) function fmax84(x, y) result(res)
                            real(8), intent (in) :: x
                            real(4), intent (in) :: y
                            res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                        end function
                        real(8) function fmax48(x, y) result(res)
                            real(4), intent (in) :: x
                            real(8), intent (in) :: y
                            res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                        end function
                        real(8) function fmin88(x, y) result (res)
                            real(8), intent (in) :: x
                            real(8), intent (in) :: y
                            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                        end function
                        real(4) function fmin44(x, y) result (res)
                            real(4), intent (in) :: x
                            real(4), intent (in) :: y
                            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                        end function
                        real(8) function fmin84(x, y) result(res)
                            real(8), intent (in) :: x
                            real(4), intent (in) :: y
                            res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                        end function
                        real(8) function fmin48(x, y) result(res)
                            real(4), intent (in) :: x
                            real(8), intent (in) :: y
                            res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                        end function
                    end module
                    
                    real(8) function code(alpha, beta)
                    use fmin_fmax_functions
                        real(8), intent (in) :: alpha
                        real(8), intent (in) :: beta
                        code = (1.0d0 + alpha) / (beta * beta)
                    end function
                    
                    assert alpha < beta;
                    public static double code(double alpha, double beta) {
                    	return (1.0 + alpha) / (beta * beta);
                    }
                    
                    [alpha, beta] = sort([alpha, beta])
                    def code(alpha, beta):
                    	return (1.0 + alpha) / (beta * beta)
                    
                    alpha, beta = sort([alpha, beta])
                    function code(alpha, beta)
                    	return Float64(Float64(1.0 + alpha) / Float64(beta * beta))
                    end
                    
                    alpha, beta = num2cell(sort([alpha, beta])){:}
                    function tmp = code(alpha, beta)
                    	tmp = (1.0 + alpha) / (beta * beta);
                    end
                    
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    code[alpha_, beta_] := N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
                    
                    \begin{array}{l}
                    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                    \\
                    \frac{1 + \alpha}{\beta \cdot \beta}
                    \end{array}
                    
                    Derivation
                    1. Initial program 94.4%

                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. Taylor expanded in beta around inf

                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                    3. Step-by-step derivation
                      1. lower-/.f64N/A

                        \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
                      2. lower-+.f64N/A

                        \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
                      3. unpow2N/A

                        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
  4. lower-*.f64 52.3

                        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
4. Applied rewrites 52.3%

                      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                    5. Add Preprocessing
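
For reference, the leading term in step 2 can be checked by hand (an editorial gloss, not Herbie output): the numerator factors as \(\left(1 + \alpha\right)\left(1 + \beta\right)\), and comparing degrees in \(\beta\) gives

\[\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2\right)^{2} \cdot \left(\left(\alpha + \beta\right) + 3\right)} = \frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{\left(\left(\alpha + \beta\right) + 2\right)^{2} \cdot \left(\left(\alpha + \beta\right) + 3\right)} \sim \frac{1 + \alpha}{{\beta}^{2}} \quad \text{as } \beta \to \infty\]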

Alternative 17: 49.8% accurate, 4.9× speedup

                    \[\begin{array}{l} [alpha, beta] = \mathsf{sort}([alpha, beta])\\ \\ \frac{1}{\beta \cdot \beta} \end{array} \]
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    (FPCore (alpha beta) :precision binary64 (/ 1.0 (* beta beta)))
                    assert(alpha < beta);
                    double code(double alpha, double beta) {
                    	return 1.0 / (beta * beta);
                    }
                    
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
! fmin_fmax_functions module omitted: identical to the module listed for the previous alternative.
                    
                    real(8) function code(alpha, beta)
                    use fmin_fmax_functions
                        real(8), intent (in) :: alpha
                        real(8), intent (in) :: beta
                        code = 1.0d0 / (beta * beta)
                    end function
                    
                    assert alpha < beta;
                    public static double code(double alpha, double beta) {
                    	return 1.0 / (beta * beta);
                    }
                    
alpha, beta = sorted([alpha, beta])
                    def code(alpha, beta):
                    	return 1.0 / (beta * beta)
                    
                    alpha, beta = sort([alpha, beta])
                    function code(alpha, beta)
                    	return Float64(1.0 / Float64(beta * beta))
                    end
                    
[alpha, beta] = num2cell(sort([alpha, beta])){:};
                    function tmp = code(alpha, beta)
                    	tmp = 1.0 / (beta * beta);
                    end
                    
                    NOTE: alpha and beta should be sorted in increasing order before calling this function.
                    code[alpha_, beta_] := N[(1.0 / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
                    
                    \begin{array}{l}
                    [alpha, beta] = \mathsf{sort}([alpha, beta])\\
                    \\
                    \frac{1}{\beta \cdot \beta}
                    \end{array}
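
The sort note above is load-bearing: the original program is symmetric in alpha and beta, but 1/(beta*beta) is not, so the generated code assumes its arguments arrive in increasing order. A minimal Python sketch of the intended calling convention (the wrapper name call_code is illustrative, not from the report):

def code(alpha, beta):
	return 1.0 / (beta * beta)

def call_code(a, b):
	alpha, beta = sorted([a, b])  # the report's sort preprocessing
	return code(alpha, beta)

print(call_code(3.0, 0.5) == call_code(0.5, 3.0))  # True: argument order does not matter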
                    
                    Derivation
                    1. Initial program 94.4%

                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. Taylor expanded in beta around inf

                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                    3. Step-by-step derivation
  1. lower-/.f64 N/A

                        \[\leadsto \frac{1 + \alpha}{\color{blue}{{\beta}^{2}}} \]
  2. lower-+.f64 N/A

                        \[\leadsto \frac{1 + \alpha}{{\color{blue}{\beta}}^{2}} \]
  3. unpow2 N/A

                        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
  4. lower-*.f64 52.3

                        \[\leadsto \frac{1 + \alpha}{\beta \cdot \color{blue}{\beta}} \]
4. Applied rewrites 52.3%

                      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
5. Taylor expanded in alpha around 0 (see the gloss after this derivation)

                      \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]
                    6. Step-by-step derivation
  1. Applied rewrites 49.8%

                        \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]
                      2. Add Preprocessing
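
Step 5 is the lossy move (again an editorial gloss, not Herbie output): truncating in \(\alpha\) around 0 simply drops the \(\alpha\)-proportional term,

\[\frac{1 + \alpha}{\beta \cdot \beta} = \frac{1}{{\beta}^{2}} + \frac{\alpha}{{\beta}^{2}} \approx \frac{1}{{\beta}^{2}} \quad \text{for } \left|\alpha\right| \ll 1\]

which is why this alternative is faster than the previous one but only 49.8% accurate overall.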

                      Reproduce

                      herbie shell --seed 2025096 
                      (FPCore (alpha beta)
                        :name "Octave 3.8, jcobi/3"
                        :precision binary64
                        :pre (and (> alpha -1.0) (> beta -1.0))
                        (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))
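
To reproduce, run the command above and paste the FPCore at the shell prompt: herbie shell reads FPCore expressions from standard input and prints improved alternatives, and with the same seed and Herbie version it should regenerate this report's results.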