Octave 3.8, jcobi/3

Percentage Accurate: 94.2% → 99.8%
Time: 10.0s
Alternatives: 16
Speedup: 1.3×

Specification

\[\alpha > -1 \land \beta > -1\]
\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1} \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
module fmin_fmax_functions
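    ! NaN-aware max/min: "x /= x" is true exactly when x is NaN, so merge()
    ! returns the other operand instead of propagating NaN through max/min.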
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs Input Value

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable, named in the plot title; the vertical axis is accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 16 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, each blue circle shows an alternative, and the line shows the best available speed-accuracy tradeoffs.
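
That frontier line is a speed-accuracy Pareto frontier: an alternative sits on it only if nothing else is both faster and more accurate. As a minimal illustration (my sketch, not Herbie code; pareto_frontier is a hypothetical helper), the snippet below recovers that frontier from the (speedup, accuracy) pairs quoted in this report:

def pareto_frontier(points):
    """Keep the points that no other point beats in both speedup and accuracy."""
    frontier = []
    best_acc = float("-inf")
    for speed, acc in sorted(points, reverse=True):  # sweep from fastest down
        if acc > best_acc:                           # strictly improves accuracy
            frontier.append((speed, acc))
            best_acc = acc
    return frontier[::-1]                            # ascending speedup

# (speedup, accuracy%) of the initial program and alternatives 1-6 below
points = [(1.0, 94.2), (0.5, 99.8), (1.0, 94.7), (1.0, 99.8),
          (1.3, 94.8), (1.4, 91.8), (1.8, 72.1)]
print(pareto_frontier(points))  # [(1.0, 99.8), (1.3, 94.8), (1.4, 91.8), (1.8, 72.1)]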

Initial Program: 94.2% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\ \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1} \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0))))
   (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
module fmin_fmax_functions
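    ! NaN-aware max/min: "x /= x" is true exactly when x is NaN, so merge()
    ! returns the other operand instead of propagating NaN through max/min.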
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(alpha, beta)
use fmin_fmax_functions
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + (2.0 * 1.0);
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
def code(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
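
A quick way to see why this program is only 94.2% accurate (my sketch, not part of the report): for beta near -1 and large alpha, the terms (alpha + beta) and beta * alpha nearly cancel, so rounding errors on the order of ulp(alpha) dominate the small true sum before the final + 1.0. Exact rational arithmetic serves as the reference; the inputs are chosen only to trigger this cancellation.

from fractions import Fraction

def code(alpha, beta):
    t_0 = (alpha + beta) + 2.0
    return (((alpha + beta) + beta * alpha) + 1.0) / t_0 / t_0 / (t_0 + 1.0)

def exact(alpha, beta):
    # the same formula, evaluated without any rounding
    a, b = Fraction(alpha), Fraction(beta)
    t_0 = (a + b) + 2
    return (((a + b) + b * a) + 1) / t_0 / t_0 / (t_0 + 1)

alpha, beta = 1e15, -1.0 + 1e-12   # adversarial: beta is within 1e-12 of -1
rel_err = abs(Fraction(code(alpha, beta)) / exact(alpha, beta) - 1)
print(float(rel_err))              # far above the ~1e-16 of a benign input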

Alternative 1: 99.8% accurate, 0.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2\\ t_1 := t\_0 + 1\\ \mathbf{if}\;\alpha \leq 170000000:\\ \;\;\;\;\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_1}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \alpha, 1 - {\alpha}^{-1}\right)}{t\_0}}{t\_1}\\ \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) 2.0)) (t_1 (+ t_0 1.0)))
   (if (<= alpha 170000000.0)
     (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) t_1)
     (/
      (/
       (fma (/ beta (+ (+ beta alpha) 2.0)) alpha (- 1.0 (pow alpha -1.0)))
       t_0)
      t_1))))
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + 2.0;
	double t_1 = t_0 + 1.0;
	double tmp;
	if (alpha <= 170000000.0) {
		tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / t_1;
	} else {
		tmp = (fma((beta / ((beta + alpha) + 2.0)), alpha, (1.0 - pow(alpha, -1.0))) / t_0) / t_1;
	}
	return tmp;
}
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + 2.0)
	t_1 = Float64(t_0 + 1.0)
	tmp = 0.0
	if (alpha <= 170000000.0)
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / t_1);
	else
		tmp = Float64(Float64(fma(Float64(beta / Float64(Float64(beta + alpha) + 2.0)), alpha, Float64(1.0 - (alpha ^ -1.0))) / t_0) / t_1);
	end
	return tmp
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 + 1.0), $MachinePrecision]}, If[LessEqual[alpha, 170000000.0], N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision], N[(N[(N[(N[(beta / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] * alpha + N[(1.0 - N[Power[alpha, -1.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2\\
t_1 := t\_0 + 1\\
\mathbf{if}\;\alpha \leq 170000000:\\
\;\;\;\;\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_1}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \alpha, 1 - {\alpha}^{-1}\right)}{t\_0}}{t\_1}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if alpha < 1.7e8

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Add Preprocessing

    if 1.7e8 < alpha

    1. Initial program 85.8%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-/.f64 (N/A)

        \[\leadsto \frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. +-commutative (N/A)

        \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. associate-+l+ (N/A)

        \[\leadsto \frac{\frac{\frac{\color{blue}{\beta \cdot \alpha + \left(\left(\alpha + \beta\right) + 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. metadata-eval (N/A)

        \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \left(\left(\alpha + \beta\right) + \color{blue}{\left(2 - 1\right)}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      7. metadata-eval (N/A)

        \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \left(\left(\alpha + \beta\right) + \left(\color{blue}{2 \cdot 1} - 1\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      8. lift-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \left(\left(\alpha + \beta\right) + \left(\color{blue}{2 \cdot 1} - 1\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      9. associate--l+ (N/A)

        \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \color{blue}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      10. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \left(\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      11. div-add (N/A)

        \[\leadsto \frac{\frac{\color{blue}{\frac{\beta \cdot \alpha}{\left(\alpha + \beta\right) + 2 \cdot 1} + \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      12. lift-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{\color{blue}{\beta \cdot \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1} + \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      13. *-rgt-identity (N/A)

        \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot 1}} + \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      14. times-frac (N/A)

        \[\leadsto \frac{\frac{\color{blue}{\frac{\beta}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \frac{\alpha}{1}} + \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      15. lower-fma.f64 (N/A)

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{fma}\left(\frac{\beta}{\left(\alpha + \beta\right) + 2 \cdot 1}, \frac{\alpha}{1}, \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Applied rewrites (99.8%)

      \[\leadsto \frac{\frac{\color{blue}{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \frac{\alpha}{1}, \frac{\left(\beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Step-by-step derivation
      1. lift-/.f64 (N/A)

        \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \color{blue}{\frac{\alpha}{1}}, \frac{\left(\beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. /-rgt-identity (99.8%)

        \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \color{blue}{\alpha}, \frac{\left(\beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    6. Applied rewrites (99.8%)

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \color{blue}{\alpha}, \frac{\left(\beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    7. Taylor expanded in alpha around inf

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \alpha, \color{blue}{1 - \frac{1}{\alpha}}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    8. Step-by-step derivation
      1. lower--.f64 (N/A)

        \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \alpha, \color{blue}{1 - \frac{1}{\alpha}}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-/.f64 (99.8%)

        \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \alpha, 1 - \color{blue}{\frac{1}{\alpha}}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    9. Applied rewrites (99.8%)

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \alpha, \color{blue}{1 - \frac{1}{\alpha}}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (99.9%)

    \[\leadsto \begin{array}{l} \mathbf{if}\;\alpha \leq 170000000:\\ \;\;\;\;\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 2\right) + 1}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \alpha, 1 - {\alpha}^{-1}\right)}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 2\right) + 1}\\ \end{array} \]
  5. Add Preprocessing
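
A concrete check of what the large-alpha branch buys (my sketch, not Herbie output): for very large alpha and beta, the specification's intermediate product beta * alpha overflows to infinity even though the true result is tiny, while this branch divides beta by the denominator before multiplying by alpha. fma is emulated exactly with rationals, so the sketch does not need math.fma (Python 3.13+).

from fractions import Fraction

def fma(a, b, c):
    # exact a*b + c with a single final rounding: the defining property of fma
    return float(Fraction(a) * Fraction(b) + Fraction(c))

def original(alpha, beta):
    t_0 = (alpha + beta) + 2.0
    return (((alpha + beta) + beta * alpha) + 1.0) / t_0 / t_0 / (t_0 + 1.0)

def alt1_else_branch(alpha, beta):   # the branch taken when alpha > 1.7e8
    t_0 = (alpha + beta) + 2.0
    t_1 = t_0 + 1.0
    return fma(beta / ((beta + alpha) + 2.0), alpha, 1.0 - alpha ** -1.0) / t_0 / t_1

alpha = beta = 1e200                  # true value is about 1.25e-201
print(original(alpha, beta))          # inf: beta * alpha overflowed
print(alt1_else_branch(alpha, beta))  # ~1.25e-201, close to the true value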

Alternative 2: 94.7% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\beta + \alpha\right) + 2\\ \mathbf{if}\;\beta \leq 1.6 \cdot 10^{+16}:\\ \;\;\;\;\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_0}}{\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \left(1 + \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2\right) + 1}\\ \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ beta alpha) 2.0)))
   (if (<= beta 1.6e+16)
     (/
      (/ (+ (fma beta alpha (+ beta alpha)) 1.0) t_0)
      (* (+ 3.0 (+ beta alpha)) t_0))
     (/
      (/
       (-
        (+ (+ (/ (+ 1.0 alpha) beta) alpha) 1.0)
        (* (+ 1.0 alpha) (/ (fma 2.0 alpha 4.0) beta)))
       beta)
      (+ (+ (+ alpha beta) 2.0) 1.0)))))
double code(double alpha, double beta) {
	double t_0 = (beta + alpha) + 2.0;
	double tmp;
	if (beta <= 1.6e+16) {
		tmp = ((fma(beta, alpha, (beta + alpha)) + 1.0) / t_0) / ((3.0 + (beta + alpha)) * t_0);
	} else {
		tmp = ((((((1.0 + alpha) / beta) + alpha) + 1.0) - ((1.0 + alpha) * (fma(2.0, alpha, 4.0) / beta))) / beta) / (((alpha + beta) + 2.0) + 1.0);
	}
	return tmp;
}
function code(alpha, beta)
	t_0 = Float64(Float64(beta + alpha) + 2.0)
	tmp = 0.0
	if (beta <= 1.6e+16)
		tmp = Float64(Float64(Float64(fma(beta, alpha, Float64(beta + alpha)) + 1.0) / t_0) / Float64(Float64(3.0 + Float64(beta + alpha)) * t_0));
	else
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.0 + alpha) / beta) + alpha) + 1.0) - Float64(Float64(1.0 + alpha) * Float64(fma(2.0, alpha, 4.0) / beta))) / beta) / Float64(Float64(Float64(alpha + beta) + 2.0) + 1.0));
	end
	return tmp
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 1.6e+16], N[(N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] + alpha), $MachinePrecision] + 1.0), $MachinePrecision] - N[(N[(1.0 + alpha), $MachinePrecision] * N[(N[(2.0 * alpha + 4.0), $MachinePrecision] / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + 2\\
\mathbf{if}\;\beta \leq 1.6 \cdot 10^{+16}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_0}}{\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \left(1 + \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2\right) + 1}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 1.6e16

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      2. lift-/.f64 (N/A)

        \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. associate-/l/ (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
      4. lower-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
    4. Applied rewrites (99.4%)

      \[\leadsto \color{blue}{\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}}{\left(3 + \left(\beta + \alpha\right)\right) \cdot \left(\left(\beta + \alpha\right) + 2\right)}} \]

    if 1.6e16 < beta

    1. Initial program 84.1%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Add Preprocessing
    3. Taylor expanded in beta around inf

      \[\leadsto \frac{\color{blue}{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Step-by-step derivation
      1. lower-/.f64 (N/A)

        \[\leadsto \frac{\color{blue}{\frac{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower--.f64 (N/A)

        \[\leadsto \frac{\frac{\color{blue}{\left(1 + \left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right)\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. +-commutative (N/A)

        \[\leadsto \frac{\frac{\color{blue}{\left(\left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + 1\right)} - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\color{blue}{\left(\left(\alpha + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + 1\right)} - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. +-commutative (N/A)

        \[\leadsto \frac{\frac{\left(\color{blue}{\left(\left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right) + \alpha\right)} + 1\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\color{blue}{\left(\left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right) + \alpha\right)} + 1\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      7. div-add-rev (N/A)

        \[\leadsto \frac{\frac{\left(\left(\color{blue}{\frac{1 + \alpha}{\beta}} + \alpha\right) + 1\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      8. lower-/.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\color{blue}{\frac{1 + \alpha}{\beta}} + \alpha\right) + 1\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      9. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{\color{blue}{1 + \alpha}}{\beta} + \alpha\right) + 1\right) - \frac{\left(1 + \alpha\right) \cdot \left(4 + 2 \cdot \alpha\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      10. associate-/l* (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \color{blue}{\left(1 + \alpha\right) \cdot \frac{4 + 2 \cdot \alpha}{\beta}}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      11. lower-*.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \color{blue}{\left(1 + \alpha\right) \cdot \frac{4 + 2 \cdot \alpha}{\beta}}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      12. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \color{blue}{\left(1 + \alpha\right)} \cdot \frac{4 + 2 \cdot \alpha}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      13. lower-/.f64 (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \left(1 + \alpha\right) \cdot \color{blue}{\frac{4 + 2 \cdot \alpha}{\beta}}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      14. +-commutative (N/A)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \left(1 + \alpha\right) \cdot \frac{\color{blue}{2 \cdot \alpha + 4}}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      15. lower-fma.f64 (85.0%)

        \[\leadsto \frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \left(1 + \alpha\right) \cdot \frac{\color{blue}{\mathsf{fma}\left(2, \alpha, 4\right)}}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Applied rewrites (85.0%)

      \[\leadsto \frac{\color{blue}{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \left(1 + \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (95.2%)

    \[\leadsto \begin{array}{l} \mathbf{if}\;\beta \leq 1.6 \cdot 10^{+16}:\\ \;\;\;\;\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}}{\left(3 + \left(\beta + \alpha\right)\right) \cdot \left(\left(\beta + \alpha\right) + 2\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \left(1 + \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2\right) + 1}\\ \end{array} \]
  5. Add Preprocessing
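
One step of this derivation is easier to follow with a factorization the report never states explicitly (standard algebra, not part of Herbie's output):

\[\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1 = \alpha \cdot \beta + \alpha + \beta + 1 = \left(1 + \alpha\right) \cdot \left(1 + \beta\right) \]

So the specification equals (1 + alpha)(1 + beta) divided by t_0^2 (t_0 + 1) with t_0 = (alpha + beta) + 2, and expanding the factor (1 + beta) / t_0^2 in powers of 1/beta reproduces, to the order shown, the series that step 3 of the 1.6e16 < beta regime lowers into fma form.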

Alternative 3: 99.8% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\beta + \alpha\right) + 2\\ t_1 := \left(\alpha + \beta\right) + 2\\ \frac{\frac{\mathsf{fma}\left(\frac{\beta}{t\_0}, \alpha, \frac{\left(\beta + \alpha\right) + 1}{t\_0}\right)}{t\_1}}{t\_1 + 1} \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ beta alpha) 2.0)) (t_1 (+ (+ alpha beta) 2.0)))
   (/
    (/ (fma (/ beta t_0) alpha (/ (+ (+ beta alpha) 1.0) t_0)) t_1)
    (+ t_1 1.0))))
double code(double alpha, double beta) {
	double t_0 = (beta + alpha) + 2.0;
	double t_1 = (alpha + beta) + 2.0;
	return (fma((beta / t_0), alpha, (((beta + alpha) + 1.0) / t_0)) / t_1) / (t_1 + 1.0);
}
function code(alpha, beta)
	t_0 = Float64(Float64(beta + alpha) + 2.0)
	t_1 = Float64(Float64(alpha + beta) + 2.0)
	return Float64(Float64(fma(Float64(beta / t_0), alpha, Float64(Float64(Float64(beta + alpha) + 1.0) / t_0)) / t_1) / Float64(t_1 + 1.0))
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]}, N[(N[(N[(N[(beta / t$95$0), $MachinePrecision] * alpha + N[(N[(N[(beta + alpha), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(t$95$1 + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + 2\\
t_1 := \left(\alpha + \beta\right) + 2\\
\frac{\frac{\mathsf{fma}\left(\frac{\beta}{t\_0}, \alpha, \frac{\left(\beta + \alpha\right) + 1}{t\_0}\right)}{t\_1}}{t\_1 + 1}
\end{array}
\end{array}
Derivation
  1. Initial program 95.2%

    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-/.f64 (N/A)

      \[\leadsto \frac{\frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. lift-+.f64 (N/A)

      \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    3. lift-+.f64 (N/A)

      \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right)} + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. +-commutative (N/A)

      \[\leadsto \frac{\frac{\frac{\color{blue}{\left(\beta \cdot \alpha + \left(\alpha + \beta\right)\right)} + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. associate-+l+ (N/A)

      \[\leadsto \frac{\frac{\frac{\color{blue}{\beta \cdot \alpha + \left(\left(\alpha + \beta\right) + 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    6. metadata-eval (N/A)

      \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \left(\left(\alpha + \beta\right) + \color{blue}{\left(2 - 1\right)}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    7. metadata-eval (N/A)

      \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \left(\left(\alpha + \beta\right) + \left(\color{blue}{2 \cdot 1} - 1\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    8. lift-*.f64 (N/A)

      \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \left(\left(\alpha + \beta\right) + \left(\color{blue}{2 \cdot 1} - 1\right)\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    9. associate--l+ (N/A)

      \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \color{blue}{\left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    10. lift-+.f64 (N/A)

      \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha + \left(\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    11. div-add (N/A)

      \[\leadsto \frac{\frac{\color{blue}{\frac{\beta \cdot \alpha}{\left(\alpha + \beta\right) + 2 \cdot 1} + \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    12. lift-*.f64 (N/A)

      \[\leadsto \frac{\frac{\frac{\color{blue}{\beta \cdot \alpha}}{\left(\alpha + \beta\right) + 2 \cdot 1} + \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    13. *-rgt-identity (N/A)

      \[\leadsto \frac{\frac{\frac{\beta \cdot \alpha}{\color{blue}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot 1}} + \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    14. times-frac (N/A)

      \[\leadsto \frac{\frac{\color{blue}{\frac{\beta}{\left(\alpha + \beta\right) + 2 \cdot 1} \cdot \frac{\alpha}{1}} + \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    15. lower-fma.f64 (N/A)

      \[\leadsto \frac{\frac{\color{blue}{\mathsf{fma}\left(\frac{\beta}{\left(\alpha + \beta\right) + 2 \cdot 1}, \frac{\alpha}{1}, \frac{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) - 1}{\left(\alpha + \beta\right) + 2 \cdot 1}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  4. Applied rewrites (99.8%)

    \[\leadsto \frac{\frac{\color{blue}{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \frac{\alpha}{1}, \frac{\left(\beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  5. Step-by-step derivation
    1. lift-/.f64 (N/A)

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \color{blue}{\frac{\alpha}{1}}, \frac{\left(\beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. /-rgt-identity (99.8%)

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \color{blue}{\alpha}, \frac{\left(\beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  6. Applied rewrites (99.8%)

    \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \color{blue}{\alpha}, \frac{\left(\beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  7. Final simplification (99.8%)

    \[\leadsto \frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \alpha, \frac{\left(\beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}\right)}{\left(\alpha + \beta\right) + 2}}{\left(\left(\alpha + \beta\right) + 2\right) + 1} \]
  8. Add Preprocessing
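
The report lists no Python version of this alternative, presumably because plain Python had no fused multiply-add until math.fma arrived in Python 3.13. A hand-written port under that assumption, mirroring the C version above (mine, not generated by Herbie):

from math import fma  # Python 3.13+

def code(alpha, beta):
    t_0 = (beta + alpha) + 2.0
    t_1 = (alpha + beta) + 2.0   # equals t_0: float addition is commutative
    return (fma(beta / t_0, alpha, ((beta + alpha) + 1.0) / t_0) / t_1) / (t_1 + 1.0)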

Alternative 4: 94.8% accurate, 1.3× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\beta + \alpha\right) + 2\\ \mathbf{if}\;\beta \leq 1.6 \cdot 10^{+16}:\\ \;\;\;\;\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_0}}{\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}\\ \end{array} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ beta alpha) 2.0)))
   (if (<= beta 1.6e+16)
     (/
      (/ (+ (fma beta alpha (+ beta alpha)) 1.0) t_0)
      (* (+ 3.0 (+ beta alpha)) t_0))
     (/ (/ (+ 1.0 alpha) (+ 3.0 (+ alpha beta))) (+ 2.0 (+ alpha beta))))))
double code(double alpha, double beta) {
	double t_0 = (beta + alpha) + 2.0;
	double tmp;
	if (beta <= 1.6e+16) {
		tmp = ((fma(beta, alpha, (beta + alpha)) + 1.0) / t_0) / ((3.0 + (beta + alpha)) * t_0);
	} else {
		tmp = ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
	}
	return tmp;
}
function code(alpha, beta)
	t_0 = Float64(Float64(beta + alpha) + 2.0)
	tmp = 0.0
	if (beta <= 1.6e+16)
		tmp = Float64(Float64(Float64(fma(beta, alpha, Float64(beta + alpha)) + 1.0) / t_0) / Float64(Float64(3.0 + Float64(beta + alpha)) * t_0));
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / Float64(3.0 + Float64(alpha + beta))) / Float64(2.0 + Float64(alpha + beta)));
	end
	return tmp
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 1.6e+16], N[(N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + 2\\
\mathbf{if}\;\beta \leq 1.6 \cdot 10^{+16}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_0}}{\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if beta < 1.6e16

    1. Initial program 99.9%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      2. lift-/.f64 (N/A)

        \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. associate-/l/ (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
      4. lower-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
    4. Applied rewrites (99.4%)

      \[\leadsto \color{blue}{\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{\left(\beta + \alpha\right) + 2}}{\left(3 + \left(\beta + \alpha\right)\right) \cdot \left(\left(\beta + \alpha\right) + 2\right)}} \]

    if 1.6e16 < beta

    1. Initial program 84.1%

      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    2. Add Preprocessing
    3. Taylor expanded in beta around -inf

      \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    4. Step-by-step derivation
      1. mul-1-neg (N/A)

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. lower-neg.f64 (N/A)

        \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. lower--.f64 (N/A)

        \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. mul-1-neg (N/A)

        \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. lower-neg.f64 (85.6%)

        \[\leadsto \frac{\frac{-\left(\color{blue}{\left(-\alpha\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    5. Applied rewrites (85.6%)

      \[\leadsto \frac{\frac{\color{blue}{-\left(\left(-\alpha\right) - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
    6. Step-by-step derivation
      1. lift-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
      2. lift-/.f64 (N/A)

        \[\leadsto \frac{\color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      3. associate-/l/ (N/A)

        \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
      4. lower-/.f64 (N/A)

        \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
    7. Applied rewrites (87.6%)

      \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
    8. Taylor expanded in alpha around 0

      \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
    9. Step-by-step derivation
      1. Applied rewrites (87.6%)

        \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
      2. Step-by-step derivation
        1. lift-/.f64 (N/A)

          \[\leadsto \color{blue}{\frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
        2. lift-*.f64 (N/A)

          \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
        3. associate-/r* (N/A)

          \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{\left(\alpha + \beta\right) + 2}} \]
        4. lower-/.f64 (N/A)

          \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{\left(\alpha + \beta\right) + 2}} \]
      3. Applied rewrites (85.6%)

        \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
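
Why the else branch in Alternatives 4 through 6 can drop beta from the numerator entirely (my gloss, using the factorization noted under Alternative 2, not Herbie's):

\[\frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{\left(\left(\alpha + \beta\right) + 2\right)^{2} \cdot \left(\left(\alpha + \beta\right) + 3\right)} = \frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \left(\alpha + \beta\right)\right)} \cdot \frac{1 + \beta}{\left(\alpha + \beta\right) + 2} \]

The final factor tends to 1 as beta grows with alpha fixed, which leaves exactly the else branch.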

    Alternative 5: 91.8% accurate, 1.4× speedup?

    \[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\beta + \alpha\right) + 2\\ \mathbf{if}\;\beta \leq 1.65 \cdot 10^{+70}:\\ \;\;\;\;\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_0 \cdot \left(\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}\\ \end{array} \end{array} \]
    (FPCore (alpha beta)
     :precision binary64
     (let* ((t_0 (+ (+ beta alpha) 2.0)))
       (if (<= beta 1.65e+70)
         (/
          (+ (fma beta alpha (+ beta alpha)) 1.0)
          (* t_0 (* (+ 3.0 (+ beta alpha)) t_0)))
         (/ (/ (+ 1.0 alpha) (+ 3.0 (+ alpha beta))) (+ 2.0 (+ alpha beta))))))
    double code(double alpha, double beta) {
    	double t_0 = (beta + alpha) + 2.0;
    	double tmp;
    	if (beta <= 1.65e+70) {
    		tmp = (fma(beta, alpha, (beta + alpha)) + 1.0) / (t_0 * ((3.0 + (beta + alpha)) * t_0));
    	} else {
    		tmp = ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
    	}
    	return tmp;
    }
    
    function code(alpha, beta)
    	t_0 = Float64(Float64(beta + alpha) + 2.0)
    	tmp = 0.0
    	if (beta <= 1.65e+70)
    		tmp = Float64(Float64(fma(beta, alpha, Float64(beta + alpha)) + 1.0) / Float64(t_0 * Float64(Float64(3.0 + Float64(beta + alpha)) * t_0)));
    	else
    		tmp = Float64(Float64(Float64(1.0 + alpha) / Float64(3.0 + Float64(alpha + beta))) / Float64(2.0 + Float64(alpha + beta)));
    	end
    	return tmp
    end
    
    code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 1.65e+70], N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(t$95$0 * N[(N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    t_0 := \left(\beta + \alpha\right) + 2\\
    \mathbf{if}\;\beta \leq 1.65 \cdot 10^{+70}:\\
    \;\;\;\;\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_0 \cdot \left(\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0\right)}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if beta < 1.65000000000000008e70

      1. Initial program 99.9%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Add Preprocessing
      3. Step-by-step derivation
        1. lift-/.f64N/A

          \[\leadsto \color{blue}{\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
        2. lift-/.f64N/A

          \[\leadsto \frac{\color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        3. associate-/l/N/A

          \[\leadsto \color{blue}{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
        4. lift-/.f64N/A

          \[\leadsto \frac{\color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)} \]
        5. associate-/l/N/A

          \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)\right)}} \]
        6. lower-/.f64N/A

          \[\leadsto \color{blue}{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)\right)}} \]
      4. Applied rewrites96.9%

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{\left(\left(\beta + \alpha\right) + 2\right) \cdot \left(\left(3 + \left(\beta + \alpha\right)\right) \cdot \left(\left(\beta + \alpha\right) + 2\right)\right)}} \]

      if 1.65000000000000008e70 < beta

      1. Initial program 79.9%

        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      2. Add Preprocessing
      3. Taylor expanded in beta around -inf

        \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      4. Step-by-step derivation
        1. mul-1-negN/A

          \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. lower-neg.f64N/A

          \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        3. lower--.f64N/A

          \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        4. mul-1-negN/A

          \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        5. lower-neg.f6486.5

          \[\leadsto \frac{\frac{-\left(\color{blue}{\left(-\alpha\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      5. Applied rewrites86.5%

        \[\leadsto \frac{\frac{\color{blue}{-\left(\left(-\alpha\right) - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
      6. Step-by-step derivation
        1. lift-/.f64N/A

          \[\leadsto \color{blue}{\frac{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
        2. lift-/.f64N/A

          \[\leadsto \frac{\color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        3. associate-/l/N/A

          \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
        4. lower-/.f64 (N/A)

          \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
      7. Applied rewrites (88.0%)

        \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
      8. Taylor expanded in alpha around 0

        \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
      9. Step-by-step derivation
        1. Applied rewrites (88.0%)

          \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
        2. Step-by-step derivation
          1. lift-/.f64 (N/A)

            \[\leadsto \color{blue}{\frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
          2. lift-*.f64 (N/A)

            \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
          3. associate-/r* (N/A)

            \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{\left(\alpha + \beta\right) + 2}} \]
          4. lower-/.f64 (N/A)

            \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{\left(\alpha + \beta\right) + 2}} \]
        3. Applied rewrites (86.5%)

          \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}} \]
      10. Recombined 2 regimes into one program.
      11. Add Preprocessing
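
      The fma form reached in step 4 fuses β·α + (β + α) into a single operation that rounds once instead of twice. The snippet below is a standalone illustration of that single rounding, not part of the generated output: math.fma requires Python 3.13 or later, and the operand is chosen only to make the double rounding visible.

      import math  # math.fma (fused multiply-add) is available from Python 3.13

      a = 1.0 + 2.0**-30
      # a * a rounds the exact product to 53 bits before the subtraction,
      # while fma keeps the exact product and rounds only once at the end,
      # so the two results differ in their low-order digits.
      print(a * a - 1.0)
      print(math.fma(a, a, -1.0))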

      Alternative 6: 72.1% accurate, 1.8× speedup

      \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\beta \leq 1.35 \cdot 10^{+15}:\\ \;\;\;\;\frac{\frac{1 + \beta}{2 + \beta}}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}\\ \end{array} \end{array} \]
      (FPCore (alpha beta)
       :precision binary64
       (if (<= beta 1.35e+15)
         (/ (/ (+ 1.0 beta) (+ 2.0 beta)) (* (+ 3.0 beta) (+ 2.0 beta)))
         (/ (/ (+ 1.0 alpha) (+ 3.0 (+ alpha beta))) (+ 2.0 (+ alpha beta)))))
      double code(double alpha, double beta) {
      	double tmp;
      	if (beta <= 1.35e+15) {
      		tmp = ((1.0 + beta) / (2.0 + beta)) / ((3.0 + beta) * (2.0 + beta));
      	} else {
      		tmp = ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
      	}
      	return tmp;
      }
      
      ! fmin_fmax_functions module omitted here: it is a verbatim repeat of the
      ! definition given earlier in this report.
      
      real(8) function code(alpha, beta)
      use fmin_fmax_functions
          real(8), intent (in) :: alpha
          real(8), intent (in) :: beta
          real(8) :: tmp
          if (beta <= 1.35d+15) then
              tmp = ((1.0d0 + beta) / (2.0d0 + beta)) / ((3.0d0 + beta) * (2.0d0 + beta))
          else
              tmp = ((1.0d0 + alpha) / (3.0d0 + (alpha + beta))) / (2.0d0 + (alpha + beta))
          end if
          code = tmp
      end function
      
      public static double code(double alpha, double beta) {
      	double tmp;
      	if (beta <= 1.35e+15) {
      		tmp = ((1.0 + beta) / (2.0 + beta)) / ((3.0 + beta) * (2.0 + beta));
      	} else {
      		tmp = ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
      	}
      	return tmp;
      }
      
      def code(alpha, beta):
      	tmp = 0
      	if beta <= 1.35e+15:
      		tmp = ((1.0 + beta) / (2.0 + beta)) / ((3.0 + beta) * (2.0 + beta))
      	else:
      		tmp = ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta))
      	return tmp
      
      function code(alpha, beta)
      	tmp = 0.0
      	if (beta <= 1.35e+15)
      		tmp = Float64(Float64(Float64(1.0 + beta) / Float64(2.0 + beta)) / Float64(Float64(3.0 + beta) * Float64(2.0 + beta)));
      	else
      		tmp = Float64(Float64(Float64(1.0 + alpha) / Float64(3.0 + Float64(alpha + beta))) / Float64(2.0 + Float64(alpha + beta)));
      	end
      	return tmp
      end
      
      function tmp_2 = code(alpha, beta)
      	tmp = 0.0;
      	if (beta <= 1.35e+15)
      		tmp = ((1.0 + beta) / (2.0 + beta)) / ((3.0 + beta) * (2.0 + beta));
      	else
      		tmp = ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
      	end
      	tmp_2 = tmp;
      end
      
      code[alpha_, beta_] := If[LessEqual[beta, 1.35e+15], N[(N[(N[(1.0 + beta), $MachinePrecision] / N[(2.0 + beta), $MachinePrecision]), $MachinePrecision] / N[(N[(3.0 + beta), $MachinePrecision] * N[(2.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
      
      Derivation
      1. Split input into 2 regimes
      2. if beta < 1.35e15

        1. Initial program (99.9%)

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Add Preprocessing
        3. Taylor expanded in alpha around 0

          \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        4. Step-by-step derivation
          1. lower-/.f64 (N/A)

            \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          2. lower-+.f64 (N/A)

            \[\leadsto \frac{\frac{\frac{\color{blue}{1 + \beta}}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          3. lower-+.f64 (88.6%)

            \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        5. Applied rewrites (88.6%)

          \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        6. Step-by-step derivation
          1. lift-/.f64 (N/A)

            \[\leadsto \color{blue}{\frac{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
          2. lift-/.f64 (N/A)

            \[\leadsto \frac{\color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          3. associate-/l/ (N/A)

            \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
          4. lower-/.f64 (N/A)

            \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
        7. Applied rewrites (88.6%)

          \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
        8. Taylor expanded in alpha around 0

          \[\leadsto \frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{\left(2 + \beta\right) \cdot \left(3 + \beta\right)}} \]
        9. Step-by-step derivation
          1. *-commutative (N/A)

            \[\leadsto \frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}} \]
          2. lower-*.f64 (N/A)

            \[\leadsto \frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}} \]
          3. lower-+.f64 (N/A)

            \[\leadsto \frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{\left(3 + \beta\right)} \cdot \left(2 + \beta\right)} \]
          4. lower-+.f64 (66.7%)

            \[\leadsto \frac{\frac{1 + \beta}{2 + \beta}}{\left(3 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]
        10. Applied rewrites (66.7%)

          \[\leadsto \frac{\frac{1 + \beta}{2 + \beta}}{\color{blue}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}} \]

        if 1.35e15 < beta

        1. Initial program (84.1%)

          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        2. Add Preprocessing
        3. Taylor expanded in beta around -inf

          \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        4. Step-by-step derivation
          1. mul-1-neg (N/A)

            \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          2. lower-neg.f64 (N/A)

            \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          3. lower--.f64 (N/A)

            \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          4. mul-1-neg (N/A)

            \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          5. lower-neg.f64 (85.6%)

            \[\leadsto \frac{\frac{-\left(\color{blue}{\left(-\alpha\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        5. Applied rewrites (85.6%)

          \[\leadsto \frac{\frac{\color{blue}{-\left(\left(-\alpha\right) - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
        6. Step-by-step derivation
          1. lift-/.f64 (N/A)

            \[\leadsto \color{blue}{\frac{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
          2. lift-/.f64 (N/A)

            \[\leadsto \frac{\color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          3. associate-/l/ (N/A)

            \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
          4. lower-/.f64 (N/A)

            \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
        7. Applied rewrites (87.6%)

          \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
        8. Taylor expanded in alpha around 0

          \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
        9. Step-by-step derivation
          1. Applied rewrites (87.6%)

            \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
          2. Step-by-step derivation
            1. lift-/.f64 (N/A)

              \[\leadsto \color{blue}{\frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
            2. lift-*.f64 (N/A)

              \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
            3. associate-/r* (N/A)

              \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{\left(\alpha + \beta\right) + 2}} \]
            4. lower-/.f64 (N/A)

              \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{\left(\alpha + \beta\right) + 2}} \]
          3. Applied rewrites (85.6%)

            \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}} \]
        10. Recombined 2 regimes into one program.
        11. Add Preprocessing
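
        Alternative 6's beta <= 1.35e15 branch was obtained by Taylor-expanding alpha away, so it can only track the true value when |alpha| is small. The probe below, with arbitrarily chosen points, makes the trade-off behind the 72.1% figure concrete; Herbie itself measures accuracy over its sampled input distribution.

        def branch_low(beta):
            # the beta <= 1.35e15 branch: alpha no longer appears
            return ((1.0 + beta) / (2.0 + beta)) / ((3.0 + beta) * (2.0 + beta))

        def original(alpha, beta):
            t_0 = (alpha + beta) + 2.0
            return (((alpha + beta) + beta * alpha) + 1.0) / t_0 / t_0 / (t_0 + 1.0)

        for alpha in (1e-6, 0.5):
            print(alpha, branch_low(1.0e6), original(alpha, 1.0e6))
        # agrees closely for tiny alpha, but is off by a factor of ~1.5 at alpha = 0.5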

        Alternative 7: 83.3% accurate, 1.9× speedup

        \[\begin{array}{l} \\ \begin{array}{l} t_0 := 3 + \left(\alpha + \beta\right)\\ \mathbf{if}\;\beta \leq 9.5:\\ \;\;\;\;\frac{\mathsf{fma}\left(0.25, \beta, 0.5\right)}{t\_0 \cdot \left(\left(\alpha + \beta\right) + 2\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{t\_0}}{2 + \left(\alpha + \beta\right)}\\ \end{array} \end{array} \]
        (FPCore (alpha beta)
         :precision binary64
         (let* ((t_0 (+ 3.0 (+ alpha beta))))
           (if (<= beta 9.5)
             (/ (fma 0.25 beta 0.5) (* t_0 (+ (+ alpha beta) 2.0)))
             (/ (/ (+ 1.0 alpha) t_0) (+ 2.0 (+ alpha beta))))))
        double code(double alpha, double beta) {
        	double t_0 = 3.0 + (alpha + beta);
        	double tmp;
        	if (beta <= 9.5) {
        		tmp = fma(0.25, beta, 0.5) / (t_0 * ((alpha + beta) + 2.0));
        	} else {
        		tmp = ((1.0 + alpha) / t_0) / (2.0 + (alpha + beta));
        	}
        	return tmp;
        }
        
        function code(alpha, beta)
        	t_0 = Float64(3.0 + Float64(alpha + beta))
        	tmp = 0.0
        	if (beta <= 9.5)
        		tmp = Float64(fma(0.25, beta, 0.5) / Float64(t_0 * Float64(Float64(alpha + beta) + 2.0)));
        	else
        		tmp = Float64(Float64(Float64(1.0 + alpha) / t_0) / Float64(2.0 + Float64(alpha + beta)));
        	end
        	return tmp
        end
        
        code[alpha_, beta_] := Block[{t$95$0 = N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 9.5], N[(N[(0.25 * beta + 0.5), $MachinePrecision] / N[(t$95$0 * N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
        
        Derivation
        1. Split input into 2 regimes
        2. if beta < 9.5

          1. Initial program (99.9%)

            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          2. Add Preprocessing
          3. Taylor expanded in alpha around 0

            \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          4. Step-by-step derivation
            1. lower-/.f64 (N/A)

              \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            2. lower-+.f64 (N/A)

              \[\leadsto \frac{\frac{\frac{\color{blue}{1 + \beta}}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            3. lower-+.f64 (88.8%)

              \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          5. Applied rewrites (88.8%)

            \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
          6. Step-by-step derivation
            1. lift-/.f64 (N/A)

              \[\leadsto \color{blue}{\frac{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
            2. lift-/.f64 (N/A)

              \[\leadsto \frac{\color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            3. associate-/l/ (N/A)

              \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
            4. lower-/.f64 (N/A)

              \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
          7. Applied rewrites (88.9%)

            \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
          8. Taylor expanded in beta around 0

            \[\leadsto \frac{\frac{1}{2} + \color{blue}{\frac{1}{4} \cdot \beta}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
          9. Step-by-step derivation
            1. Applied rewrites (87.7%)

              \[\leadsto \frac{\mathsf{fma}\left(0.25, \color{blue}{\beta}, 0.5\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]

            if 9.5 < beta

            1. Initial program (84.7%)

              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            2. Add Preprocessing
            3. Taylor expanded in beta around -inf

              \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            4. Step-by-step derivation
              1. mul-1-neg (N/A)

                \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. lower-neg.f64 (N/A)

                \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. lower--.f64 (N/A)

                \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              4. mul-1-neg (N/A)

                \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              5. lower-neg.f64 (83.9%)

                \[\leadsto \frac{\frac{-\left(\color{blue}{\left(-\alpha\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            5. Applied rewrites (83.9%)

              \[\leadsto \frac{\frac{\color{blue}{-\left(\left(-\alpha\right) - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
            6. Step-by-step derivation
              1. lift-/.f64 (N/A)

                \[\leadsto \color{blue}{\frac{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
              2. lift-/.f64 (N/A)

                \[\leadsto \frac{\color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              3. associate-/l/ (N/A)

                \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
              4. lower-/.f64 (N/A)

                \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
            7. Applied rewrites (85.9%)

              \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
            8. Taylor expanded in alpha around 0

              \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
            9. Step-by-step derivation
              1. Applied rewrites (85.9%)

                \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
              2. Step-by-step derivation
                1. lift-/.f64 (N/A)

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
                2. lift-*.f64 (N/A)

                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
                3. associate-/r* (N/A)

                  \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{\left(\alpha + \beta\right) + 2}} \]
                4. lower-/.f64 (N/A)

                  \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{\left(\alpha + \beta\right) + 2}} \]
              3. Applied rewrites (83.9%)

                \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}} \]
            10. Recombined 2 regimes into one program.
            11. Add Preprocessing
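
            The numerator fma(0.25, beta, 0.5) in Alternative 7 is the linear Taylor truncation of (1 + beta)/(2 + beta) around beta = 0, so its fit degrades toward the 9.5 threshold. A small probe of the truncation alone, at illustrative values:

            for beta in (1e-3, 0.5, 9.5):
                true = (1.0 + beta) / (2.0 + beta)
                approx = 0.25 * beta + 0.5   # the value fma(0.25, beta, 0.5) computes
                print(beta, true, approx)
            # nearly exact at beta = 1e-3, but 2.875 against roughly 0.913 at beta = 9.5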

            Alternative 8: 83.0% accurate, 2.0× speedup

            \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\beta \leq 10:\\ \;\;\;\;\frac{\mathsf{fma}\left(0.25, \beta, 0.5\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
            (FPCore (alpha beta)
             :precision binary64
             (if (<= beta 10.0)
               (/ (fma 0.25 beta 0.5) (* (+ 3.0 (+ alpha beta)) (+ (+ alpha beta) 2.0)))
               (/ (/ (+ 1.0 alpha) beta) beta)))
            double code(double alpha, double beta) {
            	double tmp;
            	if (beta <= 10.0) {
            		tmp = fma(0.25, beta, 0.5) / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0));
            	} else {
            		tmp = ((1.0 + alpha) / beta) / beta;
            	}
            	return tmp;
            }
            
            function code(alpha, beta)
            	tmp = 0.0
            	if (beta <= 10.0)
            		tmp = Float64(fma(0.25, beta, 0.5) / Float64(Float64(3.0 + Float64(alpha + beta)) * Float64(Float64(alpha + beta) + 2.0)));
            	else
            		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
            	end
            	return tmp
            end
            
            code[alpha_, beta_] := If[LessEqual[beta, 10.0], N[(N[(0.25 * beta + 0.5), $MachinePrecision] / N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
            
            Derivation
            1. Split input into 2 regimes
            2. if beta < 10

              1. Initial program (99.9%)

                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              2. Add Preprocessing
              3. Taylor expanded in alpha around 0

                \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              4. Step-by-step derivation
                1. lower-/.f64 (N/A)

                  \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. lower-+.f64 (N/A)

                  \[\leadsto \frac{\frac{\frac{\color{blue}{1 + \beta}}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                3. lower-+.f64 (88.8%)

                  \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              5. Applied rewrites (88.8%)

                \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
              6. Step-by-step derivation
                1. lift-/.f64 (N/A)

                  \[\leadsto \color{blue}{\frac{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                2. lift-/.f64 (N/A)

                  \[\leadsto \frac{\color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                3. associate-/l/ (N/A)

                  \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
                4. lower-/.f64 (N/A)

                  \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
              7. Applied rewrites (88.9%)

                \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
              8. Taylor expanded in beta around 0

                \[\leadsto \frac{\frac{1}{2} + \color{blue}{\frac{1}{4} \cdot \beta}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
              9. Step-by-step derivation
                1. Applied rewrites (87.7%)

                  \[\leadsto \frac{\mathsf{fma}\left(0.25, \color{blue}{\beta}, 0.5\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]

                if 10 < beta

                1. Initial program (84.7%)

                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                2. Add Preprocessing
                3. Taylor expanded in beta around inf

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                4. Step-by-step derivation
                  1. lower-/.f64 (N/A)

                    \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                  2. lower-+.f64 (N/A)

                    \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                  3. unpow2 (N/A)

                    \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                  4. lower-*.f64 (80.9%)

                    \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                5. Applied rewrites (80.9%)

                  \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                6. Step-by-step derivation
                  1. Applied rewrites (83.3%)

                    \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{\beta}}{\beta}} \]
                7. Recombined 2 regimes into one program.
                8. Add Preprocessing
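
                The final rewrite, from (1 + alpha)/beta² to ((1 + alpha)/beta)/beta, is not just cosmetic: beta * beta overflows to infinity once beta exceeds roughly 1.3e154, even when the quotient itself is representable, while dividing twice never overflows. A minimal demonstration, with values chosen to land in that window:

                alpha, beta = 1.0e100, 1.0e160
                print((1.0 + alpha) / (beta * beta))   # 0.0, because beta * beta overflows to inf
                print(((1.0 + alpha) / beta) / beta)   # 1e-220, the intended magnitude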

                Alternative 9: 82.6% accurate, 2.4× speedup

                \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\beta \leq 14:\\ \;\;\;\;\frac{0.5}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
                (FPCore (alpha beta)
                 :precision binary64
                 (if (<= beta 14.0)
                   (/ 0.5 (* (+ 3.0 (+ alpha beta)) (+ (+ alpha beta) 2.0)))
                   (/ (/ (+ 1.0 alpha) beta) beta)))
                double code(double alpha, double beta) {
                	double tmp;
                	if (beta <= 14.0) {
                		tmp = 0.5 / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0));
                	} else {
                		tmp = ((1.0 + alpha) / beta) / beta;
                	}
                	return tmp;
                }
                
                ! fmin_fmax_functions module omitted here: it is a verbatim repeat of the
                ! definition given earlier in this report.
                
                real(8) function code(alpha, beta)
                use fmin_fmax_functions
                    real(8), intent (in) :: alpha
                    real(8), intent (in) :: beta
                    real(8) :: tmp
                    if (beta <= 14.0d0) then
                        tmp = 0.5d0 / ((3.0d0 + (alpha + beta)) * ((alpha + beta) + 2.0d0))
                    else
                        tmp = ((1.0d0 + alpha) / beta) / beta
                    end if
                    code = tmp
                end function
                
                public static double code(double alpha, double beta) {
                	double tmp;
                	if (beta <= 14.0) {
                		tmp = 0.5 / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0));
                	} else {
                		tmp = ((1.0 + alpha) / beta) / beta;
                	}
                	return tmp;
                }
                
                def code(alpha, beta):
                	tmp = 0
                	if beta <= 14.0:
                		tmp = 0.5 / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0))
                	else:
                		tmp = ((1.0 + alpha) / beta) / beta
                	return tmp
                
                function code(alpha, beta)
                	tmp = 0.0
                	if (beta <= 14.0)
                		tmp = Float64(0.5 / Float64(Float64(3.0 + Float64(alpha + beta)) * Float64(Float64(alpha + beta) + 2.0)));
                	else
                		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
                	end
                	return tmp
                end
                
                function tmp_2 = code(alpha, beta)
                	tmp = 0.0;
                	if (beta <= 14.0)
                		tmp = 0.5 / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0));
                	else
                		tmp = ((1.0 + alpha) / beta) / beta;
                	end
                	tmp_2 = tmp;
                end
                
                code[alpha_, beta_] := If[LessEqual[beta, 14.0], N[(0.5 / N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
                
                Derivation
                1. Split input into 2 regimes
                2. if beta < 14

                  1. Initial program (99.9%)

                    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  2. Add Preprocessing
                  3. Taylor expanded in alpha around 0

                    \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  4. Step-by-step derivation
                    1. lower-/.f64 (N/A)

                      \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. lower-+.f64 (N/A)

                      \[\leadsto \frac{\frac{\frac{\color{blue}{1 + \beta}}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    3. lower-+.f64 (88.8%)

                      \[\leadsto \frac{\frac{\frac{1 + \beta}{\color{blue}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  5. Applied rewrites (88.8%)

                    \[\leadsto \frac{\frac{\color{blue}{\frac{1 + \beta}{2 + \beta}}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                  6. Step-by-step derivation
                    1. lift-/.f64 (N/A)

                      \[\leadsto \color{blue}{\frac{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                    2. lift-/.f64 (N/A)

                      \[\leadsto \frac{\color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    3. associate-/l/ (N/A)

                      \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
                    4. lower-/.f64 (N/A)

                      \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
                  7. Applied rewrites (88.9%)

                    \[\leadsto \color{blue}{\frac{\frac{1 + \beta}{2 + \beta}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
                  8. Taylor expanded in beta around 0

                    \[\leadsto \frac{\frac{1}{2}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
                  9. Step-by-step derivation
                    1. Applied rewrites (87.4%)

                      \[\leadsto \frac{0.5}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]

                    if 14 < beta

                    1. Initial program (84.7%)

                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                    2. Add Preprocessing
                    3. Taylor expanded in beta around inf

                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                    4. Step-by-step derivation
                      1. lower-/.f64 (N/A)

                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                      2. lower-+.f64 (N/A)

                        \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                      3. unpow2 (N/A)

                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                      4. lower-*.f64 (80.9%)

                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                    5. Applied rewrites (80.9%)

                      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                    6. Step-by-step derivation
                      1. Applied rewrites (83.3%)

                        \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{\beta}}{\beta}} \]
                    7. Recombined 2 regimes into one program.
                    8. Add Preprocessing
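
                    Relative to Alternative 8, the beta/4 term of the numerator is dropped entirely, trading some accuracy (the 87.4% step above) for the higher 2.4× speedup. The cost of the constant numerator grows with beta across the widened branch, as a two-point probe shows:

                    for beta in (0.0, 14.0):
                        print(beta, (1.0 + beta) / (2.0 + beta), 0.5)
                    # exact at beta = 0, but 0.9375 against 0.5 at the 14.0 threshold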

                    Alternative 10: 36.2% accurate, 2.4× speedup

                    \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\beta \leq 5 \cdot 10^{+18}:\\ \;\;\;\;\frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \beta\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
                    (FPCore (alpha beta)
                     :precision binary64
                     (if (<= beta 5e+18)
                       (/ (+ 1.0 alpha) (* (+ 3.0 (+ alpha beta)) (+ 2.0 beta)))
                       (/ (/ (+ 1.0 alpha) beta) beta)))
                    double code(double alpha, double beta) {
                    	double tmp;
                    	if (beta <= 5e+18) {
                    		tmp = (1.0 + alpha) / ((3.0 + (alpha + beta)) * (2.0 + beta));
                    	} else {
                    		tmp = ((1.0 + alpha) / beta) / beta;
                    	}
                    	return tmp;
                    }
                    
                    ! fmin_fmax_functions module omitted here: it is a verbatim repeat of the
                    ! definition given earlier in this report.
                    
                    real(8) function code(alpha, beta)
                    use fmin_fmax_functions
                        real(8), intent (in) :: alpha
                        real(8), intent (in) :: beta
                        real(8) :: tmp
                        if (beta <= 5d+18) then
                            tmp = (1.0d0 + alpha) / ((3.0d0 + (alpha + beta)) * (2.0d0 + beta))
                        else
                            tmp = ((1.0d0 + alpha) / beta) / beta
                        end if
                        code = tmp
                    end function
                    
                    public static double code(double alpha, double beta) {
                    	double tmp;
                    	if (beta <= 5e+18) {
                    		tmp = (1.0 + alpha) / ((3.0 + (alpha + beta)) * (2.0 + beta));
                    	} else {
                    		tmp = ((1.0 + alpha) / beta) / beta;
                    	}
                    	return tmp;
                    }
                    
                    def code(alpha, beta):
                    	tmp = 0
                    	if beta <= 5e+18:
                    		tmp = (1.0 + alpha) / ((3.0 + (alpha + beta)) * (2.0 + beta))
                    	else:
                    		tmp = ((1.0 + alpha) / beta) / beta
                    	return tmp
                    
                    function code(alpha, beta)
                    	tmp = 0.0
                    	if (beta <= 5e+18)
                    		tmp = Float64(Float64(1.0 + alpha) / Float64(Float64(3.0 + Float64(alpha + beta)) * Float64(2.0 + beta)));
                    	else
                    		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
                    	end
                    	return tmp
                    end
                    
                    function tmp_2 = code(alpha, beta)
                    	tmp = 0.0;
                    	if (beta <= 5e+18)
                    		tmp = (1.0 + alpha) / ((3.0 + (alpha + beta)) * (2.0 + beta));
                    	else
                    		tmp = ((1.0 + alpha) / beta) / beta;
                    	end
                    	tmp_2 = tmp;
                    end
                    
                    code[alpha_, beta_] := If[LessEqual[beta, 5e+18], N[(N[(1.0 + alpha), $MachinePrecision] / N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * N[(2.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
                    
                    Derivation
                    1. Split input into 2 regimes
                    2. if beta < 5e18

                      1. Initial program (99.9%)

                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      2. Add Preprocessing
                      3. Taylor expanded in beta around -inf

                        \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      4. Step-by-step derivation
                        1. mul-1-neg (N/A)

                          \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        2. lower-neg.f64 (N/A)

                          \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        3. lower--.f64 (N/A)

                          \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        4. mul-1-neg (N/A)

                          \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        5. lower-neg.f64 (15.3%)

                          \[\leadsto \frac{\frac{-\left(\color{blue}{\left(-\alpha\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      5. Applied rewrites (15.3%)

                        \[\leadsto \frac{\frac{\color{blue}{-\left(\left(-\alpha\right) - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                      6. Step-by-step derivation
                        1. lift-/.f64 (N/A)

                          \[\leadsto \color{blue}{\frac{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                        2. lift-/.f64 (N/A)

                          \[\leadsto \frac{\color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        3. associate-/l/ (N/A)

                          \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
                        4. lower-/.f64 (N/A)

                          \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
                      7. Applied rewrites (34.9%)

                        \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
                      8. Taylor expanded in alpha around 0

                        \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
                      9. Step-by-step derivation
                        1. Applied rewrites (34.9%)

                          \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
                        2. Taylor expanded in alpha around 0

                          \[\leadsto \frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]
                        3. Step-by-step derivation
                          1. lower-+.f64 (14.3%)

                            \[\leadsto \frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]
                        4. Applied rewrites 14.3%

                          \[\leadsto \frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]

                        if 5e18 < beta

                        1. Initial program 84.1%

                          \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                        2. Add Preprocessing
                        3. Taylor expanded in beta around inf

                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
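                        Note: the leading term can be checked by hand. For fixed alpha and large beta, the numerator grows like beta times (1 + alpha), while each of the three denominator factors grows like beta, so

                          \[\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 3\right)} \sim \frac{\beta \cdot \left(1 + \alpha\right)}{{\beta}^{3}} = \frac{1 + \alpha}{{\beta}^{2}} \quad \left(\beta \to \infty\right) \]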
                        4. Step-by-step derivation
                          1. lower-/.f64 N/A

                            \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                          2. lower-+.f64 N/A

                            \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                          3. unpow2 N/A

                            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                          4. lower-*.f64 82.7

                            \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                        5. Applied rewrites 82.7%

                          \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                        6. Step-by-step derivation
                          1. Applied rewrites 85.1%

                            \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{\beta}}{\beta}} \]
                        7. Recombined 2 regimes into one program.
                        8. Add Preprocessing

                        Alternative 11: 36.0% accurate, 2.6× speedup

                        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\beta \leq 2 \cdot 10^{+16}:\\ \;\;\;\;\frac{1 + \alpha}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
                        (FPCore (alpha beta)
                         :precision binary64
                         (if (<= beta 2e+16)
                           (/ (+ 1.0 alpha) (* (+ 3.0 beta) (+ 2.0 beta)))
                           (/ (/ (+ 1.0 alpha) beta) beta)))
                        double code(double alpha, double beta) {
                        	double tmp;
                        	if (beta <= 2e+16) {
                        		tmp = (1.0 + alpha) / ((3.0 + beta) * (2.0 + beta));
                        	} else {
                        		tmp = ((1.0 + alpha) / beta) / beta;
                        	}
                        	return tmp;
                        }
                        
                        module fmin_fmax_functions
                            implicit none
                            private
                            public fmax
                            public fmin
                        
                            interface fmax
                                module procedure fmax88
                                module procedure fmax44
                                module procedure fmax84
                                module procedure fmax48
                            end interface
                            interface fmin
                                module procedure fmin88
                                module procedure fmin44
                                module procedure fmin84
                                module procedure fmin48
                            end interface
                        contains
                            real(8) function fmax88(x, y) result (res)
                                real(8), intent (in) :: x
                                real(8), intent (in) :: y
                                res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                            end function
                            real(4) function fmax44(x, y) result (res)
                                real(4), intent (in) :: x
                                real(4), intent (in) :: y
                                res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                            end function
                            real(8) function fmax84(x, y) result(res)
                                real(8), intent (in) :: x
                                real(4), intent (in) :: y
                                res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                            end function
                            real(8) function fmax48(x, y) result(res)
                                real(4), intent (in) :: x
                                real(8), intent (in) :: y
                                res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                            end function
                            real(8) function fmin88(x, y) result (res)
                                real(8), intent (in) :: x
                                real(8), intent (in) :: y
                                res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                            end function
                            real(4) function fmin44(x, y) result (res)
                                real(4), intent (in) :: x
                                real(4), intent (in) :: y
                                res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                            end function
                            real(8) function fmin84(x, y) result(res)
                                real(8), intent (in) :: x
                                real(4), intent (in) :: y
                                res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                            end function
                            real(8) function fmin48(x, y) result(res)
                                real(4), intent (in) :: x
                                real(8), intent (in) :: y
                                res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                            end function
                        end module
                        
                        real(8) function code(alpha, beta)
                        use fmin_fmax_functions
                            real(8), intent (in) :: alpha
                            real(8), intent (in) :: beta
                            real(8) :: tmp
                            if (beta <= 2d+16) then
                                tmp = (1.0d0 + alpha) / ((3.0d0 + beta) * (2.0d0 + beta))
                            else
                                tmp = ((1.0d0 + alpha) / beta) / beta
                            end if
                            code = tmp
                        end function
                        
                        public static double code(double alpha, double beta) {
                        	double tmp;
                        	if (beta <= 2e+16) {
                        		tmp = (1.0 + alpha) / ((3.0 + beta) * (2.0 + beta));
                        	} else {
                        		tmp = ((1.0 + alpha) / beta) / beta;
                        	}
                        	return tmp;
                        }
                        
                        def code(alpha, beta):
                        	tmp = 0
                        	if beta <= 2e+16:
                        		tmp = (1.0 + alpha) / ((3.0 + beta) * (2.0 + beta))
                        	else:
                        		tmp = ((1.0 + alpha) / beta) / beta
                        	return tmp
                        
                        function code(alpha, beta)
                        	tmp = 0.0
                        	if (beta <= 2e+16)
                        		tmp = Float64(Float64(1.0 + alpha) / Float64(Float64(3.0 + beta) * Float64(2.0 + beta)));
                        	else
                        		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta);
                        	end
                        	return tmp
                        end
                        
                        function tmp_2 = code(alpha, beta)
                        	tmp = 0.0;
                        	if (beta <= 2e+16)
                        		tmp = (1.0 + alpha) / ((3.0 + beta) * (2.0 + beta));
                        	else
                        		tmp = ((1.0 + alpha) / beta) / beta;
                        	end
                        	tmp_2 = tmp;
                        end
                        
                        code[alpha_, beta_] := If[LessEqual[beta, 2e+16], N[(N[(1.0 + alpha), $MachinePrecision] / N[(N[(3.0 + beta), $MachinePrecision] * N[(2.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
                        
                        \begin{array}{l}
                        \\
                        \begin{array}{l}
                        \mathbf{if}\;\beta \leq 2 \cdot 10^{+16}:\\
                        \;\;\;\;\frac{1 + \alpha}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}\\
                        \mathbf{else}:\\
                        \;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
                        \end{array}
                        \end{array}
                        
                        Derivation
                        1. Split input into 2 regimes
                        2. if beta < 2e16

                          1. Initial program 99.9%

                            \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                          2. Add Preprocessing
                          3. Taylor expanded in beta around -inf

                            \[\leadsto \frac{\frac{\color{blue}{-1 \cdot \left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                          4. Step-by-step derivation
                            1. mul-1-neg N/A

                              \[\leadsto \frac{\frac{\color{blue}{\mathsf{neg}\left(\left(-1 \cdot \alpha - 1\right)\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            2. lower-neg.f64 N/A

                              \[\leadsto \frac{\frac{\color{blue}{-\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            3. lower--.f64 N/A

                              \[\leadsto \frac{\frac{-\color{blue}{\left(-1 \cdot \alpha - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            4. mul-1-neg N/A

                              \[\leadsto \frac{\frac{-\left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            5. lower-neg.f64 15.3

                              \[\leadsto \frac{\frac{-\left(\color{blue}{\left(-\alpha\right)} - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                          5. Applied rewrites 15.3%

                            \[\leadsto \frac{\frac{\color{blue}{-\left(\left(-\alpha\right) - 1\right)}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                          6. Step-by-step derivation
                            1. lift-/.f64 N/A

                              \[\leadsto \color{blue}{\frac{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}} \]
                            2. lift-/.f64 N/A

                              \[\leadsto \frac{\color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\alpha + \beta\right) + 2 \cdot 1}}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            3. associate-/l/ N/A

                              \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
                            4. lower-/.f64 N/A

                              \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) \cdot \left(\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1\right)}} \]
                          7. Applied rewrites 34.9%

                            \[\leadsto \color{blue}{\frac{-\left(\left(-\alpha\right) - 1\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}} \]
                          8. Taylor expanded in alpha around 0

                            \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
                          9. Step-by-step derivation
                            1. Applied rewrites 34.9%

                              \[\leadsto \frac{1 + \color{blue}{\alpha}}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)} \]
                            2. Taylor expanded in alpha around 0

                              \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(2 + \beta\right) \cdot \left(3 + \beta\right)}} \]
                            3. Step-by-step derivation
                              1. *-commutative N/A

                                \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}} \]
                              2. lower-*.f64 N/A

                                \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}} \]
                              3. lower-+.f64 N/A

                                \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(3 + \beta\right)} \cdot \left(2 + \beta\right)} \]
                              4. lower-+.f64 13.9

                                \[\leadsto \frac{1 + \alpha}{\left(3 + \beta\right) \cdot \color{blue}{\left(2 + \beta\right)}} \]
                            4. Applied rewrites 13.9%

                              \[\leadsto \frac{1 + \alpha}{\color{blue}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}} \]
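                            Note: the factors 3 + beta and 2 + beta are inherited directly from the original denominator; setting alpha to 0 in the folded product gives

                              \[\left.\left(\left(\alpha + \beta\right) + 2\right) \cdot \left(\left(\alpha + \beta\right) + 3\right)\right|_{\alpha = 0} = \left(2 + \beta\right) \cdot \left(3 + \beta\right) \]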

                            if 2e16 < beta

                            1. Initial program 84.1%

                              \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                            2. Add Preprocessing
                            3. Taylor expanded in beta around inf

                              \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                            4. Step-by-step derivation
                              1. lower-/.f64 N/A

                                \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                              2. lower-+.f64 N/A

                                \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                              3. unpow2 N/A

                                \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                              4. lower-*.f64 82.7

                                \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                            5. Applied rewrites 82.7%

                              \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                            6. Step-by-step derivation
                              1. Applied rewrites 85.1%

                                \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{\beta}}{\beta}} \]
                            7. Recombined 2 regimes into one program.
                            8. Add Preprocessing
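                        As a sanity check on the split point: at beta = 2e16 the low-order terms of (3 + beta)(2 + beta) are already below double-precision resolution, so the two branches should agree almost exactly at the boundary. A minimal sketch in plain Python (separate from the generated code above; the function names are illustrative only):

                        def branch_small(alpha, beta):
                            # beta <= 2e16 branch of Alternative 11
                            return (1.0 + alpha) / ((3.0 + beta) * (2.0 + beta))

                        def branch_large(alpha, beta):
                            # beta > 2e16 branch of Alternative 11
                            return ((1.0 + alpha) / beta) / beta

                        alpha, beta = 1.0, 2e16
                        a = branch_small(alpha, beta)
                        b = branch_large(alpha, beta)
                        print(abs(a - b) / abs(a))  # about 2.5e-16: the regimes join smoothly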

                            Alternative 12: 28.9% accurate, 2.9× speedup

                            \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\alpha \leq 0.0031:\\ \;\;\;\;\frac{1 + \alpha}{\beta \cdot \beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\ \end{array} \end{array} \]
                            (FPCore (alpha beta)
                             :precision binary64
                             (if (<= alpha 0.0031)
                               (/ (+ 1.0 alpha) (* beta beta))
                               (/ (/ alpha beta) beta)))
                            double code(double alpha, double beta) {
                            	double tmp;
                            	if (alpha <= 0.0031) {
                            		tmp = (1.0 + alpha) / (beta * beta);
                            	} else {
                            		tmp = (alpha / beta) / beta;
                            	}
                            	return tmp;
                            }
                            
                            module fmin_fmax_functions
                                implicit none
                                private
                                public fmax
                                public fmin
                            
                                interface fmax
                                    module procedure fmax88
                                    module procedure fmax44
                                    module procedure fmax84
                                    module procedure fmax48
                                end interface
                                interface fmin
                                    module procedure fmin88
                                    module procedure fmin44
                                    module procedure fmin84
                                    module procedure fmin48
                                end interface
                            contains
                                real(8) function fmax88(x, y) result (res)
                                    real(8), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                end function
                                real(4) function fmax44(x, y) result (res)
                                    real(4), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                end function
                                real(8) function fmax84(x, y) result(res)
                                    real(8), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                                end function
                                real(8) function fmax48(x, y) result(res)
                                    real(4), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                                end function
                                real(8) function fmin88(x, y) result (res)
                                    real(8), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                end function
                                real(4) function fmin44(x, y) result (res)
                                    real(4), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                end function
                                real(8) function fmin84(x, y) result(res)
                                    real(8), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                                end function
                                real(8) function fmin48(x, y) result(res)
                                    real(4), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                                end function
                            end module
                            
                            real(8) function code(alpha, beta)
                            use fmin_fmax_functions
                                real(8), intent (in) :: alpha
                                real(8), intent (in) :: beta
                                real(8) :: tmp
                                if (alpha <= 0.0031d0) then
                                    tmp = (1.0d0 + alpha) / (beta * beta)
                                else
                                    tmp = (alpha / beta) / beta
                                end if
                                code = tmp
                            end function
                            
                            public static double code(double alpha, double beta) {
                            	double tmp;
                            	if (alpha <= 0.0031) {
                            		tmp = (1.0 + alpha) / (beta * beta);
                            	} else {
                            		tmp = (alpha / beta) / beta;
                            	}
                            	return tmp;
                            }
                            
                            def code(alpha, beta):
                            	tmp = 0
                            	if alpha <= 0.0031:
                            		tmp = (1.0 + alpha) / (beta * beta)
                            	else:
                            		tmp = (alpha / beta) / beta
                            	return tmp
                            
                            function code(alpha, beta)
                            	tmp = 0.0
                            	if (alpha <= 0.0031)
                            		tmp = Float64(Float64(1.0 + alpha) / Float64(beta * beta));
                            	else
                            		tmp = Float64(Float64(alpha / beta) / beta);
                            	end
                            	return tmp
                            end
                            
                            function tmp_2 = code(alpha, beta)
                            	tmp = 0.0;
                            	if (alpha <= 0.0031)
                            		tmp = (1.0 + alpha) / (beta * beta);
                            	else
                            		tmp = (alpha / beta) / beta;
                            	end
                            	tmp_2 = tmp;
                            end
                            
                            code[alpha_, beta_] := If[LessEqual[alpha, 0.0031], N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]
                            
                            \begin{array}{l}
                            \\
                            \begin{array}{l}
                            \mathbf{if}\;\alpha \leq 0.0031:\\
                            \;\;\;\;\frac{1 + \alpha}{\beta \cdot \beta}\\
                            \mathbf{else}:\\
                            \;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\
                            \end{array}
                            \end{array}
                            
                            Derivation
                            1. Split input into 2 regimes
                            2. if alpha < 0.00309999999999999989

                              1. Initial program 99.9%

                                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              2. Add Preprocessing
                              3. Taylor expanded in beta around inf

                                \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                              4. Step-by-step derivation
                                1. lower-/.f64 N/A

                                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                2. lower-+.f64 N/A

                                  \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                3. unpow2 N/A

                                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                4. lower-*.f64 33.0

                                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                              5. Applied rewrites 33.0%

                                \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]

                              if 0.00309999999999999989 < alpha

                              1. Initial program 85.9%

                                \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                              2. Add Preprocessing
                              3. Taylor expanded in beta around inf

                                \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                              4. Step-by-step derivation
                                1. lower-/.f64 N/A

                                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                2. lower-+.f64 N/A

                                  \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                3. unpow2 N/A

                                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                4. lower-*.f64 15.1

                                  \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                              5. Applied rewrites 15.1%

                                \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                              6. Taylor expanded in alpha around inf

                                \[\leadsto \frac{\alpha}{\color{blue}{{\beta}^{2}}} \]
                              7. Step-by-step derivation
                                1. Applied rewrites 15.1%

                                  \[\leadsto \frac{\alpha}{\color{blue}{\beta \cdot \beta}} \]
                                2. Step-by-step derivation
                                  1. Applied rewrites 16.5%

                                    \[\leadsto \frac{\frac{\alpha}{\beta}}{\beta} \]
                                3. Recombined 2 regimes into one program.
                                4. Add Preprocessing

                                Alternative 13: 29.2% accurate, 3.2× speedup

                                \[\begin{array}{l} \\ \frac{\frac{1 + \alpha}{\beta}}{\beta} \end{array} \]
                                (FPCore (alpha beta) :precision binary64 (/ (/ (+ 1.0 alpha) beta) beta))
                                double code(double alpha, double beta) {
                                	return ((1.0 + alpha) / beta) / beta;
                                }
                                
                                module fmin_fmax_functions
                                    implicit none
                                    private
                                    public fmax
                                    public fmin
                                
                                    interface fmax
                                        module procedure fmax88
                                        module procedure fmax44
                                        module procedure fmax84
                                        module procedure fmax48
                                    end interface
                                    interface fmin
                                        module procedure fmin88
                                        module procedure fmin44
                                        module procedure fmin84
                                        module procedure fmin48
                                    end interface
                                contains
                                    real(8) function fmax88(x, y) result (res)
                                        real(8), intent (in) :: x
                                        real(8), intent (in) :: y
                                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                    end function
                                    real(4) function fmax44(x, y) result (res)
                                        real(4), intent (in) :: x
                                        real(4), intent (in) :: y
                                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                    end function
                                    real(8) function fmax84(x, y) result(res)
                                        real(8), intent (in) :: x
                                        real(4), intent (in) :: y
                                        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                                    end function
                                    real(8) function fmax48(x, y) result(res)
                                        real(4), intent (in) :: x
                                        real(8), intent (in) :: y
                                        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                                    end function
                                    real(8) function fmin88(x, y) result (res)
                                        real(8), intent (in) :: x
                                        real(8), intent (in) :: y
                                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                    end function
                                    real(4) function fmin44(x, y) result (res)
                                        real(4), intent (in) :: x
                                        real(4), intent (in) :: y
                                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                    end function
                                    real(8) function fmin84(x, y) result(res)
                                        real(8), intent (in) :: x
                                        real(4), intent (in) :: y
                                        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                                    end function
                                    real(8) function fmin48(x, y) result(res)
                                        real(4), intent (in) :: x
                                        real(8), intent (in) :: y
                                        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                                    end function
                                end module
                                
                                real(8) function code(alpha, beta)
                                use fmin_fmax_functions
                                    real(8), intent (in) :: alpha
                                    real(8), intent (in) :: beta
                                    code = ((1.0d0 + alpha) / beta) / beta
                                end function
                                
                                public static double code(double alpha, double beta) {
                                	return ((1.0 + alpha) / beta) / beta;
                                }
                                
                                def code(alpha, beta):
                                	return ((1.0 + alpha) / beta) / beta
                                
                                function code(alpha, beta)
                                	return Float64(Float64(Float64(1.0 + alpha) / beta) / beta)
                                end
                                
                                function tmp = code(alpha, beta)
                                	tmp = ((1.0 + alpha) / beta) / beta;
                                end
                                
                                code[alpha_, beta_] := N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]
                                
                                \begin{array}{l}
                                \\
                                \frac{\frac{1 + \alpha}{\beta}}{\beta}
                                \end{array}
                                
                                Derivation
                                1. Initial program 95.2%

                                  \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                2. Add Preprocessing
                                3. Taylor expanded in beta around inf

                                  \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                4. Step-by-step derivation
                                  1. lower-/.f64 N/A

                                    \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                  2. lower-+.f64 N/A

                                    \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                  3. unpow2 N/A

                                    \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                  4. lower-*.f64 27.1

                                    \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                5. Applied rewrites 27.1%

                                  \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                6. Step-by-step derivation
                                  1. Applied rewrites 27.8%

                                    \[\leadsto \color{blue}{\frac{\frac{1 + \alpha}{\beta}}{\beta}} \]
                                  2. Add Preprocessing
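                                One plausible motivation for the final rewrite of (1 + alpha)/(beta * beta) into ((1 + alpha)/beta)/beta is range: beta * beta overflows to infinity once beta exceeds roughly 1.3e154, collapsing the quotient to zero, while two successive divisions stay finite much longer. A small plain-Python illustration (not part of the generated code):

                                alpha, beta = 0.0, 1e160
                                squared_form = (1.0 + alpha) / (beta * beta)     # beta * beta == inf, so this is 0.0
                                sequential_form = ((1.0 + alpha) / beta) / beta  # about 1e-320, a subnormal but nonzero double
                                print(squared_form, sequential_form)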

                                  Alternative 14: 28.1% accurate, 3.6× speedup

                                  \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\alpha \leq 0.0031:\\ \;\;\;\;\frac{1}{\beta \cdot \beta}\\ \mathbf{else}:\\ \;\;\;\;\frac{\alpha}{\beta \cdot \beta}\\ \end{array} \end{array} \]
                                  (FPCore (alpha beta)
                                   :precision binary64
                                   (if (<= alpha 0.0031) (/ 1.0 (* beta beta)) (/ alpha (* beta beta))))
                                  double code(double alpha, double beta) {
                                  	double tmp;
                                  	if (alpha <= 0.0031) {
                                  		tmp = 1.0 / (beta * beta);
                                  	} else {
                                  		tmp = alpha / (beta * beta);
                                  	}
                                  	return tmp;
                                  }
                                  
                                  module fmin_fmax_functions
                                      implicit none
                                      private
                                      public fmax
                                      public fmin
                                  
                                      interface fmax
                                          module procedure fmax88
                                          module procedure fmax44
                                          module procedure fmax84
                                          module procedure fmax48
                                      end interface
                                      interface fmin
                                          module procedure fmin88
                                          module procedure fmin44
                                          module procedure fmin84
                                          module procedure fmin48
                                      end interface
                                  contains
                                      real(8) function fmax88(x, y) result (res)
                                          real(8), intent (in) :: x
                                          real(8), intent (in) :: y
                                          res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                      end function
                                      real(4) function fmax44(x, y) result (res)
                                          real(4), intent (in) :: x
                                          real(4), intent (in) :: y
                                          res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                      end function
                                      real(8) function fmax84(x, y) result(res)
                                          real(8), intent (in) :: x
                                          real(4), intent (in) :: y
                                          res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                                      end function
                                      real(8) function fmax48(x, y) result(res)
                                          real(4), intent (in) :: x
                                          real(8), intent (in) :: y
                                          res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                                      end function
                                      real(8) function fmin88(x, y) result (res)
                                          real(8), intent (in) :: x
                                          real(8), intent (in) :: y
                                          res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                      end function
                                      real(4) function fmin44(x, y) result (res)
                                          real(4), intent (in) :: x
                                          real(4), intent (in) :: y
                                          res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                      end function
                                      real(8) function fmin84(x, y) result(res)
                                          real(8), intent (in) :: x
                                          real(4), intent (in) :: y
                                          res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                                      end function
                                      real(8) function fmin48(x, y) result(res)
                                          real(4), intent (in) :: x
                                          real(8), intent (in) :: y
                                          res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                                      end function
                                  end module
                                  
                                  real(8) function code(alpha, beta)
                                  use fmin_fmax_functions
                                      real(8), intent (in) :: alpha
                                      real(8), intent (in) :: beta
                                      real(8) :: tmp
                                      if (alpha <= 0.0031d0) then
                                          tmp = 1.0d0 / (beta * beta)
                                      else
                                          tmp = alpha / (beta * beta)
                                      end if
                                      code = tmp
                                  end function
                                  
                                  public static double code(double alpha, double beta) {
                                  	double tmp;
                                  	if (alpha <= 0.0031) {
                                  		tmp = 1.0 / (beta * beta);
                                  	} else {
                                  		tmp = alpha / (beta * beta);
                                  	}
                                  	return tmp;
                                  }
                                  
                                  def code(alpha, beta):
                                  	tmp = 0
                                  	if alpha <= 0.0031:
                                  		tmp = 1.0 / (beta * beta)
                                  	else:
                                  		tmp = alpha / (beta * beta)
                                  	return tmp
                                  
                                  function code(alpha, beta)
                                  	tmp = 0.0
                                  	if (alpha <= 0.0031)
                                  		tmp = Float64(1.0 / Float64(beta * beta));
                                  	else
                                  		tmp = Float64(alpha / Float64(beta * beta));
                                  	end
                                  	return tmp
                                  end
                                  
                                  function tmp_2 = code(alpha, beta)
                                  	tmp = 0.0;
                                  	if (alpha <= 0.0031)
                                  		tmp = 1.0 / (beta * beta);
                                  	else
                                  		tmp = alpha / (beta * beta);
                                  	end
                                  	tmp_2 = tmp;
                                  end
                                  
                                  code[alpha_, beta_] := If[LessEqual[alpha, 0.0031], N[(1.0 / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(alpha / N[(beta * beta), $MachinePrecision]), $MachinePrecision]]
                                  
                                  \begin{array}{l}
                                  \\
                                  \begin{array}{l}
                                  \mathbf{if}\;\alpha \leq 0.0031:\\
                                  \;\;\;\;\frac{1}{\beta \cdot \beta}\\
                                  \mathbf{else}:\\
                                  \;\;\;\;\frac{\alpha}{\beta \cdot \beta}\\
                                  \end{array}
                                  \end{array}
                                  
                                  Derivation
                                  1. Split input into 2 regimes
                                  2. if alpha < 0.00309999999999999989

                                    1. Initial program 99.9%

                                      \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                    2. Add Preprocessing
                                    3. Taylor expanded in beta around inf

                                      \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                    4. Step-by-step derivation
                                      1. lower-/.f64 N/A

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      2. lower-+.f64 N/A

                                        \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                      3. unpow2 N/A

                                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      4. lower-*.f64 33.0

                                        \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                    5. Applied rewrites 33.0%

                                      \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                    6. Taylor expanded in alpha around 0

                                      \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]
                                    7. Step-by-step derivation
                                      1. Applied rewrites 32.7%

                                        \[\leadsto \frac{1}{\color{blue}{\beta} \cdot \beta} \]

                                      if 0.00309999999999999989 < alpha

                                      1. Initial program 85.9%

                                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      2. Add Preprocessing
                                      3. Taylor expanded in beta around inf

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      4. Step-by-step derivation
                                        1. lower-/.f64 N/A

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                        2. lower-+.f64 N/A

                                          \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                        3. unpow2 N/A

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                        4. lower-*.f64 15.1

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      5. Applied rewrites 15.1%

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                      6. Taylor expanded in alpha around inf

                                        \[\leadsto \frac{\alpha}{\color{blue}{{\beta}^{2}}} \]
                                      7. Step-by-step derivation
                                        1. Applied rewrites 15.1%

                                          \[\leadsto \frac{\alpha}{\color{blue}{\beta \cdot \beta}} \]
                                      8. Recombined 2 regimes into one program.
                                      9. Add Preprocessing
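                                      An error budget explains this split (recall the precondition alpha > -1): the first branch replaces 1 + alpha by 1, a relative error of |alpha|/(1 + alpha), at most about 0.31% for 0 <= alpha <= 0.0031 but unbounded as alpha approaches -1; the second replaces 1 + alpha by alpha, a relative error of 1/(1 + alpha), negligible only when alpha is much larger than 1. That both branches discard a term is consistent with the modest 28.1% overall accuracy:

                                        \[\left|\frac{1 - \left(1 + \alpha\right)}{1 + \alpha}\right| = \frac{\left|\alpha\right|}{1 + \alpha}, \qquad \left|\frac{\alpha - \left(1 + \alpha\right)}{1 + \alpha}\right| = \frac{1}{1 + \alpha} \]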

                                      Alternative 15: 28.5% accurate, 4.2× speedup

                                      \[\begin{array}{l} \\ \frac{1 + \alpha}{\beta \cdot \beta} \end{array} \]
                                      (FPCore (alpha beta) :precision binary64 (/ (+ 1.0 alpha) (* beta beta)))
                                      double code(double alpha, double beta) {
                                      	return (1.0 + alpha) / (beta * beta);
                                      }
                                      
                                      module fmin_fmax_functions
                                          implicit none
                                          private
                                          public fmax
                                          public fmin
                                      
                                          interface fmax
                                              module procedure fmax88
                                              module procedure fmax44
                                              module procedure fmax84
                                              module procedure fmax48
                                          end interface
                                          interface fmin
                                              module procedure fmin88
                                              module procedure fmin44
                                              module procedure fmin84
                                              module procedure fmin48
                                          end interface
                                      contains
                                          real(8) function fmax88(x, y) result (res)
                                              real(8), intent (in) :: x
                                              real(8), intent (in) :: y
                                              res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                          end function
                                          real(4) function fmax44(x, y) result (res)
                                              real(4), intent (in) :: x
                                              real(4), intent (in) :: y
                                              res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                          end function
                                          real(8) function fmax84(x, y) result(res)
                                              real(8), intent (in) :: x
                                              real(4), intent (in) :: y
                                              res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                                          end function
                                          real(8) function fmax48(x, y) result(res)
                                              real(4), intent (in) :: x
                                              real(8), intent (in) :: y
                                              res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                                          end function
                                          real(8) function fmin88(x, y) result (res)
                                              real(8), intent (in) :: x
                                              real(8), intent (in) :: y
                                              res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                          end function
                                          real(4) function fmin44(x, y) result (res)
                                              real(4), intent (in) :: x
                                              real(4), intent (in) :: y
                                              res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                          end function
                                          real(8) function fmin84(x, y) result(res)
                                              real(8), intent (in) :: x
                                              real(4), intent (in) :: y
                                              res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                                          end function
                                          real(8) function fmin48(x, y) result(res)
                                              real(4), intent (in) :: x
                                              real(8), intent (in) :: y
                                              res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                                          end function
                                      end module
                                      
                                      real(8) function code(alpha, beta)
                                      use fmin_fmax_functions
                                          real(8), intent (in) :: alpha
                                          real(8), intent (in) :: beta
                                          code = (1.0d0 + alpha) / (beta * beta)
                                      end function
                                      
                                      public static double code(double alpha, double beta) {
                                      	return (1.0 + alpha) / (beta * beta);
                                      }
                                      
                                      def code(alpha, beta):
                                      	return (1.0 + alpha) / (beta * beta)
                                      
                                      function code(alpha, beta)
                                      	return Float64(Float64(1.0 + alpha) / Float64(beta * beta))
                                      end
                                      
                                      function tmp = code(alpha, beta)
                                      	tmp = (1.0 + alpha) / (beta * beta);
                                      end
                                      
                                      code[alpha_, beta_] := N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
                                      
                                      \begin{array}{l}
                                      \\
                                      \frac{1 + \alpha}{\beta \cdot \beta}
                                      \end{array}
                                      
                                      Derivation
                                      1. Initial program 95.2%

                                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      2. Add Preprocessing
                                      3. Taylor expanded in beta around inf

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      4. Step-by-step derivation
                                        1. lower-/.f64 N/A

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                        2. lower-+.f64 N/A

                                          \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
                                        3. unpow2 N/A

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
  4. lower-*.f64 (27.1%)

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
5. Applied rewrites 27.1%

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
                                      6. Add Preprocessing
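
For intuition behind the Taylor-expansion step (not part of the generated derivation): the numerator of the initial program factors exactly as \(\left(1 + \alpha\right)\left(1 + \beta\right)\), so keeping only the leading terms in \(\beta\) gives

  \[\frac{\left(1 + \alpha\right) \cdot \left(1 + \beta\right)}{{\left(\left(\alpha + \beta\right) + 2\right)}^{2} \cdot \left(\left(\alpha + \beta\right) + 3\right)} \approx \frac{\left(1 + \alpha\right) \cdot \beta}{{\beta}^{2} \cdot \beta} = \frac{1 + \alpha}{{\beta}^{2}} \quad \left(\beta \to \infty\right) \]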

Alternative 16: 18.0% accurate, 4.9× speedup

                                      \[\begin{array}{l} \\ \frac{\alpha}{\beta \cdot \beta} \end{array} \]
                                      (FPCore (alpha beta) :precision binary64 (/ alpha (* beta beta)))
                                      double code(double alpha, double beta) {
                                      	return alpha / (beta * beta);
                                      }
                                      
                                      module fmin_fmax_functions
                                          implicit none
                                          private
                                          public fmax
                                          public fmin
                                      
                                          interface fmax
                                              module procedure fmax88
                                              module procedure fmax44
                                              module procedure fmax84
                                              module procedure fmax48
                                          end interface
                                          interface fmin
                                              module procedure fmin88
                                              module procedure fmin44
                                              module procedure fmin84
                                              module procedure fmin48
                                          end interface
                                      contains
                                          real(8) function fmax88(x, y) result (res)
                                              real(8), intent (in) :: x
                                              real(8), intent (in) :: y
                                              res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                          end function
                                          real(4) function fmax44(x, y) result (res)
                                              real(4), intent (in) :: x
                                              real(4), intent (in) :: y
                                              res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                          end function
                                          real(8) function fmax84(x, y) result(res)
                                              real(8), intent (in) :: x
                                              real(4), intent (in) :: y
                                              res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                                          end function
                                          real(8) function fmax48(x, y) result(res)
                                              real(4), intent (in) :: x
                                              real(8), intent (in) :: y
                                              res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                                          end function
                                          real(8) function fmin88(x, y) result (res)
                                              real(8), intent (in) :: x
                                              real(8), intent (in) :: y
                                              res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                          end function
                                          real(4) function fmin44(x, y) result (res)
                                              real(4), intent (in) :: x
                                              real(4), intent (in) :: y
                                              res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                          end function
                                          real(8) function fmin84(x, y) result(res)
                                              real(8), intent (in) :: x
                                              real(4), intent (in) :: y
                                              res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                                          end function
                                          real(8) function fmin48(x, y) result(res)
                                              real(4), intent (in) :: x
                                              real(8), intent (in) :: y
                                              res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                                          end function
                                      end module
                                      
                                      real(8) function code(alpha, beta)
                                      use fmin_fmax_functions
                                          real(8), intent (in) :: alpha
                                          real(8), intent (in) :: beta
                                          code = alpha / (beta * beta)
                                      end function
                                      
                                      public static double code(double alpha, double beta) {
                                      	return alpha / (beta * beta);
                                      }
                                      
                                      def code(alpha, beta):
                                      	return alpha / (beta * beta)
                                      
                                      function code(alpha, beta)
                                      	return Float64(alpha / Float64(beta * beta))
                                      end
                                      
                                      function tmp = code(alpha, beta)
                                      	tmp = alpha / (beta * beta);
                                      end
                                      
                                      code[alpha_, beta_] := N[(alpha / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
                                      
\[\begin{array}{l} \\ \frac{\alpha}{\beta \cdot \beta} \end{array} \]
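
As with the previous alternative, a quick Python sanity check (not part of the generated report; the helper names and sample points are ours, chosen arbitrarily). Dropping the constant from the numerator means this variant only tracks the original when both alpha and beta are large, which matches its lower reported accuracy.

def original(alpha, beta):
	t_0 = (alpha + beta) + (2.0 * 1.0)
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)

def alternative_16(alpha, beta):
	return alpha / (beta * beta)

# Rough agreement needs alpha and beta both large; a small alpha exposes the dropped 1.
for a, b in [(1.0e6, 1.0e8), (1.0e-3, 1.0e8), (0.5, 2.0)]:
	print(a, b, original(a, b), alternative_16(a, b))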
                                      
                                      Derivation
                                      1. Initial program 95.2%

                                        \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
                                      2. Add Preprocessing
                                      3. Taylor expanded in beta around inf

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
                                      4. Step-by-step derivation
  1. lower-/.f64 (N/A)

                                          \[\leadsto \color{blue}{\frac{1 + \alpha}{{\beta}^{2}}} \]
  2. lower-+.f64 (N/A)

                                          \[\leadsto \frac{\color{blue}{1 + \alpha}}{{\beta}^{2}} \]
  3. unpow2 (N/A)

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
  4. lower-*.f64 (27.1%)

                                          \[\leadsto \frac{1 + \alpha}{\color{blue}{\beta \cdot \beta}} \]
5. Applied rewrites 27.1%

                                        \[\leadsto \color{blue}{\frac{1 + \alpha}{\beta \cdot \beta}} \]
6. Taylor expanded in alpha around inf (a worked sketch of this step follows the derivation)

                                        \[\leadsto \frac{\alpha}{\color{blue}{{\beta}^{2}}} \]
7. Step-by-step derivation
8. Applied rewrites 16.0%

  \[\leadsto \frac{\alpha}{\color{blue}{\beta \cdot \beta}} \]
9. Add Preprocessing
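
Again for intuition (not part of the generated derivation): for large \(\alpha\) the constant term in the numerator is negligible, so

  \[\frac{1 + \alpha}{\beta \cdot \beta} = \frac{\alpha \cdot \left(1 + \frac{1}{\alpha}\right)}{{\beta}^{2}} \approx \frac{\alpha}{{\beta}^{2}} \quad \left(\alpha \to \infty\right) \]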

Reproduce

                                        herbie shell --seed 2024360 
                                        (FPCore (alpha beta)
                                          :name "Octave 3.8, jcobi/3"
                                          :precision binary64
                                          :pre (and (> alpha -1.0) (> beta -1.0))
                                          (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))