Average Error: 3.6 → 0.2
Time: 7.0s
Precision: binary64
\[\alpha > -1 \land \beta > -1\]
\[ [\alpha, \beta] = \mathsf{sort}([\alpha, \beta]) \]
\[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
\[{\left(\frac{\sqrt{\frac{1 + \alpha}{\beta + \left(\alpha + 3\right)} \cdot \left(1 + \beta\right)}}{\alpha + \left(\beta + 2\right)}\right)}^{2} \]
;; Initial program (average error 3.6): triple quotient form of the Jacobi
;; weight expression. The exact association of +, *, / is significant for
;; binary64 rounding, so the expression must not be algebraically rearranged.
(FPCore (alpha beta)
 :precision binary64
 (/
  (/
   (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0)))
   (+ (+ alpha beta) (* 2.0 1.0)))
  (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))
;; Herbie's improved program (average error 0.2): rewrites the triple quotient
;; as the square of sqrt(((1+alpha)/(beta+alpha+3)) * (1+beta)) / (alpha+beta+2),
;; which avoids the accuracy loss of the original. Structure is rounding-sensitive.
(FPCore (alpha beta)
 :precision binary64
 (pow
  (/
   (sqrt (* (/ (+ 1.0 alpha) (+ beta (+ alpha 3.0))) (+ 1.0 beta)))
   (+ alpha (+ beta 2.0)))
  2.0))
/* Initial (higher-error) form of the Jacobi weight expression, restyled with
 * named intermediates. (2.0 * 1.0) folds exactly to 2.0 in binary64, and the
 * repeated subexpressions are deterministic, so results are bit-identical. */
double code(double alpha, double beta) {
	double s = alpha + beta;
	double den = s + 2.0;
	double num = (s + (beta * alpha)) + 1.0;
	return ((num / den) / den) / (den + 1.0);
}
double code(double alpha, double beta) {
	return pow((sqrt((((1.0 + alpha) / (beta + (alpha + 3.0))) * (1.0 + beta))) / (alpha + (beta + 2.0))), 2.0);
}
real(8) function code(alpha, beta)
    ! Initial form of the Jacobi weight expression, restyled with named
    ! temporaries; (2.0d0 * 1.0d0) is exactly 2.0d0, so results are identical.
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: s, den
    s = alpha + beta
    den = s + 2.0d0
    code = (((s + (beta * alpha)) + 1.0d0) / den) / den / (den + 1.0d0)
end function
real(8) function code(alpha, beta)
    ! Improved form: square of sqrt(((1+a)/(b+a+3)) * (1+b)) / (a+b+2),
    ! restyled with a named radicand; evaluation order unchanged.
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: radicand
    radicand = ((1.0d0 + alpha) / (beta + (alpha + 3.0d0))) * (1.0d0 + beta)
    code = (sqrt(radicand) / (alpha + (beta + 2.0d0))) ** 2.0d0
end function
/**
 * Initial (higher-error) form of the Jacobi weight expression, restyled with
 * named intermediates. (2.0 * 1.0) is exactly 2.0 in binary64, so the result
 * is bit-identical to the fully inlined original.
 */
public static double code(double alpha, double beta) {
	double s = alpha + beta;
	double den = s + 2.0;
	double num = (s + (beta * alpha)) + 1.0;
	return ((num / den) / den) / (den + 1.0);
}
/**
 * Improved (lower-error) form: square of
 * sqrt(((1+alpha)/(beta+alpha+3)) * (1+beta)) / (alpha+beta+2),
 * restyled with named steps; evaluation order is unchanged.
 */
public static double code(double alpha, double beta) {
	double ratio = (1.0 + alpha) / (beta + (alpha + 3.0));
	double radicand = ratio * (1.0 + beta);
	double quotient = Math.sqrt(radicand) / (alpha + (beta + 2.0));
	return Math.pow(quotient, 2.0);
}
def code(alpha, beta):
	"""Initial (higher-error) Jacobi weight expression.

	Restyled with named intermediates; (2.0 * 1.0) is exactly 2.0 in
	binary64, so the result is bit-identical to the inlined original.
	"""
	s = alpha + beta
	den = s + 2.0
	num = (s + (beta * alpha)) + 1.0
	return ((num / den) / den) / (den + 1.0)
def code(alpha, beta):
	"""Improved (lower-error) form: square of
	sqrt(((1+alpha)/(beta+alpha+3)) * (1+beta)) / (alpha+beta+2).

	Restyled with named steps; evaluation order is unchanged.
	"""
	ratio = (1.0 + alpha) / (beta + (alpha + 3.0))
	radicand = ratio * (1.0 + beta)
	quotient = math.sqrt(radicand) / (alpha + (beta + 2.0))
	return math.pow(quotient, 2.0)
function code(alpha, beta)
	# Initial (higher-error) Jacobi weight expression, restyled with named
	# intermediates. The Float64() coercions of the original are preserved
	# so behavior matches for any numeric argument type.
	s = Float64(alpha + beta)
	den = Float64(s + Float64(2.0 * 1.0))
	num = Float64(Float64(s + Float64(beta * alpha)) + 1.0)
	return Float64(Float64(Float64(num / den) / den) / Float64(den + 1.0))
end
function code(alpha, beta)
	# Improved (lower-error) form: square of
	# sqrt(((1+alpha)/(beta+alpha+3)) * (1+beta)) / (alpha+beta+2).
	# Float64() coercions of the original are preserved.
	ratio = Float64(Float64(1.0 + alpha) / Float64(beta + Float64(alpha + 3.0)))
	radicand = Float64(ratio * Float64(1.0 + beta))
	quotient = Float64(sqrt(radicand) / Float64(alpha + Float64(beta + 2.0)))
	return quotient ^ 2.0
end
function tmp = code(alpha, beta)
	% Initial (higher-error) Jacobi weight expression, restyled with named
	% temporaries; (2.0 * 1.0) is exactly 2.0 in double precision.
	s = alpha + beta;
	den = s + 2.0;
	num = (s + (beta * alpha)) + 1.0;
	tmp = ((num / den) / den) / (den + 1.0);
end
function tmp = code(alpha, beta)
	% Improved (lower-error) form: square of
	% sqrt(((1+alpha)/(beta+alpha+3)) * (1+beta)) / (alpha+beta+2).
	ratio = (1.0 + alpha) / (beta + (alpha + 3.0));
	radicand = ratio * (1.0 + beta);
	tmp = (sqrt(radicand) / (alpha + (beta + 2.0))) ^ 2.0;
end
(* Initial (higher-error) Jacobi weight expression. Every intermediate is wrapped
   in N[..., $MachinePrecision] to emulate binary64 rounding, so the nesting must
   not be simplified. *)
code[alpha_, beta_] := N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
(* Improved (lower-error) form: square of
   Sqrt[((1+alpha)/(beta+alpha+3)) * (1+beta)] / (alpha+beta+2). The nested
   N[..., $MachinePrecision] wrappers emulate binary64 rounding; keep as-is. *)
code[alpha_, beta_] := N[Power[N[(N[Sqrt[N[(N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta + N[(alpha + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 + beta), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(alpha + N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}
{\left(\frac{\sqrt{\frac{1 + \alpha}{\beta + \left(\alpha + 3\right)} \cdot \left(1 + \beta\right)}}{\alpha + \left(\beta + 2\right)}\right)}^{2}

Error

Try it out

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation

  1. Initial program — average error 3.6

    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  2. Simplified — average error 2.2

    \[\leadsto \color{blue}{\frac{\alpha + 1}{\beta + \left(\alpha + 3\right)} \cdot \frac{\beta + 1}{\left(\alpha + \left(\beta + 2\right)\right) \cdot \left(\alpha + \left(\beta + 2\right)\right)}} \]
  3. Applied egg-rr — average error 0.2

    \[\leadsto \color{blue}{{\left(\frac{\sqrt{\frac{1 + \alpha}{\beta + \left(\alpha + 3\right)} \cdot \left(\beta + 1\right)}}{\alpha + \left(\beta + 2\right)}\right)}^{2}} \]
  4. Final simplification — average error 0.2

    \[\leadsto {\left(\frac{\sqrt{\frac{1 + \alpha}{\beta + \left(\alpha + 3\right)} \cdot \left(1 + \beta\right)}}{\alpha + \left(\beta + 2\right)}\right)}^{2} \]

Reproduce

herbie shell --seed 2022210 
;; Full input FPCore for reproduction: includes the :name and the
;; precondition (alpha > -1 and beta > -1) used when sampling points.
(FPCore (alpha beta)
  :name "Octave 3.8, jcobi/3"
  :precision binary64
  :pre (and (> alpha -1.0) (> beta -1.0))
  (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))