Average Error: 3.6 → 0.1
Time: 7.1s
Precision: binary64
\[\alpha > -1 \land \beta > -1\]
\[ \begin{array}{c}[\alpha, \beta] = \mathsf{sort}([\alpha, \beta])\\ \end{array} \]
\[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
\[\begin{array}{l} t_0 := \left(\alpha + \beta\right) + 2\\ \frac{\frac{\left(1 + \alpha\right) \cdot \frac{1 + \beta}{t_0}}{t_0}}{\left(\alpha + \beta\right) + 3} \end{array} \]
(FPCore (alpha beta)
 :precision binary64
 (/
  (/
   (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0)))
   (+ (+ alpha beta) (* 2.0 1.0)))
  (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ (+ alpha beta) 2.0)))
   (/ (/ (* (+ 1.0 alpha) (/ (+ 1.0 beta) t_0)) t_0) (+ (+ alpha beta) 3.0))))
double code(double alpha, double beta) {
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / ((alpha + beta) + (2.0 * 1.0))) / ((alpha + beta) + (2.0 * 1.0))) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
}
double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + 2.0;
	return (((1.0 + alpha) * ((1.0 + beta) / t_0)) / t_0) / ((alpha + beta) + 3.0);
}
real(8) function code(alpha, beta)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / ((alpha + beta) + (2.0d0 * 1.0d0))) / ((alpha + beta) + (2.0d0 * 1.0d0))) / (((alpha + beta) + (2.0d0 * 1.0d0)) + 1.0d0)
end function
real(8) function code(alpha, beta)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8) :: t_0
    t_0 = (alpha + beta) + 2.0d0
    code = (((1.0d0 + alpha) * ((1.0d0 + beta) / t_0)) / t_0) / ((alpha + beta) + 3.0d0)
end function
public static double code(double alpha, double beta) {
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / ((alpha + beta) + (2.0 * 1.0))) / ((alpha + beta) + (2.0 * 1.0))) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
}
public static double code(double alpha, double beta) {
	double t_0 = (alpha + beta) + 2.0;
	return (((1.0 + alpha) * ((1.0 + beta) / t_0)) / t_0) / ((alpha + beta) + 3.0);
}
def code(alpha, beta):
	return (((((alpha + beta) + (beta * alpha)) + 1.0) / ((alpha + beta) + (2.0 * 1.0))) / ((alpha + beta) + (2.0 * 1.0))) / (((alpha + beta) + (2.0 * 1.0)) + 1.0)
def code(alpha, beta):
	t_0 = (alpha + beta) + 2.0
	return (((1.0 + alpha) * ((1.0 + beta) / t_0)) / t_0) / ((alpha + beta) + 3.0)
function code(alpha, beta)
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))) / Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))) / Float64(Float64(Float64(alpha + beta) + Float64(2.0 * 1.0)) + 1.0))
end
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + 2.0)
	return Float64(Float64(Float64(Float64(1.0 + alpha) * Float64(Float64(1.0 + beta) / t_0)) / t_0) / Float64(Float64(alpha + beta) + 3.0))
end
function tmp = code(alpha, beta)
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / ((alpha + beta) + (2.0 * 1.0))) / ((alpha + beta) + (2.0 * 1.0))) / (((alpha + beta) + (2.0 * 1.0)) + 1.0);
end
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + 2.0;
	tmp = (((1.0 + alpha) * ((1.0 + beta) / t_0)) / t_0) / ((alpha + beta) + 3.0);
end
code[alpha_, beta_] := N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]}, N[(N[(N[(N[(1.0 + alpha), $MachinePrecision] * N[(N[(1.0 + beta), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 3.0), $MachinePrecision]), $MachinePrecision]]
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2\\
\frac{\frac{\left(1 + \alpha\right) \cdot \frac{1 + \beta}{t_0}}{t_0}}{\left(\alpha + \beta\right) + 3}
\end{array}

Error

Try it out

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation

  1. Initial program 3.6

    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  2. Simplified 2.1

    \[\leadsto \color{blue}{\frac{\alpha + 1}{\beta + \left(\alpha + 3\right)} \cdot \frac{\beta + 1}{\left(\alpha + \left(\beta + 2\right)\right) \cdot \left(\alpha + \left(\beta + 2\right)\right)}} \]
  3. Applied egg-rr 0.2

    \[\leadsto \frac{\alpha + 1}{\beta + \left(\alpha + 3\right)} \cdot \color{blue}{\left(\frac{1}{\alpha + \left(\beta + 2\right)} \cdot \frac{\beta + 1}{\alpha + \left(\beta + 2\right)}\right)} \]
  4. Applied egg-rr 0.2

    \[\leadsto \frac{\alpha + 1}{\beta + \left(\alpha + 3\right)} \cdot \left(\frac{1}{\alpha + \left(\beta + 2\right)} \cdot \color{blue}{\log \left(1 + \mathsf{expm1}\left(\frac{1 + \beta}{\alpha + \left(\beta + 2\right)}\right)\right)}\right) \]
  5. Applied egg-rr 0.1

    \[\leadsto \color{blue}{\frac{\left(1 + \alpha\right) \cdot \frac{\frac{1 + \beta}{\alpha + \left(\beta + 2\right)}}{\alpha + \left(\beta + 2\right)}}{\left(\beta + \alpha\right) + 3}} \]
  6. Applied egg-rr 0.1

    \[\leadsto \frac{\color{blue}{\frac{\left(1 + \alpha\right) \cdot \frac{1 + \beta}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2}}}{\left(\beta + \alpha\right) + 3} \]
  7. Final simplification 0.1

    \[\leadsto \frac{\frac{\left(1 + \alpha\right) \cdot \frac{1 + \beta}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 2}}{\left(\alpha + \beta\right) + 3} \]

Reproduce

herbie shell --seed 2022211 
(FPCore (alpha beta)
  :name "Octave 3.8, jcobi/3"
  :precision binary64
  :pre (and (> alpha -1.0) (> beta -1.0))
  (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))