Average Error: 3.8 → 0.2
Time: 5.3s
Precision: binary64
\[\alpha > -1 \land \beta > -1\]
\[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
\[\begin{array}{l} t_0 := \alpha + \left(\beta + 2\right)\\ \mathsf{log1p}\left(\mathsf{expm1}\left(\frac{1 + \alpha}{\beta + \left(\alpha + 3\right)}\right)\right) \cdot \left(\frac{1}{t_0} \cdot \frac{1 + \beta}{t_0}\right) \end{array} \]
;; Original expression: ((alpha+beta) + beta*alpha + 1) equals (alpha+1)*(beta+1)
;; expanded; it is divided twice by (alpha+beta+2) and once by (alpha+beta+3).
(FPCore (alpha beta)
 :precision binary64
 (/
  (/
   (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0)))
   (+ (+ alpha beta) (* 2.0 1.0)))
  (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))
;; Herbie's rewritten form: the quotient is factored as
;; ((1+alpha)/(alpha+beta+3)) * (1/t_0) * ((1+beta)/t_0) with t_0 = alpha+beta+2.
;; log1p(expm1(x)) is mathematically the identity (applied here up to rounding).
(FPCore (alpha beta)
 :precision binary64
 (let* ((t_0 (+ alpha (+ beta 2.0))))
   (*
    (log1p (expm1 (/ (+ 1.0 alpha) (+ beta (+ alpha 3.0)))))
    (* (/ 1.0 t_0) (/ (+ 1.0 beta) t_0)))))
/* Original program, C translation.
 * Shares the sum s = alpha + beta and the denominator d = s + 2 instead of
 * respelling them at every use; the floating-point operation sequence is
 * identical to the fully inlined form. */
double code(double alpha, double beta) {
	double s = alpha + beta;
	double d = s + (2.0 * 1.0);
	double num = (s + (beta * alpha)) + 1.0;
	return ((num / d) / d) / (d + 1.0);
}
double code(double alpha, double beta) {
	double t_0 = alpha + (beta + 2.0);
	return log1p(expm1(((1.0 + alpha) / (beta + (alpha + 3.0))))) * ((1.0 / t_0) * ((1.0 + beta) / t_0));
}
/**
 * Original program, Java translation: ((s + beta*alpha) + 1) / d / d / (d + 1)
 * with s = alpha + beta and d = s + 2. Same operation sequence as the fully
 * inlined expression, with the shared subterms named once.
 */
public static double code(double alpha, double beta) {
	double s = alpha + beta;
	double d = s + (2.0 * 1.0);
	double num = (s + (beta * alpha)) + 1.0;
	return ((num / d) / d) / (d + 1.0);
}
/**
 * Herbie's rewritten program, Java translation. log1p(expm1(ratio)) is the
 * identity on ratio up to rounding; the remaining factor divides (1 + beta)
 * by t_0 twice rather than forming t_0 * t_0.
 */
public static double code(double alpha, double beta) {
	double t_0 = alpha + (beta + 2.0);
	double ratio = (1.0 + alpha) / (beta + (alpha + 3.0));
	double scale = (1.0 / t_0) * ((1.0 + beta) / t_0);
	return Math.log1p(Math.expm1(ratio)) * scale;
}
def code(alpha, beta):
	"""Original program: ((s + beta*alpha) + 1) / d / d / (d + 1),
	where s = alpha + beta and d = s + 2 are named once instead of
	being respelled at each use. Operation order is unchanged."""
	s = alpha + beta
	d = s + (2.0 * 1.0)
	num = (s + (beta * alpha)) + 1.0
	return ((num / d) / d) / (d + 1.0)
def code(alpha, beta):
	"""Herbie's rewritten program: log1p(expm1(ratio)) equals ratio up to
	rounding; the second factor divides (1 + beta) by t_0 twice rather
	than squaring t_0."""
	t_0 = alpha + (beta + 2.0)
	ratio = (1.0 + alpha) / (beta + (alpha + 3.0))
	scale = (1.0 / t_0) * ((1.0 + beta) / t_0)
	return math.log1p(math.expm1(ratio)) * scale
# Original program, Julia translation. Every intermediate is coerced with
# Float64(...) exactly as in the generated form (this also fixes behavior for
# non-Float64 arguments); shared subterms are named once.
function code(alpha, beta)
	s = Float64(alpha + beta)
	d = Float64(s + Float64(2.0 * 1.0))
	num = Float64(Float64(s + Float64(beta * alpha)) + 1.0)
	return Float64(Float64(Float64(num / d) / d) / Float64(d + 1.0))
end
# Herbie's rewritten program, Julia translation. log1p(expm1(ratio)) equals
# ratio up to rounding; Float64 coercions are kept exactly as generated.
function code(alpha, beta)
	t_0 = Float64(alpha + Float64(beta + 2.0))
	ratio = Float64(Float64(1.0 + alpha) / Float64(beta + Float64(alpha + 3.0)))
	scale = Float64(Float64(1.0 / t_0) * Float64(Float64(1.0 + beta) / t_0))
	return Float64(log1p(expm1(ratio)) * scale)
end
(* Original program, Mathematica translation: every intermediate result is
   rounded with N[..., $MachinePrecision] to mimic binary64 evaluation. *)
code[alpha_, beta_] := N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
(* Herbie's rewritten program, Mathematica translation. log1p/expm1 have no
   direct builtins, so they are spelled Log[1 + ...] and Exp[...] - 1; every
   intermediate is rounded with N[..., $MachinePrecision]. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(alpha + N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]}, N[(N[Log[1 + N[(Exp[N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta + N[(alpha + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]] - 1), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 / t$95$0), $MachinePrecision] * N[(N[(1.0 + beta), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1}
\begin{array}{l}
t_0 := \alpha + \left(\beta + 2\right)\\
\mathsf{log1p}\left(\mathsf{expm1}\left(\frac{1 + \alpha}{\beta + \left(\alpha + 3\right)}\right)\right) \cdot \left(\frac{1}{t_0} \cdot \frac{1 + \beta}{t_0}\right)
\end{array}

Error

Bits error versus alpha

Bits error versus beta

Try it out

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation

  1. Initial program 3.8

    \[\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\alpha + \beta\right) + 2 \cdot 1}}{\left(\left(\alpha + \beta\right) + 2 \cdot 1\right) + 1} \]
  2. Simplified 2.2

    \[\leadsto \color{blue}{\frac{\alpha + 1}{\beta + \left(\alpha + 3\right)} \cdot \frac{\beta + 1}{\left(\alpha + \left(\beta + 2\right)\right) \cdot \left(\alpha + \left(\beta + 2\right)\right)}} \]
  3. Applied egg-rr 0.2

    \[\leadsto \frac{\alpha + 1}{\beta + \left(\alpha + 3\right)} \cdot \color{blue}{\left(\frac{1}{\alpha + \left(\beta + 2\right)} \cdot \frac{\beta + 1}{\alpha + \left(\beta + 2\right)}\right)} \]
  4. Applied egg-rr 0.2

    \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(\frac{1 + \alpha}{\beta + \left(\alpha + 3\right)}\right)\right)} \cdot \left(\frac{1}{\alpha + \left(\beta + 2\right)} \cdot \frac{\beta + 1}{\alpha + \left(\beta + 2\right)}\right) \]
  5. Final simplification 0.2

    \[\leadsto \mathsf{log1p}\left(\mathsf{expm1}\left(\frac{1 + \alpha}{\beta + \left(\alpha + 3\right)}\right)\right) \cdot \left(\frac{1}{\alpha + \left(\beta + 2\right)} \cdot \frac{1 + \beta}{\alpha + \left(\beta + 2\right)}\right) \]

Reproduce

herbie shell --seed 2022146 
;; Input FPCore for the `herbie shell` command above. The precondition
;; alpha > -1 and beta > -1 keeps every denominator (alpha+beta+2 and
;; alpha+beta+3) strictly positive.
(FPCore (alpha beta)
  :name "Octave 3.8, jcobi/3"
  :precision binary64
  :pre (and (> alpha -1.0) (> beta -1.0))
  (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))