\[\alpha > -1 \land \beta > -1\]
Math FPCore C Java Python Julia Wolfram TeX
\[\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\]
↓
\[\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2} \leq -1:\\
\;\;\;\;\frac{\beta + 1}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\frac{e^{\mathsf{log1p}\left(\frac{\beta - \alpha}{\beta + \left(\alpha + 2\right)}\right)}}{2}\\
\end{array}
\]
(FPCore (alpha beta)
:precision binary64
(/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
↓
(FPCore (alpha beta)
:precision binary64
(if (<= (/ (- beta alpha) (+ (+ beta alpha) 2.0)) -1.0)
(/ (+ beta 1.0) alpha)
(/ (exp (log1p (/ (- beta alpha) (+ beta (+ alpha 2.0))))) 2.0)))
double code(double alpha, double beta) {
return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0;
}
↓
double code(double alpha, double beta) {
double tmp;
if (((beta - alpha) / ((beta + alpha) + 2.0)) <= -1.0) {
tmp = (beta + 1.0) / alpha;
} else {
tmp = exp(log1p(((beta - alpha) / (beta + (alpha + 2.0))))) / 2.0;
}
return tmp;
}
public static double code(double alpha, double beta) {
    // Direct evaluation of ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2,
    // keeping the exact floating-point association of the source expression.
    double numer = beta - alpha;
    double denom = (alpha + beta) + 2.0;
    return ((numer / denom) + 1.0) / 2.0;
}
↓
public static double code(double alpha, double beta) {
    // Regime-split evaluation of ((beta - alpha)/(alpha + beta + 2) + 1) / 2.
    // When the ratio is <= -1 the log1p argument would leave its domain,
    // so the fallback (beta + 1) / alpha is used in that regime.
    double ratio = (beta - alpha) / ((beta + alpha) + 2.0);
    if (ratio <= -1.0) {
        return (beta + 1.0) / alpha;
    }
    // This branch re-associates the denominator as beta + (alpha + 2.0),
    // exactly as in the generated expression.
    return Math.exp(Math.log1p((beta - alpha) / (beta + (alpha + 2.0)))) / 2.0;
}
def code(alpha, beta):
    """Evaluate ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in float64."""
    numer = beta - alpha
    denom = (alpha + beta) + 2.0
    return ((numer / denom) + 1.0) / 2.0
↓
def code(alpha, beta):
    """Regime-split evaluation of ((beta - alpha) / (alpha + beta + 2) + 1) / 2.

    When the ratio (beta - alpha) / ((beta + alpha) + 2) is <= -1, the
    log1p-based branch would receive an out-of-domain argument, so the
    fallback expression (beta + 1) / alpha is used in that regime.
    """
    # Bug fix: the original listing called math.exp/math.log1p without any
    # import of `math`, raising NameError at runtime. Import locally since
    # this file has no module-level import block.
    import math

    ratio = (beta - alpha) / ((beta + alpha) + 2.0)
    if ratio <= -1.0:
        return (beta + 1.0) / alpha
    # Denominator deliberately re-associated as beta + (alpha + 2.0),
    # matching the generated expression exactly.
    return math.exp(math.log1p((beta - alpha) / (beta + (alpha + 2.0)))) / 2.0
function code(alpha, beta)
    # Direct form: ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2,
    # with every intermediate rounded to Float64 as in the source.
    ratio = Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0))
    return Float64(Float64(ratio + 1.0) / 2.0)
end
↓
function code(alpha, beta)
    # Regime-split evaluation of ((beta - alpha)/(alpha + beta + 2) + 1) / 2.
    # When the ratio is <= -1 the log1p argument would leave its domain,
    # so the fallback (beta + 1) / alpha is used in that regime.
    ratio = Float64(Float64(beta - alpha) / Float64(Float64(beta + alpha) + 2.0))
    if ratio <= -1.0
        return Float64(Float64(beta + 1.0) / alpha)
    end
    # Denominator re-associated as beta + (alpha + 2.0), matching the
    # generated expression exactly.
    return Float64(exp(log1p(Float64(Float64(beta - alpha) / Float64(beta + Float64(alpha + 2.0))))) / 2.0)
end
(* Direct machine-precision evaluation of ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2. *)
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
↓
(* Regime-split form: when (beta - alpha)/((beta + alpha) + 2) <= -1, evaluates the fallback (beta + 1)/alpha; otherwise evaluates Exp[Log[1 + x]]/2 with x = (beta - alpha)/(beta + (alpha + 2)) — the log1p pattern of the other language listings. *)
code[alpha_, beta_] := If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], -1.0], N[(N[(beta + 1.0), $MachinePrecision] / alpha), $MachinePrecision], N[(N[Exp[N[Log[1 + N[(N[(beta - alpha), $MachinePrecision] / N[(beta + N[(alpha + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]]
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
↓
\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2} \leq -1:\\
\;\;\;\;\frac{\beta + 1}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\frac{e^{\mathsf{log1p}\left(\frac{\beta - \alpha}{\beta + \left(\alpha + 2\right)}\right)}}{2}\\
\end{array}
Alternatives
Alternative 1 Accuracy 99.4% Cost 14148
\[\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2} \leq -1:\\
\;\;\;\;\frac{\beta + 1}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\frac{e^{\mathsf{log1p}\left(\frac{\beta - \alpha}{\beta + \left(\alpha + 2\right)}\right)}}{2}\\
\end{array}
\]
Alternative 2 Accuracy 99.4% Cost 1476
\[\begin{array}{l}
t_0 := \frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2}\\
\mathbf{if}\;t_0 \leq -1:\\
\;\;\;\;\frac{\beta + 1}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\frac{t_0 + 1}{2}\\
\end{array}
\]
Alternative 3 Accuracy 73.7% Cost 844
\[\begin{array}{l}
t_0 := \frac{1 - \alpha \cdot 0.5}{2}\\
\mathbf{if}\;\alpha \leq 5 \cdot 10^{-287}:\\
\;\;\;\;t_0\\
\mathbf{elif}\;\alpha \leq 10^{-271}:\\
\;\;\;\;1\\
\mathbf{elif}\;\alpha \leq 1.95:\\
\;\;\;\;t_0\\
\mathbf{else}:\\
\;\;\;\;\frac{\beta + 1}{\alpha}\\
\end{array}
\]
Alternative 4 Accuracy 73.0% Cost 716
\[\begin{array}{l}
\mathbf{if}\;\alpha \leq 8 \cdot 10^{-287}:\\
\;\;\;\;0.5\\
\mathbf{elif}\;\alpha \leq 6.2 \cdot 10^{-272}:\\
\;\;\;\;1\\
\mathbf{elif}\;\alpha \leq 80000000000:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\beta + 1}{\alpha}\\
\end{array}
\]
Alternative 5 Accuracy 93.2% Cost 708
\[\begin{array}{l}
\mathbf{if}\;\alpha \leq 680000000000:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\beta + 1}{\alpha}\\
\end{array}
\]
Alternative 6 Accuracy 67.7% Cost 588
\[\begin{array}{l}
\mathbf{if}\;\alpha \leq 7.2 \cdot 10^{-287}:\\
\;\;\;\;0.5\\
\mathbf{elif}\;\alpha \leq 6.2 \cdot 10^{-272}:\\
\;\;\;\;1\\
\mathbf{elif}\;\alpha \leq 80000000000:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\alpha}\\
\end{array}
\]
Alternative 7 Accuracy 68.3% Cost 324
\[\begin{array}{l}
\mathbf{if}\;\alpha \leq 80000000000:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\alpha}\\
\end{array}
\]
Alternative 8 Accuracy 49.8% Cost 64
\[0.5
\]