
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
/* Direct binary64 evaluation of ((beta - alpha)/(alpha + beta + 2) + 1)/2.
 * The association and operation order match the FPCore above exactly, so
 * the rounding behavior is unchanged. */
double code(double alpha, double beta) {
    double num = beta - alpha;              /* numerator of the ratio */
    double den = (alpha + beta) + 2.0;      /* same association as the original */
    double ratio = num / den;
    return (ratio + 1.0) / 2.0;
}
! Evaluate ((beta - alpha)/(alpha + beta + 2) + 1)/2 in double precision.
! Direct transcription of the FPCore expression above; the association and
! operation order fix the binary64 rounding and must not be changed.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = (((beta - alpha) / ((alpha + beta) + 2.0d0)) + 1.0d0) / 2.0d0
end function
/** Computes ((beta - alpha) / (alpha + beta + 2) + 1) / 2 in binary64,
 *  preserving the exact operation order of the FPCore expression. */
public static double code(double alpha, double beta) {
    final double ratio = (beta - alpha) / ((alpha + beta) + 2.0);
    return (ratio + 1.0) / 2.0;
}
def code(alpha, beta):
    """Return ((beta - alpha) / (alpha + beta + 2) + 1) / 2 in binary64.

    Operation order matches the FPCore expression exactly, so rounding
    behavior is identical to the original one-line form.
    """
    ratio = (beta - alpha) / ((alpha + beta) + 2.0)
    return (ratio + 1.0) / 2.0
# Julia rendering of ((beta - alpha)/(alpha + beta + 2) + 1)/2: every
# intermediate is explicitly wrapped in Float64 to mirror the binary64
# rounding of the FPCore expression above.
function code(alpha, beta) return Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) + 1.0) / 2.0) end
% MATLAB rendering of ((beta - alpha)/(alpha + beta + 2) + 1)/2 in double
% precision; operation order matches the FPCore expression above.
function tmp = code(alpha, beta) tmp = (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0; end
(* Mathematica rendering: each subexpression is rounded via N[..., $MachinePrecision]
   so the evaluation mimics binary64 step-by-step rounding of the FPCore expression. *)
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
double code(double alpha, double beta) {
return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = (((beta - alpha) / ((alpha + beta) + 2.0d0)) + 1.0d0) / 2.0d0
end function
public static double code(double alpha, double beta) {
return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0;
}
def code(alpha, beta): return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0
function code(alpha, beta) return Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) + 1.0) / 2.0) end
function tmp = code(alpha, beta) tmp = (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0; end
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (/ (- beta alpha) (+ (+ beta alpha) 2.0)))
(t_1 (/ (+ beta 2.0) alpha)))
(if (<= t_0 -0.5)
(/
(+
(/ beta (+ beta (+ alpha 2.0)))
(- (/ (- beta -2.0) alpha) (- (pow t_1 2.0) (pow t_1 3.0))))
2.0)
(/ (+ t_0 1.0) 2.0))))
/* Herbie alternative: branch-stabilized evaluation of
 * ((beta - alpha)/(alpha + beta + 2) + 1)/2.
 * For t_0 <= -0.5 (where the derivation log reports the naive form at ~11%
 * accuracy) a rewrite obtained by Taylor expansion in alpha around inf is
 * used; otherwise the direct formula is kept.
 * NOTE: the exact operation order determines the binary64 rounding — do not
 * re-associate or simplify these expressions. */
double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
/* t_1 = (beta + 2)/alpha; divides by alpha unconditionally, so alpha == 0
 * yields inf/nan here even on the else path — intentional per the FPCore. */
double t_1 = (beta + 2.0) / alpha;
double tmp;
if (t_0 <= -0.5) {
/* series form: beta/(beta + alpha + 2) + (beta + 2)/alpha - t_1^2 + t_1^3, halved */
tmp = ((beta / (beta + (alpha + 2.0))) + (((beta - -2.0) / alpha) - (pow(t_1, 2.0) - pow(t_1, 3.0)))) / 2.0;
} else {
/* well-conditioned region: direct formula */
tmp = (t_0 + 1.0) / 2.0;
}
return tmp;
}
! Herbie alternative: branch-stabilized evaluation of
! ((beta - alpha)/(alpha + beta + 2) + 1)/2.
! For t_0 <= -0.5 a Taylor-expanded rewrite (in alpha around inf, per the
! derivation log) replaces the cancellation-prone direct form.
! Operation order fixes the binary64 rounding; do not re-associate.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = (beta - alpha) / ((beta + alpha) + 2.0d0)
! t_1 divides by alpha unconditionally (matches the FPCore let* binding)
t_1 = (beta + 2.0d0) / alpha
if (t_0 <= (-0.5d0)) then
! series form: beta/(beta+alpha+2) + (beta+2)/alpha - t_1**2 + t_1**3, halved
tmp = ((beta / (beta + (alpha + 2.0d0))) + (((beta - (-2.0d0)) / alpha) - ((t_1 ** 2.0d0) - (t_1 ** 3.0d0)))) / 2.0d0
else
! well-conditioned region: direct formula
tmp = (t_0 + 1.0d0) / 2.0d0
end if
code = tmp
end function
/** Herbie alternative: branch-stabilized ((beta - alpha)/(alpha + beta + 2) + 1)/2.
 *  For t_0 &lt;= -0.5 a Taylor-expanded rewrite replaces the cancellation-prone
 *  direct form. Operation order fixes the binary64 rounding; do not re-associate. */
public static double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
// t_1 divides by alpha unconditionally, matching the FPCore let* binding
double t_1 = (beta + 2.0) / alpha;
double tmp;
if (t_0 <= -0.5) {
// series form: beta/(beta + alpha + 2) + (beta + 2)/alpha - t_1^2 + t_1^3, halved
tmp = ((beta / (beta + (alpha + 2.0))) + (((beta - -2.0) / alpha) - (Math.pow(t_1, 2.0) - Math.pow(t_1, 3.0)))) / 2.0;
} else {
// well-conditioned region: direct formula
tmp = (t_0 + 1.0) / 2.0;
}
return tmp;
}
def code(alpha, beta):
    """Branch-stabilized form of ((beta - alpha)/(alpha + beta + 2) + 1)/2.

    For t_0 <= -0.5 (the cancellation-prone region) a Taylor-expanded
    rewrite is used; otherwise the direct formula. Mirrors the C/Fortran/Java
    renderings of this Herbie alternative.

    Note: t_1 divides by alpha unconditionally, so alpha == 0.0 raises
    ZeroDivisionError even on the else path — this matches the FPCore let*.
    """
    # Fix: the report flattened this function onto one invalid line and
    # called math.pow without importing math; ``**`` on floats is
    # bit-identical to math.pow for these arguments and needs no import.
    t_0 = (beta - alpha) / ((beta + alpha) + 2.0)
    t_1 = (beta + 2.0) / alpha
    tmp = 0
    if t_0 <= -0.5:
        tmp = ((beta / (beta + (alpha + 2.0))) + (((beta - -2.0) / alpha) - (t_1 ** 2.0 - t_1 ** 3.0))) / 2.0
    else:
        tmp = (t_0 + 1.0) / 2.0
    return tmp
function code(alpha, beta) t_0 = Float64(Float64(beta - alpha) / Float64(Float64(beta + alpha) + 2.0)) t_1 = Float64(Float64(beta + 2.0) / alpha) tmp = 0.0 if (t_0 <= -0.5) tmp = Float64(Float64(Float64(beta / Float64(beta + Float64(alpha + 2.0))) + Float64(Float64(Float64(beta - -2.0) / alpha) - Float64((t_1 ^ 2.0) - (t_1 ^ 3.0)))) / 2.0); else tmp = Float64(Float64(t_0 + 1.0) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta) t_0 = (beta - alpha) / ((beta + alpha) + 2.0); t_1 = (beta + 2.0) / alpha; tmp = 0.0; if (t_0 <= -0.5) tmp = ((beta / (beta + (alpha + 2.0))) + (((beta - -2.0) / alpha) - ((t_1 ^ 2.0) - (t_1 ^ 3.0)))) / 2.0; else tmp = (t_0 + 1.0) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta - alpha), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(beta + 2.0), $MachinePrecision] / alpha), $MachinePrecision]}, If[LessEqual[t$95$0, -0.5], N[(N[(N[(beta / N[(beta + N[(alpha + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(beta - -2.0), $MachinePrecision] / alpha), $MachinePrecision] - N[(N[Power[t$95$1, 2.0], $MachinePrecision] - N[Power[t$95$1, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(t$95$0 + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2}\\
t_1 := \frac{\beta + 2}{\alpha}\\
\mathbf{if}\;t_0 \leq -0.5:\\
\;\;\;\;\frac{\frac{\beta}{\beta + \left(\alpha + 2\right)} + \left(\frac{\beta - -2}{\alpha} - \left({t_1}^{2} - {t_1}^{3}\right)\right)}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{t_0 + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) < -0.5Initial program 7.8%
+-commutative7.8%
Simplified7.8%
div-sub7.8%
associate-+l-11.3%
associate-+l+11.3%
associate-+l+11.3%
Applied egg-rr11.3%
Taylor expanded in alpha around inf 87.5%
mul-1-neg87.5%
distribute-neg-frac87.5%
+-commutative87.5%
distribute-neg-in87.5%
metadata-eval87.5%
sub-neg87.5%
unpow287.5%
unpow287.5%
times-frac87.5%
unpow287.5%
mul-1-neg87.5%
unsub-neg87.5%
cube-div99.9%
Simplified99.9%
if -0.5 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) Initial program 100.0%
Final simplification100.0%
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (/ (- beta alpha) (+ (+ beta alpha) 2.0))))
(if (<= t_0 -0.5)
(/
(+
(/ beta (+ beta (+ alpha 2.0)))
(- (/ (- beta -2.0) alpha) (pow (/ (+ beta 2.0) alpha) 2.0)))
2.0)
(/ (+ t_0 1.0) 2.0))))
double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
double tmp;
if (t_0 <= -0.5) {
tmp = ((beta / (beta + (alpha + 2.0))) + (((beta - -2.0) / alpha) - pow(((beta + 2.0) / alpha), 2.0))) / 2.0;
} else {
tmp = (t_0 + 1.0) / 2.0;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
real(8) :: tmp
t_0 = (beta - alpha) / ((beta + alpha) + 2.0d0)
if (t_0 <= (-0.5d0)) then
tmp = ((beta / (beta + (alpha + 2.0d0))) + (((beta - (-2.0d0)) / alpha) - (((beta + 2.0d0) / alpha) ** 2.0d0))) / 2.0d0
else
tmp = (t_0 + 1.0d0) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
double tmp;
if (t_0 <= -0.5) {
tmp = ((beta / (beta + (alpha + 2.0))) + (((beta - -2.0) / alpha) - Math.pow(((beta + 2.0) / alpha), 2.0))) / 2.0;
} else {
tmp = (t_0 + 1.0) / 2.0;
}
return tmp;
}
def code(alpha, beta): t_0 = (beta - alpha) / ((beta + alpha) + 2.0) tmp = 0 if t_0 <= -0.5: tmp = ((beta / (beta + (alpha + 2.0))) + (((beta - -2.0) / alpha) - math.pow(((beta + 2.0) / alpha), 2.0))) / 2.0 else: tmp = (t_0 + 1.0) / 2.0 return tmp
function code(alpha, beta) t_0 = Float64(Float64(beta - alpha) / Float64(Float64(beta + alpha) + 2.0)) tmp = 0.0 if (t_0 <= -0.5) tmp = Float64(Float64(Float64(beta / Float64(beta + Float64(alpha + 2.0))) + Float64(Float64(Float64(beta - -2.0) / alpha) - (Float64(Float64(beta + 2.0) / alpha) ^ 2.0))) / 2.0); else tmp = Float64(Float64(t_0 + 1.0) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta) t_0 = (beta - alpha) / ((beta + alpha) + 2.0); tmp = 0.0; if (t_0 <= -0.5) tmp = ((beta / (beta + (alpha + 2.0))) + (((beta - -2.0) / alpha) - (((beta + 2.0) / alpha) ^ 2.0))) / 2.0; else tmp = (t_0 + 1.0) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta - alpha), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -0.5], N[(N[(N[(beta / N[(beta + N[(alpha + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(beta - -2.0), $MachinePrecision] / alpha), $MachinePrecision] - N[Power[N[(N[(beta + 2.0), $MachinePrecision] / alpha), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(t$95$0 + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2}\\
\mathbf{if}\;t_0 \leq -0.5:\\
\;\;\;\;\frac{\frac{\beta}{\beta + \left(\alpha + 2\right)} + \left(\frac{\beta - -2}{\alpha} - {\left(\frac{\beta + 2}{\alpha}\right)}^{2}\right)}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{t_0 + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) < -0.5Initial program 7.8%
+-commutative7.8%
Simplified7.8%
div-sub7.8%
associate-+l-11.3%
associate-+l+11.3%
associate-+l+11.3%
Applied egg-rr11.3%
Taylor expanded in alpha around inf 92.9%
mul-1-neg92.9%
distribute-neg-frac92.9%
+-commutative92.9%
distribute-neg-in92.9%
metadata-eval92.9%
sub-neg92.9%
unpow292.9%
unpow292.9%
times-frac99.6%
unpow299.6%
Simplified99.6%
if -0.5 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) Initial program 100.0%
Final simplification99.9%
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (/ (- beta alpha) (+ (+ beta alpha) 2.0))))
(if (<= t_0 -0.5)
(/
(fma
(/ (- (- -2.0 beta) beta) alpha)
(/ (+ beta 2.0) alpha)
(/ (+ beta (+ beta 2.0)) alpha))
2.0)
(/ (+ t_0 1.0) 2.0))))
double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
double tmp;
if (t_0 <= -0.5) {
tmp = fma((((-2.0 - beta) - beta) / alpha), ((beta + 2.0) / alpha), ((beta + (beta + 2.0)) / alpha)) / 2.0;
} else {
tmp = (t_0 + 1.0) / 2.0;
}
return tmp;
}
function code(alpha, beta) t_0 = Float64(Float64(beta - alpha) / Float64(Float64(beta + alpha) + 2.0)) tmp = 0.0 if (t_0 <= -0.5) tmp = Float64(fma(Float64(Float64(Float64(-2.0 - beta) - beta) / alpha), Float64(Float64(beta + 2.0) / alpha), Float64(Float64(beta + Float64(beta + 2.0)) / alpha)) / 2.0); else tmp = Float64(Float64(t_0 + 1.0) / 2.0); end return tmp end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta - alpha), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -0.5], N[(N[(N[(N[(N[(-2.0 - beta), $MachinePrecision] - beta), $MachinePrecision] / alpha), $MachinePrecision] * N[(N[(beta + 2.0), $MachinePrecision] / alpha), $MachinePrecision] + N[(N[(beta + N[(beta + 2.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(t$95$0 + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2}\\
\mathbf{if}\;t_0 \leq -0.5:\\
\;\;\;\;\frac{\mathsf{fma}\left(\frac{\left(-2 - \beta\right) - \beta}{\alpha}, \frac{\beta + 2}{\alpha}, \frac{\beta + \left(\beta + 2\right)}{\alpha}\right)}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{t_0 + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) < -0.5Initial program 7.8%
+-commutative7.8%
Simplified7.8%
Taylor expanded in alpha around -inf 92.9%
Simplified99.6%
if -0.5 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) Initial program 100.0%
Final simplification99.9%
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ beta (+ alpha 2.0))))
(if (<= (/ (- beta alpha) (+ (+ beta alpha) 2.0)) -1.0)
(/ (* 2.0 (+ (/ beta alpha) (/ 1.0 alpha))) 2.0)
(/ (+ (/ beta t_0) (- 1.0 (/ alpha t_0))) 2.0))))
double code(double alpha, double beta) {
double t_0 = beta + (alpha + 2.0);
double tmp;
if (((beta - alpha) / ((beta + alpha) + 2.0)) <= -1.0) {
tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0;
} else {
tmp = ((beta / t_0) + (1.0 - (alpha / t_0))) / 2.0;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
real(8) :: tmp
t_0 = beta + (alpha + 2.0d0)
if (((beta - alpha) / ((beta + alpha) + 2.0d0)) <= (-1.0d0)) then
tmp = (2.0d0 * ((beta / alpha) + (1.0d0 / alpha))) / 2.0d0
else
tmp = ((beta / t_0) + (1.0d0 - (alpha / t_0))) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double t_0 = beta + (alpha + 2.0);
double tmp;
if (((beta - alpha) / ((beta + alpha) + 2.0)) <= -1.0) {
tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0;
} else {
tmp = ((beta / t_0) + (1.0 - (alpha / t_0))) / 2.0;
}
return tmp;
}
def code(alpha, beta): t_0 = beta + (alpha + 2.0) tmp = 0 if ((beta - alpha) / ((beta + alpha) + 2.0)) <= -1.0: tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0 else: tmp = ((beta / t_0) + (1.0 - (alpha / t_0))) / 2.0 return tmp
function code(alpha, beta) t_0 = Float64(beta + Float64(alpha + 2.0)) tmp = 0.0 if (Float64(Float64(beta - alpha) / Float64(Float64(beta + alpha) + 2.0)) <= -1.0) tmp = Float64(Float64(2.0 * Float64(Float64(beta / alpha) + Float64(1.0 / alpha))) / 2.0); else tmp = Float64(Float64(Float64(beta / t_0) + Float64(1.0 - Float64(alpha / t_0))) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta) t_0 = beta + (alpha + 2.0); tmp = 0.0; if (((beta - alpha) / ((beta + alpha) + 2.0)) <= -1.0) tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0; else tmp = ((beta / t_0) + (1.0 - (alpha / t_0))) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := Block[{t$95$0 = N[(beta + N[(alpha + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], -1.0], N[(N[(2.0 * N[(N[(beta / alpha), $MachinePrecision] + N[(1.0 / alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(beta / t$95$0), $MachinePrecision] + N[(1.0 - N[(alpha / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \beta + \left(\alpha + 2\right)\\
\mathbf{if}\;\frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2} \leq -1:\\
\;\;\;\;\frac{2 \cdot \left(\frac{\beta}{\alpha} + \frac{1}{\alpha}\right)}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\beta}{t_0} + \left(1 - \frac{\alpha}{t_0}\right)}{2}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) < -1Initial program 5.2%
+-commutative5.2%
Simplified5.2%
Taylor expanded in alpha around -inf 100.0%
associate-*r/100.0%
sub-neg100.0%
mul-1-neg100.0%
distribute-lft-in100.0%
neg-mul-1100.0%
mul-1-neg100.0%
remove-double-neg100.0%
neg-mul-1100.0%
mul-1-neg100.0%
remove-double-neg100.0%
+-commutative100.0%
Simplified100.0%
Taylor expanded in beta around 0 100.0%
distribute-lft-out100.0%
*-commutative100.0%
Applied egg-rr100.0%
if -1 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) Initial program 99.5%
+-commutative99.5%
Simplified99.5%
div-sub99.5%
associate-+l-99.5%
associate-+l+99.5%
associate-+l+99.5%
Applied egg-rr99.5%
Final simplification99.6%
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (/ (- beta alpha) (+ (+ beta alpha) 2.0))))
(if (<= t_0 -1.0)
(/ (* 2.0 (+ (/ beta alpha) (/ 1.0 alpha))) 2.0)
(/ (+ t_0 1.0) 2.0))))
double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
double tmp;
if (t_0 <= -1.0) {
tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0;
} else {
tmp = (t_0 + 1.0) / 2.0;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
real(8) :: tmp
t_0 = (beta - alpha) / ((beta + alpha) + 2.0d0)
if (t_0 <= (-1.0d0)) then
tmp = (2.0d0 * ((beta / alpha) + (1.0d0 / alpha))) / 2.0d0
else
tmp = (t_0 + 1.0d0) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
double tmp;
if (t_0 <= -1.0) {
tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0;
} else {
tmp = (t_0 + 1.0) / 2.0;
}
return tmp;
}
def code(alpha, beta): t_0 = (beta - alpha) / ((beta + alpha) + 2.0) tmp = 0 if t_0 <= -1.0: tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0 else: tmp = (t_0 + 1.0) / 2.0 return tmp
function code(alpha, beta) t_0 = Float64(Float64(beta - alpha) / Float64(Float64(beta + alpha) + 2.0)) tmp = 0.0 if (t_0 <= -1.0) tmp = Float64(Float64(2.0 * Float64(Float64(beta / alpha) + Float64(1.0 / alpha))) / 2.0); else tmp = Float64(Float64(t_0 + 1.0) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta) t_0 = (beta - alpha) / ((beta + alpha) + 2.0); tmp = 0.0; if (t_0 <= -1.0) tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0; else tmp = (t_0 + 1.0) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta - alpha), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -1.0], N[(N[(2.0 * N[(N[(beta / alpha), $MachinePrecision] + N[(1.0 / alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(t$95$0 + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2}\\
\mathbf{if}\;t_0 \leq -1:\\
\;\;\;\;\frac{2 \cdot \left(\frac{\beta}{\alpha} + \frac{1}{\alpha}\right)}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{t_0 + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) < -1Initial program 5.2%
+-commutative5.2%
Simplified5.2%
Taylor expanded in alpha around -inf 100.0%
associate-*r/100.0%
sub-neg100.0%
mul-1-neg100.0%
distribute-lft-in100.0%
neg-mul-1100.0%
mul-1-neg100.0%
remove-double-neg100.0%
neg-mul-1100.0%
mul-1-neg100.0%
remove-double-neg100.0%
+-commutative100.0%
Simplified100.0%
Taylor expanded in beta around 0 100.0%
distribute-lft-out100.0%
*-commutative100.0%
Applied egg-rr100.0%
if -1 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) Initial program 99.5%
Final simplification99.6%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 5.8e+31) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (* 2.0 (+ (/ beta alpha) (/ 1.0 alpha))) 2.0)))
double code(double alpha, double beta) {
double tmp;
if (alpha <= 5.8e+31) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 5.8d+31) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = (2.0d0 * ((beta / alpha) + (1.0d0 / alpha))) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 5.8e+31) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0;
}
return tmp;
}
def code(alpha, beta): tmp = 0 if alpha <= 5.8e+31: tmp = (1.0 + (beta / (beta + 2.0))) / 2.0 else: tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0 return tmp
function code(alpha, beta) tmp = 0.0 if (alpha <= 5.8e+31) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(2.0 * Float64(Float64(beta / alpha) + Float64(1.0 / alpha))) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 5.8e+31) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; else tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[alpha, 5.8e+31], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(2.0 * N[(N[(beta / alpha), $MachinePrecision] + N[(1.0 / alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 5.8 \cdot 10^{+31}:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{2 \cdot \left(\frac{\beta}{\alpha} + \frac{1}{\alpha}\right)}{2}\\
\end{array}
\end{array}
if alpha < 5.8000000000000001e31Initial program 99.1%
+-commutative99.1%
Simplified99.1%
Taylor expanded in alpha around 0 97.9%
if 5.8000000000000001e31 < alpha Initial program 13.6%
+-commutative13.6%
Simplified13.6%
Taylor expanded in alpha around -inf 91.8%
associate-*r/91.8%
sub-neg91.8%
mul-1-neg91.8%
distribute-lft-in91.8%
neg-mul-191.8%
mul-1-neg91.8%
remove-double-neg91.8%
neg-mul-191.8%
mul-1-neg91.8%
remove-double-neg91.8%
+-commutative91.8%
Simplified91.8%
Taylor expanded in beta around 0 91.8%
distribute-lft-out91.8%
*-commutative91.8%
Applied egg-rr91.8%
Final simplification96.1%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 1.2e+33) (/ (+ 1.0 (/ 1.0 (/ (+ beta 2.0) beta))) 2.0) (/ (* 2.0 (+ (/ beta alpha) (/ 1.0 alpha))) 2.0)))
double code(double alpha, double beta) {
double tmp;
if (alpha <= 1.2e+33) {
tmp = (1.0 + (1.0 / ((beta + 2.0) / beta))) / 2.0;
} else {
tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 1.2d+33) then
tmp = (1.0d0 + (1.0d0 / ((beta + 2.0d0) / beta))) / 2.0d0
else
tmp = (2.0d0 * ((beta / alpha) + (1.0d0 / alpha))) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 1.2e+33) {
tmp = (1.0 + (1.0 / ((beta + 2.0) / beta))) / 2.0;
} else {
tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0;
}
return tmp;
}
def code(alpha, beta): tmp = 0 if alpha <= 1.2e+33: tmp = (1.0 + (1.0 / ((beta + 2.0) / beta))) / 2.0 else: tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0 return tmp
function code(alpha, beta) tmp = 0.0 if (alpha <= 1.2e+33) tmp = Float64(Float64(1.0 + Float64(1.0 / Float64(Float64(beta + 2.0) / beta))) / 2.0); else tmp = Float64(Float64(2.0 * Float64(Float64(beta / alpha) + Float64(1.0 / alpha))) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 1.2e+33) tmp = (1.0 + (1.0 / ((beta + 2.0) / beta))) / 2.0; else tmp = (2.0 * ((beta / alpha) + (1.0 / alpha))) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[alpha, 1.2e+33], N[(N[(1.0 + N[(1.0 / N[(N[(beta + 2.0), $MachinePrecision] / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(2.0 * N[(N[(beta / alpha), $MachinePrecision] + N[(1.0 / alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1.2 \cdot 10^{+33}:\\
\;\;\;\;\frac{1 + \frac{1}{\frac{\beta + 2}{\beta}}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{2 \cdot \left(\frac{\beta}{\alpha} + \frac{1}{\alpha}\right)}{2}\\
\end{array}
\end{array}
if alpha < 1.2e33Initial program 99.1%
+-commutative99.1%
Simplified99.1%
Taylor expanded in alpha around 0 97.9%
clear-num97.9%
inv-pow97.9%
Applied egg-rr97.9%
unpow-197.9%
Simplified97.9%
if 1.2e33 < alpha Initial program 13.6%
+-commutative13.6%
Simplified13.6%
Taylor expanded in alpha around -inf 91.8%
associate-*r/91.8%
sub-neg91.8%
mul-1-neg91.8%
distribute-lft-in91.8%
neg-mul-191.8%
mul-1-neg91.8%
remove-double-neg91.8%
neg-mul-191.8%
mul-1-neg91.8%
remove-double-neg91.8%
+-commutative91.8%
Simplified91.8%
Taylor expanded in beta around 0 91.8%
distribute-lft-out91.8%
*-commutative91.8%
Applied egg-rr91.8%
Final simplification96.1%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 5.8e+31) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ 2.0 alpha) 2.0)))
double code(double alpha, double beta) {
double tmp;
if (alpha <= 5.8e+31) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = (2.0 / alpha) / 2.0;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 5.8d+31) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = (2.0d0 / alpha) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 5.8e+31) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = (2.0 / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta): tmp = 0 if alpha <= 5.8e+31: tmp = (1.0 + (beta / (beta + 2.0))) / 2.0 else: tmp = (2.0 / alpha) / 2.0 return tmp
function code(alpha, beta) tmp = 0.0 if (alpha <= 5.8e+31) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(2.0 / alpha) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 5.8e+31) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; else tmp = (2.0 / alpha) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[alpha, 5.8e+31], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(2.0 / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 5.8 \cdot 10^{+31}:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 5.8000000000000001e31Initial program 99.1%
+-commutative99.1%
Simplified99.1%
Taylor expanded in alpha around 0 97.9%
if 5.8000000000000001e31 < alpha Initial program 13.6%
+-commutative13.6%
Simplified13.6%
Taylor expanded in alpha around -inf 91.8%
associate-*r/91.8%
sub-neg91.8%
mul-1-neg91.8%
distribute-lft-in91.8%
neg-mul-191.8%
mul-1-neg91.8%
remove-double-neg91.8%
neg-mul-191.8%
mul-1-neg91.8%
remove-double-neg91.8%
+-commutative91.8%
Simplified91.8%
Taylor expanded in beta around 0 70.4%
Final simplification89.9%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 3.2e+34) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ (+ beta (+ beta 2.0)) alpha) 2.0)))
double code(double alpha, double beta) {
double tmp;
if (alpha <= 3.2e+34) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = ((beta + (beta + 2.0)) / alpha) / 2.0;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 3.2d+34) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = ((beta + (beta + 2.0d0)) / alpha) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 3.2e+34) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = ((beta + (beta + 2.0)) / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta): tmp = 0 if alpha <= 3.2e+34: tmp = (1.0 + (beta / (beta + 2.0))) / 2.0 else: tmp = ((beta + (beta + 2.0)) / alpha) / 2.0 return tmp
function code(alpha, beta) tmp = 0.0 if (alpha <= 3.2e+34) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(Float64(beta + Float64(beta + 2.0)) / alpha) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 3.2e+34) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; else tmp = ((beta + (beta + 2.0)) / alpha) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[alpha, 3.2e+34], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(beta + N[(beta + 2.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 3.2 \cdot 10^{+34}:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\beta + \left(\beta + 2\right)}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 3.1999999999999998e34Initial program 99.1%
+-commutative99.1%
Simplified99.1%
Taylor expanded in alpha around 0 97.9%
if 3.1999999999999998e34 < alpha Initial program 13.6%
+-commutative13.6%
Simplified13.6%
Taylor expanded in alpha around -inf 91.8%
associate-*r/91.8%
sub-neg91.8%
mul-1-neg91.8%
distribute-lft-in91.8%
neg-mul-191.8%
mul-1-neg91.8%
remove-double-neg91.8%
neg-mul-191.8%
mul-1-neg91.8%
remove-double-neg91.8%
+-commutative91.8%
Simplified91.8%
Final simplification96.1%
(FPCore (alpha beta) :precision binary64 (if (<= beta 2.0) (/ (+ 1.0 (* beta 0.5)) 2.0) 1.0))
/* Piecewise approximation of the original expression: linear in beta for
 * beta <= 2, constant 1 above. alpha is unused in this alternative. */
double code(double alpha, double beta) {
    (void) alpha;  /* dropped by Herbie's Taylor expansion in alpha around 0 */
    if (beta <= 2.0) {
        return (1.0 + (beta * 0.5)) / 2.0;
    }
    return 1.0;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (beta <= 2.0d0) then
tmp = (1.0d0 + (beta * 0.5d0)) / 2.0d0
else
tmp = 1.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (beta <= 2.0) {
tmp = (1.0 + (beta * 0.5)) / 2.0;
} else {
tmp = 1.0;
}
return tmp;
}
def code(alpha, beta): tmp = 0 if beta <= 2.0: tmp = (1.0 + (beta * 0.5)) / 2.0 else: tmp = 1.0 return tmp
function code(alpha, beta) tmp = 0.0 if (beta <= 2.0) tmp = Float64(Float64(1.0 + Float64(beta * 0.5)) / 2.0); else tmp = 1.0; end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 2.0) tmp = (1.0 + (beta * 0.5)) / 2.0; else tmp = 1.0; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 2.0], N[(N[(1.0 + N[(beta * 0.5), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 2:\\
\;\;\;\;\frac{1 + \beta \cdot 0.5}{2}\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if beta < 2Initial program 70.9%
+-commutative70.9%
Simplified70.9%
Taylor expanded in alpha around 0 69.7%
Taylor expanded in beta around 0 68.8%
*-commutative68.8%
Simplified68.8%
if 2 < beta Initial program 80.8%
+-commutative80.8%
Simplified80.8%
Taylor expanded in beta around inf 77.8%
Final simplification71.7%
(FPCore (alpha beta) :precision binary64 (if (<= beta 1.95) (/ (+ 1.0 (* beta 0.5)) 2.0) (/ (- 2.0 (/ 2.0 beta)) 2.0)))
double code(double alpha, double beta) {
double tmp;
if (beta <= 1.95) {
tmp = (1.0 + (beta * 0.5)) / 2.0;
} else {
tmp = (2.0 - (2.0 / beta)) / 2.0;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (beta <= 1.95d0) then
tmp = (1.0d0 + (beta * 0.5d0)) / 2.0d0
else
tmp = (2.0d0 - (2.0d0 / beta)) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (beta <= 1.95) {
tmp = (1.0 + (beta * 0.5)) / 2.0;
} else {
tmp = (2.0 - (2.0 / beta)) / 2.0;
}
return tmp;
}
def code(alpha, beta): tmp = 0 if beta <= 1.95: tmp = (1.0 + (beta * 0.5)) / 2.0 else: tmp = (2.0 - (2.0 / beta)) / 2.0 return tmp
function code(alpha, beta) tmp = 0.0 if (beta <= 1.95) tmp = Float64(Float64(1.0 + Float64(beta * 0.5)) / 2.0); else tmp = Float64(Float64(2.0 - Float64(2.0 / beta)) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 1.95) tmp = (1.0 + (beta * 0.5)) / 2.0; else tmp = (2.0 - (2.0 / beta)) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 1.95], N[(N[(1.0 + N[(beta * 0.5), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(2.0 - N[(2.0 / beta), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 1.95:\\
\;\;\;\;\frac{1 + \beta \cdot 0.5}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{2 - \frac{2}{\beta}}{2}\\
\end{array}
\end{array}
if beta < 1.94999999999999996Initial program 70.9%
+-commutative70.9%
Simplified70.9%
Taylor expanded in alpha around 0 69.7%
Taylor expanded in beta around 0 68.8%
*-commutative68.8%
Simplified68.8%
if 1.94999999999999996 < beta Initial program 80.8%
+-commutative80.8%
Simplified80.8%
Taylor expanded in alpha around 0 79.5%
Taylor expanded in beta around inf 78.9%
associate-*r/78.9%
metadata-eval78.9%
Simplified78.9%
Final simplification72.0%
(FPCore (alpha beta) :precision binary64 (if (<= beta 95.0) 0.5 1.0))
double code(double alpha, double beta) {
double tmp;
if (beta <= 95.0) {
tmp = 0.5;
} else {
tmp = 1.0;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (beta <= 95.0d0) then
tmp = 0.5d0
else
tmp = 1.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (beta <= 95.0) {
tmp = 0.5;
} else {
tmp = 1.0;
}
return tmp;
}
def code(alpha, beta): tmp = 0 if beta <= 95.0: tmp = 0.5 else: tmp = 1.0 return tmp
function code(alpha, beta) tmp = 0.0 if (beta <= 95.0) tmp = 0.5; else tmp = 1.0; end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 95.0) tmp = 0.5; else tmp = 1.0; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 95.0], 0.5, 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 95:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if beta < 95Initial program 70.6%
+-commutative70.6%
Simplified70.6%
Taylor expanded in beta around 0 69.4%
+-commutative69.4%
Simplified69.4%
Taylor expanded in alpha around 0 68.2%
if 95 < beta Initial program 81.6%
+-commutative81.6%
Simplified81.6%
Taylor expanded in beta around inf 78.8%
Final simplification71.5%
(FPCore (alpha beta) :precision binary64 0.5)
/* Most aggressive Herbie simplification: the whole expression is replaced
 * by the constant 1/2; both parameters are intentionally unused. */
double code(double alpha, double beta) {
    (void) alpha;
    (void) beta;
    const double half = 0.5;
    return half;
}
! Most aggressive Herbie simplification: the whole expression is replaced
! by the constant 0.5; both arguments are intentionally unused.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = 0.5d0
end function
public static double code(double alpha, double beta) {
return 0.5;
}
def code(alpha, beta):
    """Constant approximation of the original expression.

    Herbie's final simplification drops both arguments and returns 0.5.
    """
    return 0.5
function code(alpha, beta) return 0.5 end
function tmp = code(alpha, beta) tmp = 0.5; end
code[alpha_, beta_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 74.0%
+-commutative74.0%
Simplified74.0%
Taylor expanded in beta around 0 52.1%
+-commutative52.1%
Simplified52.1%
Taylor expanded in alpha around 0 52.0%
Final simplification52.0%
herbie shell --seed 2023201
;; Herbie input core ("Octave 3.8, jcobi/1"): the original expression whose
;; rewritten alternatives appear earlier in this report.
;; Precondition: alpha > -1 and beta > -1 (the valid Jacobi parameter range
;; per the :pre clause), so the denominator alpha + beta + 2 is positive.
(FPCore (alpha beta)
:name "Octave 3.8, jcobi/1"
:precision binary64
:pre (and (> alpha -1.0) (> beta -1.0))
(/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))