
(FPCore (alpha beta i) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))) (/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ t_0 2.0)) 1.0) 2.0)))
/* Evaluates (1 + (alpha+beta)*(beta-alpha) / (t*(t+2))) / 2 in binary64,
 * where t = alpha + beta + 2*i (direct, unrearranged form). */
double code(double alpha, double beta, double i) {
    const double sum = alpha + beta;
    const double t_0 = sum + (2.0 * i);          /* shared denominator term */
    const double num = sum * (beta - alpha);     /* (alpha+beta)(beta-alpha) */
    const double ratio = (num / t_0) / (t_0 + 2.0);
    return (ratio + 1.0) / 2.0;
}
! Direct evaluation of (1 + ((alpha+beta)*(beta-alpha)) / (t_0*(t_0+2))) / 2
! with t_0 = alpha + beta + 2*i, entirely in double precision (binary64).
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
! Shared denominator term: t_0 = alpha + beta + 2*i.
t_0 = (alpha + beta) + (2.0d0 * i)
code = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0d0)) + 1.0d0) / 2.0d0
end function
/** Direct evaluation of (1 + (alpha+beta)*(beta-alpha)/(t*(t+2)))/2 with t = alpha + beta + 2*i. */
public static double code(double alpha, double beta, double i) {
    final double sum = alpha + beta;
    final double t = sum + (2.0 * i);
    final double ratio = (sum * (beta - alpha) / t) / (t + 2.0);
    return (ratio + 1.0) / 2.0;
}
def code(alpha, beta, i):
    """Evaluate (1 + (alpha+beta)*(beta-alpha) / (t0*(t0+2))) / 2 with t0 = alpha + beta + 2*i.

    Reformatted from an invalid single-line dump (statements were joined
    without separators, which does not parse); arithmetic is unchanged.
    """
    t_0 = (alpha + beta) + (2.0 * i)
    return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0
# Direct evaluation of (1 + (alpha+beta)*(beta-alpha)/(t0*(t0+2)))/2 with t0 = alpha + beta + 2i.
# Reformatted from an invalid single-line dump (juxtaposed statements do not parse in Julia).
function code(alpha, beta, i)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(t_0 + 2.0)) + 1.0) / 2.0)
end
% Direct evaluation of (1 + (alpha+beta)*(beta-alpha)/(t0*(t0+2)))/2 with t0 = alpha + beta + 2*i.
% Reformatted from an invalid single-line dump (statements may not follow the
% function declaration on the same line in MATLAB); arithmetic is unchanged.
function tmp = code(alpha, beta, i)
    t_0 = (alpha + beta) + (2.0 * i);
    tmp = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\frac{\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_0}}{t\_0 + 2} + 1}{2}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta i) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))) (/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ t_0 2.0)) 1.0) 2.0)))
/* Evaluates (1 + (alpha+beta)*(beta-alpha) / (t*(t+2))) / 2 in binary64,
 * where t = alpha + beta + 2*i (direct, unrearranged form). */
double code(double alpha, double beta, double i) {
    const double sum = alpha + beta;
    const double t_0 = sum + (2.0 * i);
    const double num = sum * (beta - alpha);
    const double ratio = (num / t_0) / (t_0 + 2.0);
    return (ratio + 1.0) / 2.0;
}
! Direct evaluation of (1 + ((alpha+beta)*(beta-alpha)) / (t_0*(t_0+2))) / 2
! with t_0 = alpha + beta + 2*i, entirely in double precision (binary64).
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
! Shared denominator term: t_0 = alpha + beta + 2*i.
t_0 = (alpha + beta) + (2.0d0 * i)
code = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0d0)) + 1.0d0) / 2.0d0
end function
/** Direct evaluation of (1 + (alpha+beta)*(beta-alpha)/(t*(t+2)))/2 with t = alpha + beta + 2*i. */
public static double code(double alpha, double beta, double i) {
    final double sum = alpha + beta;
    final double t = sum + (2.0 * i);
    final double ratio = (sum * (beta - alpha) / t) / (t + 2.0);
    return (ratio + 1.0) / 2.0;
}
def code(alpha, beta, i):
    """Evaluate (1 + (alpha+beta)*(beta-alpha) / (t0*(t0+2))) / 2 with t0 = alpha + beta + 2*i.

    Reformatted from an invalid single-line dump (statements were joined
    without separators, which does not parse); arithmetic is unchanged.
    """
    t_0 = (alpha + beta) + (2.0 * i)
    return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0
# Direct evaluation of (1 + (alpha+beta)*(beta-alpha)/(t0*(t0+2)))/2 with t0 = alpha + beta + 2i.
# Reformatted from an invalid single-line dump (juxtaposed statements do not parse in Julia).
function code(alpha, beta, i)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(t_0 + 2.0)) + 1.0) / 2.0)
end
% Direct evaluation of (1 + (alpha+beta)*(beta-alpha)/(t0*(t0+2)))/2 with t0 = alpha + beta + 2*i.
% Reformatted from an invalid single-line dump; arithmetic is unchanged.
function tmp = code(alpha, beta, i)
    t_0 = (alpha + beta) + (2.0 * i);
    tmp = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\frac{\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_0}}{t\_0 + 2} + 1}{2}
\end{array}
\end{array}
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ beta (* 2.0 i))) (t_1 (+ (+ alpha beta) (* 2.0 i))))
(if (<=
(/ (/ (* (+ alpha beta) (- beta alpha)) t_1) (+ 2.0 t_1))
-0.9999999)
(/ (/ (+ (- beta beta) (+ t_0 (+ 2.0 t_0))) alpha) 2.0)
(/
(fma
(+ alpha beta)
(/
(/ (- beta alpha) (+ alpha (+ beta (fma 2.0 i 2.0))))
(+ alpha (fma 2.0 i beta)))
1.0)
2.0))))
double code(double alpha, double beta, double i) {
double t_0 = beta + (2.0 * i);
double t_1 = (alpha + beta) + (2.0 * i);
double tmp;
if (((((alpha + beta) * (beta - alpha)) / t_1) / (2.0 + t_1)) <= -0.9999999) {
tmp = (((beta - beta) + (t_0 + (2.0 + t_0))) / alpha) / 2.0;
} else {
tmp = fma((alpha + beta), (((beta - alpha) / (alpha + (beta + fma(2.0, i, 2.0)))) / (alpha + fma(2.0, i, beta))), 1.0) / 2.0;
}
return tmp;
}
# Herbie alternative: Taylor-expanded form when the core ratio <= -0.9999999,
# fma-based regrouping otherwise. Reformatted from an invalid single-line dump.
function code(alpha, beta, i)
    t_0 = Float64(beta + Float64(2.0 * i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    tmp = 0.0
    if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_1) / Float64(2.0 + t_1)) <= -0.9999999)
        tmp = Float64(Float64(Float64(Float64(beta - beta) + Float64(t_0 + Float64(2.0 + t_0))) / alpha) / 2.0)
    else
        tmp = Float64(fma(Float64(alpha + beta), Float64(Float64(Float64(beta - alpha) / Float64(alpha + Float64(beta + fma(2.0, i, 2.0)))) / Float64(alpha + fma(2.0, i, beta))), 1.0) / 2.0)
    end
    return tmp
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(beta + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(2.0 + t$95$1), $MachinePrecision]), $MachinePrecision], -0.9999999], N[(N[(N[(N[(beta - beta), $MachinePrecision] + N[(t$95$0 + N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(alpha + beta), $MachinePrecision] * N[(N[(N[(beta - alpha), $MachinePrecision] / N[(alpha + N[(beta + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(alpha + N[(2.0 * i + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \beta + 2 \cdot i\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_1}}{2 + t\_1} \leq -0.9999999:\\
\;\;\;\;\frac{\frac{\left(\beta - \beta\right) + \left(t\_0 + \left(2 + t\_0\right)\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\alpha + \beta, \frac{\frac{\beta - \alpha}{\alpha + \left(\beta + \mathsf{fma}\left(2, i, 2\right)\right)}}{\alpha + \mathsf{fma}\left(2, i, \beta\right)}, 1\right)}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) < -0.999999900000000053
Initial program 2.8%
Simplified 16.0%
Taylor expanded in alpha around -inf 90.4%
if -0.999999900000000053 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64)))
Initial program 83.2%
Simplified 99.6%
Final simplification 97.6%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ beta (* 2.0 i))) (t_1 (+ (+ alpha beta) (* 2.0 i))))
(if (<=
(/ (/ (* (+ alpha beta) (- beta alpha)) t_1) (+ 2.0 t_1))
-0.9999999)
(/ (/ (+ (- beta beta) (+ t_0 (+ 2.0 t_0))) alpha) 2.0)
(/
(+
(/
(* (- beta alpha) (/ (+ alpha beta) (fma 2.0 i (+ alpha beta))))
(+ alpha (+ beta (fma 2.0 i 2.0))))
1.0)
2.0))))
double code(double alpha, double beta, double i) {
double t_0 = beta + (2.0 * i);
double t_1 = (alpha + beta) + (2.0 * i);
double tmp;
if (((((alpha + beta) * (beta - alpha)) / t_1) / (2.0 + t_1)) <= -0.9999999) {
tmp = (((beta - beta) + (t_0 + (2.0 + t_0))) / alpha) / 2.0;
} else {
tmp = ((((beta - alpha) * ((alpha + beta) / fma(2.0, i, (alpha + beta)))) / (alpha + (beta + fma(2.0, i, 2.0)))) + 1.0) / 2.0;
}
return tmp;
}
# Herbie alternative (threshold -0.9999999, regrouped product form).
# Reformatted from an invalid single-line dump; arithmetic is unchanged.
function code(alpha, beta, i)
    t_0 = Float64(beta + Float64(2.0 * i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    tmp = 0.0
    if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_1) / Float64(2.0 + t_1)) <= -0.9999999)
        tmp = Float64(Float64(Float64(Float64(beta - beta) + Float64(t_0 + Float64(2.0 + t_0))) / alpha) / 2.0)
    else
        tmp = Float64(Float64(Float64(Float64(Float64(beta - alpha) * Float64(Float64(alpha + beta) / fma(2.0, i, Float64(alpha + beta)))) / Float64(alpha + Float64(beta + fma(2.0, i, 2.0)))) + 1.0) / 2.0)
    end
    return tmp
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(beta + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(2.0 + t$95$1), $MachinePrecision]), $MachinePrecision], -0.9999999], N[(N[(N[(N[(beta - beta), $MachinePrecision] + N[(t$95$0 + N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(N[(beta - alpha), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] / N[(2.0 * i + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(alpha + N[(beta + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \beta + 2 \cdot i\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_1}}{2 + t\_1} \leq -0.9999999:\\
\;\;\;\;\frac{\frac{\left(\beta - \beta\right) + \left(t\_0 + \left(2 + t\_0\right)\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(\beta - \alpha\right) \cdot \frac{\alpha + \beta}{\mathsf{fma}\left(2, i, \alpha + \beta\right)}}{\alpha + \left(\beta + \mathsf{fma}\left(2, i, 2\right)\right)} + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) < -0.999999900000000053Initial program 2.8%
Simplified16.0%
Taylor expanded in alpha around -inf 90.4%
if -0.999999900000000053 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) Initial program 83.2%
Simplified99.6%
Final simplification97.6%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ beta (* 2.0 i))) (t_1 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_1) (+ 2.0 t_1)) -0.999996)
(/ (/ (+ (- beta beta) (+ t_0 (+ 2.0 t_0))) alpha) 2.0)
(/
(+
(/
(* (- beta alpha) (/ (+ alpha beta) (fma 2.0 i (+ alpha beta))))
(*
beta
(+
(+ (* 2.0 (/ i beta)) (+ (* 2.0 (/ 1.0 beta)) (/ alpha beta)))
1.0)))
1.0)
2.0))))
double code(double alpha, double beta, double i) {
double t_0 = beta + (2.0 * i);
double t_1 = (alpha + beta) + (2.0 * i);
double tmp;
if (((((alpha + beta) * (beta - alpha)) / t_1) / (2.0 + t_1)) <= -0.999996) {
tmp = (((beta - beta) + (t_0 + (2.0 + t_0))) / alpha) / 2.0;
} else {
tmp = ((((beta - alpha) * ((alpha + beta) / fma(2.0, i, (alpha + beta)))) / (beta * (((2.0 * (i / beta)) + ((2.0 * (1.0 / beta)) + (alpha / beta))) + 1.0))) + 1.0) / 2.0;
}
return tmp;
}
# Herbie alternative (threshold -0.999996, beta-factored denominator).
# Reformatted from an invalid single-line dump; arithmetic is unchanged.
function code(alpha, beta, i)
    t_0 = Float64(beta + Float64(2.0 * i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    tmp = 0.0
    if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_1) / Float64(2.0 + t_1)) <= -0.999996)
        tmp = Float64(Float64(Float64(Float64(beta - beta) + Float64(t_0 + Float64(2.0 + t_0))) / alpha) / 2.0)
    else
        tmp = Float64(Float64(Float64(Float64(Float64(beta - alpha) * Float64(Float64(alpha + beta) / fma(2.0, i, Float64(alpha + beta)))) / Float64(beta * Float64(Float64(Float64(2.0 * Float64(i / beta)) + Float64(Float64(2.0 * Float64(1.0 / beta)) + Float64(alpha / beta))) + 1.0))) + 1.0) / 2.0)
    end
    return tmp
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(beta + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(2.0 + t$95$1), $MachinePrecision]), $MachinePrecision], -0.999996], N[(N[(N[(N[(beta - beta), $MachinePrecision] + N[(t$95$0 + N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(N[(beta - alpha), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] / N[(2.0 * i + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(beta * N[(N[(N[(2.0 * N[(i / beta), $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 * N[(1.0 / beta), $MachinePrecision]), $MachinePrecision] + N[(alpha / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \beta + 2 \cdot i\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_1}}{2 + t\_1} \leq -0.999996:\\
\;\;\;\;\frac{\frac{\left(\beta - \beta\right) + \left(t\_0 + \left(2 + t\_0\right)\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(\beta - \alpha\right) \cdot \frac{\alpha + \beta}{\mathsf{fma}\left(2, i, \alpha + \beta\right)}}{\beta \cdot \left(\left(2 \cdot \frac{i}{\beta} + \left(2 \cdot \frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + 1\right)} + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) < -0.999995999999999996Initial program 4.0%
Simplified16.9%
Taylor expanded in alpha around -inf 89.8%
if -0.999995999999999996 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) Initial program 83.3%
Simplified99.7%
Taylor expanded in beta around inf 99.7%
Final simplification97.5%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ beta (* 2.0 i))) (t_1 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_1) (+ 2.0 t_1)) -0.99999)
(/ (/ (+ (- beta beta) (+ t_0 (+ 2.0 t_0))) alpha) 2.0)
(/
(+
(/ (* (- beta alpha) (/ beta t_0)) (+ alpha (+ beta (fma 2.0 i 2.0))))
1.0)
2.0))))
double code(double alpha, double beta, double i) {
double t_0 = beta + (2.0 * i);
double t_1 = (alpha + beta) + (2.0 * i);
double tmp;
if (((((alpha + beta) * (beta - alpha)) / t_1) / (2.0 + t_1)) <= -0.99999) {
tmp = (((beta - beta) + (t_0 + (2.0 + t_0))) / alpha) / 2.0;
} else {
tmp = ((((beta - alpha) * (beta / t_0)) / (alpha + (beta + fma(2.0, i, 2.0)))) + 1.0) / 2.0;
}
return tmp;
}
# Herbie alternative (threshold -0.99999, beta/t_0 form).
# Reformatted from an invalid single-line dump; arithmetic is unchanged.
function code(alpha, beta, i)
    t_0 = Float64(beta + Float64(2.0 * i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    tmp = 0.0
    if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_1) / Float64(2.0 + t_1)) <= -0.99999)
        tmp = Float64(Float64(Float64(Float64(beta - beta) + Float64(t_0 + Float64(2.0 + t_0))) / alpha) / 2.0)
    else
        tmp = Float64(Float64(Float64(Float64(Float64(beta - alpha) * Float64(beta / t_0)) / Float64(alpha + Float64(beta + fma(2.0, i, 2.0)))) + 1.0) / 2.0)
    end
    return tmp
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(beta + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(2.0 + t$95$1), $MachinePrecision]), $MachinePrecision], -0.99999], N[(N[(N[(N[(beta - beta), $MachinePrecision] + N[(t$95$0 + N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(N[(beta - alpha), $MachinePrecision] * N[(beta / t$95$0), $MachinePrecision]), $MachinePrecision] / N[(alpha + N[(beta + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \beta + 2 \cdot i\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_1}}{2 + t\_1} \leq -0.99999:\\
\;\;\;\;\frac{\frac{\left(\beta - \beta\right) + \left(t\_0 + \left(2 + t\_0\right)\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(\beta - \alpha\right) \cdot \frac{\beta}{t\_0}}{\alpha + \left(\beta + \mathsf{fma}\left(2, i, 2\right)\right)} + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) < -0.999990000000000046Initial program 5.2%
Simplified17.9%
Taylor expanded in alpha around -inf 89.0%
if -0.999990000000000046 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) Initial program 83.3%
Simplified99.9%
Taylor expanded in alpha around 0 99.8%
Final simplification97.4%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ 2.0 t_0)) -0.5)
(/ (/ (+ (- beta beta) (+ 2.0 (+ (* beta 2.0) (* i 4.0)))) alpha) 2.0)
(/
(+
(/
beta
(* (+ beta (* 2.0 i)) (+ (+ (* 2.0 (/ i beta)) (/ 2.0 beta)) 1.0)))
1.0)
2.0))))
/* Herbie alternative: threshold -0.5 on the core ratio; else-branch divides
 * beta by a denominator expanded in 1/beta. */
double code(double alpha, double beta, double i) {
    const double t_0 = (alpha + beta) + (2.0 * i);
    const double probe = (((alpha + beta) * (beta - alpha)) / t_0) / (2.0 + t_0);
    if (probe <= -0.5) {
        return (((beta - beta) + (2.0 + ((beta * 2.0) + (i * 4.0)))) / alpha) / 2.0;
    }
    const double den = (beta + (2.0 * i)) * (((2.0 * (i / beta)) + (2.0 / beta)) + 1.0);
    return ((beta / den) + 1.0) / 2.0;
}
! Herbie alternative: branch on the core ratio at -0.5.  Below the threshold
! use a Taylor-expanded numerator; otherwise a 1/beta-expanded denominator.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
real(8) :: tmp
! Shared denominator term: t_0 = alpha + beta + 2*i.
t_0 = (alpha + beta) + (2.0d0 * i)
if (((((alpha + beta) * (beta - alpha)) / t_0) / (2.0d0 + t_0)) <= (-0.5d0)) then
! Near-cancellation regime (Taylor expansion in alpha).
tmp = (((beta - beta) + (2.0d0 + ((beta * 2.0d0) + (i * 4.0d0)))) / alpha) / 2.0d0
else
tmp = ((beta / ((beta + (2.0d0 * i)) * (((2.0d0 * (i / beta)) + (2.0d0 / beta)) + 1.0d0))) + 1.0d0) / 2.0d0
end if
code = tmp
end function
/** Herbie alternative: threshold -0.5 on the core ratio; the else branch
 *  divides beta by a denominator expanded in 1/beta. */
public static double code(double alpha, double beta, double i) {
    final double t0 = (alpha + beta) + (2.0 * i);
    final double probe = ((alpha + beta) * (beta - alpha) / t0) / (2.0 + t0);
    if (probe <= -0.5) {
        // Near-cancellation regime (Taylor expansion in alpha).
        return (((beta - beta) + (2.0 + ((beta * 2.0) + (i * 4.0)))) / alpha) / 2.0;
    }
    final double den = (beta + (2.0 * i)) * (((2.0 * (i / beta)) + (2.0 / beta)) + 1.0);
    return ((beta / den) + 1.0) / 2.0;
}
def code(alpha, beta, i):
    """Herbie alternative: threshold -0.5 on the core ratio.

    Reformatted from an invalid single-line dump (inline statements after the
    colon do not parse); arithmetic is unchanged.
    """
    t_0 = (alpha + beta) + (2.0 * i)
    if ((((alpha + beta) * (beta - alpha)) / t_0) / (2.0 + t_0)) <= -0.5:
        # Near-cancellation regime (Taylor expansion in alpha).
        tmp = (((beta - beta) + (2.0 + ((beta * 2.0) + (i * 4.0)))) / alpha) / 2.0
    else:
        tmp = ((beta / ((beta + (2.0 * i)) * (((2.0 * (i / beta)) + (2.0 / beta)) + 1.0))) + 1.0) / 2.0
    return tmp
# Herbie alternative (threshold -0.5, 1/beta-expanded denominator).
# Reformatted from an invalid single-line dump; arithmetic is unchanged.
function code(alpha, beta, i)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    tmp = 0.0
    if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(2.0 + t_0)) <= -0.5)
        tmp = Float64(Float64(Float64(Float64(beta - beta) + Float64(2.0 + Float64(Float64(beta * 2.0) + Float64(i * 4.0)))) / alpha) / 2.0)
    else
        tmp = Float64(Float64(Float64(beta / Float64(Float64(beta + Float64(2.0 * i)) * Float64(Float64(Float64(2.0 * Float64(i / beta)) + Float64(2.0 / beta)) + 1.0))) + 1.0) / 2.0)
    end
    return tmp
end
% Herbie alternative (threshold -0.5, 1/beta-expanded denominator).
% Reformatted from an invalid single-line dump; arithmetic is unchanged.
function tmp_2 = code(alpha, beta, i)
    t_0 = (alpha + beta) + (2.0 * i);
    tmp = 0.0;
    if (((((alpha + beta) * (beta - alpha)) / t_0) / (2.0 + t_0)) <= -0.5)
        tmp = (((beta - beta) + (2.0 + ((beta * 2.0) + (i * 4.0)))) / alpha) / 2.0;
    else
        tmp = ((beta / ((beta + (2.0 * i)) * (((2.0 * (i / beta)) + (2.0 / beta)) + 1.0))) + 1.0) / 2.0;
    end
    tmp_2 = tmp;
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(N[(N[(beta - beta), $MachinePrecision] + N[(2.0 + N[(N[(beta * 2.0), $MachinePrecision] + N[(i * 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(beta / N[(N[(beta + N[(2.0 * i), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(2.0 * N[(i / beta), $MachinePrecision]), $MachinePrecision] + N[(2.0 / beta), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_0}}{2 + t\_0} \leq -0.5:\\
\;\;\;\;\frac{\frac{\left(\beta - \beta\right) + \left(2 + \left(\beta \cdot 2 + i \cdot 4\right)\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\beta}{\left(\beta + 2 \cdot i\right) \cdot \left(\left(2 \cdot \frac{i}{\beta} + \frac{2}{\beta}\right) + 1\right)} + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) < -0.5Initial program 6.5%
Simplified18.9%
Taylor expanded in alpha around inf 88.2%
if -0.5 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) Initial program 83.3%
Simplified100.0%
Taylor expanded in beta around inf 100.0%
Taylor expanded in alpha around 0 99.7%
associate-*r/99.7%
metadata-eval99.7%
Simplified99.7%
Final simplification97.1%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ beta (* 2.0 i))) (t_1 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_1) (+ 2.0 t_1)) -0.5)
(/ (/ (+ (- beta beta) (+ t_0 (+ 2.0 t_0))) alpha) 2.0)
(/
(+ (/ beta (* t_0 (+ (+ (* 2.0 (/ i beta)) (/ 2.0 beta)) 1.0))) 1.0)
2.0))))
/* Herbie alternative: threshold -0.5 on the core ratio; else-branch divides
 * beta by t_0 scaled by a 1/beta-expanded factor. */
double code(double alpha, double beta, double i) {
    const double t_0 = beta + (2.0 * i);
    const double t_1 = (alpha + beta) + (2.0 * i);
    const double probe = (((alpha + beta) * (beta - alpha)) / t_1) / (2.0 + t_1);
    if (probe <= -0.5) {
        return (((beta - beta) + (t_0 + (2.0 + t_0))) / alpha) / 2.0;
    }
    const double den = t_0 * (((2.0 * (i / beta)) + (2.0 / beta)) + 1.0);
    return ((beta / den) + 1.0) / 2.0;
}
! Herbie alternative: branch on the core ratio at -0.5.  Below the threshold
! use a Taylor-expanded numerator; otherwise divide beta by t_0 scaled by a
! 1/beta-expanded factor.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
! t_0 = beta + 2*i, t_1 = alpha + beta + 2*i.
t_0 = beta + (2.0d0 * i)
t_1 = (alpha + beta) + (2.0d0 * i)
if (((((alpha + beta) * (beta - alpha)) / t_1) / (2.0d0 + t_1)) <= (-0.5d0)) then
! Near-cancellation regime (Taylor expansion in alpha).
tmp = (((beta - beta) + (t_0 + (2.0d0 + t_0))) / alpha) / 2.0d0
else
tmp = ((beta / (t_0 * (((2.0d0 * (i / beta)) + (2.0d0 / beta)) + 1.0d0))) + 1.0d0) / 2.0d0
end if
code = tmp
end function
/** Herbie alternative: threshold -0.5 on the core ratio; the else branch
 *  divides beta by t_0 scaled by a 1/beta-expanded factor. */
public static double code(double alpha, double beta, double i) {
    final double t0 = beta + (2.0 * i);
    final double t1 = (alpha + beta) + (2.0 * i);
    final double probe = ((alpha + beta) * (beta - alpha) / t1) / (2.0 + t1);
    if (probe <= -0.5) {
        return (((beta - beta) + (t0 + (2.0 + t0))) / alpha) / 2.0;
    }
    return ((beta / (t0 * (((2.0 * (i / beta)) + (2.0 / beta)) + 1.0))) + 1.0) / 2.0;
}
def code(alpha, beta, i):
    """Herbie alternative: threshold -0.5; else branch uses t_0 = beta + 2*i.

    Reformatted from an invalid single-line dump; arithmetic is unchanged.
    """
    t_0 = beta + (2.0 * i)
    t_1 = (alpha + beta) + (2.0 * i)
    if ((((alpha + beta) * (beta - alpha)) / t_1) / (2.0 + t_1)) <= -0.5:
        # Near-cancellation regime (Taylor expansion in alpha).
        tmp = (((beta - beta) + (t_0 + (2.0 + t_0))) / alpha) / 2.0
    else:
        tmp = ((beta / (t_0 * (((2.0 * (i / beta)) + (2.0 / beta)) + 1.0))) + 1.0) / 2.0
    return tmp
# Herbie alternative (threshold -0.5, t_0 = beta + 2i form).
# Reformatted from an invalid single-line dump; arithmetic is unchanged.
function code(alpha, beta, i)
    t_0 = Float64(beta + Float64(2.0 * i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    tmp = 0.0
    if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_1) / Float64(2.0 + t_1)) <= -0.5)
        tmp = Float64(Float64(Float64(Float64(beta - beta) + Float64(t_0 + Float64(2.0 + t_0))) / alpha) / 2.0)
    else
        tmp = Float64(Float64(Float64(beta / Float64(t_0 * Float64(Float64(Float64(2.0 * Float64(i / beta)) + Float64(2.0 / beta)) + 1.0))) + 1.0) / 2.0)
    end
    return tmp
end
% Herbie alternative (threshold -0.5, t_0 = beta + 2*i form).
% Reformatted from an invalid single-line dump; arithmetic is unchanged.
function tmp_2 = code(alpha, beta, i)
    t_0 = beta + (2.0 * i);
    t_1 = (alpha + beta) + (2.0 * i);
    tmp = 0.0;
    if (((((alpha + beta) * (beta - alpha)) / t_1) / (2.0 + t_1)) <= -0.5)
        tmp = (((beta - beta) + (t_0 + (2.0 + t_0))) / alpha) / 2.0;
    else
        tmp = ((beta / (t_0 * (((2.0 * (i / beta)) + (2.0 / beta)) + 1.0))) + 1.0) / 2.0;
    end
    tmp_2 = tmp;
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(beta + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(2.0 + t$95$1), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(N[(N[(beta - beta), $MachinePrecision] + N[(t$95$0 + N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(beta / N[(t$95$0 * N[(N[(N[(2.0 * N[(i / beta), $MachinePrecision]), $MachinePrecision] + N[(2.0 / beta), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \beta + 2 \cdot i\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_1}}{2 + t\_1} \leq -0.5:\\
\;\;\;\;\frac{\frac{\left(\beta - \beta\right) + \left(t\_0 + \left(2 + t\_0\right)\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\beta}{t\_0 \cdot \left(\left(2 \cdot \frac{i}{\beta} + \frac{2}{\beta}\right) + 1\right)} + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) < -0.5Initial program 6.5%
Simplified18.9%
Taylor expanded in alpha around -inf 88.2%
if -0.5 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) Initial program 83.3%
Simplified100.0%
Taylor expanded in beta around inf 100.0%
Taylor expanded in alpha around 0 99.7%
associate-*r/99.7%
metadata-eval99.7%
Simplified99.7%
Final simplification97.1%
(FPCore (alpha beta i)
:precision binary64
(if (<= alpha 2.1e+35)
(/ (+ (/ 1.0 (+ (/ 2.0 beta) 1.0)) 1.0) 2.0)
(if (<= alpha 9.5e+63)
(/ (/ (+ 2.0 (* beta 2.0)) alpha) 2.0)
(if (<= alpha 1.3e+173)
(/
(+ (/ beta (* (+ beta (* 2.0 i)) (+ (* 2.0 (/ i beta)) 1.0))) 1.0)
2.0)
(/ (/ (+ 2.0 (* i 4.0)) alpha) 2.0)))))
/* Herbie alternative: regime selection purely on the magnitude of alpha,
 * with a different series approximation in each band. */
double code(double alpha, double beta, double i) {
    if (alpha <= 2.1e+35) {
        return ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0;
    }
    if (alpha <= 9.5e+63) {
        return ((2.0 + (beta * 2.0)) / alpha) / 2.0;
    }
    if (alpha <= 1.3e+173) {
        return ((beta / ((beta + (2.0 * i)) * ((2.0 * (i / beta)) + 1.0))) + 1.0) / 2.0;
    }
    return ((2.0 + (i * 4.0)) / alpha) / 2.0;
}
! Herbie alternative: regime selection purely on the magnitude of alpha,
! with a different series approximation in each band.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (alpha <= 2.1d+35) then
! Small/moderate alpha: limit form independent of alpha and i.
tmp = ((1.0d0 / ((2.0d0 / beta) + 1.0d0)) + 1.0d0) / 2.0d0
else if (alpha <= 9.5d+63) then
tmp = ((2.0d0 + (beta * 2.0d0)) / alpha) / 2.0d0
else if (alpha <= 1.3d+173) then
tmp = ((beta / ((beta + (2.0d0 * i)) * ((2.0d0 * (i / beta)) + 1.0d0))) + 1.0d0) / 2.0d0
else
! Huge alpha: leading term of the expansion around alpha = inf.
tmp = ((2.0d0 + (i * 4.0d0)) / alpha) / 2.0d0
end if
code = tmp
end function
/** Herbie alternative: regime selection purely on the magnitude of alpha,
 *  with a different series approximation in each band. */
public static double code(double alpha, double beta, double i) {
    if (alpha <= 2.1e+35) {
        return ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0;
    }
    if (alpha <= 9.5e+63) {
        return ((2.0 + (beta * 2.0)) / alpha) / 2.0;
    }
    if (alpha <= 1.3e+173) {
        return ((beta / ((beta + (2.0 * i)) * ((2.0 * (i / beta)) + 1.0))) + 1.0) / 2.0;
    }
    return ((2.0 + (i * 4.0)) / alpha) / 2.0;
}
def code(alpha, beta, i):
    """Herbie alternative: regime selection purely on the magnitude of alpha.

    Reformatted from an invalid single-line dump (inline if/elif/else bodies
    do not parse); arithmetic is unchanged.
    """
    tmp = 0
    if alpha <= 2.1e+35:
        tmp = ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0
    elif alpha <= 9.5e+63:
        tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0
    elif alpha <= 1.3e+173:
        tmp = ((beta / ((beta + (2.0 * i)) * ((2.0 * (i / beta)) + 1.0))) + 1.0) / 2.0
    else:
        tmp = ((2.0 + (i * 4.0)) / alpha) / 2.0
    return tmp
# Herbie alternative: regime selection purely on the magnitude of alpha.
# Reformatted from an invalid single-line dump; arithmetic is unchanged.
function code(alpha, beta, i)
    tmp = 0.0
    if (alpha <= 2.1e+35)
        tmp = Float64(Float64(Float64(1.0 / Float64(Float64(2.0 / beta) + 1.0)) + 1.0) / 2.0)
    elseif (alpha <= 9.5e+63)
        tmp = Float64(Float64(Float64(2.0 + Float64(beta * 2.0)) / alpha) / 2.0)
    elseif (alpha <= 1.3e+173)
        tmp = Float64(Float64(Float64(beta / Float64(Float64(beta + Float64(2.0 * i)) * Float64(Float64(2.0 * Float64(i / beta)) + 1.0))) + 1.0) / 2.0)
    else
        tmp = Float64(Float64(Float64(2.0 + Float64(i * 4.0)) / alpha) / 2.0)
    end
    return tmp
end
function tmp_2 = code(alpha, beta, i)
    % Herbie-generated piecewise approximation; the branch is chosen by the
    % magnitude of alpha and the result is written to the output directly.
    if (alpha <= 2.1e+35)
        tmp_2 = ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0;
    elseif (alpha <= 9.5e+63)
        tmp_2 = ((2.0 + (beta * 2.0)) / alpha) / 2.0;
    elseif (alpha <= 1.3e+173)
        tmp_2 = ((beta / ((beta + (2.0 * i)) * ((2.0 * (i / beta)) + 1.0))) + 1.0) / 2.0;
    else
        tmp_2 = ((2.0 + (i * 4.0)) / alpha) / 2.0;
    end
end
(* Herbie-generated piecewise form, branching on the magnitude of alpha; each N[..., $MachinePrecision] rounds the subexpression to machine precision. *)
code[alpha_, beta_, i_] := If[LessEqual[alpha, 2.1e+35], N[(N[(N[(1.0 / N[(N[(2.0 / beta), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[alpha, 9.5e+63], N[(N[(N[(2.0 + N[(beta * 2.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[alpha, 1.3e+173], N[(N[(N[(beta / N[(N[(beta + N[(2.0 * i), $MachinePrecision]), $MachinePrecision] * N[(N[(2.0 * N[(i / beta), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(2.0 + N[(i * 4.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 2.1 \cdot 10^{+35}:\\
\;\;\;\;\frac{\frac{1}{\frac{2}{\beta} + 1} + 1}{2}\\
\mathbf{elif}\;\alpha \leq 9.5 \cdot 10^{+63}:\\
\;\;\;\;\frac{\frac{2 + \beta \cdot 2}{\alpha}}{2}\\
\mathbf{elif}\;\alpha \leq 1.3 \cdot 10^{+173}:\\
\;\;\;\;\frac{\frac{\beta}{\left(\beta + 2 \cdot i\right) \cdot \left(2 \cdot \frac{i}{\beta} + 1\right)} + 1}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 + i \cdot 4}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 2.0999999999999999e35: Initial program 85.6%
Simplified 99.0%
Taylor expanded in beta around inf 99.0%
Taylor expanded in alpha around 0 98.0%
associate-*r/ 98.0%
metadata-eval 98.0%
Simplified 98.0%
Taylor expanded in i around 0 91.2%
associate-*r/ 91.2%
metadata-eval 91.2%
Simplified 91.2%
if 2.0999999999999999e35 < alpha < 9.5000000000000003e63: Initial program 3.4%
Simplified 13.1%
Taylor expanded in alpha around inf 90.4%
Taylor expanded in i around 0 90.5%
distribute-rgt1-in 90.5%
metadata-eval 90.5%
mul0-lft 90.5%
mul-1-neg 90.5%
Simplified 90.5%
if 9.5000000000000003e63 < alpha < 1.2999999999999999e173: Initial program 37.4%
Simplified 68.3%
Taylor expanded in beta around inf 65.4%
Taylor expanded in alpha around 0 63.6%
associate-*r/ 63.6%
metadata-eval 63.6%
Simplified 63.6%
Taylor expanded in i around inf 63.6%
if 1.2999999999999999e173 < alpha: Initial program 1.1%
Simplified 22.1%
Taylor expanded in alpha around inf 84.9%
Taylor expanded in beta around 0 70.6%
Final simplification 86.0%
;; Herbie-generated alternative (binary64): piecewise form selected by the magnitude of alpha.
(FPCore (alpha beta i)
 :precision binary64
 (if (<= alpha 1.7e+35)
 (/ (+ (/ 1.0 (+ (/ 2.0 beta) 1.0)) 1.0) 2.0)
 (if (<= alpha 1.28e+64)
 (/ (/ (+ 2.0 (* beta 2.0)) alpha) 2.0)
 (if (<= alpha 3.7e+171)
 (/
 (+ (/ beta (* (+ beta (* 2.0 i)) (+ (* 2.0 (/ i beta)) 1.0))) 1.0)
 2.0)
 (/
 (+
 (* 2.0 (/ beta alpha))
 (+ (* 4.0 (/ i alpha)) (* 2.0 (/ 1.0 alpha))))
 2.0)))))
/* Herbie-generated piecewise approximation of the jcobi/2 expression
 * (((alpha+beta)*(beta-alpha)/t)/(t+2) + 1)/2 with t = alpha+beta+2*i.
 * Guard clauses pick the regime by the magnitude of alpha. */
double code(double alpha, double beta, double i) {
    if (alpha <= 1.7e+35) {
        return ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0;
    }
    if (alpha <= 1.28e+64) {
        return ((2.0 + (beta * 2.0)) / alpha) / 2.0;
    }
    if (alpha <= 3.7e+171) {
        return ((beta / ((beta + (2.0 * i)) * ((2.0 * (i / beta)) + 1.0))) + 1.0) / 2.0;
    }
    /* Very large alpha: every term divided through by alpha. */
    return ((2.0 * (beta / alpha)) + ((4.0 * (i / alpha)) + (2.0 * (1.0 / alpha)))) / 2.0;
}
real(8) function code(alpha, beta, i)
    ! Herbie-generated piecewise approximation of the jcobi/2 expression,
    ! branching on the magnitude of alpha; result assigned directly to
    ! the function name instead of going through a temporary.
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    if (alpha <= 1.7d+35) then
        code = ((1.0d0 / ((2.0d0 / beta) + 1.0d0)) + 1.0d0) / 2.0d0
    else if (alpha <= 1.28d+64) then
        code = ((2.0d0 + (beta * 2.0d0)) / alpha) / 2.0d0
    else if (alpha <= 3.7d+171) then
        code = ((beta / ((beta + (2.0d0 * i)) * ((2.0d0 * (i / beta)) + 1.0d0))) + 1.0d0) / 2.0d0
    else
        ! Very large alpha: every term divided through by alpha.
        code = ((2.0d0 * (beta / alpha)) + ((4.0d0 * (i / alpha)) + (2.0d0 * (1.0d0 / alpha)))) / 2.0d0
    end if
end function
/**
 * Herbie-generated piecewise approximation of the jcobi/2 expression
 * (((alpha+beta)*(beta-alpha)/t)/(t+2) + 1)/2 with t = alpha+beta+2*i.
 * Guard clauses pick the regime by the magnitude of alpha.
 */
public static double code(double alpha, double beta, double i) {
    if (alpha <= 1.7e+35) {
        return ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0;
    }
    if (alpha <= 1.28e+64) {
        return ((2.0 + (beta * 2.0)) / alpha) / 2.0;
    }
    if (alpha <= 3.7e+171) {
        return ((beta / ((beta + (2.0 * i)) * ((2.0 * (i / beta)) + 1.0))) + 1.0) / 2.0;
    }
    // Very large alpha: every term divided through by alpha.
    return ((2.0 * (beta / alpha)) + ((4.0 * (i / alpha)) + (2.0 * (1.0 / alpha)))) / 2.0;
}
def code(alpha, beta, i):
    """Herbie-generated piecewise approximation of the jcobi/2 expression.

    Approximates (((alpha+beta)*(beta-alpha)/t)/(t+2) + 1)/2 with
    t = alpha + beta + 2*i; the branch is chosen by the magnitude of alpha.
    """
    if alpha <= 1.7e+35:
        return ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0
    if alpha <= 1.28e+64:
        return ((2.0 + (beta * 2.0)) / alpha) / 2.0
    if alpha <= 3.7e+171:
        return ((beta / ((beta + (2.0 * i)) * ((2.0 * (i / beta)) + 1.0))) + 1.0) / 2.0
    # Very large alpha: every term divided through by alpha.
    return ((2.0 * (beta / alpha)) + ((4.0 * (i / alpha)) + (2.0 * (1.0 / alpha)))) / 2.0
function code(alpha, beta, i)
    # Herbie-generated piecewise form; the explicit Float64 conversions
    # pin every intermediate to binary64 rounding, matching the original.
    if alpha <= 1.7e+35
        return Float64(Float64(Float64(1.0 / Float64(Float64(2.0 / beta) + 1.0)) + 1.0) / 2.0)
    elseif alpha <= 1.28e+64
        return Float64(Float64(Float64(2.0 + Float64(beta * 2.0)) / alpha) / 2.0)
    elseif alpha <= 3.7e+171
        return Float64(Float64(Float64(beta / Float64(Float64(beta + Float64(2.0 * i)) * Float64(Float64(2.0 * Float64(i / beta)) + 1.0))) + 1.0) / 2.0)
    else
        return Float64(Float64(Float64(2.0 * Float64(beta / alpha)) + Float64(Float64(4.0 * Float64(i / alpha)) + Float64(2.0 * Float64(1.0 / alpha)))) / 2.0)
    end
end
function tmp_2 = code(alpha, beta, i)
    % Herbie-generated piecewise approximation; the branch is chosen by the
    % magnitude of alpha and the result is written to the output directly.
    if (alpha <= 1.7e+35)
        tmp_2 = ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0;
    elseif (alpha <= 1.28e+64)
        tmp_2 = ((2.0 + (beta * 2.0)) / alpha) / 2.0;
    elseif (alpha <= 3.7e+171)
        tmp_2 = ((beta / ((beta + (2.0 * i)) * ((2.0 * (i / beta)) + 1.0))) + 1.0) / 2.0;
    else
        tmp_2 = ((2.0 * (beta / alpha)) + ((4.0 * (i / alpha)) + (2.0 * (1.0 / alpha)))) / 2.0;
    end
end
(* Herbie-generated piecewise form, branching on the magnitude of alpha; each N[..., $MachinePrecision] rounds the subexpression to machine precision. *)
code[alpha_, beta_, i_] := If[LessEqual[alpha, 1.7e+35], N[(N[(N[(1.0 / N[(N[(2.0 / beta), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[alpha, 1.28e+64], N[(N[(N[(2.0 + N[(beta * 2.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[alpha, 3.7e+171], N[(N[(N[(beta / N[(N[(beta + N[(2.0 * i), $MachinePrecision]), $MachinePrecision] * N[(N[(2.0 * N[(i / beta), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(2.0 * N[(beta / alpha), $MachinePrecision]), $MachinePrecision] + N[(N[(4.0 * N[(i / alpha), $MachinePrecision]), $MachinePrecision] + N[(2.0 * N[(1.0 / alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1.7 \cdot 10^{+35}:\\
\;\;\;\;\frac{\frac{1}{\frac{2}{\beta} + 1} + 1}{2}\\
\mathbf{elif}\;\alpha \leq 1.28 \cdot 10^{+64}:\\
\;\;\;\;\frac{\frac{2 + \beta \cdot 2}{\alpha}}{2}\\
\mathbf{elif}\;\alpha \leq 3.7 \cdot 10^{+171}:\\
\;\;\;\;\frac{\frac{\beta}{\left(\beta + 2 \cdot i\right) \cdot \left(2 \cdot \frac{i}{\beta} + 1\right)} + 1}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{2 \cdot \frac{\beta}{\alpha} + \left(4 \cdot \frac{i}{\alpha} + 2 \cdot \frac{1}{\alpha}\right)}{2}\\
\end{array}
\end{array}
if alpha < 1.7000000000000001e35: Initial program 85.6%
Simplified 99.0%
Taylor expanded in beta around inf 99.0%
Taylor expanded in alpha around 0 98.0%
associate-*r/ 98.0%
metadata-eval 98.0%
Simplified 98.0%
Taylor expanded in i around 0 91.2%
associate-*r/ 91.2%
metadata-eval 91.2%
Simplified 91.2%
if 1.7000000000000001e35 < alpha < 1.28000000000000004e64: Initial program 3.4%
Simplified 13.1%
Taylor expanded in alpha around inf 90.4%
Taylor expanded in i around 0 90.5%
distribute-rgt1-in 90.5%
metadata-eval 90.5%
mul0-lft 90.5%
mul-1-neg 90.5%
Simplified 90.5%
if 1.28000000000000004e64 < alpha < 3.69999999999999998e171: Initial program 37.4%
Simplified 68.3%
Taylor expanded in beta around inf 65.4%
Taylor expanded in alpha around 0 63.6%
associate-*r/ 63.6%
metadata-eval 63.6%
Simplified 63.6%
Taylor expanded in i around inf 63.6%
if 3.69999999999999998e171 < alpha: Initial program 1.1%
Simplified 22.1%
Taylor expanded in alpha around inf 84.9%
Taylor expanded in beta around 0 85.0%
Final simplification 88.2%
;; Herbie-generated alternative (binary64): single split on alpha at 1.85e35.
(FPCore (alpha beta i) :precision binary64 (if (<= alpha 1.85e+35) (/ (+ (/ 1.0 (+ (/ 2.0 beta) 1.0)) 1.0) 2.0) (/ (/ (+ 2.0 (* i 4.0)) alpha) 2.0)))
/* Herbie-generated alternative with a single regime split on alpha. */
double code(double alpha, double beta, double i) {
    if (alpha <= 1.85e+35) {
        return ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0;
    }
    /* Large alpha: divide through by alpha. */
    return ((2.0 + (i * 4.0)) / alpha) / 2.0;
}
real(8) function code(alpha, beta, i)
    ! Herbie-generated alternative with a single regime split on alpha;
    ! the result is assigned to the function name directly.
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    if (alpha <= 1.85d+35) then
        code = ((1.0d0 / ((2.0d0 / beta) + 1.0d0)) + 1.0d0) / 2.0d0
    else
        code = ((2.0d0 + (i * 4.0d0)) / alpha) / 2.0d0
    end if
end function
/** Herbie-generated alternative with a single regime split on alpha. */
public static double code(double alpha, double beta, double i) {
    if (alpha <= 1.85e+35) {
        return ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0;
    }
    // Large alpha: divide through by alpha.
    return ((2.0 + (i * 4.0)) / alpha) / 2.0;
}
def code(alpha, beta, i):
    """Herbie-generated alternative with a single regime split on alpha."""
    if alpha <= 1.85e+35:
        return ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0
    # Large alpha: divide through by alpha.
    return ((2.0 + (i * 4.0)) / alpha) / 2.0
function code(alpha, beta, i)
    # Herbie-generated alternative with a single regime split on alpha;
    # Float64 conversions pin each intermediate to binary64 rounding.
    if alpha <= 1.85e+35
        return Float64(Float64(Float64(1.0 / Float64(Float64(2.0 / beta) + 1.0)) + 1.0) / 2.0)
    end
    return Float64(Float64(Float64(2.0 + Float64(i * 4.0)) / alpha) / 2.0)
end
function tmp_2 = code(alpha, beta, i)
    % Herbie-generated alternative with a single regime split on alpha.
    if (alpha <= 1.85e+35)
        tmp_2 = ((1.0 / ((2.0 / beta) + 1.0)) + 1.0) / 2.0;
    else
        tmp_2 = ((2.0 + (i * 4.0)) / alpha) / 2.0;
    end
end
(* Herbie-generated alternative with a single split on alpha; N[..., $MachinePrecision] rounds each subexpression to machine precision. *)
code[alpha_, beta_, i_] := If[LessEqual[alpha, 1.85e+35], N[(N[(N[(1.0 / N[(N[(2.0 / beta), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(2.0 + N[(i * 4.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1.85 \cdot 10^{+35}:\\
\;\;\;\;\frac{\frac{1}{\frac{2}{\beta} + 1} + 1}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 + i \cdot 4}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 1.85e35: Initial program 85.6%
Simplified 99.0%
Taylor expanded in beta around inf 99.0%
Taylor expanded in alpha around 0 98.0%
associate-*r/ 98.0%
metadata-eval 98.0%
Simplified 98.0%
Taylor expanded in i around 0 91.2%
associate-*r/ 91.2%
metadata-eval 91.2%
Simplified 91.2%
if 1.85e35 < alpha: Initial program 11.6%
Simplified 33.6%
Taylor expanded in alpha around inf 72.7%
Taylor expanded in beta around 0 60.5%
Final simplification 83.1%
;; Herbie-generated alternative (binary64): constant 1/2 for small alpha, 1/alpha expansion otherwise.
(FPCore (alpha beta i) :precision binary64 (if (<= alpha 4.8e+23) 0.5 (/ (/ (+ 2.0 (* i 4.0)) alpha) 2.0)))
/* Herbie-generated alternative: constant 1/2 for small alpha,
 * otherwise the expansion in 1/alpha. */
double code(double alpha, double beta, double i) {
    if (alpha <= 4.8e+23) {
        return 0.5;
    }
    return ((2.0 + (i * 4.0)) / alpha) / 2.0;
}
real(8) function code(alpha, beta, i)
    ! Herbie-generated alternative: constant 1/2 for small alpha,
    ! otherwise the expansion in 1/alpha; assigned directly, no temporary.
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    if (alpha <= 4.8d+23) then
        code = 0.5d0
    else
        code = ((2.0d0 + (i * 4.0d0)) / alpha) / 2.0d0
    end if
end function
/** Herbie-generated alternative: constant 1/2 for small alpha, else the 1/alpha expansion. */
public static double code(double alpha, double beta, double i) {
    if (alpha <= 4.8e+23) {
        return 0.5;
    }
    return ((2.0 + (i * 4.0)) / alpha) / 2.0;
}
def code(alpha, beta, i):
    """Herbie-generated alternative: 0.5 for small alpha, else the 1/alpha expansion."""
    if alpha <= 4.8e+23:
        return 0.5
    return ((2.0 + (i * 4.0)) / alpha) / 2.0
function code(alpha, beta, i)
    # Herbie-generated alternative: 0.5 for small alpha, else 1/alpha expansion.
    if alpha <= 4.8e+23
        return 0.5
    end
    return Float64(Float64(Float64(2.0 + Float64(i * 4.0)) / alpha) / 2.0)
end
function tmp_2 = code(alpha, beta, i)
    % Herbie-generated alternative: 0.5 for small alpha, else 1/alpha expansion.
    if (alpha <= 4.8e+23)
        tmp_2 = 0.5;
    else
        tmp_2 = ((2.0 + (i * 4.0)) / alpha) / 2.0;
    end
end
(* Herbie-generated alternative: constant 0.5 for small alpha, else the 1/alpha expansion at machine precision. *)
code[alpha_, beta_, i_] := If[LessEqual[alpha, 4.8e+23], 0.5, N[(N[(N[(2.0 + N[(i * 4.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 4.8 \cdot 10^{+23}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 + i \cdot 4}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 4.8e23: Initial program 85.9%
Simplified 99.5%
Taylor expanded in i around inf 81.1%
if 4.8e23 < alpha: Initial program 12.8%
Simplified 34.1%
Taylor expanded in alpha around inf 72.2%
Taylor expanded in beta around 0 60.4%
Final simplification 75.4%
;; Herbie-generated alternative (binary64): two constants selected by the size of beta.
(FPCore (alpha beta i) :precision binary64 (if (<= beta 1.16e+47) 0.5 1.0))
/* Herbie-generated alternative: two constants selected by the size of beta. */
double code(double alpha, double beta, double i) {
    return (beta <= 1.16e+47) ? 0.5 : 1.0;
}
real(8) function code(alpha, beta, i)
    ! Herbie-generated alternative: two constants selected by the size of
    ! beta, expressed with the MERGE intrinsic instead of an if/else.
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    code = merge(0.5d0, 1.0d0, beta <= 1.16d+47)
end function
/** Herbie-generated alternative: two constants selected by the size of beta. */
public static double code(double alpha, double beta, double i) {
    return (beta <= 1.16e+47) ? 0.5 : 1.0;
}
def code(alpha, beta, i):
    """Herbie-generated alternative: 0.5 when beta <= 1.16e47, else 1.0."""
    return 0.5 if beta <= 1.16e+47 else 1.0
function code(alpha, beta, i)
    # Herbie-generated alternative: two constants selected by the size of beta.
    return beta <= 1.16e+47 ? 0.5 : 1.0
end
function tmp_2 = code(alpha, beta, i)
    % Herbie-generated alternative: two constants selected by the size of beta.
    if (beta <= 1.16e+47)
        tmp_2 = 0.5;
    else
        tmp_2 = 1.0;
    end
end
(* Herbie-generated alternative: two constants selected by the size of beta. *)
code[alpha_, beta_, i_] := If[LessEqual[beta, 1.16e+47], 0.5, 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 1.16 \cdot 10^{+47}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if beta < 1.1600000000000001e47: Initial program 75.3%
Simplified 78.4%
Taylor expanded in i around inf 74.1%
if 1.1600000000000001e47 < beta: Initial program 35.2%
Simplified 92.0%
Taylor expanded in beta around inf 73.4%
Final simplification 73.9%
;; Fully-simplified Herbie alternative (binary64): the expression collapses to the constant 1/2.
(FPCore (alpha beta i) :precision binary64 0.5)
/* Fully-simplified Herbie alternative: the expression collapses to the
   constant 1/2; all three arguments are ignored. */
double code(double alpha, double beta, double i) {
return 0.5;
}
! Fully-simplified Herbie alternative: always returns 0.5; arguments unused.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = 0.5d0
end function
/** Fully-simplified Herbie alternative: always returns the constant 0.5. */
public static double code(double alpha, double beta, double i) {
return 0.5;
}
def code(alpha, beta, i):
    """Fully-simplified Herbie alternative: always 0.5; arguments are ignored."""
    return 0.5
function code(alpha, beta, i)
    # Fully-simplified Herbie alternative: always 0.5; arguments are ignored.
    return 0.5
end
function tmp = code(alpha, beta, i)
    % Fully-simplified Herbie alternative: always 0.5; arguments are ignored.
    tmp = 0.5;
end
(* Fully-simplified Herbie alternative: always the constant 0.5. *)
code[alpha_, beta_, i_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 65.9%
Simplified 81.6%
Taylor expanded in i around inf 64.7%
Final simplification 64.7%
herbie shell --seed 2024068
;; Original input program for "Octave 3.8, jcobi/2": computes
;; (((alpha+beta)*(beta-alpha)/t)/(t+2) + 1)/2 with t = alpha+beta+2*i,
;; under the precondition alpha > -1, beta > -1, i > 0.
(FPCore (alpha beta i)
 :name "Octave 3.8, jcobi/2"
 :precision binary64
 :pre (and (and (> alpha -1.0) (> beta -1.0)) (> i 0.0))
 (/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) (+ (+ alpha beta) (* 2.0 i))) (+ (+ (+ alpha beta) (* 2.0 i)) 2.0)) 1.0) 2.0))