
(FPCore (alpha beta i) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))) (/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ t_0 2.0)) 1.0) 2.0)))
/* Initial program: ((alpha+beta)*(beta-alpha)/t) / (t+2), halved after adding 1,
 * where t = (alpha + beta) + 2*i.  All arithmetic in IEEE binary64. */
double code(double alpha, double beta, double i) {
    double sum = alpha + beta;              /* shared subterm of numerator and t */
    double t = sum + 2.0 * i;
    double ratio = (sum * (beta - alpha)) / t / (t + 2.0);
    return (ratio + 1.0) / 2.0;
}
! Evaluates (((alpha+beta)*(beta-alpha)/t_0)/(t_0+2) + 1)/2 in double precision,
! where t_0 = (alpha+beta) + 2*i.  Direct transcription of the FPCore spec above.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
! NOTE(review): 'i' is a real argument despite the integer-style name.
real(8), intent (in) :: i
real(8) :: t_0
! Shared denominator term: also appears (shifted by 2) in the second division.
t_0 = (alpha + beta) + (2.0d0 * i)
code = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0d0)) + 1.0d0) / 2.0d0
end function
/**
 * Evaluates (((alpha+beta)*(beta-alpha)/t)/(t+2) + 1)/2 with t = (alpha+beta)+2*i,
 * entirely in double precision. Transcribed from the FPCore specification.
 */
public static double code(double alpha, double beta, double i) {
    final double sum = alpha + beta;          // shared by numerator and t
    final double t = sum + 2.0 * i;
    final double ratio = (sum * (beta - alpha)) / t / (t + 2.0);
    return (ratio + 1.0) / 2.0;
}
def code(alpha, beta, i):
    """Evaluate (((alpha+beta)*(beta-alpha)/t0)/(t0+2) + 1)/2 with t0 = (alpha+beta)+2*i.

    The generated source had the whole body collapsed onto the `def` line,
    which is a SyntaxError in Python; this restores the required suite layout
    without changing any operation or its order.
    """
    t_0 = (alpha + beta) + (2.0 * i)
    return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0
# Evaluate (((alpha+beta)*(beta-alpha)/t0)/(t0+2) + 1)/2 with t0 = (alpha+beta)+2*i,
# rounding every intermediate to Float64. The generated source had all statements
# juxtaposed on one line without separators, which Julia does not parse; this
# restores newline-separated statements with no change to the computation.
function code(alpha, beta, i)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(t_0 + 2.0)) + 1.0) / 2.0)
end
% Evaluate (((alpha+beta)*(beta-alpha)/t0)/(t0+2) + 1)/2 with t0 = (alpha+beta)+2*i.
% The generated source ran the statements on the function-declaration line, which
% MATLAB does not accept; this restores one statement per line, logic unchanged.
function tmp = code(alpha, beta, i)
    t_0 = (alpha + beta) + (2.0 * i);
    tmp = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\frac{\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{t_0 + 2} + 1}{2}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta i) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))) (/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ t_0 2.0)) 1.0) 2.0)))
double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
t_0 = (alpha + beta) + (2.0d0 * i)
code = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0d0)) + 1.0d0) / 2.0d0
end function
public static double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
}
def code(alpha, beta, i): t_0 = (alpha + beta) + (2.0 * i) return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(t_0 + 2.0)) + 1.0) / 2.0) end
function tmp = code(alpha, beta, i) t_0 = (alpha + beta) + (2.0 * i); tmp = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0; end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\frac{\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{t_0 + 2} + 1}{2}
\end{array}
\end{array}
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ beta (fma 2.0 i 2.0)))
(t_1 (/ (* alpha alpha) i))
(t_2 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_2) (+ 2.0 t_2)) -0.5)
(/
(+
(fma
-2.0
(/ (+ beta t_0) t_1)
(+
(/ beta alpha)
(fma
-2.0
(/ (fma 2.0 i beta) t_1)
(fma
2.0
(/ i alpha)
(/
(- (- (- beta) (fma 2.0 i 2.0)) beta)
(/ (* alpha alpha) t_0))))))
(/ t_0 alpha))
2.0)
(/
(+
(*
(/ (- beta alpha) (+ (+ alpha beta) (fma 2.0 i 2.0)))
(/ (+ alpha beta) (fma 2.0 i (+ alpha beta))))
1.0)
2.0))))
/* Herbie alternative 1: two-regime rewrite of the initial program.
 * The branch condition re-evaluates the original expression's core ratio;
 * when it is <= -0.5 a series-expanded form (presumably from the "Taylor
 * expanded in alpha around inf" step in the trace below — TODO confirm)
 * is used, otherwise an fma-based regrouping of the original. */
double code(double alpha, double beta, double i) {
double t_0 = beta + fma(2.0, i, 2.0);        /* beta + 2i + 2 */
double t_1 = (alpha * alpha) / i;            /* only used in the first regime */
double t_2 = (alpha + beta) + (2.0 * i);     /* denominator of the original form */
double tmp;
/* Same quantity as the original program's inner ratio, used as regime selector. */
if (((((alpha + beta) * (beta - alpha)) / t_2) / (2.0 + t_2)) <= -0.5) {
tmp = (fma(-2.0, ((beta + t_0) / t_1), ((beta / alpha) + fma(-2.0, (fma(2.0, i, beta) / t_1), fma(2.0, (i / alpha), (((-beta - fma(2.0, i, 2.0)) - beta) / ((alpha * alpha) / t_0)))))) + (t_0 / alpha)) / 2.0;
} else {
/* Regrouped original: ratio split as (beta-alpha)/(t+2) * (alpha+beta)/t. */
tmp = ((((beta - alpha) / ((alpha + beta) + fma(2.0, i, 2.0))) * ((alpha + beta) / fma(2.0, i, (alpha + beta)))) + 1.0) / 2.0;
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(beta + fma(2.0, i, 2.0)) t_1 = Float64(Float64(alpha * alpha) / i) t_2 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) tmp = 0.0 if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_2) / Float64(2.0 + t_2)) <= -0.5) tmp = Float64(Float64(fma(-2.0, Float64(Float64(beta + t_0) / t_1), Float64(Float64(beta / alpha) + fma(-2.0, Float64(fma(2.0, i, beta) / t_1), fma(2.0, Float64(i / alpha), Float64(Float64(Float64(Float64(-beta) - fma(2.0, i, 2.0)) - beta) / Float64(Float64(alpha * alpha) / t_0)))))) + Float64(t_0 / alpha)) / 2.0); else tmp = Float64(Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + fma(2.0, i, 2.0))) * Float64(Float64(alpha + beta) / fma(2.0, i, Float64(alpha + beta)))) + 1.0) / 2.0); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(beta + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha * alpha), $MachinePrecision] / i), $MachinePrecision]}, Block[{t$95$2 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision] / N[(2.0 + t$95$2), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(N[(-2.0 * N[(N[(beta + t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision] + N[(N[(beta / alpha), $MachinePrecision] + N[(-2.0 * N[(N[(2.0 * i + beta), $MachinePrecision] / t$95$1), $MachinePrecision] + N[(2.0 * N[(i / alpha), $MachinePrecision] + N[(N[(N[((-beta) - N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision] - beta), $MachinePrecision] / N[(N[(alpha * alpha), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(t$95$0 / alpha), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] / N[(2.0 * i + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \beta + \mathsf{fma}\left(2, i, 2\right)\\
t_1 := \frac{\alpha \cdot \alpha}{i}\\
t_2 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_2}}{2 + t_2} \leq -0.5:\\
\;\;\;\;\frac{\mathsf{fma}\left(-2, \frac{\beta + t_0}{t_1}, \frac{\beta}{\alpha} + \mathsf{fma}\left(-2, \frac{\mathsf{fma}\left(2, i, \beta\right)}{t_1}, \mathsf{fma}\left(2, \frac{i}{\alpha}, \frac{\left(\left(-\beta\right) - \mathsf{fma}\left(2, i, 2\right)\right) - \beta}{\frac{\alpha \cdot \alpha}{t_0}}\right)\right)\right) + \frac{t_0}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + \mathsf{fma}\left(2, i, 2\right)} \cdot \frac{\alpha + \beta}{\mathsf{fma}\left(2, i, \alpha + \beta\right)} + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) < -0.5Initial program 3.6%
associate-/l/2.9%
*-commutative2.9%
times-frac14.9%
fma-def14.9%
associate-+l+14.9%
fma-def14.9%
associate-+l+14.9%
+-commutative14.9%
fma-def14.9%
Simplified14.9%
Taylor expanded in alpha around inf 77.5%
Simplified92.6%
if -0.5 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) Initial program 83.4%
associate-/l/82.8%
*-commutative82.8%
times-frac100.0%
associate-+l+100.0%
fma-def100.0%
+-commutative100.0%
fma-def100.0%
Simplified100.0%
Final simplification98.3%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ beta (fma 2.0 i 2.0))) (t_1 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_1) (+ 2.0 t_1)) -0.5)
(/
(+
(* (/ (- (- (* i -2.0) t_0) beta) alpha) (/ (- t_0 (* i -2.0)) alpha))
(+
(fma 2.0 (/ i (/ (* alpha alpha) t_0)) (/ beta alpha))
(- (/ t_0 alpha) (* -2.0 (/ i alpha)))))
2.0)
(/
(+
(*
(/ (- beta alpha) (+ (+ alpha beta) (fma 2.0 i 2.0)))
(/ (+ alpha beta) (fma 2.0 i (+ alpha beta))))
1.0)
2.0))))
/* Herbie alternative 2: like alternative 1 but with the first regime
 * re-associated (trace: "Taylor expanded in beta around 0", then
 * "in alpha around -inf").  Branch selector is the original core ratio. */
double code(double alpha, double beta, double i) {
double t_0 = beta + fma(2.0, i, 2.0);        /* beta + 2i + 2 */
double t_1 = (alpha + beta) + (2.0 * i);     /* original denominator */
double tmp;
if (((((alpha + beta) * (beta - alpha)) / t_1) / (2.0 + t_1)) <= -0.5) {
/* Series form; order of the +/- terms is significant for accuracy — keep as-is. */
tmp = ((((((i * -2.0) - t_0) - beta) / alpha) * ((t_0 - (i * -2.0)) / alpha)) + (fma(2.0, (i / ((alpha * alpha) / t_0)), (beta / alpha)) + ((t_0 / alpha) - (-2.0 * (i / alpha))))) / 2.0;
} else {
/* fma-regrouped original, identical to alternative 1's second regime. */
tmp = ((((beta - alpha) / ((alpha + beta) + fma(2.0, i, 2.0))) * ((alpha + beta) / fma(2.0, i, (alpha + beta)))) + 1.0) / 2.0;
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(beta + fma(2.0, i, 2.0)) t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) tmp = 0.0 if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_1) / Float64(2.0 + t_1)) <= -0.5) tmp = Float64(Float64(Float64(Float64(Float64(Float64(Float64(i * -2.0) - t_0) - beta) / alpha) * Float64(Float64(t_0 - Float64(i * -2.0)) / alpha)) + Float64(fma(2.0, Float64(i / Float64(Float64(alpha * alpha) / t_0)), Float64(beta / alpha)) + Float64(Float64(t_0 / alpha) - Float64(-2.0 * Float64(i / alpha))))) / 2.0); else tmp = Float64(Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + fma(2.0, i, 2.0))) * Float64(Float64(alpha + beta) / fma(2.0, i, Float64(alpha + beta)))) + 1.0) / 2.0); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(beta + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(2.0 + t$95$1), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(N[(N[(N[(N[(N[(i * -2.0), $MachinePrecision] - t$95$0), $MachinePrecision] - beta), $MachinePrecision] / alpha), $MachinePrecision] * N[(N[(t$95$0 - N[(i * -2.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 * N[(i / N[(N[(alpha * alpha), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] + N[(beta / alpha), $MachinePrecision]), $MachinePrecision] + N[(N[(t$95$0 / alpha), $MachinePrecision] - N[(-2.0 * N[(i / alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] / N[(2.0 * i + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \beta + \mathsf{fma}\left(2, i, 2\right)\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_1}}{2 + t_1} \leq -0.5:\\
\;\;\;\;\frac{\frac{\left(i \cdot -2 - t_0\right) - \beta}{\alpha} \cdot \frac{t_0 - i \cdot -2}{\alpha} + \left(\mathsf{fma}\left(2, \frac{i}{\frac{\alpha \cdot \alpha}{t_0}}, \frac{\beta}{\alpha}\right) + \left(\frac{t_0}{\alpha} - -2 \cdot \frac{i}{\alpha}\right)\right)}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + \mathsf{fma}\left(2, i, 2\right)} \cdot \frac{\alpha + \beta}{\mathsf{fma}\left(2, i, \alpha + \beta\right)} + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) < -0.5Initial program 3.6%
associate-/l/2.9%
*-commutative2.9%
times-frac14.9%
associate-+l+14.9%
fma-def14.9%
+-commutative14.9%
fma-def14.9%
Simplified14.9%
Taylor expanded in beta around 0 14.9%
Taylor expanded in alpha around -inf 77.5%
associate--l+77.5%
Simplified92.5%
if -0.5 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) Initial program 83.4%
associate-/l/82.8%
*-commutative82.8%
times-frac100.0%
associate-+l+100.0%
fma-def100.0%
+-commutative100.0%
fma-def100.0%
Simplified100.0%
Final simplification98.3%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ 2.0 t_0)) -0.5)
(/ (/ (- beta (- (* i -2.0) (+ beta (+ 2.0 (* 2.0 i))))) alpha) 2.0)
(/
(+
(*
(/ (- beta alpha) (+ (+ alpha beta) (fma 2.0 i 2.0)))
(/ (+ alpha beta) (fma 2.0 i (+ alpha beta))))
1.0)
2.0))))
/* Herbie alternative 3: first regime collapsed to a single linear form
 * (2*beta + 2 + 4*i)/alpha / 2, written in the exact grouping Herbie emitted
 * (the grouping affects rounding — do not re-associate). */
double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
double tmp;
/* Selector: the original program's core ratio. */
if (((((alpha + beta) * (beta - alpha)) / t_0) / (2.0 + t_0)) <= -0.5) {
tmp = ((beta - ((i * -2.0) - (beta + (2.0 + (2.0 * i))))) / alpha) / 2.0;
} else {
/* fma-regrouped original form. */
tmp = ((((beta - alpha) / ((alpha + beta) + fma(2.0, i, 2.0))) * ((alpha + beta) / fma(2.0, i, (alpha + beta)))) + 1.0) / 2.0;
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) tmp = 0.0 if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(2.0 + t_0)) <= -0.5) tmp = Float64(Float64(Float64(beta - Float64(Float64(i * -2.0) - Float64(beta + Float64(2.0 + Float64(2.0 * i))))) / alpha) / 2.0); else tmp = Float64(Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + fma(2.0, i, 2.0))) * Float64(Float64(alpha + beta) / fma(2.0, i, Float64(alpha + beta)))) + 1.0) / 2.0); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(N[(beta - N[(N[(i * -2.0), $MachinePrecision] - N[(beta + N[(2.0 + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] / N[(2.0 * i + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{2 + t_0} \leq -0.5:\\
\;\;\;\;\frac{\frac{\beta - \left(i \cdot -2 - \left(\beta + \left(2 + 2 \cdot i\right)\right)\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + \mathsf{fma}\left(2, i, 2\right)} \cdot \frac{\alpha + \beta}{\mathsf{fma}\left(2, i, \alpha + \beta\right)} + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) < -0.5Initial program 3.6%
associate-/l/2.9%
*-commutative2.9%
times-frac14.9%
associate-+l+14.9%
fma-def14.9%
+-commutative14.9%
fma-def14.9%
Simplified14.9%
Taylor expanded in beta around 0 14.9%
Taylor expanded in alpha around -inf 91.5%
if -0.5 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) Initial program 83.4%
associate-/l/82.8%
*-commutative82.8%
times-frac100.0%
associate-+l+100.0%
fma-def100.0%
+-commutative100.0%
fma-def100.0%
Simplified100.0%
Final simplification98.1%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) (* 2.0 i))) (t_1 (+ 2.0 t_0)))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) t_1) -0.5)
(/ (/ (- beta (- (* i -2.0) (+ beta (+ 2.0 (* 2.0 i))))) alpha) 2.0)
(/ (+ 1.0 (/ (- beta alpha) t_1)) 2.0))))
/* Herbie alternative 4: same selector ratio as the original program; a
 * linear-in-1/alpha form on the steep regime, a simplified two-term form
 * otherwise. Bit-identical to the generated version. */
double code(double alpha, double beta, double i) {
    double t_0 = (alpha + beta) + (2.0 * i);
    double t_1 = 2.0 + t_0;
    double selector = (((alpha + beta) * (beta - alpha)) / t_0) / t_1;
    if (selector <= -0.5) {
        return ((beta - ((i * -2.0) - (beta + (2.0 + (2.0 * i))))) / alpha) / 2.0;
    }
    return (1.0 + ((beta - alpha) / t_1)) / 2.0;
}
! Herbie alternative 4 (Fortran): two regimes selected on the original core
! ratio; grouping of every expression is significant for rounding — keep as-is.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
! NOTE(review): 'i' is a real argument despite the integer-style name.
real(8), intent (in) :: i
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = (alpha + beta) + (2.0d0 * i)
t_1 = 2.0d0 + t_0
! Selector: the original program's inner ratio.
if (((((alpha + beta) * (beta - alpha)) / t_0) / t_1) <= (-0.5d0)) then
tmp = ((beta - ((i * (-2.0d0)) - (beta + (2.0d0 + (2.0d0 * i))))) / alpha) / 2.0d0
else
tmp = (1.0d0 + ((beta - alpha) / t_1)) / 2.0d0
end if
code = tmp
end function
/**
 * Herbie alternative 4: two-regime evaluation selected on the original
 * program's core ratio. Behaviorally identical to the generated version.
 */
public static double code(double alpha, double beta, double i) {
    final double t0 = (alpha + beta) + (2.0 * i);
    final double t1 = 2.0 + t0;
    final double selector = (((alpha + beta) * (beta - alpha)) / t0) / t1;
    if (selector <= -0.5) {
        return ((beta - ((i * -2.0) - (beta + (2.0 + (2.0 * i))))) / alpha) / 2.0;
    }
    return (1.0 + ((beta - alpha) / t1)) / 2.0;
}
def code(alpha, beta, i):
    """Herbie alternative 4: two-regime evaluation keyed on the original ratio.

    The generated source had the whole body collapsed onto one line, which is
    a SyntaxError in Python; this restores the suite layout without changing
    any operation or its order.
    """
    t_0 = (alpha + beta) + (2.0 * i)
    t_1 = 2.0 + t_0
    tmp = 0
    if ((((alpha + beta) * (beta - alpha)) / t_0) / t_1) <= -0.5:
        tmp = ((beta - ((i * -2.0) - (beta + (2.0 + (2.0 * i))))) / alpha) / 2.0
    else:
        tmp = (1.0 + ((beta - alpha) / t_1)) / 2.0
    return tmp
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) t_1 = Float64(2.0 + t_0) tmp = 0.0 if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / t_1) <= -0.5) tmp = Float64(Float64(Float64(beta - Float64(Float64(i * -2.0) - Float64(beta + Float64(2.0 + Float64(2.0 * i))))) / alpha) / 2.0); else tmp = Float64(Float64(1.0 + Float64(Float64(beta - alpha) / t_1)) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta, i) t_0 = (alpha + beta) + (2.0 * i); t_1 = 2.0 + t_0; tmp = 0.0; if (((((alpha + beta) * (beta - alpha)) / t_0) / t_1) <= -0.5) tmp = ((beta - ((i * -2.0) - (beta + (2.0 + (2.0 * i))))) / alpha) / 2.0; else tmp = (1.0 + ((beta - alpha) / t_1)) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 + t$95$0), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision], -0.5], N[(N[(N[(beta - N[(N[(i * -2.0), $MachinePrecision] - N[(beta + N[(2.0 + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[(N[(beta - alpha), $MachinePrecision] / t$95$1), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_1 := 2 + t_0\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{t_1} \leq -0.5:\\
\;\;\;\;\frac{\frac{\beta - \left(i \cdot -2 - \left(\beta + \left(2 + 2 \cdot i\right)\right)\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + \frac{\beta - \alpha}{t_1}}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) < -0.5Initial program 3.6%
associate-/l/2.9%
*-commutative2.9%
times-frac14.9%
associate-+l+14.9%
fma-def14.9%
+-commutative14.9%
fma-def14.9%
Simplified14.9%
Taylor expanded in beta around 0 14.9%
Taylor expanded in alpha around -inf 91.5%
if -0.5 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) Initial program 83.4%
Taylor expanded in i around 0 99.2%
Final simplification97.4%
(FPCore (alpha beta i) :precision binary64 (if (<= alpha 1.22e+148) (/ (+ 1.0 (/ (- beta alpha) (+ 2.0 (+ (+ alpha beta) (* 2.0 i))))) 2.0) (/ (/ (+ (* i 4.0) (+ 2.0 (* beta 2.0))) alpha) 2.0)))
/* Herbie alternative 5: regime split on the magnitude of alpha (threshold
 * 1.22e148 chosen by the tool's regime inference). Bit-identical results. */
double code(double alpha, double beta, double i) {
    if (alpha <= 1.22e+148) {
        double denom = 2.0 + ((alpha + beta) + (2.0 * i));
        return (1.0 + ((beta - alpha) / denom)) / 2.0;
    }
    /* Huge alpha: series form (4i + 2 + 2*beta) / alpha / 2. */
    return (((i * 4.0) + (2.0 + (beta * 2.0))) / alpha) / 2.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (alpha <= 1.22d+148) then
tmp = (1.0d0 + ((beta - alpha) / (2.0d0 + ((alpha + beta) + (2.0d0 * i))))) / 2.0d0
else
tmp = (((i * 4.0d0) + (2.0d0 + (beta * 2.0d0))) / alpha) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta, double i) {
double tmp;
if (alpha <= 1.22e+148) {
tmp = (1.0 + ((beta - alpha) / (2.0 + ((alpha + beta) + (2.0 * i))))) / 2.0;
} else {
tmp = (((i * 4.0) + (2.0 + (beta * 2.0))) / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta, i):
    """Herbie alternative 5: regime split on alpha (threshold 1.22e148).

    The generated source was collapsed onto one line (a SyntaxError); this
    restores the suite layout with the computation unchanged.
    """
    tmp = 0
    if alpha <= 1.22e+148:
        tmp = (1.0 + ((beta - alpha) / (2.0 + ((alpha + beta) + (2.0 * i))))) / 2.0
    else:
        tmp = (((i * 4.0) + (2.0 + (beta * 2.0))) / alpha) / 2.0
    return tmp
function code(alpha, beta, i) tmp = 0.0 if (alpha <= 1.22e+148) tmp = Float64(Float64(1.0 + Float64(Float64(beta - alpha) / Float64(2.0 + Float64(Float64(alpha + beta) + Float64(2.0 * i))))) / 2.0); else tmp = Float64(Float64(Float64(Float64(i * 4.0) + Float64(2.0 + Float64(beta * 2.0))) / alpha) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta, i) tmp = 0.0; if (alpha <= 1.22e+148) tmp = (1.0 + ((beta - alpha) / (2.0 + ((alpha + beta) + (2.0 * i))))) / 2.0; else tmp = (((i * 4.0) + (2.0 + (beta * 2.0))) / alpha) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_, i_] := If[LessEqual[alpha, 1.22e+148], N[(N[(1.0 + N[(N[(beta - alpha), $MachinePrecision] / N[(2.0 + N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(i * 4.0), $MachinePrecision] + N[(2.0 + N[(beta * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1.22 \cdot 10^{+148}:\\
\;\;\;\;\frac{1 + \frac{\beta - \alpha}{2 + \left(\left(\alpha + \beta\right) + 2 \cdot i\right)}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{i \cdot 4 + \left(2 + \beta \cdot 2\right)}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 1.22000000000000007e148Initial program 78.6%
Taylor expanded in i around 0 92.7%
if 1.22000000000000007e148 < alpha Initial program 1.3%
associate-/l/0.3%
*-commutative0.3%
times-frac18.4%
associate-+l+18.4%
fma-def18.4%
+-commutative18.4%
fma-def18.4%
Simplified18.4%
Taylor expanded in beta around 0 18.4%
Taylor expanded in alpha around inf 88.2%
Final simplification91.9%
(FPCore (alpha beta i) :precision binary64 (if (<= alpha 1.22e+148) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ (+ (* i 4.0) (+ 2.0 (* beta 2.0))) alpha) 2.0)))
/* Herbie alternative 7: drops alpha and i from the common regime entirely,
 * keeping only (1 + beta/(beta+2))/2; series form for huge alpha. */
double code(double alpha, double beta, double i) {
    if (alpha <= 1.22e+148) {
        return (1.0 + (beta / (beta + 2.0))) / 2.0;
    }
    return (((i * 4.0) + (2.0 + (beta * 2.0))) / alpha) / 2.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (alpha <= 1.22d+148) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = (((i * 4.0d0) + (2.0d0 + (beta * 2.0d0))) / alpha) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta, double i) {
double tmp;
if (alpha <= 1.22e+148) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = (((i * 4.0) + (2.0 + (beta * 2.0))) / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta, i): tmp = 0 if alpha <= 1.22e+148: tmp = (1.0 + (beta / (beta + 2.0))) / 2.0 else: tmp = (((i * 4.0) + (2.0 + (beta * 2.0))) / alpha) / 2.0 return tmp
function code(alpha, beta, i) tmp = 0.0 if (alpha <= 1.22e+148) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(Float64(Float64(i * 4.0) + Float64(2.0 + Float64(beta * 2.0))) / alpha) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta, i) tmp = 0.0; if (alpha <= 1.22e+148) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; else tmp = (((i * 4.0) + (2.0 + (beta * 2.0))) / alpha) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_, i_] := If[LessEqual[alpha, 1.22e+148], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(i * 4.0), $MachinePrecision] + N[(2.0 + N[(beta * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1.22 \cdot 10^{+148}:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{i \cdot 4 + \left(2 + \beta \cdot 2\right)}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 1.22000000000000007e148Initial program 78.6%
Simplified82.2%
Taylor expanded in i around 0 70.5%
associate-/r*80.8%
+-commutative80.8%
+-commutative80.8%
Simplified80.8%
Taylor expanded in alpha around 0 86.4%
if 1.22000000000000007e148 < alpha Initial program 1.3%
associate-/l/0.3%
*-commutative0.3%
times-frac18.4%
associate-+l+18.4%
fma-def18.4%
+-commutative18.4%
fma-def18.4%
Simplified18.4%
Taylor expanded in beta around 0 18.4%
Taylor expanded in alpha around inf 88.2%
Final simplification86.7%
(FPCore (alpha beta i) :precision binary64 (if (<= alpha 9.2e+148) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ 2.0 alpha) 2.0)))
/* Herbie alternative 8: cheapest two-regime form — i is unused in both
 * branches; threshold 9.2e148 from regime inference. */
double code(double alpha, double beta, double i) {
    return (alpha <= 9.2e+148)
        ? (1.0 + (beta / (beta + 2.0))) / 2.0
        : (2.0 / alpha) / 2.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (alpha <= 9.2d+148) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = (2.0d0 / alpha) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta, double i) {
double tmp;
if (alpha <= 9.2e+148) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = (2.0 / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta, i):
    """Herbie alternative 8: regime split on alpha (threshold 9.2e148); i unused.

    The generated source was collapsed onto one line (a SyntaxError); this
    restores the suite layout with the computation unchanged.
    """
    tmp = 0
    if alpha <= 9.2e+148:
        tmp = (1.0 + (beta / (beta + 2.0))) / 2.0
    else:
        tmp = (2.0 / alpha) / 2.0
    return tmp
function code(alpha, beta, i) tmp = 0.0 if (alpha <= 9.2e+148) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(2.0 / alpha) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta, i) tmp = 0.0; if (alpha <= 9.2e+148) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; else tmp = (2.0 / alpha) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_, i_] := If[LessEqual[alpha, 9.2e+148], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(2.0 / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 9.2 \cdot 10^{+148}:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 9.2000000000000002e148Initial program 78.2%
Simplified81.8%
Taylor expanded in i around 0 70.2%
associate-/r*80.4%
+-commutative80.4%
+-commutative80.4%
Simplified80.4%
Taylor expanded in alpha around 0 86.0%
if 9.2000000000000002e148 < alpha Initial program 1.3%
associate-/l/0.2%
*-commutative0.2%
times-frac18.7%
associate-+l+18.7%
fma-def18.7%
+-commutative18.7%
fma-def18.7%
Simplified18.7%
Taylor expanded in beta around 0 0.2%
associate-*r/0.2%
mul-1-neg0.2%
unpow20.2%
Simplified0.2%
Taylor expanded in alpha around inf 6.0%
Taylor expanded in i around 0 40.3%
Final simplification78.3%
(FPCore (alpha beta i) :precision binary64 (if (<= alpha 1.22e+148) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ (+ 2.0 (* beta 2.0)) alpha) 2.0)))
/* Herbie alternative 9: common regime as in alternative 7; huge-alpha regime
 * keeps the beta term but drops i. */
double code(double alpha, double beta, double i) {
    if (alpha <= 1.22e+148) {
        return (1.0 + (beta / (beta + 2.0))) / 2.0;
    }
    return ((2.0 + (beta * 2.0)) / alpha) / 2.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (alpha <= 1.22d+148) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = ((2.0d0 + (beta * 2.0d0)) / alpha) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta, double i) {
double tmp;
if (alpha <= 1.22e+148) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta, i):
    """Herbie alternative 9: regime split on alpha (threshold 1.22e148); i unused.

    The generated source was collapsed onto one line (a SyntaxError); this
    restores the suite layout with the computation unchanged.
    """
    tmp = 0
    if alpha <= 1.22e+148:
        tmp = (1.0 + (beta / (beta + 2.0))) / 2.0
    else:
        tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0
    return tmp
function code(alpha, beta, i) tmp = 0.0 if (alpha <= 1.22e+148) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(Float64(2.0 + Float64(beta * 2.0)) / alpha) / 2.0); end return tmp end
% Herbie-generated rewrite: regime split on alpha; i is unused here.
% (Restored from a one-line collapse; MATLAB requires the function
% header and statements on separate lines.)
function tmp_2 = code(alpha, beta, i)
    if (alpha <= 1.22e+148)
        tmp_2 = (1.0 + (beta / (beta + 2.0))) / 2.0;
    else
        tmp_2 = ((2.0 + (beta * 2.0)) / alpha) / 2.0;
    end
end
(* Herbie-generated rewrite: regime split on alpha; i is unused here.
   Note: Wolfram Language has no "e" exponent syntax — "1.22e+148" parses
   as 1.22*e + 148 — so the threshold uses *^ notation instead. *)
code[alpha_, beta_, i_] := If[LessEqual[alpha, 1.22*^148], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(2.0 + N[(beta * 2.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1.22 \cdot 10^{+148}:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 + \beta \cdot 2}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 1.22000000000000007e148
Initial program 78.6%
Simplified 82.2%
Taylor expanded in i around 0 70.5%
associate-/r* 80.8%
+-commutative 80.8%
+-commutative 80.8%
Simplified 80.8%
Taylor expanded in alpha around 0 86.4%
if 1.22000000000000007e148 < alpha
Initial program 1.3%
Simplified 10.6%
Taylor expanded in i around 0 10.6%
associate-/r* 10.2%
+-commutative 10.2%
+-commutative 10.2%
Simplified 10.2%
Taylor expanded in alpha around inf 57.2%
distribute-rgt1-in 57.2%
metadata-eval 57.2%
mul0-lft 57.2%
neg-sub0 57.2%
mul-1-neg 57.2%
remove-double-neg 57.2%
Simplified 57.2%
Final simplification 81.4%
; Alternative: regime split on alpha; below the threshold only beta matters,
; above it only i does.
(FPCore (alpha beta i) :precision binary64 (if (<= alpha 1.24e+148) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ (+ 2.0 (* i 4.0)) alpha) 2.0)))
/*
 * Herbie-generated rewrite, split into two regimes on alpha.
 *   alpha <= 1.24e148 : 0.5 * (1 + beta/(beta + 2))   (i unused)
 *   otherwise         : 0.5 * (2 + 4*i) / alpha       (beta unused)
 */
double code(double alpha, double beta, double i) {
    if (alpha <= 1.24e+148) {
        return (1.0 + (beta / (beta + 2.0))) / 2.0;
    }
    return ((2.0 + (i * 4.0)) / alpha) / 2.0;
}
! Herbie-generated rewrite: regime split on alpha; below the threshold
! only beta matters, above it only i does.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (alpha <= 1.24d+148) then
! Moderate alpha: 0.5*(1 + beta/(beta+2))
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
! Huge alpha: 0.5*(2 + 4*i)/alpha
tmp = ((2.0d0 + (i * 4.0d0)) / alpha) / 2.0d0
end if
code = tmp
end function
/** Herbie-generated rewrite: regime split on alpha; the else branch uses i, not beta. */
public static double code(double alpha, double beta, double i) {
    return (alpha <= 1.24e+148)
            ? (1.0 + (beta / (beta + 2.0))) / 2.0
            : ((2.0 + (i * 4.0)) / alpha) / 2.0;
}
def code(alpha, beta, i):
    """Herbie-generated rewrite: regime split on alpha.

    alpha <= 1.24e148 : 0.5 * (1 + beta/(beta + 2))   (`i` unused)
    otherwise         : 0.5 * (2 + 4*i) / alpha       (`beta` unused)
    (Restored from a one-line collapse that was not valid Python.)
    """
    if alpha <= 1.24e+148:
        return (1.0 + beta / (beta + 2.0)) / 2.0
    return (2.0 + i * 4.0) / alpha / 2.0
# Herbie-generated rewrite: regime split on alpha; below the threshold only
# beta matters, above it only i does.  Float64() mirrors binary64 evaluation.
# (Restored from a one-line collapse that was not valid Julia.)
function code(alpha, beta, i)
    if alpha <= 1.24e148
        return Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0)
    else
        return Float64(Float64(Float64(2.0 + Float64(i * 4.0)) / alpha) / 2.0)
    end
end
% Herbie-generated rewrite: regime split on alpha; the else branch uses i.
% (Restored from a one-line collapse; MATLAB requires the function
% header and statements on separate lines.)
function tmp_2 = code(alpha, beta, i)
    if (alpha <= 1.24e+148)
        tmp_2 = (1.0 + (beta / (beta + 2.0))) / 2.0;
    else
        tmp_2 = ((2.0 + (i * 4.0)) / alpha) / 2.0;
    end
end
(* Herbie-generated rewrite: regime split on alpha; the else branch uses i.
   Wolfram Language has no "e" exponent syntax — "1.24e+148" parses as
   1.24*e + 148 — so the threshold uses *^ notation instead. *)
code[alpha_, beta_, i_] := If[LessEqual[alpha, 1.24*^148], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(2.0 + N[(i * 4.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1.24 \cdot 10^{+148}:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 + i \cdot 4}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 1.24e148
Initial program 78.2%
Simplified 81.8%
Taylor expanded in i around 0 70.2%
associate-/r* 80.4%
+-commutative 80.4%
+-commutative 80.4%
Simplified 80.4%
Taylor expanded in alpha around 0 86.0%
if 1.24e148 < alpha
Initial program 1.3%
associate-/l/ 0.2%
*-commutative 0.2%
times-frac 18.7%
associate-+l+ 18.7%
fma-def 18.7%
+-commutative 18.7%
fma-def 18.7%
Simplified 18.7%
Taylor expanded in beta around 0 0.2%
associate-*r/ 0.2%
mul-1-neg 0.2%
unpow2 0.2%
Simplified 0.2%
Taylor expanded in alpha around inf 71.0%
Final simplification 83.5%
; Alternative: constant in each regime of beta; alpha and i are unused.
(FPCore (alpha beta i) :precision binary64 (if (<= beta 5.8e+74) 0.5 1.0))
/* Herbie-simplified alternative: constant in each regime of beta;
   alpha and i are unused. */
double code(double alpha, double beta, double i) {
    return (beta <= 5.8e+74) ? 0.5 : 1.0;
}
! Herbie-simplified alternative: constant in each regime of beta;
! alpha and i are unused.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (beta <= 5.8d+74) then
tmp = 0.5d0
else
tmp = 1.0d0
end if
code = tmp
end function
/** Herbie-simplified alternative: constant in each regime of beta; alpha and i are unused. */
public static double code(double alpha, double beta, double i) {
    return (beta <= 5.8e+74) ? 0.5 : 1.0;
}
def code(alpha, beta, i):
    """Herbie-simplified alternative: constant in each regime of beta.

    `alpha` and `i` are unused.
    (Restored from a one-line collapse that was not valid Python.)
    """
    return 0.5 if beta <= 5.8e+74 else 1.0
# Herbie-simplified alternative: constant in each regime of beta;
# alpha and i are unused.
# (Restored from a one-line collapse that was not valid Julia.)
function code(alpha, beta, i)
    return beta <= 5.8e74 ? 0.5 : 1.0
end
% Herbie-simplified alternative: constant in each regime of beta;
% alpha and i are unused.
% (Restored from a one-line collapse; MATLAB requires the function
% header and statements on separate lines.)
function tmp_2 = code(alpha, beta, i)
    if (beta <= 5.8e+74)
        tmp_2 = 0.5;
    else
        tmp_2 = 1.0;
    end
end
(* Herbie-simplified alternative: constant in each regime of beta; alpha
   and i are unused.  "5.8e+74" is not a Wolfram number literal (it parses
   as 5.8*e + 74), so the threshold uses *^ notation. *)
code[alpha_, beta_, i_] := If[LessEqual[beta, 5.8*^74], 0.5, 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 5.8 \cdot 10^{+74}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if beta < 5.8000000000000005e74
Initial program 75.6%
associate-/l/ 75.5%
*-commutative 75.5%
times-frac 78.1%
associate-+l+ 78.1%
fma-def 78.1%
+-commutative 78.1%
fma-def 78.1%
Simplified 78.1%
Taylor expanded in i around inf 72.6%
if 5.8000000000000005e74 < beta
Initial program 39.3%
associate-/l/ 37.8%
*-commutative 37.8%
times-frac 87.3%
associate-+l+ 87.3%
fma-def 87.3%
+-commutative 87.3%
fma-def 87.3%
Simplified 87.3%
Taylor expanded in beta around inf 77.2%
Final simplification 73.9%
; Alternative: fully constant-folded to 0.5; all inputs are ignored.
(FPCore (alpha beta i) :precision binary64 0.5)
/* Herbie reduced this alternative to the constant 0.5; all three
   arguments are ignored. */
double code(double alpha, double beta, double i) {
return 0.5;
}
! Herbie reduced this alternative to the constant 0.5; all three
! arguments are ignored.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = 0.5d0
end function
/** Herbie reduced this alternative to the constant 0.5; all arguments are ignored. */
public static double code(double alpha, double beta, double i) {
return 0.5;
}
def code(alpha, beta, i): return 0.5  # constant-folded alternative; all arguments ignored
# Constant-folded alternative; all arguments are ignored.
function code(alpha, beta, i) return 0.5 end
% Constant-folded alternative; all arguments are ignored.
function tmp = code(alpha, beta, i) tmp = 0.5; end
(* Constant-folded alternative; all arguments are ignored. *)
code[alpha_, beta_, i_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 65.3%
associate-/l/ 64.7%
*-commutative 64.7%
times-frac 80.7%
associate-+l+ 80.7%
fma-def 80.7%
+-commutative 80.7%
fma-def 80.7%
Simplified 80.7%
Taylor expanded in i around inf 59.0%
Final simplification 59.0%
herbie shell --seed 2023174
; Original input program (jcobi/2 from Octave 3.8), binary64, with
; preconditions alpha > -1, beta > -1, i > 0.
(FPCore (alpha beta i)
:name "Octave 3.8, jcobi/2"
:precision binary64
:pre (and (and (> alpha -1.0) (> beta -1.0)) (> i 0.0))
(/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) (+ (+ alpha beta) (* 2.0 i))) (+ (+ (+ alpha beta) (* 2.0 i)) 2.0)) 1.0) 2.0))