
(FPCore (alpha beta i) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))) (/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ t_0 2.0)) 1.0) 2.0)))
/* Original Octave jcobi/2 expression evaluated directly in binary64:
 * ((a+b)(b-a)/t0/(t0+2) + 1) / 2 with t0 = (a+b) + 2*i.
 * NOTE(review): t0 may be 0 when a+b == -2*i, giving Inf/NaN — presumably
 * excluded by the FPCore precondition (alpha,beta > -1, i > 0). */
double code(double alpha, double beta, double i) {
    double sum = alpha + beta;
    double denom = sum + 2.0 * i;
    double ratio = sum * (beta - alpha) / denom / (denom + 2.0);
    return (ratio + 1.0) / 2.0;
}
! Original Octave jcobi/2 expression in double precision:
!   ((alpha+beta)*(beta-alpha)/t_0/(t_0+2) + 1) / 2, t_0 = (alpha+beta) + 2*i.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
! Shared denominator term.  NOTE(review): t_0 can be 0 when alpha+beta = -2*i,
! yielding Inf/NaN here - presumably excluded by the FPCore precondition.
t_0 = (alpha + beta) + (2.0d0 * i)
code = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0d0)) + 1.0d0) / 2.0d0
end function
/**
 * Direct binary64 evaluation of the jcobi/2 expression:
 * ((a+b)(b-a)/t0/(t0+2) + 1) / 2 with t0 = (a+b) + 2*i.
 */
public static double code(double alpha, double beta, double i) {
    double sum = alpha + beta;
    double t = sum + 2.0 * i;
    double core = sum * (beta - alpha) / t / (t + 2.0);
    return (core + 1.0) / 2.0;
}
def code(alpha, beta, i):
    """Direct evaluation of the jcobi/2 expression.

    Computes ((alpha+beta)*(beta-alpha)/t0/(t0+2) + 1) / 2 with
    t0 = (alpha+beta) + 2*i.  The exported one-line form was not valid
    Python (statements were not separated); reformatted, behavior unchanged.
    """
    t_0 = (alpha + beta) + (2.0 * i)
    return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0
# Direct Float64 evaluation of the jcobi/2 expression.  The exported
# one-line form was not valid Julia (statements were not separated);
# reformatted onto separate lines, behavior unchanged.
function code(alpha, beta, i)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(t_0 + 2.0)) + 1.0) / 2.0)
end
% Direct evaluation of the jcobi/2 expression in double precision.
% Reformatted from the flattened one-line export, which is not a valid
% MATLAB function file; behavior unchanged.
function tmp = code(alpha, beta, i)
    t_0 = (alpha + beta) + (2.0 * i);
    tmp = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
end
(* Machine-precision evaluation of the jcobi/2 expression:
   ((alpha+beta)(beta-alpha)/t0/(t0+2) + 1)/2 with t0 = (alpha+beta) + 2 i. *)
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\frac{\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{t_0 + 2} + 1}{2}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta i) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))) (/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ t_0 2.0)) 1.0) 2.0)))
double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
t_0 = (alpha + beta) + (2.0d0 * i)
code = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0d0)) + 1.0d0) / 2.0d0
end function
public static double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
}
def code(alpha, beta, i): t_0 = (alpha + beta) + (2.0 * i) return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(t_0 + 2.0)) + 1.0) / 2.0) end
function tmp = code(alpha, beta, i) t_0 = (alpha + beta) + (2.0 * i); tmp = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0; end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\frac{\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{t_0 + 2} + 1}{2}
\end{array}
\end{array}
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ 2.0 t_0)) -0.9)
(/ (/ (+ (+ beta 2.0) (- beta (* i -4.0))) alpha) 2.0)
(/
(+
(*
(/ (- beta alpha) (+ (+ alpha beta) (fma 2.0 i 2.0)))
(/ (+ alpha beta) (fma 2.0 i (+ alpha beta))))
1.0)
2.0))))
double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
double tmp;
if (((((alpha + beta) * (beta - alpha)) / t_0) / (2.0 + t_0)) <= -0.9) {
tmp = (((beta + 2.0) + (beta - (i * -4.0))) / alpha) / 2.0;
} else {
tmp = ((((beta - alpha) / ((alpha + beta) + fma(2.0, i, 2.0))) * ((alpha + beta) / fma(2.0, i, (alpha + beta)))) + 1.0) / 2.0;
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) tmp = 0.0 if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(2.0 + t_0)) <= -0.9) tmp = Float64(Float64(Float64(Float64(beta + 2.0) + Float64(beta - Float64(i * -4.0))) / alpha) / 2.0); else tmp = Float64(Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + fma(2.0, i, 2.0))) * Float64(Float64(alpha + beta) / fma(2.0, i, Float64(alpha + beta)))) + 1.0) / 2.0); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision], -0.9], N[(N[(N[(N[(beta + 2.0), $MachinePrecision] + N[(beta - N[(i * -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] / N[(2.0 * i + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{2 + t_0} \leq -0.9:\\
\;\;\;\;\frac{\frac{\left(\beta + 2\right) + \left(\beta - i \cdot -4\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + \mathsf{fma}\left(2, i, 2\right)} \cdot \frac{\alpha + \beta}{\mathsf{fma}\left(2, i, \alpha + \beta\right)} + 1}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) < -0.900000000000000022
Initial program 2.1%
Taylor expanded in alpha around inf 9.6%
Taylor expanded in alpha around -inf 83.4%
Taylor expanded in alpha around inf 83.4%
neg-mul-1 83.4%
associate--r+ 83.4%
cancel-sign-sub-inv 83.4%
metadata-eval 83.4%
+-commutative 83.4%
unsub-neg 83.4%
*-commutative 83.4%
Simplified 83.4%
if -0.900000000000000022 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2))
Initial program 84.4%
associate-/l/ 83.9%
*-commutative 83.9%
times-frac 99.7%
associate-+l+ 99.7%
fma-def 99.7%
+-commutative 99.7%
fma-def 99.7%
Simplified 99.7%
Final simplification 96.1%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ 2.0 t_0)) -0.5)
(/ (/ (+ (+ beta 2.0) (- beta (* i -4.0))) alpha) 2.0)
(/
(+
1.0
(*
(/ (- beta alpha) (+ (+ alpha beta) (fma 2.0 i 2.0)))
(/ beta (+ beta (* 2.0 i)))))
2.0))))
double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
double tmp;
if (((((alpha + beta) * (beta - alpha)) / t_0) / (2.0 + t_0)) <= -0.5) {
tmp = (((beta + 2.0) + (beta - (i * -4.0))) / alpha) / 2.0;
} else {
tmp = (1.0 + (((beta - alpha) / ((alpha + beta) + fma(2.0, i, 2.0))) * (beta / (beta + (2.0 * i))))) / 2.0;
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) tmp = 0.0 if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(2.0 + t_0)) <= -0.5) tmp = Float64(Float64(Float64(Float64(beta + 2.0) + Float64(beta - Float64(i * -4.0))) / alpha) / 2.0); else tmp = Float64(Float64(1.0 + Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + fma(2.0, i, 2.0))) * Float64(beta / Float64(beta + Float64(2.0 * i))))) / 2.0); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(N[(N[(beta + 2.0), $MachinePrecision] + N[(beta - N[(i * -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(beta / N[(beta + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{2 + t_0} \leq -0.5:\\
\;\;\;\;\frac{\frac{\left(\beta + 2\right) + \left(\beta - i \cdot -4\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + \frac{\beta - \alpha}{\left(\alpha + \beta\right) + \mathsf{fma}\left(2, i, 2\right)} \cdot \frac{\beta}{\beta + 2 \cdot i}}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) < -0.5Initial program 3.8%
Taylor expanded in alpha around inf 11.1%
Taylor expanded in alpha around -inf 82.4%
Taylor expanded in alpha around inf 82.4%
neg-mul-182.4%
associate--r+82.4%
cancel-sign-sub-inv82.4%
metadata-eval82.4%
+-commutative82.4%
unsub-neg82.4%
*-commutative82.4%
Simplified82.4%
if -0.5 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) Initial program 84.3%
associate-/l/83.8%
*-commutative83.8%
times-frac99.7%
associate-+l+99.7%
fma-def99.7%
+-commutative99.7%
fma-def99.7%
Simplified99.7%
Taylor expanded in alpha around 0 99.4%
Final simplification95.6%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) (* 2.0 i))))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ 2.0 t_0)) -0.5)
(/ (/ (+ (+ beta 2.0) (- beta (* i -4.0))) alpha) 2.0)
(/
(+ 1.0 (* (/ beta (+ (* 2.0 i) (+ beta 2.0))) (/ beta (fma 2.0 i beta))))
2.0))))
double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
double tmp;
if (((((alpha + beta) * (beta - alpha)) / t_0) / (2.0 + t_0)) <= -0.5) {
tmp = (((beta + 2.0) + (beta - (i * -4.0))) / alpha) / 2.0;
} else {
tmp = (1.0 + ((beta / ((2.0 * i) + (beta + 2.0))) * (beta / fma(2.0, i, beta)))) / 2.0;
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) tmp = 0.0 if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(2.0 + t_0)) <= -0.5) tmp = Float64(Float64(Float64(Float64(beta + 2.0) + Float64(beta - Float64(i * -4.0))) / alpha) / 2.0); else tmp = Float64(Float64(1.0 + Float64(Float64(beta / Float64(Float64(2.0 * i) + Float64(beta + 2.0))) * Float64(beta / fma(2.0, i, beta)))) / 2.0); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(2.0 + t$95$0), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(N[(N[(beta + 2.0), $MachinePrecision] + N[(beta - N[(i * -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[(N[(beta / N[(N[(2.0 * i), $MachinePrecision] + N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(beta / N[(2.0 * i + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{2 + t_0} \leq -0.5:\\
\;\;\;\;\frac{\frac{\left(\beta + 2\right) + \left(\beta - i \cdot -4\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + \frac{\beta}{2 \cdot i + \left(\beta + 2\right)} \cdot \frac{\beta}{\mathsf{fma}\left(2, i, \beta\right)}}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) < -0.5Initial program 3.8%
Taylor expanded in alpha around inf 11.1%
Taylor expanded in alpha around -inf 82.4%
Taylor expanded in alpha around inf 82.4%
neg-mul-182.4%
associate--r+82.4%
cancel-sign-sub-inv82.4%
metadata-eval82.4%
+-commutative82.4%
unsub-neg82.4%
*-commutative82.4%
Simplified82.4%
if -0.5 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) Initial program 84.3%
associate-/l/83.8%
*-commutative83.8%
times-frac99.7%
associate-+l+99.7%
fma-def99.7%
+-commutative99.7%
fma-def99.7%
Simplified99.7%
associate-*r/99.7%
+-commutative99.7%
+-commutative99.7%
fma-udef99.7%
+-commutative99.7%
associate-+r+99.7%
+-commutative99.7%
fma-udef99.7%
Applied egg-rr99.7%
Taylor expanded in alpha around 0 98.8%
Taylor expanded in alpha around 0 83.1%
unpow283.1%
*-commutative83.1%
times-frac98.8%
associate-+r+98.8%
+-commutative98.8%
fma-udef98.8%
Simplified98.8%
Final simplification95.1%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) (* 2.0 i))) (t_1 (+ 2.0 t_0)))
(if (<= (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) t_1) -0.5)
(/ (/ (+ (+ beta 2.0) (- beta (* i -4.0))) alpha) 2.0)
(/ (+ 1.0 (/ beta t_1)) 2.0))))
/* Herbie alternative: else-branch collapsed to (1 + beta/t_1)/2 via a
 * Taylor expansion in beta around infinity. */
double code(double alpha, double beta, double i) {
    double t_0 = (alpha + beta) + (2.0 * i);
    double t_1 = 2.0 + t_0;
    double ratio = (alpha + beta) * (beta - alpha) / t_0 / t_1;
    if (ratio <= -0.5) {
        return ((beta + 2.0) + (beta - i * -4.0)) / alpha / 2.0;
    }
    return (1.0 + beta / t_1) / 2.0;
}
! Herbie alternative: branch on the core ratio.  For ratio <= -0.5 use the
! form expanded in alpha around infinity; otherwise the simplified
! (1 + beta/t_1)/2 form.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
! t_0 = (alpha+beta) + 2*i and t_1 = t_0 + 2 are the original's denominators.
t_0 = (alpha + beta) + (2.0d0 * i)
t_1 = 2.0d0 + t_0
if (((((alpha + beta) * (beta - alpha)) / t_0) / t_1) <= (-0.5d0)) then
tmp = (((beta + 2.0d0) + (beta - (i * (-4.0d0)))) / alpha) / 2.0d0
else
tmp = (1.0d0 + (beta / t_1)) / 2.0d0
end if
code = tmp
end function
/**
 * Herbie alternative: branch on the core ratio; for ratio <= -0.5 use the
 * alpha-expanded form, otherwise the simplified (1 + beta/t1)/2.
 */
public static double code(double alpha, double beta, double i) {
    double t0 = (alpha + beta) + (2.0 * i);
    double t1 = 2.0 + t0;
    double ratio = (alpha + beta) * (beta - alpha) / t0 / t1;
    if (ratio <= -0.5) {
        return ((beta + 2.0) + (beta - i * -4.0)) / alpha / 2.0;
    }
    return (1.0 + beta / t1) / 2.0;
}
def code(alpha, beta, i):
    """Herbie alternative: branch on the core ratio.

    For ratio <= -0.5 use the alpha-expanded form; otherwise the simplified
    (1 + beta/t_1)/2.  The exported one-line form was not valid Python
    (statements were not separated); reformatted, behavior unchanged.
    """
    t_0 = (alpha + beta) + (2.0 * i)
    t_1 = 2.0 + t_0
    if ((((alpha + beta) * (beta - alpha)) / t_0) / t_1) <= -0.5:
        tmp = (((beta + 2.0) + (beta - (i * -4.0))) / alpha) / 2.0
    else:
        tmp = (1.0 + (beta / t_1)) / 2.0
    return tmp
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) t_1 = Float64(2.0 + t_0) tmp = 0.0 if (Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / t_1) <= -0.5) tmp = Float64(Float64(Float64(Float64(beta + 2.0) + Float64(beta - Float64(i * -4.0))) / alpha) / 2.0); else tmp = Float64(Float64(1.0 + Float64(beta / t_1)) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta, i) t_0 = (alpha + beta) + (2.0 * i); t_1 = 2.0 + t_0; tmp = 0.0; if (((((alpha + beta) * (beta - alpha)) / t_0) / t_1) <= -0.5) tmp = (((beta + 2.0) + (beta - (i * -4.0))) / alpha) / 2.0; else tmp = (1.0 + (beta / t_1)) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 + t$95$0), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision], -0.5], N[(N[(N[(N[(beta + 2.0), $MachinePrecision] + N[(beta - N[(i * -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[(beta / t$95$1), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_1 := 2 + t_0\\
\mathbf{if}\;\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t_0}}{t_1} \leq -0.5:\\
\;\;\;\;\frac{\frac{\left(\beta + 2\right) + \left(\beta - i \cdot -4\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + \frac{\beta}{t_1}}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) < -0.5Initial program 3.8%
Taylor expanded in alpha around inf 11.1%
Taylor expanded in alpha around -inf 82.4%
Taylor expanded in alpha around inf 82.4%
neg-mul-182.4%
associate--r+82.4%
cancel-sign-sub-inv82.4%
metadata-eval82.4%
+-commutative82.4%
unsub-neg82.4%
*-commutative82.4%
Simplified82.4%
if -0.5 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 2 i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 2 i)) 2)) Initial program 84.3%
Taylor expanded in beta around inf 98.2%
Final simplification94.6%
(FPCore (alpha beta i) :precision binary64 (if (<= alpha 2.5e+131) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ (+ (+ beta 2.0) (- beta (* i -4.0))) alpha) 2.0)))
/* Herbie alternative: for alpha <= 2.5e131 drop alpha entirely (Taylor
 * expansion in alpha around 0); for huge alpha use the expansion around
 * infinity, dividing through by alpha. */
double code(double alpha, double beta, double i) {
    if (alpha <= 2.5e+131) {
        return (1.0 + beta / (beta + 2.0)) / 2.0;
    }
    double numer = (beta + 2.0) + (beta - i * -4.0);
    return (numer / alpha) / 2.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (alpha <= 2.5d+131) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = (((beta + 2.0d0) + (beta - (i * (-4.0d0)))) / alpha) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta, double i) {
double tmp;
if (alpha <= 2.5e+131) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = (((beta + 2.0) + (beta - (i * -4.0))) / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta, i):
    """Herbie alternative split on alpha <= 2.5e131.

    Small alpha: (1 + beta/(beta+2))/2 (alpha dropped); huge alpha: the
    alpha-expanded form divided by alpha.  The exported one-line form was
    not valid Python; reformatted, behavior unchanged.
    """
    tmp = 0
    if alpha <= 2.5e+131:
        tmp = (1.0 + (beta / (beta + 2.0))) / 2.0
    else:
        tmp = (((beta + 2.0) + (beta - (i * -4.0))) / alpha) / 2.0
    return tmp
function code(alpha, beta, i) tmp = 0.0 if (alpha <= 2.5e+131) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(Float64(Float64(beta + 2.0) + Float64(beta - Float64(i * -4.0))) / alpha) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta, i) tmp = 0.0; if (alpha <= 2.5e+131) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; else tmp = (((beta + 2.0) + (beta - (i * -4.0))) / alpha) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_, i_] := If[LessEqual[alpha, 2.5e+131], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(beta + 2.0), $MachinePrecision] + N[(beta - N[(i * -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 2.5 \cdot 10^{+131}:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(\beta + 2\right) + \left(\beta - i \cdot -4\right)}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 2.49999999999999998e131Initial program 80.6%
associate-/l/80.2%
*-commutative80.2%
times-frac94.1%
associate-+l+94.1%
fma-def94.1%
+-commutative94.1%
fma-def94.1%
Simplified94.1%
associate-*r/94.1%
+-commutative94.1%
+-commutative94.1%
fma-udef94.1%
+-commutative94.1%
associate-+r+94.1%
+-commutative94.1%
fma-udef94.1%
Applied egg-rr94.1%
Taylor expanded in i around 0 81.6%
+-commutative81.6%
Simplified81.6%
Taylor expanded in alpha around 0 85.1%
if 2.49999999999999998e131 < alpha Initial program 1.3%
Taylor expanded in alpha around inf 14.4%
Taylor expanded in alpha around -inf 73.9%
Taylor expanded in alpha around inf 73.9%
neg-mul-173.9%
associate--r+73.9%
cancel-sign-sub-inv73.9%
metadata-eval73.9%
+-commutative73.9%
unsub-neg73.9%
*-commutative73.9%
Simplified73.9%
Final simplification83.0%
(FPCore (alpha beta i) :precision binary64 (if (<= alpha 4.7e+181) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ (+ 2.0 (* i 4.0)) alpha) 2.0)))
/* Herbie alternative: like the 2.5e131 split but with threshold 4.7e181 and
 * the large-alpha branch additionally expanded in beta, leaving (2 + 4i)/alpha. */
double code(double alpha, double beta, double i) {
    if (alpha <= 4.7e+181) {
        return (1.0 + beta / (beta + 2.0)) / 2.0;
    }
    return ((2.0 + i * 4.0) / alpha) / 2.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (alpha <= 4.7d+181) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = ((2.0d0 + (i * 4.0d0)) / alpha) / 2.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta, double i) {
double tmp;
if (alpha <= 4.7e+181) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = ((2.0 + (i * 4.0)) / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta, i):
    """Herbie alternative split on alpha <= 4.7e181.

    Small alpha: (1 + beta/(beta+2))/2; huge alpha: (2 + 4i)/alpha/2.
    The exported one-line form was not valid Python; reformatted,
    behavior unchanged.
    """
    tmp = 0
    if alpha <= 4.7e+181:
        tmp = (1.0 + (beta / (beta + 2.0))) / 2.0
    else:
        tmp = ((2.0 + (i * 4.0)) / alpha) / 2.0
    return tmp
function code(alpha, beta, i) tmp = 0.0 if (alpha <= 4.7e+181) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(Float64(2.0 + Float64(i * 4.0)) / alpha) / 2.0); end return tmp end
function tmp_2 = code(alpha, beta, i) tmp = 0.0; if (alpha <= 4.7e+181) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; else tmp = ((2.0 + (i * 4.0)) / alpha) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_, i_] := If[LessEqual[alpha, 4.7e+181], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(2.0 + N[(i * 4.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 4.7 \cdot 10^{+181}:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 + i \cdot 4}{\alpha}}{2}\\
\end{array}
\end{array}
if alpha < 4.70000000000000027e181Initial program 77.4%
associate-/l/77.0%
*-commutative77.0%
times-frac92.1%
associate-+l+92.1%
fma-def92.1%
+-commutative92.1%
fma-def92.1%
Simplified92.1%
associate-*r/92.1%
+-commutative92.1%
+-commutative92.1%
fma-udef92.1%
+-commutative92.1%
associate-+r+92.1%
+-commutative92.1%
fma-udef92.1%
Applied egg-rr92.1%
Taylor expanded in i around 0 79.3%
+-commutative79.3%
Simplified79.3%
Taylor expanded in alpha around 0 83.6%
if 4.70000000000000027e181 < alpha Initial program 1.0%
associate-/l/0.0%
*-commutative0.0%
times-frac27.5%
associate-+l+27.5%
fma-def27.5%
+-commutative27.5%
fma-def27.5%
Simplified27.5%
Taylor expanded in beta around 0 0.0%
mul-1-neg0.0%
unsub-neg0.0%
unpow20.0%
associate-+r+0.0%
+-commutative0.0%
Simplified0.0%
Taylor expanded in alpha around inf 60.1%
Final simplification80.1%
(FPCore (alpha beta i) :precision binary64 (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0))
/* Herbie alternative: unconditional simplification (1 + beta/(beta+2))/2;
 * alpha and i no longer appear. */
double code(double alpha, double beta, double i) {
    (void)alpha;
    (void)i;
    double frac = beta / (beta + 2.0);
    return (1.0 + frac) / 2.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
end function
public static double code(double alpha, double beta, double i) {
return (1.0 + (beta / (beta + 2.0))) / 2.0;
}
def code(alpha, beta, i):
    """Return (1 + beta/(beta+2)) / 2; alpha and i are unused here."""
    half_ratio = beta / (beta + 2.0)
    return (1.0 + half_ratio) / 2.0
function code(alpha, beta, i) return Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0) end
function tmp = code(alpha, beta, i) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; end
code[alpha_, beta_, i_] := N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 + \frac{\beta}{\beta + 2}}{2}
\end{array}
Initial program 66.1%
associate-/l/65.5%
*-commutative65.5%
times-frac82.5%
associate-+l+82.5%
fma-def82.5%
+-commutative82.5%
fma-def82.5%
Simplified82.5%
associate-*r/82.5%
+-commutative82.5%
+-commutative82.5%
fma-udef82.5%
+-commutative82.5%
associate-+r+82.5%
+-commutative82.5%
fma-udef82.5%
Applied egg-rr82.5%
Taylor expanded in i around 0 68.6%
+-commutative68.6%
Simplified68.6%
Taylor expanded in alpha around 0 73.9%
Final simplification73.9%
(FPCore (alpha beta i) :precision binary64 (if (<= beta 2e+39) 0.5 1.0))
/* Herbie alternative: piecewise-constant approximation — 0.5 for
 * beta <= 2e39, 1.0 beyond. */
double code(double alpha, double beta, double i) {
    (void)alpha;
    (void)i;
    return (beta <= 2e+39) ? 0.5 : 1.0;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (beta <= 2d+39) then
tmp = 0.5d0
else
tmp = 1.0d0
end if
code = tmp
end function
public static double code(double alpha, double beta, double i) {
double tmp;
if (beta <= 2e+39) {
tmp = 0.5;
} else {
tmp = 1.0;
}
return tmp;
}
def code(alpha, beta, i):
    """Piecewise-constant alternative: 0.5 for beta <= 2e39, else 1.0.

    The exported one-line form was not valid Python; reformatted,
    behavior unchanged.
    """
    tmp = 0
    if beta <= 2e+39:
        tmp = 0.5
    else:
        tmp = 1.0
    return tmp
function code(alpha, beta, i) tmp = 0.0 if (beta <= 2e+39) tmp = 0.5; else tmp = 1.0; end return tmp end
function tmp_2 = code(alpha, beta, i) tmp = 0.0; if (beta <= 2e+39) tmp = 0.5; else tmp = 1.0; end tmp_2 = tmp; end
code[alpha_, beta_, i_] := If[LessEqual[beta, 2e+39], 0.5, 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 2 \cdot 10^{+39}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if beta < 1.99999999999999988e39Initial program 74.9%
associate-/l/74.7%
*-commutative74.7%
times-frac79.9%
associate-+l+79.9%
fma-def79.9%
+-commutative79.9%
fma-def79.9%
Simplified79.9%
Taylor expanded in i around inf 76.0%
if 1.99999999999999988e39 < beta Initial program 43.9%
associate-/l/42.5%
*-commutative42.5%
times-frac89.1%
associate-+l+89.1%
fma-def89.1%
+-commutative89.1%
fma-def89.1%
Simplified89.1%
Taylor expanded in beta around inf 72.5%
Final simplification75.0%
(FPCore (alpha beta i) :precision binary64 0.5)
/* Herbie's most aggressive alternative: the constant 0.5 (all arguments
 * ignored). */
double code(double alpha, double beta, double i) {
    (void)alpha;
    (void)beta;
    (void)i;
    return 0.5;
}
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = 0.5d0
end function
public static double code(double alpha, double beta, double i) {
return 0.5;
}
def code(alpha, beta, i):
    """Constant alternative: always 0.5 (arguments ignored)."""
    return 0.5
function code(alpha, beta, i) return 0.5 end
function tmp = code(alpha, beta, i) tmp = 0.5; end
code[alpha_, beta_, i_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 66.1%
associate-/l/ 65.5%
*-commutative 65.5%
times-frac 82.5%
associate-+l+ 82.5%
fma-def 82.5%
+-commutative 82.5%
fma-def 82.5%
Simplified 82.5%
Taylor expanded in i around inf 63.4%
Final simplification 63.4%
herbie shell --seed 2023201
(FPCore (alpha beta i)
:name "Octave 3.8, jcobi/2"
:precision binary64
:pre (and (and (> alpha -1.0) (> beta -1.0)) (> i 0.0))
(/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) (+ (+ alpha beta) (* 2.0 i))) (+ (+ (+ alpha beta) (* 2.0 i)) 2.0)) 1.0) 2.0))