
(FPCore (alpha beta i) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))) (/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ t_0 2.0)) 1.0) 2.0)))
/* Initial program: evaluates ((s*(beta-alpha)/t)/(t+2) + 1) / 2, where
 * s = alpha + beta and t = s + 2*i, all in binary64.  Operation order is
 * identical to the original rendering, so results match bit-for-bit. */
double code(double alpha, double beta, double i) {
    const double sum = alpha + beta;
    const double t = sum + 2.0 * i;
    const double core = sum * (beta - alpha) / t / (t + 2.0);
    return (core + 1.0) / 2.0;
}
! Initial program: code = ((s*(beta-alpha)/d)/(d+2) + 1) / 2,
! with s = alpha + beta and d = s + 2*i (double precision throughout).
! Same operation order as the original rendering.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: ab_sum, denom
ab_sum = alpha + beta
denom = ab_sum + (2.0d0 * i)
code = (((ab_sum * (beta - alpha)) / denom) / (denom + 2.0d0) + 1.0d0) / 2.0d0
end function
/**
 * Initial program: returns ((s*(beta-alpha)/t)/(t+2) + 1) / 2, where
 * s = alpha + beta and t = s + 2*i.  Operation order matches the
 * original rendering exactly, so results are bit-identical.
 */
public static double code(double alpha, double beta, double i) {
    final double sum = alpha + beta;
    final double t = sum + 2.0 * i;
    final double core = sum * (beta - alpha) / t / (t + 2.0);
    return (core + 1.0) / 2.0;
}
def code(alpha, beta, i):
    """Initial program: ((((alpha+beta)*(beta-alpha))/t_0)/(t_0+2) + 1) / 2.

    t_0 = (alpha + beta) + 2*i.  Reformatted onto multiple lines: the
    original one-line rendering (``def ...: t_0 = ... return ...``) fuses
    two statements without a separator and is a SyntaxError.
    """
    t_0 = (alpha + beta) + (2.0 * i)
    return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0
# Initial program: (((alpha+beta)*(beta-alpha)/t_0)/(t_0+2) + 1) / 2 with
# t_0 = (alpha+beta) + 2i; Float64(...) forces binary64 rounding per step.
# Reformatted: the one-line rendering lacked statement separators and
# does not parse in Julia.
function code(alpha, beta, i)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(t_0 + 2.0)) + 1.0) / 2.0)
end
function tmp = code(alpha, beta, i)
% code  Initial program: (((a+b)*(b-a)/t0)/(t0+2) + 1) / 2, t0 = a+b+2*i.
% Reformatted onto separate lines; the original one-line rendering fused
% the statements onto the "function" line, which MATLAB does not parse.
    t_0 = (alpha + beta) + (2.0 * i);
    tmp = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
end
(* Initial program: (((a+b)(b-a)/t)/(t+2) + 1)/2 with t = (a+b) + 2 i;
   every N[..., $MachinePrecision] forces machine-precision rounding per step. *)
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\frac{\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_0}}{t\_0 + 2} + 1}{2}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta i) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))) (/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) t_0) (+ t_0 2.0)) 1.0) 2.0)))
/* Duplicate rendering of the initial program (first alternative in the
 * report): ((t0_num/t_0)/(t_0+2) + 1)/2 with t_0 = (alpha+beta) + 2*i. */
double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
}
! Duplicate rendering of the initial program:
! code = (((a+b)*(b-a)/t_0)/(t_0+2) + 1)/2, t_0 = (a+b) + 2*i.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
t_0 = (alpha + beta) + (2.0d0 * i)
code = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0d0)) + 1.0d0) / 2.0d0
end function
// Duplicate rendering of the initial program:
// (((a+b)*(b-a)/t_0)/(t_0+2) + 1)/2 with t_0 = (a+b) + 2*i.
public static double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
}
def code(alpha, beta, i):
    """Initial program (duplicate rendering): ((((alpha+beta)*(beta-alpha))/t_0)/(t_0+2) + 1) / 2.

    t_0 = (alpha + beta) + 2*i.  Reformatted onto multiple lines: the
    original one-line rendering fuses the assignment and the return
    statement without a separator and is a SyntaxError.
    """
    t_0 = (alpha + beta) + (2.0 * i)
    return (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0
# Initial program (duplicate rendering); Float64(...) forces binary64
# rounding at every step.  Reformatted: the one-line form lacked
# statement separators and does not parse in Julia.
function code(alpha, beta, i)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) * Float64(beta - alpha)) / t_0) / Float64(t_0 + 2.0)) + 1.0) / 2.0)
end
function tmp = code(alpha, beta, i)
% code  Initial program (duplicate rendering):
% (((a+b)*(b-a)/t0)/(t0+2) + 1)/2 with t0 = a+b+2*i.  Reformatted onto
% separate lines; the fused one-line form does not parse in MATLAB.
    t_0 = (alpha + beta) + (2.0 * i);
    tmp = (((((alpha + beta) * (beta - alpha)) / t_0) / (t_0 + 2.0)) + 1.0) / 2.0;
end
(* Initial program (duplicate rendering): (((a+b)(b-a)/t)/(t+2) + 1)/2,
   t = (a+b) + 2 i, rounded to machine precision at each step. *)
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] * N[(beta - alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
\frac{\frac{\frac{\left(\alpha + \beta\right) \cdot \left(\beta - \alpha\right)}{t\_0}}{t\_0 + 2} + 1}{2}
\end{array}
\end{array}
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (fma 2.0 i (+ beta alpha)))
(t_1 (+ (* i 2.0) (+ beta alpha)))
(t_2 (+ (fma 4.0 i (* 2.0 beta)) 2.0)))
(if (<= (/ (/ (* (- beta alpha) (+ beta alpha)) t_1) (+ t_1 2.0)) -0.98)
(/
(/
(-
(/ (* beta beta) alpha)
(-
(fma
(- -2.0 (fma i 2.0 beta))
(/ (fma i 2.0 beta) alpha)
(* (/ t_2 alpha) t_2))
t_2))
alpha)
2.0)
(/
(- 1.0 (/ (+ beta alpha) (* (/ t_0 (- alpha beta)) (+ t_0 2.0))))
2.0))))
/* Herbie-generated alternative of the initial program.
 * t_0 and t_1 are two evaluations of (alpha+beta) + 2*i (t_0 via fma,
 * which fuses the multiply-add into a single rounding); t_2 is
 * fma(4, i, 2*beta) + 2.  The branch condition recomputes the initial
 * program's core ratio and switches representations near -1. */
double code(double alpha, double beta, double i) {
double t_0 = fma(2.0, i, (beta + alpha));
double t_1 = (i * 2.0) + (beta + alpha);
double t_2 = fma(4.0, i, (2.0 * beta)) + 2.0;
double tmp;
/* NOTE(review): per the report log this arm derives from a Taylor
 * expansion in alpha around inf — confirm against the derivation. */
if (((((beta - alpha) * (beta + alpha)) / t_1) / (t_1 + 2.0)) <= -0.98) {
tmp = ((((beta * beta) / alpha) - (fma((-2.0 - fma(i, 2.0, beta)), (fma(i, 2.0, beta) / alpha), ((t_2 / alpha) * t_2)) - t_2)) / alpha) / 2.0;
} else {
/* Algebraically regrouped form of the initial program. */
tmp = (1.0 - ((beta + alpha) / ((t_0 / (alpha - beta)) * (t_0 + 2.0)))) / 2.0;
}
return tmp;
}
# Julia rendering of the two-branch Herbie alternative above; Float64(...)
# wrappers force binary64 rounding at every intermediate step.
function code(alpha, beta, i) t_0 = fma(2.0, i, Float64(beta + alpha)) t_1 = Float64(Float64(i * 2.0) + Float64(beta + alpha)) t_2 = Float64(fma(4.0, i, Float64(2.0 * beta)) + 2.0) tmp = 0.0 if (Float64(Float64(Float64(Float64(beta - alpha) * Float64(beta + alpha)) / t_1) / Float64(t_1 + 2.0)) <= -0.98) tmp = Float64(Float64(Float64(Float64(Float64(beta * beta) / alpha) - Float64(fma(Float64(-2.0 - fma(i, 2.0, beta)), Float64(fma(i, 2.0, beta) / alpha), Float64(Float64(t_2 / alpha) * t_2)) - t_2)) / alpha) / 2.0); else tmp = Float64(Float64(1.0 - Float64(Float64(beta + alpha) / Float64(Float64(t_0 / Float64(alpha - beta)) * Float64(t_0 + 2.0)))) / 2.0); end return tmp end
(* Mathematica rendering of the two-branch Herbie alternative above;
   N[..., $MachinePrecision] forces machine-precision rounding per step. *)
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(2.0 * i + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(i * 2.0), $MachinePrecision] + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(4.0 * i + N[(2.0 * beta), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[N[(N[(N[(N[(beta - alpha), $MachinePrecision] * N[(beta + alpha), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(t$95$1 + 2.0), $MachinePrecision]), $MachinePrecision], -0.98], N[(N[(N[(N[(N[(beta * beta), $MachinePrecision] / alpha), $MachinePrecision] - N[(N[(N[(-2.0 - N[(i * 2.0 + beta), $MachinePrecision]), $MachinePrecision] * N[(N[(i * 2.0 + beta), $MachinePrecision] / alpha), $MachinePrecision] + N[(N[(t$95$2 / alpha), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] - t$95$2), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 - N[(N[(beta + alpha), $MachinePrecision] / N[(N[(t$95$0 / N[(alpha - beta), $MachinePrecision]), $MachinePrecision] * N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(2, i, \beta + \alpha\right)\\
t_1 := i \cdot 2 + \left(\beta + \alpha\right)\\
t_2 := \mathsf{fma}\left(4, i, 2 \cdot \beta\right) + 2\\
\mathbf{if}\;\frac{\frac{\left(\beta - \alpha\right) \cdot \left(\beta + \alpha\right)}{t\_1}}{t\_1 + 2} \leq -0.98:\\
\;\;\;\;\frac{\frac{\frac{\beta \cdot \beta}{\alpha} - \left(\mathsf{fma}\left(-2 - \mathsf{fma}\left(i, 2, \beta\right), \frac{\mathsf{fma}\left(i, 2, \beta\right)}{\alpha}, \frac{t\_2}{\alpha} \cdot t\_2\right) - t\_2\right)}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \frac{\beta + \alpha}{\frac{t\_0}{\alpha - \beta} \cdot \left(t\_0 + 2\right)}}{2}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) < -0.97999999999999998
Initial program 3.7%
Taylor expanded in alpha around inf
Applied rewrites 91.1%
if -0.97999999999999998 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) Initial program 79.3%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
associate-/l*N/A
lower-*.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-/.f64N/A
Applied rewrites 100.0%
lift-*.f64N/A
lift-/.f64N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
div-invN/A
lift-/.f64N/A
clear-numN/A
lower-*.f64N/A
lift-fma.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
Applied rewrites 100.0%
Final simplification 98.2%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (* i 2.0) (+ beta alpha)))
(t_1 (/ (/ (* (- beta alpha) (+ beta alpha)) t_0) (+ t_0 2.0))))
(if (<= t_1 -0.98)
(* 0.5 (/ (+ (fma 4.0 i (* 2.0 beta)) 2.0) alpha))
(if (<= t_1 2e-24)
(fma
(* (* alpha alpha) 0.5)
(/ 1.0 (* (- -2.0 (fma i 2.0 alpha)) (fma i 2.0 alpha)))
0.5)
(fma (/ (- beta alpha) (+ (+ 2.0 beta) alpha)) 0.5 0.5)))))
/* Herbie's final (best) alternative: three regimes selected by t_1, the
 * initial program's core ratio ((beta-alpha)(beta+alpha)/t_0)/(t_0+2)
 * with t_0 = 2*i + beta + alpha.  NOTE(review): per the report log the
 * arms derive from Taylor expansions (alpha around inf; beta around 0;
 * alpha around 0 / beta around inf / i around 0) — confirm against the
 * derivation before relying on these descriptions. */
double code(double alpha, double beta, double i) {
double t_0 = (i * 2.0) + (beta + alpha);
double t_1 = (((beta - alpha) * (beta + alpha)) / t_0) / (t_0 + 2.0);
double tmp;
if (t_1 <= -0.98) {
/* Ratio near -1: simplified expansion; fma fuses 4*i + 2*beta. */
tmp = 0.5 * ((fma(4.0, i, (2.0 * beta)) + 2.0) / alpha);
} else if (t_1 <= 2e-24) {
/* Ratio near 0: correction term around 1/2 built from fma terms. */
tmp = fma(((alpha * alpha) * 0.5), (1.0 / ((-2.0 - fma(i, 2.0, alpha)) * fma(i, 2.0, alpha))), 0.5);
} else {
/* General case: single fma around the rational correction. */
tmp = fma(((beta - alpha) / ((2.0 + beta) + alpha)), 0.5, 0.5);
}
return tmp;
}
# Julia rendering of Herbie's final three-branch alternative above;
# Float64(...) wrappers force binary64 rounding at every step.
function code(alpha, beta, i) t_0 = Float64(Float64(i * 2.0) + Float64(beta + alpha)) t_1 = Float64(Float64(Float64(Float64(beta - alpha) * Float64(beta + alpha)) / t_0) / Float64(t_0 + 2.0)) tmp = 0.0 if (t_1 <= -0.98) tmp = Float64(0.5 * Float64(Float64(fma(4.0, i, Float64(2.0 * beta)) + 2.0) / alpha)); elseif (t_1 <= 2e-24) tmp = fma(Float64(Float64(alpha * alpha) * 0.5), Float64(1.0 / Float64(Float64(-2.0 - fma(i, 2.0, alpha)) * fma(i, 2.0, alpha))), 0.5); else tmp = fma(Float64(Float64(beta - alpha) / Float64(Float64(2.0 + beta) + alpha)), 0.5, 0.5); end return tmp end
(* Mathematica rendering of Herbie's final three-branch alternative;
   N[..., $MachinePrecision] forces machine-precision rounding per step. *)
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(i * 2.0), $MachinePrecision] + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(beta - alpha), $MachinePrecision] * N[(beta + alpha), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -0.98], N[(0.5 * N[(N[(N[(4.0 * i + N[(2.0 * beta), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision] / alpha), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$1, 2e-24], N[(N[(N[(alpha * alpha), $MachinePrecision] * 0.5), $MachinePrecision] * N[(1.0 / N[(N[(-2.0 - N[(i * 2.0 + alpha), $MachinePrecision]), $MachinePrecision] * N[(i * 2.0 + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(2.0 + beta), $MachinePrecision] + alpha), $MachinePrecision]), $MachinePrecision] * 0.5 + 0.5), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := i \cdot 2 + \left(\beta + \alpha\right)\\
t_1 := \frac{\frac{\left(\beta - \alpha\right) \cdot \left(\beta + \alpha\right)}{t\_0}}{t\_0 + 2}\\
\mathbf{if}\;t\_1 \leq -0.98:\\
\;\;\;\;0.5 \cdot \frac{\mathsf{fma}\left(4, i, 2 \cdot \beta\right) + 2}{\alpha}\\
\mathbf{elif}\;t\_1 \leq 2 \cdot 10^{-24}:\\
\;\;\;\;\mathsf{fma}\left(\left(\alpha \cdot \alpha\right) \cdot 0.5, \frac{1}{\left(-2 - \mathsf{fma}\left(i, 2, \alpha\right)\right) \cdot \mathsf{fma}\left(i, 2, \alpha\right)}, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{\beta - \alpha}{\left(2 + \beta\right) + \alpha}, 0.5, 0.5\right)\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) < -0.97999999999999998
Initial program 4.4%
Taylor expanded in alpha around inf
distribute-rgt1-inN/A
metadata-evalN/A
mul0-lftN/A
neg-sub0N/A
mul-1-negN/A
remove-double-negN/A
lower-*.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
+-commutativeN/A
lower-fma.f64N/A
lower-*.f64 90.9
Applied rewrites 90.9%
if -0.97999999999999998 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) < 1.99999999999999985e-24Initial program 100.0%
Taylor expanded in beta around 0
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites 99.6%
Applied rewrites 99.6%
if 1.99999999999999985e-24 < (/.f64 (/.f64 (*.f64 (+.f64 alpha beta) (-.f64 beta alpha)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) #s(literal 2 binary64))) Initial program 39.1%
Taylor expanded in alpha around 0
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
lower-+.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64 36.3
Applied rewrites 36.3%
Taylor expanded in beta around inf
Applied rewrites 85.1%
Taylor expanded in i around 0
*-commutativeN/A
lower-*.f64N/A
associate--l+N/A
div-subN/A
lower-+.f64N/A
lower-/.f64N/A
lower--.f64N/A
associate-+r+N/A
lower-+.f64N/A
+-commutativeN/A
lower-+.f64 91.9
Applied rewrites 91.9%
Applied rewrites 91.9%
Final simplification 95.6%
herbie shell --seed 2024234
(FPCore (alpha beta i)
:name "Octave 3.8, jcobi/2"
:precision binary64
:pre (and (and (> alpha -1.0) (> beta -1.0)) (> i 0.0))
(/ (+ (/ (/ (* (+ alpha beta) (- beta alpha)) (+ (+ alpha beta) (* 2.0 i))) (+ (+ (+ alpha beta) (* 2.0 i)) 2.0)) 1.0) 2.0))