
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
/* Direct binary64 evaluation of ((beta - alpha)/((alpha + beta) + 2) + 1)/2. */
double code(double alpha, double beta) {
    double num = beta - alpha;
    double den = (alpha + beta) + 2.0;
    /* Same operation order as the FPCore program: divide, add 1, halve. */
    return (num / den + 1.0) / 2.0;
}
! Computes ((beta - alpha)/((alpha + beta) + 2) + 1)/2 in double precision.
! Direct translation of the FPCore program above; d0 suffixes keep the
! literals in real(8) so no single-precision rounding is introduced.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = (((beta - alpha) / ((alpha + beta) + 2.0d0)) + 1.0d0) / 2.0d0
end function
/** Evaluates ((beta - alpha)/((alpha + beta) + 2) + 1)/2 in double precision. */
public static double code(double alpha, double beta) {
    double ratio = (beta - alpha) / ((alpha + beta) + 2.0);
    return (ratio + 1.0) / 2.0;
}
def code(alpha, beta):
    """Return ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2."""
    ratio = (beta - alpha) / ((alpha + beta) + 2.0)
    return (ratio + 1.0) / 2.0
function code(alpha, beta) return Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) + 1.0) / 2.0) end
function tmp = code(alpha, beta) tmp = (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0; end
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
double code(double alpha, double beta) {
return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = (((beta - alpha) / ((alpha + beta) + 2.0d0)) + 1.0d0) / 2.0d0
end function
public static double code(double alpha, double beta) {
return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0;
}
def code(alpha, beta): return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0
function code(alpha, beta) return Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) + 1.0) / 2.0) end
function tmp = code(alpha, beta) tmp = (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0; end
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ 2.0 (+ alpha beta)))
(t_1 (+ (/ beta t_0) 1.0))
(t_2 (+ (+ alpha beta) 2.0))
(t_3 (/ alpha t_2)))
(if (<= (/ (- beta alpha) t_2) -0.999999995)
(/ (+ 1.0 beta) alpha)
(/ (/ (- (* t_1 t_1) (* t_3 t_3)) (+ t_1 (/ alpha t_0))) 2.0))))
/* Herbie alternative 1: the else branch rewrites the quotient as a
   difference of squares (u*u - v*v) over (u + alpha/s); when the raw
   quotient (beta - alpha)/(alpha + beta + 2) is <= -0.999999995 (near -1)
   it falls back to (1 + beta)/alpha instead. */
double code(double alpha, double beta) {
    double s = 2.0 + (alpha + beta);      /* same sum, left-associated */
    double u = beta / s + 1.0;
    double d = (alpha + beta) + 2.0;      /* same sum, right-associated */
    double v = alpha / d;
    if ((beta - alpha) / d <= -0.999999995) {
        return (1.0 + beta) / alpha;
    }
    return ((u * u - v * v) / (u + alpha / s)) / 2.0;
}
! Herbie alternative 1: the else branch rewrites the quotient as a
! difference of squares (t_1*t_1 - t_3*t_3) over (t_1 + alpha/t_0);
! when (beta - alpha)/(alpha + beta + 2) is <= -0.999999995 (near -1)
! it falls back to (1 + beta)/alpha instead.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
real(8) :: t_3
real(8) :: tmp
! t_0 and t_2 are the same sum evaluated in two association orders,
! matching the generated FPCore exactly.
t_0 = 2.0d0 + (alpha + beta)
t_1 = (beta / t_0) + 1.0d0
t_2 = (alpha + beta) + 2.0d0
t_3 = alpha / t_2
if (((beta - alpha) / t_2) <= (-0.999999995d0)) then
tmp = (1.0d0 + beta) / alpha
else
tmp = (((t_1 * t_1) - (t_3 * t_3)) / (t_1 + (alpha / t_0))) / 2.0d0
end if
code = tmp
end function
/**
 * Herbie alternative 1: difference-of-squares rewrite of the quotient,
 * with a fallback branch (1 + beta)/alpha when the raw quotient
 * (beta - alpha)/(alpha + beta + 2) is at or below -0.999999995.
 */
public static double code(double alpha, double beta) {
    double s = 2.0 + (alpha + beta);
    double u = beta / s + 1.0;
    double d = (alpha + beta) + 2.0;
    double v = alpha / d;
    if ((beta - alpha) / d <= -0.999999995) {
        return (1.0 + beta) / alpha;
    }
    return ((u * u - v * v) / (u + alpha / s)) / 2.0;
}
def code(alpha, beta):
    """Herbie alternative 1: difference-of-squares rewrite with a guard branch.

    Falls back to (1 + beta) / alpha when (beta - alpha)/(alpha + beta + 2)
    is <= -0.999999995 (near -1); otherwise evaluates
    ((t_1^2 - t_3^2) / (t_1 + alpha/t_0)) / 2.
    """
    # Original line collapsed every statement (including if/else suites)
    # onto one physical line, which is a Python SyntaxError; layout restored.
    t_0 = 2.0 + (alpha + beta)
    t_1 = (beta / t_0) + 1.0
    t_2 = (alpha + beta) + 2.0
    t_3 = alpha / t_2
    if ((beta - alpha) / t_2) <= -0.999999995:
        tmp = (1.0 + beta) / alpha
    else:
        tmp = (((t_1 * t_1) - (t_3 * t_3)) / (t_1 + (alpha / t_0))) / 2.0
    return tmp
# Herbie alternative 1: difference-of-squares rewrite with a guard branch.
# The generated one-liner juxtaposed statements without `;` or newlines,
# which Julia rejects; statement layout restored, operations unchanged.
function code(alpha, beta)
    t_0 = Float64(2.0 + Float64(alpha + beta))
    t_1 = Float64(Float64(beta / t_0) + 1.0)
    t_2 = Float64(Float64(alpha + beta) + 2.0)
    t_3 = Float64(alpha / t_2)
    tmp = 0.0
    if (Float64(Float64(beta - alpha) / t_2) <= -0.999999995)
        tmp = Float64(Float64(1.0 + beta) / alpha)
    else
        tmp = Float64(Float64(Float64(Float64(t_1 * t_1) - Float64(t_3 * t_3)) / Float64(t_1 + Float64(alpha / t_0))) / 2.0)
    end
    return tmp
end
function tmp_2 = code(alpha, beta) t_0 = 2.0 + (alpha + beta); t_1 = (beta / t_0) + 1.0; t_2 = (alpha + beta) + 2.0; t_3 = alpha / t_2; tmp = 0.0; if (((beta - alpha) / t_2) <= -0.999999995) tmp = (1.0 + beta) / alpha; else tmp = (((t_1 * t_1) - (t_3 * t_3)) / (t_1 + (alpha / t_0))) / 2.0; end tmp_2 = tmp; end
code[alpha_, beta_] := Block[{t$95$0 = N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(beta / t$95$0), $MachinePrecision] + 1.0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]}, Block[{t$95$3 = N[(alpha / t$95$2), $MachinePrecision]}, If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / t$95$2), $MachinePrecision], -0.999999995], N[(N[(1.0 + beta), $MachinePrecision] / alpha), $MachinePrecision], N[(N[(N[(N[(t$95$1 * t$95$1), $MachinePrecision] - N[(t$95$3 * t$95$3), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 + N[(alpha / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 2 + \left(\alpha + \beta\right)\\
t_1 := \frac{\beta}{t\_0} + 1\\
t_2 := \left(\alpha + \beta\right) + 2\\
t_3 := \frac{\alpha}{t\_2}\\
\mathbf{if}\;\frac{\beta - \alpha}{t\_2} \leq -0.999999995:\\
\;\;\;\;\frac{1 + \beta}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{t\_1 \cdot t\_1 - t\_3 \cdot t\_3}{t\_1 + \frac{\alpha}{t\_0}}}{2}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.99999999500000003Initial program 6.8%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6499.4
Applied rewrites99.4%
if -0.99999999500000003 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 99.5%
lift-+.f64N/A
+-commutativeN/A
lift-/.f64N/A
lift--.f64N/A
div-subN/A
associate-+r-N/A
flip--N/A
lower-/.f64N/A
Applied rewrites99.5%
lift-pow.f64N/A
unpow2N/A
lower-*.f6499.5
lift-+.f64N/A
+-commutativeN/A
lower-+.f6499.5
lift-+.f64N/A
+-commutativeN/A
lower-+.f6499.5
Applied rewrites99.5%
(FPCore (alpha beta) :precision binary64 (if (<= (/ (- beta alpha) (+ (+ alpha beta) 2.0)) -0.999999995) (/ (+ 1.0 beta) alpha) (/ (fma (- beta alpha) (pow (+ 2.0 (+ alpha beta)) -1.0) 1.0) 2.0)))
double code(double alpha, double beta) {
double tmp;
if (((beta - alpha) / ((alpha + beta) + 2.0)) <= -0.999999995) {
tmp = (1.0 + beta) / alpha;
} else {
tmp = fma((beta - alpha), pow((2.0 + (alpha + beta)), -1.0), 1.0) / 2.0;
}
return tmp;
}
function code(alpha, beta) tmp = 0.0 if (Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) <= -0.999999995) tmp = Float64(Float64(1.0 + beta) / alpha); else tmp = Float64(fma(Float64(beta - alpha), (Float64(2.0 + Float64(alpha + beta)) ^ -1.0), 1.0) / 2.0); end return tmp end
code[alpha_, beta_] := If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], -0.999999995], N[(N[(1.0 + beta), $MachinePrecision] / alpha), $MachinePrecision], N[(N[(N[(beta - alpha), $MachinePrecision] * N[Power[N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} \leq -0.999999995:\\
\;\;\;\;\frac{1 + \beta}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\beta - \alpha, {\left(2 + \left(\alpha + \beta\right)\right)}^{-1}, 1\right)}{2}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.99999999500000003Initial program 6.8%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6499.4
Applied rewrites99.4%
if -0.99999999500000003 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 99.5%
lift-+.f64N/A
lift-/.f64N/A
*-rgt-identityN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f6499.5
lift-+.f64N/A
+-commutativeN/A
lower-+.f6499.5
Applied rewrites99.5%
Final simplification99.5%
(FPCore (alpha beta) :precision binary64 (if (<= (/ (- beta alpha) (+ (+ alpha beta) 2.0)) -0.5) (pow alpha -1.0) 1.0))
/* Herbie alternative 3: collapses the expression to alpha^-1 when the
   quotient (beta - alpha)/(alpha + beta + 2) is <= -0.5, else 1. */
double code(double alpha, double beta) {
    double q = (beta - alpha) / ((alpha + beta) + 2.0);
    return (q <= -0.5) ? pow(alpha, -1.0) : 1.0;
}
! Herbie alternative 3: collapses the expression to alpha**(-1) when
! (beta - alpha)/(alpha + beta + 2) is <= -0.5, and to 1 otherwise.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (((beta - alpha) / ((alpha + beta) + 2.0d0)) <= (-0.5d0)) then
tmp = alpha ** (-1.0d0)
else
tmp = 1.0d0
end if
code = tmp
end function
/**
 * Herbie alternative 3: returns Math.pow(alpha, -1) when the quotient
 * (beta - alpha)/(alpha + beta + 2) is at or below -0.5, else 1.
 */
public static double code(double alpha, double beta) {
    double q = (beta - alpha) / ((alpha + beta) + 2.0);
    return q <= -0.5 ? Math.pow(alpha, -1.0) : 1.0;
}
def code(alpha, beta):
    """Herbie alternative 3: alpha**-1 below the -0.5 cutoff, else 1.0.

    Returns math.pow(alpha, -1.0) when (beta - alpha)/(alpha + beta + 2)
    is <= -0.5, and 1.0 otherwise.
    """
    # Original line collapsed the if/else suites onto one physical line,
    # which is a Python SyntaxError; layout restored, logic unchanged.
    if ((beta - alpha) / ((alpha + beta) + 2.0)) <= -0.5:
        tmp = math.pow(alpha, -1.0)
    else:
        tmp = 1.0
    return tmp
function code(alpha, beta) tmp = 0.0 if (Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) <= -0.5) tmp = alpha ^ -1.0; else tmp = 1.0; end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (((beta - alpha) / ((alpha + beta) + 2.0)) <= -0.5) tmp = alpha ^ -1.0; else tmp = 1.0; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], -0.5], N[Power[alpha, -1.0], $MachinePrecision], 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} \leq -0.5:\\
\;\;\;\;{\alpha}^{-1}\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.5Initial program 11.3%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6495.8
Applied rewrites95.8%
Taylor expanded in beta around 0
Applied rewrites75.6%
if -0.5 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 100.0%
Taylor expanded in beta around inf
Applied rewrites48.5%
Final simplification57.2%
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (/ (- beta alpha) (+ (+ alpha beta) 2.0))))
(if (<= t_0 -0.999999995)
(/ (+ 1.0 beta) alpha)
(if (<= t_0 0.001) (fma -0.5 (/ alpha (+ 2.0 alpha)) 0.5) 1.0))))
double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((alpha + beta) + 2.0);
double tmp;
if (t_0 <= -0.999999995) {
tmp = (1.0 + beta) / alpha;
} else if (t_0 <= 0.001) {
tmp = fma(-0.5, (alpha / (2.0 + alpha)), 0.5);
} else {
tmp = 1.0;
}
return tmp;
}
function code(alpha, beta) t_0 = Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) tmp = 0.0 if (t_0 <= -0.999999995) tmp = Float64(Float64(1.0 + beta) / alpha); elseif (t_0 <= 0.001) tmp = fma(-0.5, Float64(alpha / Float64(2.0 + alpha)), 0.5); else tmp = 1.0; end return tmp end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -0.999999995], N[(N[(1.0 + beta), $MachinePrecision] / alpha), $MachinePrecision], If[LessEqual[t$95$0, 0.001], N[(-0.5 * N[(alpha / N[(2.0 + alpha), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision], 1.0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2}\\
\mathbf{if}\;t\_0 \leq -0.999999995:\\
\;\;\;\;\frac{1 + \beta}{\alpha}\\
\mathbf{elif}\;t\_0 \leq 0.001:\\
\;\;\;\;\mathsf{fma}\left(-0.5, \frac{\alpha}{2 + \alpha}, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.99999999500000003Initial program 6.8%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6499.4
Applied rewrites99.4%
if -0.99999999500000003 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < 1e-3Initial program 99.2%
lift-/.f64N/A
lift-+.f64N/A
div-addN/A
*-rgt-identityN/A
associate-/l*N/A
lower-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
metadata-evalN/A
metadata-eval99.2
Applied rewrites99.2%
Taylor expanded in beta around 0
+-commutativeN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-+.f6493.3
Applied rewrites93.3%
if 1e-3 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 100.0%
Taylor expanded in beta around inf
Applied rewrites98.3%
(FPCore (alpha beta) :precision binary64 (if (<= (/ (- beta alpha) (+ (+ alpha beta) 2.0)) -0.999999995) (/ (+ 1.0 beta) alpha) (fma (/ (- beta alpha) (+ 2.0 (+ alpha beta))) 0.5 0.5)))
double code(double alpha, double beta) {
double tmp;
if (((beta - alpha) / ((alpha + beta) + 2.0)) <= -0.999999995) {
tmp = (1.0 + beta) / alpha;
} else {
tmp = fma(((beta - alpha) / (2.0 + (alpha + beta))), 0.5, 0.5);
}
return tmp;
}
function code(alpha, beta) tmp = 0.0 if (Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) <= -0.999999995) tmp = Float64(Float64(1.0 + beta) / alpha); else tmp = fma(Float64(Float64(beta - alpha) / Float64(2.0 + Float64(alpha + beta))), 0.5, 0.5); end return tmp end
code[alpha_, beta_] := If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], -0.999999995], N[(N[(1.0 + beta), $MachinePrecision] / alpha), $MachinePrecision], N[(N[(N[(beta - alpha), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5 + 0.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} \leq -0.999999995:\\
\;\;\;\;\frac{1 + \beta}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{\beta - \alpha}{2 + \left(\alpha + \beta\right)}, 0.5, 0.5\right)\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.99999999500000003Initial program 6.8%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6499.4
Applied rewrites99.4%
if -0.99999999500000003 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 99.5%
lift-/.f64N/A
lift-+.f64N/A
div-addN/A
*-rgt-identityN/A
associate-/l*N/A
lower-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
metadata-evalN/A
metadata-eval99.5
Applied rewrites99.5%
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) 2.0)))
(if (<= (/ (- beta alpha) t_0) -0.999999995)
(/ (+ 1.0 beta) alpha)
(fma (- beta alpha) (/ 0.5 t_0) 0.5))))
double code(double alpha, double beta) {
double t_0 = (alpha + beta) + 2.0;
double tmp;
if (((beta - alpha) / t_0) <= -0.999999995) {
tmp = (1.0 + beta) / alpha;
} else {
tmp = fma((beta - alpha), (0.5 / t_0), 0.5);
}
return tmp;
}
function code(alpha, beta) t_0 = Float64(Float64(alpha + beta) + 2.0) tmp = 0.0 if (Float64(Float64(beta - alpha) / t_0) <= -0.999999995) tmp = Float64(Float64(1.0 + beta) / alpha); else tmp = fma(Float64(beta - alpha), Float64(0.5 / t_0), 0.5); end return tmp end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / t$95$0), $MachinePrecision], -0.999999995], N[(N[(1.0 + beta), $MachinePrecision] / alpha), $MachinePrecision], N[(N[(beta - alpha), $MachinePrecision] * N[(0.5 / t$95$0), $MachinePrecision] + 0.5), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2\\
\mathbf{if}\;\frac{\beta - \alpha}{t\_0} \leq -0.999999995:\\
\;\;\;\;\frac{1 + \beta}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\beta - \alpha, \frac{0.5}{t\_0}, 0.5\right)\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.99999999500000003Initial program 6.8%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6499.4
Applied rewrites99.4%
if -0.99999999500000003 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 99.5%
lift-/.f64N/A
lift-+.f64N/A
div-addN/A
*-rgt-identityN/A
associate-/l*N/A
lower-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
metadata-evalN/A
metadata-eval99.5
Applied rewrites99.5%
lift-fma.f64N/A
lift-/.f64N/A
associate-*l/N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f6499.5
lift-+.f64N/A
+-commutativeN/A
lower-+.f6499.5
Applied rewrites99.5%
(FPCore (alpha beta) :precision binary64 (if (<= (/ (- beta alpha) (+ (+ alpha beta) 2.0)) -0.5) (/ (+ 1.0 beta) alpha) (fma (- beta alpha) (/ 0.5 (+ 2.0 beta)) 0.5)))
double code(double alpha, double beta) {
double tmp;
if (((beta - alpha) / ((alpha + beta) + 2.0)) <= -0.5) {
tmp = (1.0 + beta) / alpha;
} else {
tmp = fma((beta - alpha), (0.5 / (2.0 + beta)), 0.5);
}
return tmp;
}
function code(alpha, beta) tmp = 0.0 if (Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) <= -0.5) tmp = Float64(Float64(1.0 + beta) / alpha); else tmp = fma(Float64(beta - alpha), Float64(0.5 / Float64(2.0 + beta)), 0.5); end return tmp end
code[alpha_, beta_] := If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(1.0 + beta), $MachinePrecision] / alpha), $MachinePrecision], N[(N[(beta - alpha), $MachinePrecision] * N[(0.5 / N[(2.0 + beta), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} \leq -0.5:\\
\;\;\;\;\frac{1 + \beta}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\beta - \alpha, \frac{0.5}{2 + \beta}, 0.5\right)\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.5Initial program 11.3%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6495.8
Applied rewrites95.8%
if -0.5 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 100.0%
lift-/.f64N/A
lift-+.f64N/A
div-addN/A
*-rgt-identityN/A
associate-/l*N/A
lower-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
metadata-evalN/A
metadata-eval100.0
Applied rewrites100.0%
lift-fma.f64N/A
lift-/.f64N/A
associate-*l/N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64100.0
lift-+.f64N/A
+-commutativeN/A
lower-+.f64100.0
Applied rewrites100.0%
Taylor expanded in alpha around 0
lower-+.f6499.6
Applied rewrites99.6%
(FPCore (alpha beta) :precision binary64 (if (<= (/ (- beta alpha) (+ (+ alpha beta) 2.0)) -0.5) (/ (+ 1.0 beta) alpha) (fma (/ beta (+ 2.0 beta)) 0.5 0.5)))
double code(double alpha, double beta) {
double tmp;
if (((beta - alpha) / ((alpha + beta) + 2.0)) <= -0.5) {
tmp = (1.0 + beta) / alpha;
} else {
tmp = fma((beta / (2.0 + beta)), 0.5, 0.5);
}
return tmp;
}
function code(alpha, beta) tmp = 0.0 if (Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) <= -0.5) tmp = Float64(Float64(1.0 + beta) / alpha); else tmp = fma(Float64(beta / Float64(2.0 + beta)), 0.5, 0.5); end return tmp end
code[alpha_, beta_] := If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(1.0 + beta), $MachinePrecision] / alpha), $MachinePrecision], N[(N[(beta / N[(2.0 + beta), $MachinePrecision]), $MachinePrecision] * 0.5 + 0.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} \leq -0.5:\\
\;\;\;\;\frac{1 + \beta}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{\beta}{2 + \beta}, 0.5, 0.5\right)\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.5Initial program 11.3%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6495.8
Applied rewrites95.8%
if -0.5 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 100.0%
Taylor expanded in alpha around 0
+-commutativeN/A
distribute-rgt-inN/A
metadata-evalN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-+.f6498.7
Applied rewrites98.7%
(FPCore (alpha beta) :precision binary64 (if (<= (/ (- beta alpha) (+ (+ alpha beta) 2.0)) -0.5) (/ (+ 1.0 beta) alpha) 1.0))
/* Herbie alternative 9: (1 + beta)/alpha below the -0.5 cutoff, else 1. */
double code(double alpha, double beta) {
    double q = (beta - alpha) / ((alpha + beta) + 2.0);
    return (q <= -0.5) ? (1.0 + beta) / alpha : 1.0;
}
! Herbie alternative 9: returns (1 + beta)/alpha when
! (beta - alpha)/(alpha + beta + 2) is <= -0.5, and 1 otherwise.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (((beta - alpha) / ((alpha + beta) + 2.0d0)) <= (-0.5d0)) then
tmp = (1.0d0 + beta) / alpha
else
tmp = 1.0d0
end if
code = tmp
end function
/**
 * Herbie alternative 9: (1 + beta)/alpha when the quotient
 * (beta - alpha)/(alpha + beta + 2) is at or below -0.5, else 1.0.
 */
public static double code(double alpha, double beta) {
    double q = (beta - alpha) / ((alpha + beta) + 2.0);
    return q <= -0.5 ? (1.0 + beta) / alpha : 1.0;
}
def code(alpha, beta):
    """Herbie alternative 9: (1 + beta)/alpha below the -0.5 cutoff, else 1.0."""
    # Original line collapsed the if/else suites onto one physical line,
    # which is a Python SyntaxError; layout restored, logic unchanged.
    if ((beta - alpha) / ((alpha + beta) + 2.0)) <= -0.5:
        tmp = (1.0 + beta) / alpha
    else:
        tmp = 1.0
    return tmp
function code(alpha, beta) tmp = 0.0 if (Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) <= -0.5) tmp = Float64(Float64(1.0 + beta) / alpha); else tmp = 1.0; end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (((beta - alpha) / ((alpha + beta) + 2.0)) <= -0.5) tmp = (1.0 + beta) / alpha; else tmp = 1.0; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], -0.5], N[(N[(1.0 + beta), $MachinePrecision] / alpha), $MachinePrecision], 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} \leq -0.5:\\
\;\;\;\;\frac{1 + \beta}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.5Initial program 11.3%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6495.8
Applied rewrites95.8%
if -0.5 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 100.0%
Taylor expanded in beta around inf
Applied rewrites48.5%
(FPCore (alpha beta) :precision binary64 1.0)
/* Herbie alternative 10: constant approximation of the original
   expression; always returns 1.0, ignoring both arguments. */
double code(double alpha, double beta) {
return 1.0;
}
! Herbie alternative 10: constant approximation; always returns 1,
! ignoring both arguments.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = 1.0d0
end function
/** Herbie alternative 10: constant approximation; always returns 1.0. */
public static double code(double alpha, double beta) {
return 1.0;
}
def code(alpha, beta):
    """Herbie alternative 10: constant approximation; ignores both inputs."""
    return 1.0
function code(alpha, beta) return 1.0 end
function tmp = code(alpha, beta) tmp = 1.0; end
code[alpha_, beta_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 71.6%
Taylor expanded in beta around inf
Applied rewrites34.7%
herbie shell --seed 2024327
(FPCore (alpha beta)
:name "Octave 3.8, jcobi/1"
:precision binary64
:pre (and (> alpha -1.0) (> beta -1.0))
(/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))