
;; Original expression: ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2, evaluated in binary64.
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
/* Computes ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in binary64,
 * with the same operation order as the FPCore specification. */
double code(double alpha, double beta) {
    double num = beta - alpha;
    double den = (alpha + beta) + 2.0;
    return (num / den + 1.0) / 2.0;
}
! Computes ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in double precision.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = (((beta - alpha) / ((alpha + beta) + 2.0d0)) + 1.0d0) / 2.0d0
end function
// Returns ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2, evaluated in double precision.
public static double code(double alpha, double beta) {
    final double ratio = (beta - alpha) / ((alpha + beta) + 2.0);
    return (ratio + 1.0) / 2.0;
}
def code(alpha, beta):
    """Return ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2."""
    ratio = (beta - alpha) / ((alpha + beta) + 2.0)
    return (ratio + 1.0) / 2.0
# ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2; Float64() conversions mirror the binary64 precision annotation of the FPCore spec.
function code(alpha, beta) return Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) + 1.0) / 2.0) end
% ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in double precision.
function tmp = code(alpha, beta) tmp = (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0; end
(* ((beta - alpha)/((alpha + beta) + 2) + 1)/2, rounding each step with N[..., $MachinePrecision]. *)
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Original expression (repeated for the alternatives table): ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in binary64.
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
/* ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in binary64. */
double code(double alpha, double beta) {
    double ratio = (beta - alpha) / ((alpha + beta) + 2.0);
    return (ratio + 1.0) / 2.0;
}
! Computes ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in double precision.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = (((beta - alpha) / ((alpha + beta) + 2.0d0)) + 1.0d0) / 2.0d0
end function
// ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2, in double precision.
public static double code(double alpha, double beta) {
    double num = beta - alpha;
    double den = (alpha + beta) + 2.0;
    return (num / den + 1.0) / 2.0;
}
def code(alpha, beta):
    """Return ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2."""
    shifted = (beta - alpha) / ((alpha + beta) + 2.0) + 1.0
    return shifted / 2.0
# ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2; Float64() conversions mirror the binary64 precision annotation of the FPCore spec.
function code(alpha, beta) return Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) + 1.0) / 2.0) end
% ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in double precision.
function tmp = code(alpha, beta) tmp = (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0; end
(* ((beta - alpha)/((alpha + beta) + 2) + 1)/2, rounding each step with N[..., $MachinePrecision]. *)
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
;; Herbie alternative: branches on t = (beta - alpha)/((beta + alpha) + 2).
;; For t <= -0.9996 a series expansion in alpha (around infinity, per the log
;; below) is used; otherwise a rewritten fma form of the original expression.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ beta (+ alpha 2.0)))
(t_1 (+ 1.0 (/ beta t_0)))
(t_2 (/ alpha t_0)))
(if (<= (/ (- beta alpha) (+ (+ beta alpha) 2.0)) -0.9996)
(/
(fma 0.5 (* (+ beta 2.0) (/ (- (- -2.0 beta) beta) alpha)) (+ beta 1.0))
alpha)
(/
(/ (fma (/ alpha (- (- -2.0 alpha) beta)) t_2 (* t_1 t_1)) (+ t_1 t_2))
2.0))))
/* Herbie alternative of ((beta - alpha)/((alpha + beta) + 2) + 1)/2.
 * Branches on the value of the original ratio; uses fma (from <math.h>)
 * to reduce rounding error. Exact statement/operation order is significant
 * for the claimed accuracy, so it is preserved verbatim. */
double code(double alpha, double beta) {
/* Shared subterms for the general (else) branch. */
double t_0 = beta + (alpha + 2.0);
double t_1 = 1.0 + (beta / t_0);
double t_2 = alpha / t_0;
double tmp;
/* Near ratio <= -0.9996: expansion-based form (see derivation log). */
if (((beta - alpha) / ((beta + alpha) + 2.0)) <= -0.9996) {
tmp = fma(0.5, ((beta + 2.0) * (((-2.0 - beta) - beta) / alpha)), (beta + 1.0)) / alpha;
} else {
tmp = (fma((alpha / ((-2.0 - alpha) - beta)), t_2, (t_1 * t_1)) / (t_1 + t_2)) / 2.0;
}
return tmp;
}
# Herbie alternative of ((beta - alpha)/((alpha + beta) + 2) + 1)/2; branches on the original ratio and uses fma. Float64() conversions mirror the binary64 annotation.
function code(alpha, beta) t_0 = Float64(beta + Float64(alpha + 2.0)) t_1 = Float64(1.0 + Float64(beta / t_0)) t_2 = Float64(alpha / t_0) tmp = 0.0 if (Float64(Float64(beta - alpha) / Float64(Float64(beta + alpha) + 2.0)) <= -0.9996) tmp = Float64(fma(0.5, Float64(Float64(beta + 2.0) * Float64(Float64(Float64(-2.0 - beta) - beta) / alpha)), Float64(beta + 1.0)) / alpha); else tmp = Float64(Float64(fma(Float64(alpha / Float64(Float64(-2.0 - alpha) - beta)), t_2, Float64(t_1 * t_1)) / Float64(t_1 + t_2)) / 2.0); end return tmp end
(* Herbie alternative of ((beta - alpha)/((alpha + beta) + 2) + 1)/2; branches on the original ratio, rounding each step to $MachinePrecision. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(beta + N[(alpha + 2.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(1.0 + N[(beta / t$95$0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(alpha / t$95$0), $MachinePrecision]}, If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], -0.9996], N[(N[(0.5 * N[(N[(beta + 2.0), $MachinePrecision] * N[(N[(N[(-2.0 - beta), $MachinePrecision] - beta), $MachinePrecision] / alpha), $MachinePrecision]), $MachinePrecision] + N[(beta + 1.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision], N[(N[(N[(N[(alpha / N[(N[(-2.0 - alpha), $MachinePrecision] - beta), $MachinePrecision]), $MachinePrecision] * t$95$2 + N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 + t$95$2), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \beta + \left(\alpha + 2\right)\\
t_1 := 1 + \frac{\beta}{t\_0}\\
t_2 := \frac{\alpha}{t\_0}\\
\mathbf{if}\;\frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2} \leq -0.9996:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.5, \left(\beta + 2\right) \cdot \frac{\left(-2 - \beta\right) - \beta}{\alpha}, \beta + 1\right)}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\frac{\alpha}{\left(-2 - \alpha\right) - \beta}, t\_2, t\_1 \cdot t\_1\right)}{t\_1 + t\_2}}{2}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.99960000000000004
Initial program 6.8%
Taylor expanded in alpha around inf
lower-/.f64 N/A
Applied rewrites 99.8%
if -0.99960000000000004 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64)))
Initial program 99.7%
lift-+.f64 N/A
+-commutative N/A
lift-/.f64 N/A
lift--.f64 N/A
div-sub N/A
associate-+r- N/A
flip-- N/A
lower-/.f64 N/A
Applied rewrites 99.8%
lift--.f64 N/A
sub-neg N/A
+-commutative N/A
lift-*.f64 N/A
distribute-lft-neg-in N/A
lower-fma.f64 N/A
Applied rewrites 99.8%
Final simplification 99.8%
;; Herbie alternative: three-way branch on t_0 = (beta - alpha)/((beta + alpha) + 2),
;; using simplified expansion-based forms in each region.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (/ (- beta alpha) (+ (+ beta alpha) 2.0))))
(if (<= t_0 -0.999)
(/ (+ beta 1.0) alpha)
(if (<= t_0 0.2) (fma (/ alpha (+ alpha 2.0)) -0.5 0.5) 1.0))))
double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
double tmp;
if (t_0 <= -0.999) {
tmp = (beta + 1.0) / alpha;
} else if (t_0 <= 0.2) {
tmp = fma((alpha / (alpha + 2.0)), -0.5, 0.5);
} else {
tmp = 1.0;
}
return tmp;
}
# Herbie alternative of ((beta - alpha)/((alpha + beta) + 2) + 1)/2; three-way branch on the original ratio t_0.
function code(alpha, beta) t_0 = Float64(Float64(beta - alpha) / Float64(Float64(beta + alpha) + 2.0)) tmp = 0.0 if (t_0 <= -0.999) tmp = Float64(Float64(beta + 1.0) / alpha); elseif (t_0 <= 0.2) tmp = fma(Float64(alpha / Float64(alpha + 2.0)), -0.5, 0.5); else tmp = 1.0; end return tmp end
(* Herbie alternative of ((beta - alpha)/((alpha + beta) + 2) + 1)/2; three-way branch on the original ratio, rounded to $MachinePrecision. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta - alpha), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -0.999], N[(N[(beta + 1.0), $MachinePrecision] / alpha), $MachinePrecision], If[LessEqual[t$95$0, 0.2], N[(N[(alpha / N[(alpha + 2.0), $MachinePrecision]), $MachinePrecision] * -0.5 + 0.5), $MachinePrecision], 1.0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2}\\
\mathbf{if}\;t\_0 \leq -0.999:\\
\;\;\;\;\frac{\beta + 1}{\alpha}\\
\mathbf{elif}\;t\_0 \leq 0.2:\\
\;\;\;\;\mathsf{fma}\left(\frac{\alpha}{\alpha + 2}, -0.5, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.998999999999999999
Initial program 7.6%
Taylor expanded in alpha around inf
associate-*r/ N/A
lower-/.f64 N/A
distribute-lft-in N/A
metadata-eval N/A
associate-*r* N/A
metadata-eval N/A
*-lft-identity N/A
lower-+.f64 98.5
Applied rewrites 98.5%
if -0.998999999999999999 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < 0.20000000000000001
Initial program 99.9%
lift-/.f64 N/A
clear-num N/A
associate-/r/ N/A
lift-+.f64 N/A
distribute-rgt-in N/A
metadata-eval N/A
metadata-eval N/A
metadata-eval N/A
lower-fma.f64 N/A
lift-+.f64 N/A
lift-+.f64 N/A
+-commutative N/A
associate-+l+ N/A
lower-+.f64 N/A
lower-+.f64 N/A
metadata-eval N/A
metadata-eval 99.9
Applied rewrites 99.9%
Taylor expanded in beta around 0
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
lower-/.f64 N/A
+-commutative N/A
lower-+.f64 97.8
Applied rewrites 97.8%
if 0.20000000000000001 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64)))
Initial program 100.0%
Taylor expanded in beta around inf
Applied rewrites 96.8%
Final simplification 97.7%
herbie shell --seed 2024223
;; Source problem as submitted to Herbie: "Octave 3.8, jcobi/1",
;; with precondition alpha > -1 and beta > -1.
(FPCore (alpha beta)
:name "Octave 3.8, jcobi/1"
:precision binary64
:pre (and (> alpha -1.0) (> beta -1.0))
(/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))