
;; Input program: (((beta - alpha) / ((alpha + beta) + 2)) + 1) / 2, evaluated in binary64.
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
/* Evaluates (((beta - alpha) / ((alpha + beta) + 2)) + 1) / 2 in binary64,
   with the same operation order as the FPCore above. */
double code(double alpha, double beta) {
    double num = beta - alpha;           /* numerator of the inner quotient */
    double den = (alpha + beta) + 2.0;   /* denominator, grouped as in the FPCore */
    double ratio = num / den;
    return (ratio + 1.0) / 2.0;
}
! Double-precision evaluation of (((beta - alpha) / ((alpha + beta) + 2)) + 1) / 2.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: ratio
! Inner quotient first, then the affine map (x + 1) / 2.
ratio = (beta - alpha) / ((alpha + beta) + 2.0d0)
code = (ratio + 1.0d0) / 2.0d0
end function
/** Computes (((beta - alpha) / ((alpha + beta) + 2)) + 1) / 2 in binary64. */
public static double code(double alpha, double beta) {
    final double ratio = (beta - alpha) / ((alpha + beta) + 2.0);
    return (ratio + 1.0) / 2.0;
}
def code(alpha, beta):
    """Return (((beta - alpha) / ((alpha + beta) + 2)) + 1) / 2."""
    ratio = (beta - alpha) / ((alpha + beta) + 2.0)
    return (ratio + 1.0) / 2.0
# Evaluates (((beta - alpha) / ((alpha + beta) + 2)) + 1) / 2; every Float64()
# wrapper marks one rounding step, mirroring the FPCore operation tree.
function code(alpha, beta)
    ratio = Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0))
    return Float64(Float64(ratio + 1.0) / 2.0)
end
% Evaluates (((beta - alpha) / ((alpha + beta) + 2)) + 1) / 2.
function tmp = code(alpha, beta)
	ratio = (beta - alpha) / ((alpha + beta) + 2.0);
	tmp = (ratio + 1.0) / 2.0;
end
(* Machine-precision translation of (((beta - alpha)/((alpha + beta) + 2)) + 1)/2.
   Each N[..., $MachinePrecision] wrapper mirrors one node of the FPCore operation
   tree, so the nesting must be kept exactly as generated. *)
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Same input program as above, repeated as the baseline entry of the alternatives table.
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
/* Baseline translation: (((beta - alpha) / ((alpha + beta) + 2)) + 1) / 2,
   evaluated in the exact order given by the FPCore. */
double code(double alpha, double beta) {
    double t = (beta - alpha) / ((alpha + beta) + 2.0);
    t = t + 1.0;
    return t / 2.0;
}
! Baseline translation of (((beta - alpha)/((alpha + beta) + 2)) + 1)/2 in real(8).
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t
t = (beta - alpha) / ((alpha + beta) + 2.0d0)
t = t + 1.0d0
code = t / 2.0d0
end function
/** Baseline translation of the FPCore above, evaluated left-to-right in binary64. */
public static double code(double alpha, double beta) {
    double shifted = ((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0;
    return shifted / 2.0;
}
def code(alpha, beta):
    # Baseline: (((beta - alpha) / ((alpha + beta) + 2)) + 1) / 2, same rounding order as the FPCore.
    shifted = ((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0
    return shifted / 2.0
# Baseline translation; the Float64() wrappers mark each binary64 rounding step.
function code(alpha, beta)
    shifted = Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) + 1.0)
    return Float64(shifted / 2.0)
end
% Baseline translation of the FPCore above.
function tmp = code(alpha, beta)
	shifted = ((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0;
	tmp = shifted / 2.0;
end
(* Baseline machine-precision translation; the N[..., $MachinePrecision] nesting
   mirrors the FPCore operation tree and must not be restructured. *)
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
;; Herbie alternative: when the inner ratio is at/near -1 (<= -0.999995),
;; use a rearranged form scaled by 1/alpha; otherwise fold the quotient
;; and the +1 into a single fma.
(FPCore (alpha beta)
:precision binary64
(if (<= (/ (- beta alpha) (+ 2.0 (+ beta alpha))) -0.999995)
(/
(fma 0.5 (* (- (- -2.0 beta) beta) (/ (+ beta 2.0) alpha)) (+ beta 1.0))
alpha)
(/ (fma (/ -1.0 (- -2.0 (+ beta alpha))) (- beta alpha) 1.0) 2.0)))
double code(double alpha, double beta) {
double tmp;
if (((beta - alpha) / (2.0 + (beta + alpha))) <= -0.999995) {
tmp = fma(0.5, (((-2.0 - beta) - beta) * ((beta + 2.0) / alpha)), (beta + 1.0)) / alpha;
} else {
tmp = fma((-1.0 / (-2.0 - (beta + alpha))), (beta - alpha), 1.0) / 2.0;
}
return tmp;
}
# Herbie alternative: branch on the inner ratio; near the pole use the
# rearranged form scaled by 1/alpha, otherwise a single fused multiply-add.
function code(alpha, beta)
    ratio = Float64(Float64(beta - alpha) / Float64(2.0 + Float64(beta + alpha)))
    if ratio <= -0.999995
        scaled = Float64(Float64(Float64(-2.0 - beta) - beta) * Float64(Float64(beta + 2.0) / alpha))
        return Float64(fma(0.5, scaled, Float64(beta + 1.0)) / alpha)
    else
        return Float64(fma(Float64(-1.0 / Float64(-2.0 - Float64(beta + alpha))), Float64(beta - alpha), 1.0) / 2.0)
    end
end
(* Machine-precision translation of Herbie's branched alternative: the If[] selects
   the near-pole rearrangement when the inner ratio is <= -0.999995, otherwise the
   single-fma form; the N[..., $MachinePrecision] nesting must be kept as generated. *)
code[alpha_, beta_] := If[LessEqual[N[(N[(beta - alpha), $MachinePrecision] / N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -0.999995], N[(N[(0.5 * N[(N[(N[(-2.0 - beta), $MachinePrecision] - beta), $MachinePrecision] * N[(N[(beta + 2.0), $MachinePrecision] / alpha), $MachinePrecision]), $MachinePrecision] + N[(beta + 1.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision], N[(N[(N[(-1.0 / N[(-2.0 - N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(beta - alpha), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\beta - \alpha}{2 + \left(\beta + \alpha\right)} \leq -0.999995:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.5, \left(\left(-2 - \beta\right) - \beta\right) \cdot \frac{\beta + 2}{\alpha}, \beta + 1\right)}{\alpha}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\frac{-1}{-2 - \left(\beta + \alpha\right)}, \beta - \alpha, 1\right)}{2}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.99999499999999997
Initial program 8.5%
Taylor expanded in alpha around inf
lower-/.f64N/A
Simplified99.8%
if -0.99999499999999997 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 99.9%
lift-+.f64N/A
lift-+.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lower-fma.f64N/A
frac-2negN/A
metadata-evalN/A
lower-/.f64N/A
lift-+.f64N/A
+-commutativeN/A
distribute-neg-inN/A
unsub-negN/A
lower--.f64N/A
metadata-eval99.9
lift-+.f64N/A
+-commutativeN/A
lower-+.f6499.9
Applied egg-rr99.9%
Final simplification99.9%
;; Herbie alternative with three regimes of t_0 = (beta - alpha)/(2 + (beta + alpha)):
;; t_0 <= -0.5 -> (beta + 1)/alpha; -0.5 < t_0 <= 0.02 -> cubic in beta via nested
;; fma; otherwise -> 1 - 1/beta.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (/ (- beta alpha) (+ 2.0 (+ beta alpha)))))
(if (<= t_0 -0.5)
(/ (+ beta 1.0) alpha)
(if (<= t_0 0.02)
(fma beta (fma beta (fma beta 0.0625 -0.125) 0.25) 0.5)
(+ 1.0 (/ -1.0 beta))))))
double code(double alpha, double beta) {
double t_0 = (beta - alpha) / (2.0 + (beta + alpha));
double tmp;
if (t_0 <= -0.5) {
tmp = (beta + 1.0) / alpha;
} else if (t_0 <= 0.02) {
tmp = fma(beta, fma(beta, fma(beta, 0.0625, -0.125), 0.25), 0.5);
} else {
tmp = 1.0 + (-1.0 / beta);
}
return tmp;
}
# Three-regime Herbie alternative keyed on t_0 = (beta - alpha)/(2 + (beta + alpha)).
function code(alpha, beta)
    t_0 = Float64(Float64(beta - alpha) / Float64(2.0 + Float64(beta + alpha)))
    if t_0 <= -0.5
        return Float64(Float64(beta + 1.0) / alpha)
    elseif t_0 <= 0.02
        return fma(beta, fma(beta, fma(beta, 0.0625, -0.125), 0.25), 0.5)
    else
        return Float64(1.0 + Float64(-1.0 / beta))
    end
end
(* Machine-precision translation of the three-regime alternative: Block binds
   t$95$0 (Herbie's mangled name for t_0), then nested If[] picks the regime;
   the N[..., $MachinePrecision] nesting must be kept as generated. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta - alpha), $MachinePrecision] / N[(2.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -0.5], N[(N[(beta + 1.0), $MachinePrecision] / alpha), $MachinePrecision], If[LessEqual[t$95$0, 0.02], N[(beta * N[(beta * N[(beta * 0.0625 + -0.125), $MachinePrecision] + 0.25), $MachinePrecision] + 0.5), $MachinePrecision], N[(1.0 + N[(-1.0 / beta), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\beta - \alpha}{2 + \left(\beta + \alpha\right)}\\
\mathbf{if}\;t_0 \leq -0.5:\\
\;\;\;\;\frac{\beta + 1}{\alpha}\\
\mathbf{elif}\;t_0 \leq 0.02:\\
\;\;\;\;\mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, \mathsf{fma}\left(\beta, 0.0625, -0.125\right), 0.25\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;1 + \frac{-1}{\beta}\\
\end{array}
\end{array}
if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < -0.5
Initial program 9.0%
Taylor expanded in alpha around inf
associate-*r/N/A
lower-/.f64N/A
distribute-lft-inN/A
metadata-evalN/A
associate-*r*N/A
metadata-evalN/A
*-lft-identityN/A
lower-+.f6497.4
Simplified97.4%
if -0.5 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) < 0.0200000000000000004Initial program 100.0%
Taylor expanded in alpha around 0
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-+.f6498.1
Simplified98.1%
Taylor expanded in beta around 0
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6497.8
Simplified97.8%
if 0.0200000000000000004 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) #s(literal 2 binary64))) Initial program 100.0%
Taylor expanded in alpha around 0
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-+.f6498.9
Simplified98.9%
Taylor expanded in beta around inf
sub-negN/A
lower-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f6497.7
Simplified97.7%
Final simplification97.7%
herbie shell --seed 2024218
;; Original Herbie job specification: "Octave 3.8, jcobi/1", binary64,
;; with the precondition alpha > -1 and beta > -1.
(FPCore (alpha beta)
:name "Octave 3.8, jcobi/1"
:precision binary64
:pre (and (> alpha -1.0) (> beta -1.0))
(/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))