
; Original program: ((alpha + beta) + alpha*beta + 1) / t_0^2 / (t_0 + 1),
; where t_0 = alpha + beta + 2, evaluated in binary64.
(FPCore (alpha beta) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0)))) (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* Evaluate ((alpha + beta) + beta*alpha + 1) / denom^2 / (denom + 1),
 * where denom = alpha + beta + 2, entirely in double precision. */
double code(double alpha, double beta) {
    /* Shared denominator term: alpha + beta + 2. */
    double denom = (alpha + beta) + (2.0 * 1.0);
    /* Numerator: alpha + beta + alpha*beta + 1. */
    double numer = ((alpha + beta) + (beta * alpha)) + 1.0;
    return ((numer / denom) / denom) / (denom + 1.0);
}
! Evaluates ((alpha + beta) + beta*alpha + 1) / t_0**2 / (t_0 + 1),
! where t_0 = alpha + beta + 2, entirely in double precision.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
! t_0 is the repeated denominator term alpha + beta + 2
t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
/**
 * Evaluates ((alpha + beta) + beta*alpha + 1) / t^2 / (t + 1),
 * where t = alpha + beta + 2, in double precision.
 */
public static double code(double alpha, double beta) {
    // Shared denominator term: alpha + beta + 2.
    final double t = (alpha + beta) + (2.0 * 1.0);
    final double numerator = ((alpha + beta) + (beta * alpha)) + 1.0;
    return ((numerator / t) / t) / (t + 1.0);
}
def code(alpha, beta):
    """Evaluate ((alpha + beta) + beta*alpha + 1) / t_0**2 / (t_0 + 1).

    Here t_0 = alpha + beta + 2; all arithmetic is ordinary float
    (binary64) arithmetic.

    Fix: the original listing collapsed all three statements onto a
    single line, which is a Python SyntaxError.
    """
    t_0 = (alpha + beta) + (2.0 * 1.0)
    return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
# Evaluate ((alpha + beta) + beta*alpha + 1) / t_0^2 / (t_0 + 1) with
# t_0 = alpha + beta + 2; every intermediate is explicitly rounded to Float64.
# Fix: the original listing collapsed the statements onto one line,
# which does not parse.
function code(alpha, beta)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
    return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
function tmp = code(alpha, beta)
    % Evaluate ((alpha + beta) + beta*alpha + 1) / t_0^2 / (t_0 + 1),
    % where t_0 = alpha + beta + 2, in double precision.
    % Fix: the original listing collapsed the function onto one line,
    % which Octave/MATLAB cannot parse.
    t_0 = (alpha + beta) + (2.0 * 1.0);
    tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
(* Evaluate ((alpha + beta) + beta*alpha + 1) / t^2 / (t + 1), t = alpha + beta + 2,
   rounding every intermediate to $MachinePrecision. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Original program: ((alpha + beta) + alpha*beta + 1) / t_0^2 / (t_0 + 1),
; where t_0 = alpha + beta + 2, evaluated in binary64.
(FPCore (alpha beta) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0)))) (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* ((alpha + beta) + beta*alpha + 1) / t^2 / (t + 1) with t = alpha + beta + 2. */
double code(double alpha, double beta) {
    double t = (alpha + beta) + (2.0 * 1.0);   /* shared denominator term */
    double top = ((alpha + beta) + (beta * alpha)) + 1.0;
    double r = top / t;
    r /= t;
    return r / (t + 1.0);
}
! Evaluates ((alpha + beta) + beta*alpha + 1) / t_0**2 / (t_0 + 1),
! where t_0 = alpha + beta + 2, entirely in double precision.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
! t_0 is the repeated denominator term alpha + beta + 2
t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
/**
 * Computes ((alpha + beta) + beta*alpha + 1) / denom^2 / (denom + 1),
 * where denom = alpha + beta + 2.
 */
public static double code(double alpha, double beta) {
    // Shared denominator term: alpha + beta + 2.
    double denom = (alpha + beta) + (2.0 * 1.0);
    double num = ((alpha + beta) + (beta * alpha)) + 1.0;
    double firstDiv = num / denom;
    double secondDiv = firstDiv / denom;
    return secondDiv / (denom + 1.0);
}
def code(alpha, beta):
    """Evaluate ((alpha + beta) + beta*alpha + 1) / t_0**2 / (t_0 + 1).

    Here t_0 = alpha + beta + 2; all arithmetic is ordinary float
    (binary64) arithmetic.

    Fix: the original listing collapsed all three statements onto a
    single line, which is a Python SyntaxError.
    """
    t_0 = (alpha + beta) + (2.0 * 1.0)
    return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
# Evaluate ((alpha + beta) + beta*alpha + 1) / t_0^2 / (t_0 + 1) with
# t_0 = alpha + beta + 2; every intermediate is explicitly rounded to Float64.
# Fix: the original listing collapsed the statements onto one line,
# which does not parse.
function code(alpha, beta)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
    return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
function tmp = code(alpha, beta)
    % Evaluate ((alpha + beta) + beta*alpha + 1) / t_0^2 / (t_0 + 1),
    % where t_0 = alpha + beta + 2, in double precision.
    % Fix: the original listing collapsed the function onto one line,
    % which Octave/MATLAB cannot parse.
    t_0 = (alpha + beta) + (2.0 * 1.0);
    tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
(* Evaluate ((alpha + beta) + beta*alpha + 1) / t^2 / (t + 1), t = alpha + beta + 2,
   rounding every intermediate to $MachinePrecision. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ beta alpha) 2.0)))
(if (<= beta 4.9e+116)
(/
(/ (+ (fma alpha beta (+ beta alpha)) 1.0) t_0)
(* t_0 (+ alpha (+ beta 3.0))))
(/
(/
(+
(+ (+ alpha 1.0) (+ (/ 1.0 beta) (/ alpha beta)))
(* (- -1.0 alpha) (/ (fma 2.0 alpha 4.0) beta)))
beta)
(+ 1.0 t_0)))))
assert(alpha < beta);
double code(double alpha, double beta) {
double t_0 = (beta + alpha) + 2.0;
double tmp;
if (beta <= 4.9e+116) {
tmp = ((fma(alpha, beta, (beta + alpha)) + 1.0) / t_0) / (t_0 * (alpha + (beta + 3.0)));
} else {
tmp = ((((alpha + 1.0) + ((1.0 / beta) + (alpha / beta))) + ((-1.0 - alpha) * (fma(2.0, alpha, 4.0) / beta))) / beta) / (1.0 + t_0);
}
return tmp;
}
# Requires alpha <= beta: sort the inputs ascending before calling,
# e.g. alpha, beta = sort([alpha, beta]).
# Fix: the original listing collapsed everything (including a stray
# top-level sort of undefined variables) onto one line, which does not parse.
function code(alpha, beta)
    t_0 = Float64(Float64(beta + alpha) + 2.0)
    tmp = 0.0
    if (beta <= 4.9e+116)
        # accurate regime: numerator via fused multiply-add
        tmp = Float64(Float64(Float64(fma(alpha, beta, Float64(beta + alpha)) + 1.0) / t_0) / Float64(t_0 * Float64(alpha + Float64(beta + 3.0))))
    else
        # huge beta: series expansion around beta = +inf
        tmp = Float64(Float64(Float64(Float64(Float64(alpha + 1.0) + Float64(Float64(1.0 / beta) + Float64(alpha / beta))) + Float64(Float64(-1.0 - alpha) * Float64(fma(2.0, alpha, 4.0) / beta))) / beta) / Float64(1.0 + t_0))
    end
    return tmp
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(* Branched variant: for beta <= 4.9e116 uses the rearranged quotient (fma replaced
   by alpha*beta + ...); for huge beta uses a series expansion around beta = +inf.
   Requires alpha <= beta (sorted inputs). *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 4.9e+116], N[(N[(N[(N[(alpha * beta + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 * N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(alpha + 1.0), $MachinePrecision] + N[(N[(1.0 / beta), $MachinePrecision] + N[(alpha / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(-1.0 - alpha), $MachinePrecision] * N[(N[(2.0 * alpha + 4.0), $MachinePrecision] / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / beta), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + 2\\
\mathbf{if}\;\beta \leq 4.9 \cdot 10^{+116}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{t\_0}}{t\_0 \cdot \left(\alpha + \left(\beta + 3\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(\left(\alpha + 1\right) + \left(\frac{1}{\beta} + \frac{\alpha}{\beta}\right)\right) + \left(-1 - \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}{1 + t\_0}\\
\end{array}
\end{array}
if beta < 4.8999999999999998e116
Initial program 99.7%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lift-*.f64N/A
metadata-evalN/A
*-commutativeN/A
Applied rewrites99.8%
if 4.8999999999999998e116 < beta
Initial program 84.5%
Taylor expanded in beta around inf
lower-/.f64N/A
Applied rewrites99.9%
Final simplification99.8%
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ beta alpha) 2.0)))
(if (<= beta 4.9e+116)
(/
(/ (+ (fma alpha beta (+ beta alpha)) 1.0) t_0)
(* t_0 (+ alpha (+ beta 3.0))))
(/ (/ (+ alpha 1.0) t_0) (+ (+ beta alpha) 3.0)))))
assert(alpha < beta);
double code(double alpha, double beta) {
double t_0 = (beta + alpha) + 2.0;
double tmp;
if (beta <= 4.9e+116) {
tmp = ((fma(alpha, beta, (beta + alpha)) + 1.0) / t_0) / (t_0 * (alpha + (beta + 3.0)));
} else {
tmp = ((alpha + 1.0) / t_0) / ((beta + alpha) + 3.0);
}
return tmp;
}
# Requires alpha <= beta: sort the inputs ascending before calling,
# e.g. alpha, beta = sort([alpha, beta]).
# Fix: the original listing collapsed everything (including a stray
# top-level sort of undefined variables) onto one line, which does not parse.
function code(alpha, beta)
    t_0 = Float64(Float64(beta + alpha) + 2.0)
    tmp = 0.0
    if (beta <= 4.9e+116)
        # accurate regime: numerator via fused multiply-add
        tmp = Float64(Float64(Float64(fma(alpha, beta, Float64(beta + alpha)) + 1.0) / t_0) / Float64(t_0 * Float64(alpha + Float64(beta + 3.0))))
    else
        # huge beta: simplified expansion around beta = +inf
        tmp = Float64(Float64(Float64(alpha + 1.0) / t_0) / Float64(Float64(beta + alpha) + 3.0))
    end
    return tmp
end
NOTE: alpha and beta should be sorted in increasing order before calling this function.
(* Branched variant with simplified large-beta series: requires alpha <= beta
   (sorted inputs); fma replaced by alpha*beta + ... in the accurate branch. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 4.9e+116], N[(N[(N[(N[(alpha * beta + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 * N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(alpha + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 3.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
[alpha, beta] = \mathsf{sort}([alpha, beta])\\
\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + 2\\
\mathbf{if}\;\beta \leq 4.9 \cdot 10^{+116}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\alpha, \beta, \beta + \alpha\right) + 1}{t\_0}}{t\_0 \cdot \left(\alpha + \left(\beta + 3\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\alpha + 1}{t\_0}}{\left(\beta + \alpha\right) + 3}\\
\end{array}
\end{array}
if beta < 4.8999999999999998e116
Initial program 99.8%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lift-*.f64N/A
metadata-evalN/A
*-commutativeN/A
Applied rewrites99.8%
if 4.8999999999999998e116 < beta
Initial program 86.1%
Taylor expanded in beta around inf
lower-+.f6499.1
Applied rewrites99.1%
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
metadata-evalN/A
clear-numN/A
frac-timesN/A
metadata-evalN/A
Applied rewrites97.5%
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-/.f64N/A
clear-numN/A
un-div-invN/A
clear-numN/A
lower-/.f64N/A
Applied rewrites99.1%
Final simplification99.5%
herbie shell --seed 2024227
; Original input to Herbie ("Octave 3.8, jcobi/3"):
; ((alpha + beta) + alpha*beta + 1) / (alpha + beta + 2)^2 / (alpha + beta + 3),
; restricted to alpha > -1 and beta > -1, in binary64.
(FPCore (alpha beta)
:name "Octave 3.8, jcobi/3"
:precision binary64
:pre (and (> alpha -1.0) (> beta -1.0))
(/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))