
(FPCore (alpha beta) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0)))) (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* Binary64 evaluation of ((alpha+beta) + beta*alpha + 1) / d / d / (d + 1)
 * with d = (alpha+beta) + 2.  Generated code: the floating-point operation
 * order is kept exactly as emitted so rounding behaviour is unchanged. */
double code(double alpha, double beta) {
    double d = (alpha + beta) + (2.0 * 1.0);
    double num = ((alpha + beta) + (beta * alpha)) + 1.0;
    double q = num / d;
    q = q / d;
    return q / (d + 1.0);
}
! Binary64 evaluation of ((alpha+beta) + beta*alpha + 1) / t_0 / t_0 / (t_0+1)
! with t_0 = (alpha+beta) + 2.  Generated code: the exact association and
! order of the floating-point operations is significant; do not rearrange.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
! 2.0d0 * 1.0d0 folds to exactly 2, matching the FPCore source literally
t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
/**
 * Evaluates ((alpha + beta) + beta*alpha + 1) / d / d / (d + 1) in binary64,
 * where d = (alpha + beta) + 2.  The floating-point operation order matches
 * the generated original exactly, so results are bit-identical.
 */
public static double code(double alpha, double beta) {
    double d = (alpha + beta) + (2.0 * 1.0);
    double num = ((alpha + beta) + (beta * alpha)) + 1.0;
    double r = num / d;
    r = r / d;
    return r / (d + 1.0);
}
def code(alpha, beta):
    """Binary64 evaluation of ((a+b) + b*a + 1) / d / d / (d+1), d = a+b+2.

    Operation order is kept identical to the generated original so the
    rounding behaviour (and reported accuracy) is unchanged.
    """
    d = (alpha + beta) + (2.0 * 1.0)
    num = ((alpha + beta) + (beta * alpha)) + 1.0
    return ((num / d) / d) / (d + 1.0)
# Binary64 evaluation of ((a+b) + b*a + 1) / d / d / (d+1) with d = a+b+2.
# The explicit Float64(...) conversions and the operation order of the
# generated original are preserved exactly.
function code(alpha, beta)
    d = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
    num = Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0)
    return Float64(Float64(Float64(num / d) / d) / Float64(d + 1.0))
end
% Binary64 evaluation of ((a+b) + b*a + 1) / d / d / (d+1), d = a+b+2.
% Operation order matches the generated original exactly.
function tmp = code(alpha, beta)
    d = (alpha + beta) + (2.0 * 1.0);
    num = ((alpha + beta) + (beta * alpha)) + 1.0;
    tmp = ((num / d) / d) / (d + 1.0);
end
(* Machine-precision evaluation of ((alpha+beta) + beta*alpha + 1)/t0/t0/(t0+1)
   with t0 = (alpha+beta) + 2*1; t$95$0 is the generator's mangled name for t_0,
   and every operation is rounded via N[..., $MachinePrecision]. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 24 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0)))) (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* Duplicate listing of the initial binary64 program:
 * ((alpha+beta) + beta*alpha + 1) / t_0 / t_0 / (t_0 + 1), t_0 = (alpha+beta)+2.
 * Generated code: floating-point operation order is significant. */
double code(double alpha, double beta) {
double t_0 = (alpha + beta) + (2.0 * 1.0);
return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
! Duplicate listing of the initial binary64 program:
!   ((alpha+beta) + beta*alpha + 1) / t_0 / t_0 / (t_0+1), t_0 = (alpha+beta)+2.
! Generated code: floating-point operation order is significant.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
/** Duplicate listing of the initial binary64 program:
 *  ((alpha+beta) + beta*alpha + 1) / t_0 / t_0 / (t_0 + 1), t_0 = (alpha+beta)+2.
 *  Generated code: floating-point operation order is significant. */
public static double code(double alpha, double beta) {
double t_0 = (alpha + beta) + (2.0 * 1.0);
return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
def code(alpha, beta):
    """Binary64 evaluation of ((a+b) + b*a + 1) / d / d / (d+1), d = a+b+2.

    Duplicate listing of the initial program; the operation order of the
    generated original is preserved exactly.
    """
    d = (alpha + beta) + (2.0 * 1.0)
    num = ((alpha + beta) + (beta * alpha)) + 1.0
    return ((num / d) / d) / (d + 1.0)
# Duplicate listing of the initial binary64 program; Float64 conversions and
# operation order of the generated original are preserved exactly.
function code(alpha, beta)
    d = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
    num = Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0)
    return Float64(Float64(Float64(num / d) / d) / Float64(d + 1.0))
end
% Duplicate listing of the initial binary64 program:
%   ((alpha+beta) + beta*alpha + 1) / t_0 / t_0 / (t_0+1), t_0 = (alpha+beta)+2.
function tmp = code(alpha, beta) t_0 = (alpha + beta) + (2.0 * 1.0); tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0); end
(* Duplicate listing of the initial machine-precision program; t$95$0 is the
   generator's mangled name for t_0 = (alpha+beta) + 2*1. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) 2.0))
(t_1
(/
(/ (/ (+ (+ (+ alpha beta) (* alpha beta)) 1.0) t_0) t_0)
(+ 1.0 t_0))))
(if (<= t_1 0.1)
t_1
(/
1.0
(*
(*
t_0
(+
(/
(+
(/ 2.0 (+ beta 1.0))
(+
(/ beta (+ beta 1.0))
(/ (- -1.0 beta) (* (- -1.0 beta) (- -1.0 beta)))))
alpha)
(/ 1.0 (+ beta 1.0))))
(+ alpha (+ beta 3.0)))))))
/* Herbie alternative for ((alpha+beta) + alpha*beta + 1) / t_0 / t_0 / (1+t_0),
 * t_0 = (alpha+beta) + 2.  If the directly-evaluated value t_1 is <= 0.1 it is
 * returned as-is; otherwise a rewritten reciprocal form is used.
 * Generated code: exact FP operation order is significant; do not rearrange.
 * NOTE(review): the else branch divides by alpha and by (beta + 1.0) —
 * presumably safe under the benchmark precondition alpha > -1, beta > -1,
 * but confirm alpha != 0 cannot reach this branch. */
double code(double alpha, double beta) {
double t_0 = (alpha + beta) + 2.0;
/* Direct evaluation of the original expression. */
double t_1 = (((((alpha + beta) + (alpha * beta)) + 1.0) / t_0) / t_0) / (1.0 + t_0);
double tmp;
if (t_1 <= 0.1) {
tmp = t_1;
} else {
tmp = 1.0 / ((t_0 * ((((2.0 / (beta + 1.0)) + ((beta / (beta + 1.0)) + ((-1.0 - beta) / ((-1.0 - beta) * (-1.0 - beta))))) / alpha) + (1.0 / (beta + 1.0)))) * (alpha + (beta + 3.0)));
}
return tmp;
}
! Herbie alternative for ((alpha+beta) + alpha*beta + 1) / t_0 / t_0 / (1+t_0),
! t_0 = (alpha+beta) + 2.  If the directly-evaluated value t_1 is <= 0.1 it is
! returned as-is; otherwise a rewritten reciprocal form is used.
! Generated code: exact FP operation order is significant; do not rearrange.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = (alpha + beta) + 2.0d0
! Direct evaluation of the original expression
t_1 = (((((alpha + beta) + (alpha * beta)) + 1.0d0) / t_0) / t_0) / (1.0d0 + t_0)
if (t_1 <= 0.1d0) then
tmp = t_1
else
tmp = 1.0d0 / ((t_0 * ((((2.0d0 / (beta + 1.0d0)) + ((beta / (beta + 1.0d0)) + (((-1.0d0) - beta) / (((-1.0d0) - beta) * ((-1.0d0) - beta))))) / alpha) + (1.0d0 / (beta + 1.0d0)))) * (alpha + (beta + 3.0d0)))
end if
code = tmp
end function
/**
 * Herbie alternative for ((alpha+beta) + alpha*beta + 1) / d / d / (1 + d),
 * d = (alpha+beta) + 2.  When the directly-evaluated value is <= 0.1 it is
 * returned unchanged; otherwise a rewritten reciprocal form is used.
 * Repeated subexpressions are factored into locals; since each repetition
 * computed the identical value, results are bit-identical to the original.
 */
public static double code(double alpha, double beta) {
    double d = (alpha + beta) + 2.0;
    double direct = (((((alpha + beta) + (alpha * beta)) + 1.0) / d) / d) / (1.0 + d);
    if (direct <= 0.1) {
        return direct;
    }
    double bp1 = beta + 1.0;        // (beta + 1), reused three times
    double nb = -1.0 - beta;        // (-1 - beta), reused three times
    double inner = (2.0 / bp1) + ((beta / bp1) + (nb / (nb * nb)));
    double factor = d * ((inner / alpha) + (1.0 / bp1));
    return 1.0 / (factor * (alpha + (beta + 3.0)));
}
def code(alpha, beta):
    """Herbie alternative for ((a+b) + a*b + 1) / d / d / (1+d), d = a+b+2.

    Returns the directly-evaluated value when it is <= 0.1; otherwise uses an
    algebraically rewritten reciprocal form.  Repeated subexpressions are
    factored into locals, which leaves every FP operation's value unchanged.
    """
    d = (alpha + beta) + 2.0
    direct = (((((alpha + beta) + (alpha * beta)) + 1.0) / d) / d) / (1.0 + d)
    if direct <= 0.1:
        return direct
    bp1 = beta + 1.0   # (beta + 1), reused three times
    nb = -1.0 - beta   # (-1 - beta), reused three times
    inner = (2.0 / bp1) + ((beta / bp1) + (nb / (nb * nb)))
    factor = d * ((inner / alpha) + (1.0 / bp1))
    return 1.0 / (factor * (alpha + (beta + 3.0)))
# Herbie alternative for ((a+b) + a*b + 1) / d / d / (1+d), d = a+b+2.
# Direct value returned when <= 0.1, otherwise a rewritten reciprocal form.
# Float64 conversions and operation order of the generated original are
# preserved; repeated subexpressions are factored into locals (same values).
function code(alpha, beta)
    d = Float64(Float64(alpha + beta) + 2.0)
    direct = Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(alpha * beta)) + 1.0) / d) / d) / Float64(1.0 + d))
    if direct <= 0.1
        return direct
    end
    bp1 = Float64(beta + 1.0)
    nb = Float64(-1.0 - beta)
    inner = Float64(Float64(2.0 / bp1) + Float64(Float64(beta / bp1) + Float64(nb / Float64(nb * nb))))
    factor = Float64(d * Float64(Float64(inner / alpha) + Float64(1.0 / bp1)))
    return Float64(1.0 / Float64(factor * Float64(alpha + Float64(beta + 3.0))))
end
% Herbie alternative for ((a+b) + a*b + 1) / t_0 / t_0 / (1+t_0), t_0 = a+b+2:
% direct value when t_1 <= 0.1, otherwise a rewritten reciprocal form.
function tmp_2 = code(alpha, beta) t_0 = (alpha + beta) + 2.0; t_1 = (((((alpha + beta) + (alpha * beta)) + 1.0) / t_0) / t_0) / (1.0 + t_0); tmp = 0.0; if (t_1 <= 0.1) tmp = t_1; else tmp = 1.0 / ((t_0 * ((((2.0 / (beta + 1.0)) + ((beta / (beta + 1.0)) + ((-1.0 - beta) / ((-1.0 - beta) * (-1.0 - beta))))) / alpha) + (1.0 / (beta + 1.0)))) * (alpha + (beta + 3.0))); end tmp_2 = tmp; end
(* Herbie alternative: direct machine-precision value when t$95$1 <= 0.1,
   otherwise a rewritten reciprocal form; t$95$0/t$95$1 are the generator's
   mangled names for t_0 and t_1. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(alpha * beta), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, 0.1], t$95$1, N[(1.0 / N[(N[(t$95$0 * N[(N[(N[(N[(2.0 / N[(beta + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(beta / N[(beta + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(-1.0 - beta), $MachinePrecision] / N[(N[(-1.0 - beta), $MachinePrecision] * N[(-1.0 - beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] + N[(1.0 / N[(beta + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2\\
t_1 := \frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \alpha \cdot \beta\right) + 1}{t\_0}}{t\_0}}{1 + t\_0}\\
\mathbf{if}\;t\_1 \leq 0.1:\\
\;\;\;\;t\_1\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\left(t\_0 \cdot \left(\frac{\frac{2}{\beta + 1} + \left(\frac{\beta}{\beta + 1} + \frac{-1 - \beta}{\left(-1 - \beta\right) \cdot \left(-1 - \beta\right)}\right)}{\alpha} + \frac{1}{\beta + 1}\right)\right) \cdot \left(\alpha + \left(\beta + 3\right)\right)}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (/.f64 (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 beta alpha)) #s(literal 1 binary64)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) #s(literal 1 binary64)))) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) #s(literal 1 binary64)))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) #s(literal 1 binary64))) #s(literal 1 binary64))) < 0.10000000000000001
Initial program 99.9%
if 0.10000000000000001 < (/.f64 (/.f64 (/.f64 (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 beta alpha)) #s(literal 1 binary64)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) #s(literal 1 binary64)))) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) #s(literal 1 binary64)))) (+.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) #s(literal 1 binary64))) #s(literal 1 binary64))) Initial program 1.6%
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
clear-numN/A
frac-timesN/A
metadata-evalN/A
lower-/.f64N/A
lower-*.f64N/A
Applied rewrites1.6%
Taylor expanded in alpha around -inf
lower--.f64N/A
Applied rewrites99.7%
Final simplification99.9%
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) 2.0)))
(if (<= beta 2.3e+82)
(/
(/ (+ 1.0 (fma alpha beta (+ alpha beta))) t_0)
(* t_0 (+ alpha (+ beta 3.0))))
(/
(/
(+
(+ (/ 1.0 beta) (+ alpha (/ alpha beta)))
(+ 1.0 (* (- -1.0 alpha) (/ (+ alpha 2.0) beta))))
t_0)
(+ 1.0 t_0)))))
/* Herbie alternative using fused multiply-add.  For beta <= 2.3e82 the
 * numerator is computed as 1 + fma(alpha, beta, alpha+beta), i.e.
 * alpha*beta + (alpha+beta) with a single rounding; for very large beta a
 * series-expanded form (expansion around beta = +inf per the report trace)
 * replaces the direct expression.  Exact FP operation order is significant. */
double code(double alpha, double beta) {
double t_0 = (alpha + beta) + 2.0;
double tmp;
if (beta <= 2.3e+82) {
tmp = ((1.0 + fma(alpha, beta, (alpha + beta))) / t_0) / (t_0 * (alpha + (beta + 3.0)));
} else {
tmp = ((((1.0 / beta) + (alpha + (alpha / beta))) + (1.0 + ((-1.0 - alpha) * ((alpha + 2.0) / beta)))) / t_0) / (1.0 + t_0);
}
return tmp;
}
# Herbie alternative: fma-based numerator for beta <= 2.3e82 (one rounding for
# alpha*beta + (alpha+beta)); a series-expanded form for very large beta.
# Float64 conversions and operation order of the generated original are
# preserved exactly.
function code(alpha, beta)
    d = Float64(Float64(alpha + beta) + 2.0)
    if beta <= 2.3e+82
        num = Float64(1.0 + fma(alpha, beta, Float64(alpha + beta)))
        return Float64(Float64(num / d) / Float64(d * Float64(alpha + Float64(beta + 3.0))))
    end
    lead = Float64(Float64(1.0 / beta) + Float64(alpha + Float64(alpha / beta)))
    tail = Float64(1.0 + Float64(Float64(-1.0 - alpha) * Float64(Float64(alpha + 2.0) / beta)))
    return Float64(Float64(Float64(lead + tail) / d) / Float64(1.0 + d))
end
(* Herbie alternative: fused-multiply-add form (alpha*beta + (alpha+beta)
   rounded once) when beta <= 2.3e82, otherwise a series-expanded form;
   t$95$0 is the generator's mangled name for t_0 = (alpha+beta) + 2. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 2.3e+82], N[(N[(N[(1.0 + N[(alpha * beta + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 * N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(1.0 / beta), $MachinePrecision] + N[(alpha + N[(alpha / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(N[(-1.0 - alpha), $MachinePrecision] * N[(N[(alpha + 2.0), $MachinePrecision] / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2\\
\mathbf{if}\;\beta \leq 2.3 \cdot 10^{+82}:\\
\;\;\;\;\frac{\frac{1 + \mathsf{fma}\left(\alpha, \beta, \alpha + \beta\right)}{t\_0}}{t\_0 \cdot \left(\alpha + \left(\beta + 3\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(\frac{1}{\beta} + \left(\alpha + \frac{\alpha}{\beta}\right)\right) + \left(1 + \left(-1 - \alpha\right) \cdot \frac{\alpha + 2}{\beta}\right)}{t\_0}}{1 + t\_0}\\
\end{array}
\end{array}
if beta < 2.29999999999999988e82
Initial program 99.2%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lift-*.f64N/A
metadata-evalN/A
*-commutativeN/A
Applied rewrites98.7%
if 2.29999999999999988e82 < beta Initial program 78.6%
Taylor expanded in beta around inf
sub-negN/A
+-commutativeN/A
associate-+l+N/A
lower-+.f64N/A
+-commutativeN/A
associate-+l+N/A
lower-+.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-/.f64N/A
lower-+.f64N/A
associate-/l*N/A
distribute-lft-neg-inN/A
mul-1-negN/A
lower-*.f64N/A
distribute-lft-inN/A
metadata-evalN/A
mul-1-negN/A
unsub-negN/A
lower--.f64N/A
lower-/.f64N/A
Applied rewrites88.2%
Final simplification96.2%
herbie shell --seed 2024223
(FPCore (alpha beta)
:name "Octave 3.8, jcobi/3"
:precision binary64
:pre (and (> alpha -1.0) (> beta -1.0))
(/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))