
; Original program (binary64): (((a+b) + a*b + 1) / t / t) / (t + 1), with t = (a+b) + 2*1.
(FPCore (alpha beta) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0)))) (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* Herbie-generated binary64 evaluation of
 * (((a+b) + a*b + 1) / t / t) / (t + 1), with t = (a+b) + 2*1.
 * The exact operation order mirrors the FPCore and matters for rounding. */
double code(double alpha, double beta) {
    double sum = alpha + beta;              /* a + b, reused below */
    double denom = sum + (2.0 * 1.0);       /* t = (a+b) + 2 */
    double numer = (sum + (beta * alpha)) + 1.0;
    double quotient = (numer / denom) / denom;
    return quotient / (denom + 1.0);
}
! Herbie-generated support module: NaN-aware fmax/fmin generics over all
! real(4)/real(8) argument combinations. Each specific uses merge() with
! the test x /= x (true exactly when x is NaN) so a NaN argument is
! skipped and the other argument returned; only when both arguments are
! non-NaN is the intrinsic max()/min() result used (IEEE-754 fmax/fmin
! semantics).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, real(8)/real(8): y if x is NaN, x if y is NaN, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(4)/real(4): same NaN handling at single precision.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): real(4) argument widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): real(4) argument widened with dble().
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, real(8)/real(8): y if x is NaN, x if y is NaN, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(4)/real(4): same NaN handling at single precision.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): real(4) argument widened with dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): real(4) argument widened with dble().
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie-generated binary64 evaluation of
! (((a+b) + a*b + 1) / t / t) / (t + 1), with t = (a+b) + 2*1.
! Operation order mirrors the FPCore and is significant for rounding.
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: denom, numer
denom = (alpha + beta) + (2.0d0 * 1.0d0)
numer = ((alpha + beta) + (beta * alpha)) + 1.0d0
code = ((numer / denom) / denom) / (denom + 1.0d0)
end function
/** Herbie-generated binary64 evaluation; operation order matches the FPCore. */
public static double code(double alpha, double beta) {
    final double sum = alpha + beta;
    final double t = sum + (2.0 * 1.0);          // t = (a+b) + 2
    final double numer = (sum + (beta * alpha)) + 1.0;
    return ((numer / t) / t) / (t + 1.0);
}
def code(alpha, beta):
    """Herbie-generated binary64 evaluation of the original expression.

    Computes (((a+b) + a*b + 1) / t / t) / (t + 1) with t = (a+b) + 2*1;
    the association order mirrors the FPCore and matters for rounding.
    """
    t_0 = (alpha + beta) + (2.0 * 1.0)
    numer = ((alpha + beta) + (beta * alpha)) + 1.0
    return ((numer / t_0) / t_0) / (t_0 + 1.0)
# Herbie-generated binary64 evaluation; every operation explicitly rounded
# via Float64() to mirror the FPCore's association order.
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
% Herbie-generated binary64 evaluation; operation order matches the FPCore.
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
(* Herbie-generated evaluation; each N[..., $MachinePrecision] mirrors one binary64 rounding step of the FPCore. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Original program (binary64): (((a+b) + a*b + 1) / t / t) / (t + 1), with t = (a+b) + 2*1.
(FPCore (alpha beta) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0)))) (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* Herbie-generated binary64 evaluation of
 * (((a+b) + a*b + 1) / t / t) / (t + 1), with t = (a+b) + 2*1.
 * Association order mirrors the FPCore and matters for rounding. */
double code(double alpha, double beta) {
    const double ab = alpha + beta;
    const double t = ab + (2.0 * 1.0);
    const double top = (ab + (beta * alpha)) + 1.0;
    return ((top / t) / t) / (t + 1.0);
}
! Herbie-generated support module: NaN-aware fmax/fmin generics over all
! real(4)/real(8) argument combinations. Each specific uses merge() with
! the test x /= x (true exactly when x is NaN) so a NaN argument is
! skipped and the other argument returned; only when both arguments are
! non-NaN is the intrinsic max()/min() result used (IEEE-754 fmax/fmin
! semantics).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, real(8)/real(8): y if x is NaN, x if y is NaN, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(4)/real(4): same NaN handling at single precision.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): real(4) argument widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): real(4) argument widened with dble().
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, real(8)/real(8): y if x is NaN, x if y is NaN, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(4)/real(4): same NaN handling at single precision.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): real(4) argument widened with dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): real(4) argument widened with dble().
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie-generated binary64 evaluation of
! (((a+b) + a*b + 1) / t / t) / (t + 1), with t = (a+b) + 2*1.
! Operation order mirrors the FPCore and is significant for rounding.
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: denom, numer
denom = (alpha + beta) + (2.0d0 * 1.0d0)
numer = ((alpha + beta) + (beta * alpha)) + 1.0d0
code = ((numer / denom) / denom) / (denom + 1.0d0)
end function
/** Herbie-generated binary64 evaluation; association order matches the FPCore. */
public static double code(double alpha, double beta) {
    final double ab = alpha + beta;
    final double t = ab + (2.0 * 1.0);
    final double top = (ab + (beta * alpha)) + 1.0;
    return ((top / t) / t) / (t + 1.0);
}
def code(alpha, beta):
    """Herbie-generated binary64 evaluation of the original expression.

    Computes (((a+b) + a*b + 1) / t / t) / (t + 1) with t = (a+b) + 2*1;
    association order mirrors the FPCore and matters for rounding.
    """
    t_0 = (alpha + beta) + (2.0 * 1.0)
    top = ((alpha + beta) + (beta * alpha)) + 1.0
    return ((top / t_0) / t_0) / (t_0 + 1.0)
# Herbie-generated binary64 evaluation; explicit Float64() roundings mirror
# the FPCore's association order.
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
	return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
% Herbie-generated binary64 evaluation; operation order matches the FPCore.
function tmp = code(alpha, beta)
	t_0 = (alpha + beta) + (2.0 * 1.0);
	tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
(* Herbie-generated evaluation; each N[..., $MachinePrecision] mirrors one binary64 rounding step of the FPCore. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
; Alternative: branch on alpha; for large alpha an fma-based asymptotic
; form (Taylor expansion around alpha = inf) replaces direct evaluation.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) 2.0)) (t_1 (+ t_0 1.0)))
(if (<= alpha 170000000.0)
(/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) t_1)
(/
(/
(fma (/ beta (+ (+ beta alpha) 2.0)) alpha (- 1.0 (pow alpha -1.0)))
t_0)
t_1))))
double code(double alpha, double beta) {
double t_0 = (alpha + beta) + 2.0;
double t_1 = t_0 + 1.0;
double tmp;
if (alpha <= 170000000.0) {
tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / t_1;
} else {
tmp = (fma((beta / ((beta + alpha) + 2.0)), alpha, (1.0 - pow(alpha, -1.0))) / t_0) / t_1;
}
return tmp;
}
# Herbie alternative: branch on alpha; fma-based form for large alpha.
function code(alpha, beta)
	t_0 = Float64(Float64(alpha + beta) + 2.0)
	t_1 = Float64(t_0 + 1.0)
	tmp = 0.0
	if (alpha <= 170000000.0)
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / t_1)
	else
		tmp = Float64(Float64(fma(Float64(beta / Float64(Float64(beta + alpha) + 2.0)), alpha, Float64(1.0 - (alpha ^ -1.0))) / t_0) / t_1)
	end
	return tmp
end
(* Herbie alternative: branch on alpha; fma-style form for large alpha. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 + 1.0), $MachinePrecision]}, If[LessEqual[alpha, 170000000.0], N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision], N[(N[(N[(N[(beta / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] * alpha + N[(1.0 - N[Power[alpha, -1.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2\\
t_1 := t\_0 + 1\\
\mathbf{if}\;\alpha \leq 170000000:\\
\;\;\;\;\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_1}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\frac{\beta}{\left(\beta + \alpha\right) + 2}, \alpha, 1 - {\alpha}^{-1}\right)}{t\_0}}{t\_1}\\
\end{array}
\end{array}
if alpha < 1.7e8: Initial program 99.9%
if 1.7e8 < alpha: Initial program 85.8%
lift-/.f64N/A
lift-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
associate-+l+N/A
metadata-evalN/A
metadata-evalN/A
lift-*.f64N/A
associate--l+N/A
lift-+.f64N/A
div-addN/A
lift-*.f64N/A
*-rgt-identityN/A
times-fracN/A
lower-fma.f64N/A
Applied rewrites99.8%
lift-/.f64N/A
/-rgt-identity99.8
Applied rewrites99.8%
Taylor expanded in alpha around inf
lower--.f64N/A
lower-/.f6499.8
Applied rewrites99.8%
Final simplification99.9%
; Alternative: branch on beta; for large beta a series expansion around
; beta = inf replaces direct evaluation.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ beta alpha) 2.0)))
(if (<= beta 1.6e+16)
(/
(/ (+ (fma beta alpha (+ beta alpha)) 1.0) t_0)
(* (+ 3.0 (+ beta alpha)) t_0))
(/
(/
(-
(+ (+ (/ (+ 1.0 alpha) beta) alpha) 1.0)
(* (+ 1.0 alpha) (/ (fma 2.0 alpha 4.0) beta)))
beta)
(+ (+ (+ alpha beta) 2.0) 1.0)))))
double code(double alpha, double beta) {
double t_0 = (beta + alpha) + 2.0;
double tmp;
if (beta <= 1.6e+16) {
tmp = ((fma(beta, alpha, (beta + alpha)) + 1.0) / t_0) / ((3.0 + (beta + alpha)) * t_0);
} else {
tmp = ((((((1.0 + alpha) / beta) + alpha) + 1.0) - ((1.0 + alpha) * (fma(2.0, alpha, 4.0) / beta))) / beta) / (((alpha + beta) + 2.0) + 1.0);
}
return tmp;
}
# Herbie alternative: branch on beta; series-based form for large beta.
function code(alpha, beta)
	t_0 = Float64(Float64(beta + alpha) + 2.0)
	tmp = 0.0
	if (beta <= 1.6e+16)
		tmp = Float64(Float64(Float64(fma(beta, alpha, Float64(beta + alpha)) + 1.0) / t_0) / Float64(Float64(3.0 + Float64(beta + alpha)) * t_0))
	else
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.0 + alpha) / beta) + alpha) + 1.0) - Float64(Float64(1.0 + alpha) * Float64(fma(2.0, alpha, 4.0) / beta))) / beta) / Float64(Float64(Float64(alpha + beta) + 2.0) + 1.0))
	end
	return tmp
end
(* Herbie alternative: branch on beta; series-based form for large beta. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 1.6e+16], N[(N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] + alpha), $MachinePrecision] + 1.0), $MachinePrecision] - N[(N[(1.0 + alpha), $MachinePrecision] * N[(N[(2.0 * alpha + 4.0), $MachinePrecision] / beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + 2\\
\mathbf{if}\;\beta \leq 1.6 \cdot 10^{+16}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_0}}{\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(\left(\frac{1 + \alpha}{\beta} + \alpha\right) + 1\right) - \left(1 + \alpha\right) \cdot \frac{\mathsf{fma}\left(2, \alpha, 4\right)}{\beta}}{\beta}}{\left(\left(\alpha + \beta\right) + 2\right) + 1}\\
\end{array}
\end{array}
if beta < 1.6e16Initial program 99.9%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites99.4%
if 1.6e16 < beta Initial program 84.1%
Taylor expanded in beta around inf
lower-/.f64N/A
lower--.f64N/A
+-commutativeN/A
lower-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
div-add-revN/A
lower-/.f64N/A
lower-+.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-+.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-fma.f6485.0
Applied rewrites85.0%
Final simplification95.2%
; Alternative: single fma-compacted expression, no branching.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ beta alpha) 2.0)) (t_1 (+ (+ alpha beta) 2.0)))
(/
(/ (fma (/ beta t_0) alpha (/ (+ (+ beta alpha) 1.0) t_0)) t_1)
(+ t_1 1.0))))
double code(double alpha, double beta) {
double t_0 = (beta + alpha) + 2.0;
double t_1 = (alpha + beta) + 2.0;
return (fma((beta / t_0), alpha, (((beta + alpha) + 1.0) / t_0)) / t_1) / (t_1 + 1.0);
}
# Herbie alternative: unconditional fma-compacted evaluation.
function code(alpha, beta)
	t_0 = Float64(Float64(beta + alpha) + 2.0)
	t_1 = Float64(Float64(alpha + beta) + 2.0)
	return Float64(Float64(fma(Float64(beta / t_0), alpha, Float64(Float64(Float64(beta + alpha) + 1.0) / t_0)) / t_1) / Float64(t_1 + 1.0))
end
(* Herbie alternative: unconditional fma-compacted evaluation. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]}, N[(N[(N[(N[(beta / t$95$0), $MachinePrecision] * alpha + N[(N[(N[(beta + alpha), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(t$95$1 + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + 2\\
t_1 := \left(\alpha + \beta\right) + 2\\
\frac{\frac{\mathsf{fma}\left(\frac{\beta}{t\_0}, \alpha, \frac{\left(\beta + \alpha\right) + 1}{t\_0}\right)}{t\_1}}{t\_1 + 1}
\end{array}
\end{array}
Initial program 95.2%
lift-/.f64N/A
lift-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
associate-+l+N/A
metadata-evalN/A
metadata-evalN/A
lift-*.f64N/A
associate--l+N/A
lift-+.f64N/A
div-addN/A
lift-*.f64N/A
*-rgt-identityN/A
times-fracN/A
lower-fma.f64N/A
Applied rewrites99.8%
lift-/.f64N/A
/-rgt-identity99.8
Applied rewrites99.8%
Final simplification99.8%
; Alternative: fma form for beta <= 1.6e16, simplified asymptotic
; quotient for large beta.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ beta alpha) 2.0)))
(if (<= beta 1.6e+16)
(/
(/ (+ (fma beta alpha (+ beta alpha)) 1.0) t_0)
(* (+ 3.0 (+ beta alpha)) t_0))
(/ (/ (+ 1.0 alpha) (+ 3.0 (+ alpha beta))) (+ 2.0 (+ alpha beta))))))
double code(double alpha, double beta) {
double t_0 = (beta + alpha) + 2.0;
double tmp;
if (beta <= 1.6e+16) {
tmp = ((fma(beta, alpha, (beta + alpha)) + 1.0) / t_0) / ((3.0 + (beta + alpha)) * t_0);
} else {
tmp = ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
}
return tmp;
}
# Herbie alternative: fma form below beta = 1.6e16, asymptotic quotient above.
function code(alpha, beta)
	t_0 = Float64(Float64(beta + alpha) + 2.0)
	tmp = 0.0
	if (beta <= 1.6e+16)
		tmp = Float64(Float64(Float64(fma(beta, alpha, Float64(beta + alpha)) + 1.0) / t_0) / Float64(Float64(3.0 + Float64(beta + alpha)) * t_0))
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / Float64(3.0 + Float64(alpha + beta))) / Float64(2.0 + Float64(alpha + beta)))
	end
	return tmp
end
(* Herbie alternative: fma form below beta = 1.6e16, asymptotic quotient above. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 1.6e+16], N[(N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + 2\\
\mathbf{if}\;\beta \leq 1.6 \cdot 10^{+16}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_0}}{\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}\\
\end{array}
\end{array}
if beta < 1.6e16Initial program 99.9%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites99.4%
if 1.6e16 < beta Initial program 84.1%
Taylor expanded in beta around -inf
mul-1-negN/A
lower-neg.f64N/A
lower--.f64N/A
mul-1-negN/A
lower-neg.f6485.6
Applied rewrites85.6%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites87.6%
Taylor expanded in alpha around 0
Applied rewrites87.6%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
Applied rewrites85.6%
; Alternative: single fused denominator product for beta <= 1.65e70,
; asymptotic quotient for larger beta.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ (+ beta alpha) 2.0)))
(if (<= beta 1.65e+70)
(/
(+ (fma beta alpha (+ beta alpha)) 1.0)
(* t_0 (* (+ 3.0 (+ beta alpha)) t_0)))
(/ (/ (+ 1.0 alpha) (+ 3.0 (+ alpha beta))) (+ 2.0 (+ alpha beta))))))
double code(double alpha, double beta) {
double t_0 = (beta + alpha) + 2.0;
double tmp;
if (beta <= 1.65e+70) {
tmp = (fma(beta, alpha, (beta + alpha)) + 1.0) / (t_0 * ((3.0 + (beta + alpha)) * t_0));
} else {
tmp = ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
}
return tmp;
}
# Herbie alternative: fused denominator product below beta = 1.65e70.
function code(alpha, beta)
	t_0 = Float64(Float64(beta + alpha) + 2.0)
	tmp = 0.0
	if (beta <= 1.65e+70)
		tmp = Float64(Float64(fma(beta, alpha, Float64(beta + alpha)) + 1.0) / Float64(t_0 * Float64(Float64(3.0 + Float64(beta + alpha)) * t_0)))
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / Float64(3.0 + Float64(alpha + beta))) / Float64(2.0 + Float64(alpha + beta)))
	end
	return tmp
end
(* Herbie alternative: fused denominator product below beta = 1.65e70. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[beta, 1.65e+70], N[(N[(N[(beta * alpha + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(t$95$0 * N[(N[(3.0 + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + 2\\
\mathbf{if}\;\beta \leq 1.65 \cdot 10^{+70}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\beta, \alpha, \beta + \alpha\right) + 1}{t\_0 \cdot \left(\left(3 + \left(\beta + \alpha\right)\right) \cdot t\_0\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}\\
\end{array}
\end{array}
if beta < 1.65000000000000008e70Initial program 99.9%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites96.9%
if 1.65000000000000008e70 < beta Initial program 79.9%
Taylor expanded in beta around -inf
mul-1-negN/A
lower-neg.f64N/A
lower--.f64N/A
mul-1-negN/A
lower-neg.f6486.5
Applied rewrites86.5%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites88.0%
Taylor expanded in alpha around 0
Applied rewrites88.0%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
Applied rewrites86.5%
; Alternative: alpha dropped from the small-beta branch (Taylor expansion in alpha around 0); asymptotic quotient for beta > 1.35e15.
(FPCore (alpha beta) :precision binary64 (if (<= beta 1.35e+15) (/ (/ (+ 1.0 beta) (+ 2.0 beta)) (* (+ 3.0 beta) (+ 2.0 beta))) (/ (/ (+ 1.0 alpha) (+ 3.0 (+ alpha beta))) (+ 2.0 (+ alpha beta)))))
/* Herbie alternative: beta-only approximation (alpha dropped) for
 * beta <= 1.35e15; asymptotic quotient otherwise. */
double code(double alpha, double beta) {
    if (beta <= 1.35e+15) {
        return ((1.0 + beta) / (2.0 + beta)) / ((3.0 + beta) * (2.0 + beta));
    }
    return ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
}
! Herbie-generated support module: NaN-aware fmax/fmin generics over all
! real(4)/real(8) argument combinations. Each specific uses merge() with
! the test x /= x (true exactly when x is NaN) so a NaN argument is
! skipped and the other argument returned; only when both arguments are
! non-NaN is the intrinsic max()/min() result used (IEEE-754 fmax/fmin
! semantics).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, real(8)/real(8): y if x is NaN, x if y is NaN, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(4)/real(4): same NaN handling at single precision.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): real(4) argument widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): real(4) argument widened with dble().
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, real(8)/real(8): y if x is NaN, x if y is NaN, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(4)/real(4): same NaN handling at single precision.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): real(4) argument widened with dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): real(4) argument widened with dble().
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative: beta-only approximation (alpha dropped) for
! beta <= 1.35e15; asymptotic quotient otherwise. Binary64 throughout.
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
if (beta <= 1.35d+15) then
  code = ((1.0d0 + beta) / (2.0d0 + beta)) / ((3.0d0 + beta) * (2.0d0 + beta))
else
  code = ((1.0d0 + alpha) / (3.0d0 + (alpha + beta))) / (2.0d0 + (alpha + beta))
end if
end function
/** Herbie alternative: beta-only form below 1.35e15, asymptotic quotient above. */
public static double code(double alpha, double beta) {
    if (beta <= 1.35e+15) {
        return ((1.0 + beta) / (2.0 + beta)) / ((3.0 + beta) * (2.0 + beta));
    }
    return ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
}
def code(alpha, beta):
    """Herbie alternative: beta-only form below 1.35e15, asymptotic above.

    Below the threshold alpha is dropped entirely (Taylor expansion of
    the original expression in alpha around 0).
    """
    if beta <= 1.35e+15:
        return ((1.0 + beta) / (2.0 + beta)) / ((3.0 + beta) * (2.0 + beta))
    return ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta))
# Herbie alternative: beta-only form below 1.35e15, asymptotic quotient above.
function code(alpha, beta)
	tmp = 0.0
	if (beta <= 1.35e+15)
		tmp = Float64(Float64(Float64(1.0 + beta) / Float64(2.0 + beta)) / Float64(Float64(3.0 + beta) * Float64(2.0 + beta)))
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / Float64(3.0 + Float64(alpha + beta))) / Float64(2.0 + Float64(alpha + beta)))
	end
	return tmp
end
% Herbie alternative: beta-only form below 1.35e15, asymptotic quotient above.
function tmp_2 = code(alpha, beta)
	if (beta <= 1.35e+15)
		tmp_2 = ((1.0 + beta) / (2.0 + beta)) / ((3.0 + beta) * (2.0 + beta));
	else
		tmp_2 = ((1.0 + alpha) / (3.0 + (alpha + beta))) / (2.0 + (alpha + beta));
	end
end
(* Herbie alternative: beta-only form below 1.35e15, asymptotic quotient above. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[alpha_, beta_] := If[LessEqual[beta, 1.35e+15], N[(N[(N[(1.0 + beta), $MachinePrecision] / N[(2.0 + beta), $MachinePrecision]), $MachinePrecision] / N[(N[(3.0 + beta), $MachinePrecision] * N[(2.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 1.35 \cdot 10^{+15}:\\
\;\;\;\;\frac{\frac{1 + \beta}{2 + \beta}}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{3 + \left(\alpha + \beta\right)}}{2 + \left(\alpha + \beta\right)}\\
\end{array}
\end{array}
if beta < 1.35e15Initial program 99.9%
Taylor expanded in alpha around 0
lower-/.f64N/A
lower-+.f64N/A
lower-+.f6488.6
Applied rewrites88.6%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites88.6%
Taylor expanded in alpha around 0
*-commutativeN/A
lower-*.f64N/A
lower-+.f64N/A
lower-+.f6466.7
Applied rewrites66.7%
if 1.35e15 < beta Initial program 84.1%
Taylor expanded in beta around -inf
mul-1-negN/A
lower-neg.f64N/A
lower--.f64N/A
mul-1-negN/A
lower-neg.f6485.6
Applied rewrites85.6%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites87.6%
Taylor expanded in alpha around 0
Applied rewrites87.6%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
Applied rewrites85.6%
; Alternative: linear-in-beta numerator via fma for beta <= 9.5,
; asymptotic quotient for larger beta.
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (+ 3.0 (+ alpha beta))))
(if (<= beta 9.5)
(/ (fma 0.25 beta 0.5) (* t_0 (+ (+ alpha beta) 2.0)))
(/ (/ (+ 1.0 alpha) t_0) (+ 2.0 (+ alpha beta))))))
double code(double alpha, double beta) {
double t_0 = 3.0 + (alpha + beta);
double tmp;
if (beta <= 9.5) {
tmp = fma(0.25, beta, 0.5) / (t_0 * ((alpha + beta) + 2.0));
} else {
tmp = ((1.0 + alpha) / t_0) / (2.0 + (alpha + beta));
}
return tmp;
}
# Herbie alternative: fma numerator below beta = 9.5, asymptotic above.
function code(alpha, beta)
	t_0 = Float64(3.0 + Float64(alpha + beta))
	tmp = 0.0
	if (beta <= 9.5)
		tmp = Float64(fma(0.25, beta, 0.5) / Float64(t_0 * Float64(Float64(alpha + beta) + 2.0)))
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / t_0) / Float64(2.0 + Float64(alpha + beta)))
	end
	return tmp
end
(* Herbie alternative: fma-style numerator below beta = 9.5, asymptotic above. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[beta, 9.5], N[(N[(0.25 * beta + 0.5), $MachinePrecision] / N[(t$95$0 * N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 3 + \left(\alpha + \beta\right)\\
\mathbf{if}\;\beta \leq 9.5:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.25, \beta, 0.5\right)}{t\_0 \cdot \left(\left(\alpha + \beta\right) + 2\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{t\_0}}{2 + \left(\alpha + \beta\right)}\\
\end{array}
\end{array}
if beta < 9.5Initial program 99.9%
Taylor expanded in alpha around 0
lower-/.f64N/A
lower-+.f64N/A
lower-+.f6488.8
Applied rewrites88.8%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites88.9%
Taylor expanded in beta around 0
Applied rewrites87.7%
if 9.5 < beta Initial program 84.7%
Taylor expanded in beta around -inf
mul-1-negN/A
lower-neg.f64N/A
lower--.f64N/A
mul-1-negN/A
lower-neg.f6483.9
Applied rewrites83.9%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites85.9%
Taylor expanded in alpha around 0
Applied rewrites85.9%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
Applied rewrites83.9%
; Alternative: fma numerator for beta <= 10; for larger beta the expression collapses to (1+alpha)/beta^2 evaluated as two divisions.
(FPCore (alpha beta) :precision binary64 (if (<= beta 10.0) (/ (fma 0.25 beta 0.5) (* (+ 3.0 (+ alpha beta)) (+ (+ alpha beta) 2.0))) (/ (/ (+ 1.0 alpha) beta) beta)))
double code(double alpha, double beta) {
double tmp;
if (beta <= 10.0) {
tmp = fma(0.25, beta, 0.5) / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0));
} else {
tmp = ((1.0 + alpha) / beta) / beta;
}
return tmp;
}
# Herbie alternative: fma numerator below beta = 10, (1+alpha)/beta/beta above.
function code(alpha, beta)
	tmp = 0.0
	if (beta <= 10.0)
		tmp = Float64(fma(0.25, beta, 0.5) / Float64(Float64(3.0 + Float64(alpha + beta)) * Float64(Float64(alpha + beta) + 2.0)))
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta)
	end
	return tmp
end
(* Herbie alternative: fma-style numerator below beta = 10, (1+alpha)/beta/beta above. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[alpha_, beta_] := If[LessEqual[beta, 10.0], N[(N[(0.25 * beta + 0.5), $MachinePrecision] / N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 10:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.25, \beta, 0.5\right)}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if beta < 10Initial program 99.9%
Taylor expanded in alpha around 0
lower-/.f64N/A
lower-+.f64N/A
lower-+.f6488.8
Applied rewrites88.8%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites88.9%
Taylor expanded in beta around 0
Applied rewrites87.7%
if 10 < beta Initial program 84.7%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6480.9
Applied rewrites80.9%
Applied rewrites83.3%
; Alternative: constant numerator 0.5 for beta <= 14; (1+alpha)/beta^2 as two divisions for larger beta.
(FPCore (alpha beta) :precision binary64 (if (<= beta 14.0) (/ 0.5 (* (+ 3.0 (+ alpha beta)) (+ (+ alpha beta) 2.0))) (/ (/ (+ 1.0 alpha) beta) beta)))
/* Herbie alternative: constant numerator 0.5 for beta <= 14;
 * (1+alpha)/beta/beta for larger beta. */
double code(double alpha, double beta) {
    if (beta <= 14.0) {
        return 0.5 / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0));
    }
    return ((1.0 + alpha) / beta) / beta;
}
! Herbie-generated support module: NaN-aware fmax/fmin generics over all
! real(4)/real(8) argument combinations. Each specific uses merge() with
! the test x /= x (true exactly when x is NaN) so a NaN argument is
! skipped and the other argument returned; only when both arguments are
! non-NaN is the intrinsic max()/min() result used (IEEE-754 fmax/fmin
! semantics).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, real(8)/real(8): y if x is NaN, x if y is NaN, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(4)/real(4): same NaN handling at single precision.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): real(4) argument widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): real(4) argument widened with dble().
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, real(8)/real(8): y if x is NaN, x if y is NaN, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(4)/real(4): same NaN handling at single precision.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): real(4) argument widened with dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): real(4) argument widened with dble().
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative: constant numerator 0.5 for beta <= 14;
! (1+alpha)/beta/beta for larger beta. Binary64 throughout.
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
if (beta <= 14.0d0) then
  code = 0.5d0 / ((3.0d0 + (alpha + beta)) * ((alpha + beta) + 2.0d0))
else
  code = ((1.0d0 + alpha) / beta) / beta
end if
end function
/** Herbie alternative: constant numerator below beta = 14, (1+alpha)/beta/beta above. */
public static double code(double alpha, double beta) {
    if (beta <= 14.0) {
        return 0.5 / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0));
    }
    return ((1.0 + alpha) / beta) / beta;
}
def code(alpha, beta):
    """Herbie alternative: constant numerator below beta = 14.

    For beta > 14 the expression collapses to (1 + alpha) / beta / beta.
    """
    if beta <= 14.0:
        return 0.5 / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0))
    return ((1.0 + alpha) / beta) / beta
# Herbie alternative: constant numerator below beta = 14, (1+alpha)/beta/beta above.
function code(alpha, beta)
	tmp = 0.0
	if (beta <= 14.0)
		tmp = Float64(0.5 / Float64(Float64(3.0 + Float64(alpha + beta)) * Float64(Float64(alpha + beta) + 2.0)))
	else
		tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta)
	end
	return tmp
end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 14.0) tmp = 0.5 / ((3.0 + (alpha + beta)) * ((alpha + beta) + 2.0)); else tmp = ((1.0 + alpha) / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 14.0], N[(0.5 / N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 14:\\
\;\;\;\;\frac{0.5}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(\left(\alpha + \beta\right) + 2\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if beta < 14Initial program 99.9%
Taylor expanded in alpha around 0
lower-/.f64N/A
lower-+.f64N/A
lower-+.f6488.8
Applied rewrites88.8%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites88.9%
Taylor expanded in beta around 0
Applied rewrites87.4%
if 14 < beta Initial program 84.7%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6480.9
Applied rewrites80.9%
Applied rewrites83.3%
(FPCore (alpha beta) :precision binary64 (if (<= beta 5e+18) (/ (+ 1.0 alpha) (* (+ 3.0 (+ alpha beta)) (+ 2.0 beta))) (/ (/ (+ 1.0 alpha) beta) beta)))
double code(double alpha, double beta) {
    /* beta <= 5e18: one division by the factored quadratic;
       larger beta: divide twice to avoid overflow in the product. */
    if (beta <= 5e+18) {
        return (1.0 + alpha) / ((3.0 + (alpha + beta)) * (2.0 + beta));
    }
    return ((1.0 + alpha) / beta) / beta;
}
! Helper module providing fmax/fmin generics with C-library NaN semantics:
! when exactly one argument is NaN, the non-NaN argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Overloads cover all real(4)/real(8) argument combinations; the
! mixed-kind versions promote the real(4) argument and return real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is the NaN test: NaN x -> y, NaN y -> x, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-propagation pattern as the fmax family, with min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: s
real(8) :: res
! beta <= 5e18: one division by the factored quadratic;
! larger beta: divide twice to avoid overflow in the product.
if (beta <= 5d+18) then
s = alpha + beta
res = (1.0d0 + alpha) / ((3.0d0 + s) * (2.0d0 + beta))
else
res = ((1.0d0 + alpha) / beta) / beta
end if
code = res
end function
public static double code(double alpha, double beta) {
    // beta <= 5e18: one division by the factored quadratic;
    // larger beta: divide twice to avoid overflow in the product.
    if (beta <= 5e+18) {
        return (1.0 + alpha) / ((3.0 + (alpha + beta)) * (2.0 + beta));
    }
    return ((1.0 + alpha) / beta) / beta;
}
def code(alpha, beta):
    """Herbie-generated binary64 approximation: factored quadratic denominator
    for beta <= 5e18, two sequential divisions by beta otherwise.

    (Original line collapsed multiple statements onto one line, which is
    invalid Python; reformatted with identical operations.)
    """
    tmp = 0
    if beta <= 5e+18:
        tmp = (1.0 + alpha) / ((3.0 + (alpha + beta)) * (2.0 + beta))
    else:
        tmp = ((1.0 + alpha) / beta) / beta
    return tmp
function code(alpha, beta) tmp = 0.0 if (beta <= 5e+18) tmp = Float64(Float64(1.0 + alpha) / Float64(Float64(3.0 + Float64(alpha + beta)) * Float64(2.0 + beta))); else tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 5e+18) tmp = (1.0 + alpha) / ((3.0 + (alpha + beta)) * (2.0 + beta)); else tmp = ((1.0 + alpha) / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 5e+18], N[(N[(1.0 + alpha), $MachinePrecision] / N[(N[(3.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision] * N[(2.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 5 \cdot 10^{+18}:\\
\;\;\;\;\frac{1 + \alpha}{\left(3 + \left(\alpha + \beta\right)\right) \cdot \left(2 + \beta\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if beta < 5e18Initial program 99.9%
Taylor expanded in beta around -inf
mul-1-negN/A
lower-neg.f64N/A
lower--.f64N/A
mul-1-negN/A
lower-neg.f6415.3
Applied rewrites15.3%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites34.9%
Taylor expanded in alpha around 0
Applied rewrites34.9%
Taylor expanded in alpha around 0
lower-+.f6414.3
Applied rewrites14.3%
if 5e18 < beta Initial program 84.1%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6482.7
Applied rewrites82.7%
Applied rewrites85.1%
(FPCore (alpha beta) :precision binary64 (if (<= beta 2e+16) (/ (+ 1.0 alpha) (* (+ 3.0 beta) (+ 2.0 beta))) (/ (/ (+ 1.0 alpha) beta) beta)))
double code(double alpha, double beta) {
    /* beta <= 2e16: one division by (3 + beta)(2 + beta);
       larger beta: divide twice to avoid overflow in the product. */
    if (beta <= 2e+16) {
        return (1.0 + alpha) / ((3.0 + beta) * (2.0 + beta));
    }
    return ((1.0 + alpha) / beta) / beta;
}
! Helper module providing fmax/fmin generics with C-library NaN semantics:
! when exactly one argument is NaN, the non-NaN argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Overloads cover all real(4)/real(8) argument combinations; the
! mixed-kind versions promote the real(4) argument and return real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is the NaN test: NaN x -> y, NaN y -> x, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-propagation pattern as the fmax family, with min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: denom
real(8) :: res
! beta <= 2e16: one division by (3 + beta)(2 + beta);
! larger beta: divide twice to avoid overflow in the product.
if (beta <= 2d+16) then
denom = (3.0d0 + beta) * (2.0d0 + beta)
res = (1.0d0 + alpha) / denom
else
res = ((1.0d0 + alpha) / beta) / beta
end if
code = res
end function
public static double code(double alpha, double beta) {
    // beta <= 2e16: one division by (3 + beta)(2 + beta);
    // larger beta: divide twice to avoid overflow in the product.
    if (beta <= 2e+16) {
        return (1.0 + alpha) / ((3.0 + beta) * (2.0 + beta));
    }
    return ((1.0 + alpha) / beta) / beta;
}
def code(alpha, beta):
    """Herbie-generated binary64 approximation: one division by
    (3 + beta)(2 + beta) for beta <= 2e16, two sequential divisions otherwise.

    (Original line collapsed multiple statements onto one line, which is
    invalid Python; reformatted with identical operations.)
    """
    tmp = 0
    if beta <= 2e+16:
        tmp = (1.0 + alpha) / ((3.0 + beta) * (2.0 + beta))
    else:
        tmp = ((1.0 + alpha) / beta) / beta
    return tmp
function code(alpha, beta) tmp = 0.0 if (beta <= 2e+16) tmp = Float64(Float64(1.0 + alpha) / Float64(Float64(3.0 + beta) * Float64(2.0 + beta))); else tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 2e+16) tmp = (1.0 + alpha) / ((3.0 + beta) * (2.0 + beta)); else tmp = ((1.0 + alpha) / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 2e+16], N[(N[(1.0 + alpha), $MachinePrecision] / N[(N[(3.0 + beta), $MachinePrecision] * N[(2.0 + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 2 \cdot 10^{+16}:\\
\;\;\;\;\frac{1 + \alpha}{\left(3 + \beta\right) \cdot \left(2 + \beta\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if beta < 2e16Initial program 99.9%
Taylor expanded in beta around -inf
mul-1-negN/A
lower-neg.f64N/A
lower--.f64N/A
mul-1-negN/A
lower-neg.f6415.3
Applied rewrites15.3%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites34.9%
Taylor expanded in alpha around 0
Applied rewrites34.9%
Taylor expanded in alpha around 0
*-commutativeN/A
lower-*.f64N/A
lower-+.f64N/A
lower-+.f6413.9
Applied rewrites13.9%
if 2e16 < beta Initial program 84.1%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6482.7
Applied rewrites82.7%
Applied rewrites85.1%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 0.0031) (/ (+ 1.0 alpha) (* beta beta)) (/ (/ alpha beta) beta)))
double code(double alpha, double beta) {
    /* Small alpha: keep the (1 + alpha) numerator over beta*beta;
       larger alpha: drop the 1 and divide alpha by beta twice. */
    if (alpha <= 0.0031) {
        return (1.0 + alpha) / (beta * beta);
    }
    return (alpha / beta) / beta;
}
! Helper module providing fmax/fmin generics with C-library NaN semantics:
! when exactly one argument is NaN, the non-NaN argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Overloads cover all real(4)/real(8) argument combinations; the
! mixed-kind versions promote the real(4) argument and return real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is the NaN test: NaN x -> y, NaN y -> x, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-propagation pattern as the fmax family, with min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: numer
real(8) :: res
! Small alpha: keep the (1 + alpha) numerator over beta*beta;
! larger alpha: drop the 1 and divide alpha by beta twice.
if (alpha <= 0.0031d0) then
numer = 1.0d0 + alpha
res = numer / (beta * beta)
else
res = (alpha / beta) / beta
end if
code = res
end function
public static double code(double alpha, double beta) {
    // Small alpha: keep the (1 + alpha) numerator over beta*beta;
    // larger alpha: drop the 1 and divide alpha by beta twice.
    if (alpha <= 0.0031) {
        return (1.0 + alpha) / (beta * beta);
    }
    return (alpha / beta) / beta;
}
def code(alpha, beta):
    """Herbie-generated binary64 approximation: (1 + alpha) / beta**2 for small
    alpha, (alpha / beta) / beta otherwise.

    (Original line collapsed multiple statements onto one line, which is
    invalid Python; reformatted with identical operations.)
    """
    tmp = 0
    if alpha <= 0.0031:
        tmp = (1.0 + alpha) / (beta * beta)
    else:
        tmp = (alpha / beta) / beta
    return tmp
function code(alpha, beta) tmp = 0.0 if (alpha <= 0.0031) tmp = Float64(Float64(1.0 + alpha) / Float64(beta * beta)); else tmp = Float64(Float64(alpha / beta) / beta); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 0.0031) tmp = (1.0 + alpha) / (beta * beta); else tmp = (alpha / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[alpha, 0.0031], N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 0.0031:\\
\;\;\;\;\frac{1 + \alpha}{\beta \cdot \beta}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if alpha < 0.00309999999999999989Initial program 99.9%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6433.0
Applied rewrites33.0%
if 0.00309999999999999989 < alpha Initial program 85.9%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6415.1
Applied rewrites15.1%
Taylor expanded in alpha around inf
Applied rewrites15.1%
Applied rewrites16.5%
(FPCore (alpha beta) :precision binary64 (/ (/ (+ 1.0 alpha) beta) beta))
double code(double alpha, double beta) {
    /* Two sequential divisions by beta, avoiding an explicit beta*beta. */
    double scaled = (1.0 + alpha) / beta;
    return scaled / beta;
}
! Helper module providing fmax/fmin generics with C-library NaN semantics:
! when exactly one argument is NaN, the non-NaN argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Overloads cover all real(4)/real(8) argument combinations; the
! mixed-kind versions promote the real(4) argument and return real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is the NaN test: NaN x -> y, NaN y -> x, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-propagation pattern as the fmax family, with min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: scaled
! Two sequential divisions by beta, avoiding an explicit beta**2.
scaled = (1.0d0 + alpha) / beta
code = scaled / beta
end function
public static double code(double alpha, double beta) {
    // Two sequential divisions by beta, avoiding an explicit beta*beta.
    double scaled = (1.0 + alpha) / beta;
    return scaled / beta;
}
def code(alpha, beta):
    """Compute (1 + alpha) / beta**2 as two sequential divisions (binary64)."""
    scaled = (1.0 + alpha) / beta
    return scaled / beta
function code(alpha, beta) return Float64(Float64(Float64(1.0 + alpha) / beta) / beta) end
function tmp = code(alpha, beta) tmp = ((1.0 + alpha) / beta) / beta; end
code[alpha_, beta_] := N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1 + \alpha}{\beta}}{\beta}
\end{array}
Initial program 95.2%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6427.1
Applied rewrites27.1%
Applied rewrites27.8%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 0.0031) (/ 1.0 (* beta beta)) (/ alpha (* beta beta))))
double code(double alpha, double beta) {
    /* Small alpha: numerator collapses to 1; otherwise use alpha itself. */
    double numer = (alpha <= 0.0031) ? 1.0 : alpha;
    return numer / (beta * beta);
}
! Helper module providing fmax/fmin generics with C-library NaN semantics:
! when exactly one argument is NaN, the non-NaN argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Overloads cover all real(4)/real(8) argument combinations; the
! mixed-kind versions promote the real(4) argument and return real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is the NaN test: NaN x -> y, NaN y -> x, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-propagation pattern as the fmax family, with min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: sq
real(8) :: res
! Small alpha: numerator collapses to 1; otherwise use alpha itself.
sq = beta * beta
if (alpha <= 0.0031d0) then
res = 1.0d0 / sq
else
res = alpha / sq
end if
code = res
end function
public static double code(double alpha, double beta) {
    // Small alpha: numerator collapses to 1; otherwise use alpha itself.
    double numer = (alpha <= 0.0031) ? 1.0 : alpha;
    return numer / (beta * beta);
}
def code(alpha, beta):
    """Herbie-generated binary64 approximation: 1 / beta**2 for small alpha,
    alpha / beta**2 otherwise.

    (Original line collapsed multiple statements onto one line, which is
    invalid Python; reformatted with identical operations.)
    """
    tmp = 0
    if alpha <= 0.0031:
        tmp = 1.0 / (beta * beta)
    else:
        tmp = alpha / (beta * beta)
    return tmp
function code(alpha, beta) tmp = 0.0 if (alpha <= 0.0031) tmp = Float64(1.0 / Float64(beta * beta)); else tmp = Float64(alpha / Float64(beta * beta)); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 0.0031) tmp = 1.0 / (beta * beta); else tmp = alpha / (beta * beta); end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[alpha, 0.0031], N[(1.0 / N[(beta * beta), $MachinePrecision]), $MachinePrecision], N[(alpha / N[(beta * beta), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 0.0031:\\
\;\;\;\;\frac{1}{\beta \cdot \beta}\\
\mathbf{else}:\\
\;\;\;\;\frac{\alpha}{\beta \cdot \beta}\\
\end{array}
\end{array}
if alpha < 0.00309999999999999989Initial program 99.9%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6433.0
Applied rewrites33.0%
Taylor expanded in alpha around 0
Applied rewrites32.7%
if 0.00309999999999999989 < alpha Initial program 85.9%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6415.1
Applied rewrites15.1%
Taylor expanded in alpha around inf
Applied rewrites15.1%
(FPCore (alpha beta) :precision binary64 (/ (+ 1.0 alpha) (* beta beta)))
double code(double alpha, double beta) {
    /* Single division of (1 + alpha) by the explicit square of beta. */
    double denom = beta * beta;
    return (1.0 + alpha) / denom;
}
! Helper module providing fmax/fmin generics with C-library NaN semantics:
! when exactly one argument is NaN, the non-NaN argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Overloads cover all real(4)/real(8) argument combinations; the
! mixed-kind versions promote the real(4) argument and return real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is the NaN test: NaN x -> y, NaN y -> x, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-propagation pattern as the fmax family, with min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: denom
! Single division of (1 + alpha) by the explicit square of beta.
denom = beta * beta
code = (1.0d0 + alpha) / denom
end function
public static double code(double alpha, double beta) {
    // Single division of (1 + alpha) by the explicit square of beta.
    double denom = beta * beta;
    return (1.0 + alpha) / denom;
}
def code(alpha, beta):
    """Return (1 + alpha) / beta**2 with a single division (binary64)."""
    denom = beta * beta
    return (1.0 + alpha) / denom
function code(alpha, beta) return Float64(Float64(1.0 + alpha) / Float64(beta * beta)) end
function tmp = code(alpha, beta) tmp = (1.0 + alpha) / (beta * beta); end
code[alpha_, beta_] := N[(N[(1.0 + alpha), $MachinePrecision] / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 + \alpha}{\beta \cdot \beta}
\end{array}
Initial program 95.2%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6427.1
Applied rewrites27.1%
(FPCore (alpha beta) :precision binary64 (/ alpha (* beta beta)))
double code(double alpha, double beta) {
    /* Single division of alpha by the explicit square of beta. */
    double denom = beta * beta;
    return alpha / denom;
}
! Helper module providing fmax/fmin generics with C-library NaN semantics:
! when exactly one argument is NaN, the non-NaN argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Overloads cover all real(4)/real(8) argument combinations; the
! mixed-kind versions promote the real(4) argument and return real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is the NaN test: NaN x -> y, NaN y -> x, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-propagation pattern as the fmax family, with min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(alpha, beta)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: denom
! Single division of alpha by the explicit square of beta.
denom = beta * beta
code = alpha / denom
end function
public static double code(double alpha, double beta) {
    // Single division of alpha by the explicit square of beta.
    double denom = beta * beta;
    return alpha / denom;
}
def code(alpha, beta):
    """Return alpha / beta**2 with a single division (binary64)."""
    denom = beta * beta
    return alpha / denom
function code(alpha, beta) return Float64(alpha / Float64(beta * beta)) end
function tmp = code(alpha, beta) tmp = alpha / (beta * beta); end
code[alpha_, beta_] := N[(alpha / N[(beta * beta), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\alpha}{\beta \cdot \beta}
\end{array}
Initial program 95.2%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-+.f64N/A
unpow2N/A
lower-*.f6427.1
Applied rewrites27.1%
Taylor expanded in alpha around inf
Applied rewrites16.0%
herbie shell --seed 2024360
(FPCore (alpha beta)
:name "Octave 3.8, jcobi/3"
:precision binary64
:pre (and (> alpha -1.0) (> beta -1.0))
(/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))