
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (* i (+ (+ alpha beta) i)))
(t_1 (+ (+ alpha beta) (* 2.0 i)))
(t_2 (* t_1 t_1)))
(/ (/ (* t_0 (+ (* beta alpha) t_0)) t_2) (- t_2 1.0))))double code(double alpha, double beta, double i) {
double t_0 = i * ((alpha + beta) + i);
double t_1 = (alpha + beta) + (2.0 * i);
double t_2 = t_1 * t_1;
return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
! NaN-aware fmax/fmin generics over real(4)/real(8) argument combinations.
! The merge chains return the other operand when one argument is NaN
! (x /= x is true only for NaN), mirroring C's fmax/fmin semantics;
! the bare max/min intrinsics may instead propagate NaN.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names resolve on argument kinds: 88 = (r8,r8), 44 = (r4,r4),
! 84 = (r8,r4), 48 = (r4,r8). Mixed-kind versions widen to real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8); if exactly one operand is NaN, returns the other.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4); same NaN handling as fmax88.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); y is widened with dble before comparison.
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); x is widened with dble before comparison.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8); if exactly one operand is NaN, returns the other.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4); same NaN handling as fmin88.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); y is widened with dble before comparison.
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); x is widened with dble before comparison.
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Double-precision evaluation of
!   t0*(alpha*beta + t0) / (t2*(t2 - 1))
! with t0 = i*(alpha + beta + i) and t2 = (alpha + beta + 2*i)**2.
! Fixes: adds implicit none; drops the unused "use fmin_fmax_functions"
! (no name from that module is referenced in this body).
real(8) function code(alpha, beta, i)
implicit none
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
t_0 = i * ((alpha + beta) + i)
t_1 = (alpha + beta) + (2.0d0 * i)
t_2 = t_1 * t_1
code = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0d0)
end function
/**
 * Evaluates num*(alpha*beta + num) / (sSq*(sSq - 1)) in binary64, where
 * num = i*(alpha + beta + i) and sSq = (alpha + beta + 2*i)^2.
 * Operation order matches the reference expression exactly so the
 * rounded double result is bit-identical.
 */
public static double code(double alpha, double beta, double i) {
    double num = i * ((alpha + beta) + i);
    double s = (alpha + beta) + (2.0 * i);
    double sSq = s * s;
    double scaled = (num * ((beta * alpha) + num)) / sSq;
    return scaled / (sSq - 1.0);
}
def code(alpha, beta, i):
    """Evaluate t0*(alpha*beta + t0) / (t2*(t2 - 1)) in float.

    t0 = i*(alpha + beta + i) and t2 = (alpha + beta + 2*i)**2.
    Fix: the original had all statements mashed onto a single line,
    which is not valid Python; this restores proper formatting with
    the identical sequence of operations.
    """
    t_0 = i * ((alpha + beta) + i)
    t_1 = (alpha + beta) + (2.0 * i)
    t_2 = t_1 * t_1
    return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0)
# Evaluate t0*(alpha*beta + t0) / (t2*(t2 - 1)) in Float64, where
# t0 = i*(alpha + beta + i) and t2 = (alpha + beta + 2*i)^2.
# Fix: the original had every statement mashed onto one line with no
# separators, which is not valid Julia; formatting restored, operations
# unchanged.
function code(alpha, beta, i)
    t_0 = Float64(i * Float64(Float64(alpha + beta) + i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    t_2 = Float64(t_1 * t_1)
    return Float64(Float64(Float64(t_0 * Float64(Float64(beta * alpha) + t_0)) / t_2) / Float64(t_2 - 1.0))
end
% Evaluate t0*(alpha*beta + t0) / (t2*(t2 - 1)) in double precision,
% where t0 = i*(alpha + beta + i) and t2 = (alpha + beta + 2*i)^2.
% Fix: the original had the function declaration and body mashed onto a
% single line; MATLAB requires the declaration on its own line.
function tmp = code(alpha, beta, i)
    t_0 = i * ((alpha + beta) + i);
    t_1 = (alpha + beta) + (2.0 * i);
    t_2 = t_1 * t_1;
    tmp = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[(N[(beta * alpha), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision] / N[(t$95$2 - 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_2 := t\_1 \cdot t\_1\\
\frac{\frac{t\_0 \cdot \left(\beta \cdot \alpha + t\_0\right)}{t\_2}}{t\_2 - 1}
\end{array}
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (* i (+ (+ alpha beta) i)))
(t_1 (+ (+ alpha beta) (* 2.0 i)))
(t_2 (* t_1 t_1)))
(/ (/ (* t_0 (+ (* beta alpha) t_0)) t_2) (- t_2 1.0))))double code(double alpha, double beta, double i) {
double t_0 = i * ((alpha + beta) + i);
double t_1 = (alpha + beta) + (2.0 * i);
double t_2 = t_1 * t_1;
return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Double-precision evaluation of
!   t0*(alpha*beta + t0) / (t2*(t2 - 1))
! with t0 = i*(alpha + beta + i) and t2 = (alpha + beta + 2*i)**2.
! NOTE(review): fmin_fmax_functions is use-associated but no name from
! it is referenced here; no implicit none, though all names are declared.
real(8) function code(alpha, beta, i)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
t_0 = i * ((alpha + beta) + i)
t_1 = (alpha + beta) + (2.0d0 * i)
t_2 = t_1 * t_1
code = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0d0)
end function
public static double code(double alpha, double beta, double i) {
double t_0 = i * ((alpha + beta) + i);
double t_1 = (alpha + beta) + (2.0 * i);
double t_2 = t_1 * t_1;
return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
def code(alpha, beta, i): t_0 = i * ((alpha + beta) + i) t_1 = (alpha + beta) + (2.0 * i) t_2 = t_1 * t_1 return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0)
function code(alpha, beta, i) t_0 = Float64(i * Float64(Float64(alpha + beta) + i)) t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) t_2 = Float64(t_1 * t_1) return Float64(Float64(Float64(t_0 * Float64(Float64(beta * alpha) + t_0)) / t_2) / Float64(t_2 - 1.0)) end
function tmp = code(alpha, beta, i) t_0 = i * ((alpha + beta) + i); t_1 = (alpha + beta) + (2.0 * i); t_2 = t_1 * t_1; tmp = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0); end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[(N[(beta * alpha), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision] / N[(t$95$2 - 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_2 := t\_1 \cdot t\_1\\
\frac{\frac{t\_0 \cdot \left(\beta \cdot \alpha + t\_0\right)}{t\_2}}{t\_2 - 1}
\end{array}
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ (+ alpha i) beta) i))
(t_1 (/ i t_0))
(t_2 (+ (+ beta alpha) i)))
(*
(/ (fma t_1 t_2 (* beta (/ alpha t_0))) (- t_0 1.0))
(/ (* t_2 t_1) (- t_0 -1.0)))))double code(double alpha, double beta, double i) {
double t_0 = ((alpha + i) + beta) + i;
double t_1 = i / t_0;
double t_2 = (beta + alpha) + i;
return (fma(t_1, t_2, (beta * (alpha / t_0))) / (t_0 - 1.0)) * ((t_2 * t_1) / (t_0 - -1.0));
}
function code(alpha, beta, i) t_0 = Float64(Float64(Float64(alpha + i) + beta) + i) t_1 = Float64(i / t_0) t_2 = Float64(Float64(beta + alpha) + i) return Float64(Float64(fma(t_1, t_2, Float64(beta * Float64(alpha / t_0))) / Float64(t_0 - 1.0)) * Float64(Float64(t_2 * t_1) / Float64(t_0 - -1.0))) end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(N[(alpha + i), $MachinePrecision] + beta), $MachinePrecision] + i), $MachinePrecision]}, Block[{t$95$1 = N[(i / t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(beta + alpha), $MachinePrecision] + i), $MachinePrecision]}, N[(N[(N[(t$95$1 * t$95$2 + N[(beta * N[(alpha / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 - 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[(t$95$2 * t$95$1), $MachinePrecision] / N[(t$95$0 - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \left(\left(\alpha + i\right) + \beta\right) + i\\
t_1 := \frac{i}{t\_0}\\
t_2 := \left(\beta + \alpha\right) + i\\
\frac{\mathsf{fma}\left(t\_1, t\_2, \beta \cdot \frac{\alpha}{t\_0}\right)}{t\_0 - 1} \cdot \frac{t\_2 \cdot t\_1}{t\_0 - -1}
\end{array}
Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
lift-/.f64N/A
lift-fma.f64N/A
div-addN/A
associate-*r/N/A
lift-/.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6499.6%
Applied rewrites99.6%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.7%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.7%
Applied rewrites99.7%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.7%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.7%
Applied rewrites99.7%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.7%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.7%
Applied rewrites99.7%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.7%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.6%
Applied rewrites99.6%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.6%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.6%
Applied rewrites99.6%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (fma 2.0 i (+ beta alpha)))
(t_1 (/ i t_0))
(t_2 (+ (+ beta alpha) i)))
(*
(/ (fma t_1 t_2 (* beta (/ alpha t_0))) (- t_0 1.0))
(/ (* t_2 t_1) (- t_0 -1.0)))))double code(double alpha, double beta, double i) {
double t_0 = fma(2.0, i, (beta + alpha));
double t_1 = i / t_0;
double t_2 = (beta + alpha) + i;
return (fma(t_1, t_2, (beta * (alpha / t_0))) / (t_0 - 1.0)) * ((t_2 * t_1) / (t_0 - -1.0));
}
function code(alpha, beta, i) t_0 = fma(2.0, i, Float64(beta + alpha)) t_1 = Float64(i / t_0) t_2 = Float64(Float64(beta + alpha) + i) return Float64(Float64(fma(t_1, t_2, Float64(beta * Float64(alpha / t_0))) / Float64(t_0 - 1.0)) * Float64(Float64(t_2 * t_1) / Float64(t_0 - -1.0))) end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(2.0 * i + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(i / t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(beta + alpha), $MachinePrecision] + i), $MachinePrecision]}, N[(N[(N[(t$95$1 * t$95$2 + N[(beta * N[(alpha / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 - 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[(t$95$2 * t$95$1), $MachinePrecision] / N[(t$95$0 - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \mathsf{fma}\left(2, i, \beta + \alpha\right)\\
t_1 := \frac{i}{t\_0}\\
t_2 := \left(\beta + \alpha\right) + i\\
\frac{\mathsf{fma}\left(t\_1, t\_2, \beta \cdot \frac{\alpha}{t\_0}\right)}{t\_0 - 1} \cdot \frac{t\_2 \cdot t\_1}{t\_0 - -1}
\end{array}
Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
lift-/.f64N/A
lift-fma.f64N/A
div-addN/A
associate-*r/N/A
lift-/.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6499.6%
Applied rewrites99.6%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ beta alpha) i)) (t_1 (fma 2.0 i (+ beta alpha))))
(*
(/ (fma i (/ t_0 t_1) (* beta (/ alpha t_1))) (- t_1 1.0))
(/ (* t_0 (/ i t_1)) (- t_1 -1.0)))))double code(double alpha, double beta, double i) {
double t_0 = (beta + alpha) + i;
double t_1 = fma(2.0, i, (beta + alpha));
return (fma(i, (t_0 / t_1), (beta * (alpha / t_1))) / (t_1 - 1.0)) * ((t_0 * (i / t_1)) / (t_1 - -1.0));
}
function code(alpha, beta, i) t_0 = Float64(Float64(beta + alpha) + i) t_1 = fma(2.0, i, Float64(beta + alpha)) return Float64(Float64(fma(i, Float64(t_0 / t_1), Float64(beta * Float64(alpha / t_1))) / Float64(t_1 - 1.0)) * Float64(Float64(t_0 * Float64(i / t_1)) / Float64(t_1 - -1.0))) end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + i), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 * i + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(i * N[(t$95$0 / t$95$1), $MachinePrecision] + N[(beta * N[(alpha / t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 - 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[(t$95$0 * N[(i / t$95$1), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + i\\
t_1 := \mathsf{fma}\left(2, i, \beta + \alpha\right)\\
\frac{\mathsf{fma}\left(i, \frac{t\_0}{t\_1}, \beta \cdot \frac{\alpha}{t\_1}\right)}{t\_1 - 1} \cdot \frac{t\_0 \cdot \frac{i}{t\_1}}{t\_1 - -1}
\end{array}
Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
lift-/.f64N/A
lift-fma.f64N/A
div-addN/A
*-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6499.6%
Applied rewrites99.6%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ i (fmax alpha beta)) i))
(t_1 (/ i t_0))
(t_2 (+ (+ (fmax alpha beta) (fmin alpha beta)) i)))
(*
(/
(fma t_1 t_2 (* (fmax alpha beta) (/ (fmin alpha beta) t_0)))
(- t_0 1.0))
(/ (* t_2 t_1) (- t_0 -1.0)))))double code(double alpha, double beta, double i) {
double t_0 = (i + fmax(alpha, beta)) + i;
double t_1 = i / t_0;
double t_2 = (fmax(alpha, beta) + fmin(alpha, beta)) + i;
return (fma(t_1, t_2, (fmax(alpha, beta) * (fmin(alpha, beta) / t_0))) / (t_0 - 1.0)) * ((t_2 * t_1) / (t_0 - -1.0));
}
function code(alpha, beta, i) t_0 = Float64(Float64(i + fmax(alpha, beta)) + i) t_1 = Float64(i / t_0) t_2 = Float64(Float64(fmax(alpha, beta) + fmin(alpha, beta)) + i) return Float64(Float64(fma(t_1, t_2, Float64(fmax(alpha, beta) * Float64(fmin(alpha, beta) / t_0))) / Float64(t_0 - 1.0)) * Float64(Float64(t_2 * t_1) / Float64(t_0 - -1.0))) end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(i + N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision] + i), $MachinePrecision]}, Block[{t$95$1 = N[(i / t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(N[Max[alpha, beta], $MachinePrecision] + N[Min[alpha, beta], $MachinePrecision]), $MachinePrecision] + i), $MachinePrecision]}, N[(N[(N[(t$95$1 * t$95$2 + N[(N[Max[alpha, beta], $MachinePrecision] * N[(N[Min[alpha, beta], $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 - 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[(t$95$2 * t$95$1), $MachinePrecision] / N[(t$95$0 - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \left(i + \mathsf{max}\left(\alpha, \beta\right)\right) + i\\
t_1 := \frac{i}{t\_0}\\
t_2 := \left(\mathsf{max}\left(\alpha, \beta\right) + \mathsf{min}\left(\alpha, \beta\right)\right) + i\\
\frac{\mathsf{fma}\left(t\_1, t\_2, \mathsf{max}\left(\alpha, \beta\right) \cdot \frac{\mathsf{min}\left(\alpha, \beta\right)}{t\_0}\right)}{t\_0 - 1} \cdot \frac{t\_2 \cdot t\_1}{t\_0 - -1}
\end{array}
Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
lift-/.f64N/A
lift-fma.f64N/A
div-addN/A
associate-*r/N/A
lift-/.f64N/A
*-commutativeN/A
lower-fma.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6499.6%
Applied rewrites99.6%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.7%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.7%
Applied rewrites99.7%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.7%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.7%
Applied rewrites99.7%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.7%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.7%
Applied rewrites99.7%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.7%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.6%
Applied rewrites99.6%
lift-fma.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
count-2-revN/A
associate-+r+N/A
lower-+.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-+.f64N/A
lower-+.f6499.6%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-+.f6499.6%
Applied rewrites99.6%
Taylor expanded in alpha around 0
Applied rewrites86.2%
Taylor expanded in alpha around 0
Applied rewrites85.2%
Taylor expanded in alpha around 0
Applied rewrites84.9%
Taylor expanded in alpha around 0
Applied rewrites84.7%
Taylor expanded in alpha around 0
Applied rewrites84.6%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (fma 2.0 i (fmax alpha beta)))
(t_1 (/ i t_0))
(t_2 (+ (fmax alpha beta) i)))
(* (/ (* t_1 t_2) (- t_0 1.0)) (/ (* t_2 t_1) (- t_0 -1.0)))))double code(double alpha, double beta, double i) {
double t_0 = fma(2.0, i, fmax(alpha, beta));
double t_1 = i / t_0;
double t_2 = fmax(alpha, beta) + i;
return ((t_1 * t_2) / (t_0 - 1.0)) * ((t_2 * t_1) / (t_0 - -1.0));
}
function code(alpha, beta, i) t_0 = fma(2.0, i, fmax(alpha, beta)) t_1 = Float64(i / t_0) t_2 = Float64(fmax(alpha, beta) + i) return Float64(Float64(Float64(t_1 * t_2) / Float64(t_0 - 1.0)) * Float64(Float64(t_2 * t_1) / Float64(t_0 - -1.0))) end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(2.0 * i + N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(i / t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[Max[alpha, beta], $MachinePrecision] + i), $MachinePrecision]}, N[(N[(N[(t$95$1 * t$95$2), $MachinePrecision] / N[(t$95$0 - 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[(t$95$2 * t$95$1), $MachinePrecision] / N[(t$95$0 - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \mathsf{fma}\left(2, i, \mathsf{max}\left(\alpha, \beta\right)\right)\\
t_1 := \frac{i}{t\_0}\\
t_2 := \mathsf{max}\left(\alpha, \beta\right) + i\\
\frac{t\_1 \cdot t\_2}{t\_0 - 1} \cdot \frac{t\_2 \cdot t\_1}{t\_0 - -1}
\end{array}
Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
Taylor expanded in alpha around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f64N/A
lower-+.f64N/A
lower-*.f6440.3%
Applied rewrites40.3%
Taylor expanded in alpha around 0
Applied rewrites36.5%
Taylor expanded in alpha around 0
Applied rewrites40.2%
Taylor expanded in alpha around 0
Applied rewrites36.4%
Taylor expanded in alpha around 0
Applied rewrites36.2%
lift-/.f64N/A
lift-*.f64N/A
associate-*l/N/A
lift-+.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-fma.f64N/A
lift-/.f64N/A
lower-*.f6483.9%
Applied rewrites83.9%
(FPCore (alpha beta i) :precision binary64 (let* ((t_0 (+ (fmax alpha beta) i)) (t_1 (fma 2.0 i (fmax alpha beta)))) (* (/ (* i (/ t_0 t_1)) (- t_1 1.0)) (/ (* t_0 (/ i t_1)) (- t_1 -1.0)))))
double code(double alpha, double beta, double i) {
double t_0 = fmax(alpha, beta) + i;
double t_1 = fma(2.0, i, fmax(alpha, beta));
return ((i * (t_0 / t_1)) / (t_1 - 1.0)) * ((t_0 * (i / t_1)) / (t_1 - -1.0));
}
function code(alpha, beta, i) t_0 = Float64(fmax(alpha, beta) + i) t_1 = fma(2.0, i, fmax(alpha, beta)) return Float64(Float64(Float64(i * Float64(t_0 / t_1)) / Float64(t_1 - 1.0)) * Float64(Float64(t_0 * Float64(i / t_1)) / Float64(t_1 - -1.0))) end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[Max[alpha, beta], $MachinePrecision] + i), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 * i + N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(i * N[(t$95$0 / t$95$1), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 - 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[(t$95$0 * N[(i / t$95$1), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := \mathsf{max}\left(\alpha, \beta\right) + i\\
t_1 := \mathsf{fma}\left(2, i, \mathsf{max}\left(\alpha, \beta\right)\right)\\
\frac{i \cdot \frac{t\_0}{t\_1}}{t\_1 - 1} \cdot \frac{t\_0 \cdot \frac{i}{t\_1}}{t\_1 - -1}
\end{array}
Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
Taylor expanded in alpha around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f64N/A
lower-+.f64N/A
lower-*.f6440.3%
Applied rewrites40.3%
Taylor expanded in alpha around 0
Applied rewrites36.5%
Taylor expanded in alpha around 0
Applied rewrites40.2%
Taylor expanded in alpha around 0
Applied rewrites36.4%
Taylor expanded in alpha around 0
Applied rewrites36.2%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6483.9%
lift-+.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-fma.f6483.9%
Applied rewrites83.9%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (fma 2.0 i (fmax alpha beta))))
(if (<= (fmax alpha beta) 1.1e+135)
0.0625
(*
(/ (* -1.0 (fma -1.0 (fmin alpha beta) (* -1.0 i))) (- t_0 1.0))
(/ (* (+ (fmax alpha beta) i) (/ i t_0)) (- t_0 -1.0))))))double code(double alpha, double beta, double i) {
double t_0 = fma(2.0, i, fmax(alpha, beta));
double tmp;
if (fmax(alpha, beta) <= 1.1e+135) {
tmp = 0.0625;
} else {
tmp = ((-1.0 * fma(-1.0, fmin(alpha, beta), (-1.0 * i))) / (t_0 - 1.0)) * (((fmax(alpha, beta) + i) * (i / t_0)) / (t_0 - -1.0));
}
return tmp;
}
function code(alpha, beta, i) t_0 = fma(2.0, i, fmax(alpha, beta)) tmp = 0.0 if (fmax(alpha, beta) <= 1.1e+135) tmp = 0.0625; else tmp = Float64(Float64(Float64(-1.0 * fma(-1.0, fmin(alpha, beta), Float64(-1.0 * i))) / Float64(t_0 - 1.0)) * Float64(Float64(Float64(fmax(alpha, beta) + i) * Float64(i / t_0)) / Float64(t_0 - -1.0))); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(2.0 * i + N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Max[alpha, beta], $MachinePrecision], 1.1e+135], 0.0625, N[(N[(N[(-1.0 * N[(-1.0 * N[Min[alpha, beta], $MachinePrecision] + N[(-1.0 * i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 - 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[Max[alpha, beta], $MachinePrecision] + i), $MachinePrecision] * N[(i / t$95$0), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := \mathsf{fma}\left(2, i, \mathsf{max}\left(\alpha, \beta\right)\right)\\
\mathbf{if}\;\mathsf{max}\left(\alpha, \beta\right) \leq 1.1 \cdot 10^{+135}:\\
\;\;\;\;0.0625\\
\mathbf{else}:\\
\;\;\;\;\frac{-1 \cdot \mathsf{fma}\left(-1, \mathsf{min}\left(\alpha, \beta\right), -1 \cdot i\right)}{t\_0 - 1} \cdot \frac{\left(\mathsf{max}\left(\alpha, \beta\right) + i\right) \cdot \frac{i}{t\_0}}{t\_0 - -1}\\
\end{array}
if beta < 1.1e135Initial program 16.5%
Taylor expanded in i around inf
Applied rewrites70.6%
if 1.1e135 < beta Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
Taylor expanded in alpha around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f64N/A
lower-+.f64N/A
lower-*.f6440.3%
Applied rewrites40.3%
Taylor expanded in alpha around 0
Applied rewrites36.5%
Taylor expanded in alpha around 0
Applied rewrites40.2%
Taylor expanded in alpha around 0
Applied rewrites36.4%
Taylor expanded in alpha around 0
Applied rewrites36.2%
Taylor expanded in beta around -inf
lower-*.f64N/A
lower-fma.f64N/A
lower-*.f6427.9%
Applied rewrites27.9%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (fma 2.0 i (fmax alpha beta))))
(if (<= (fmax alpha beta) 5.5e+143)
0.0625
(*
(* -1.0 (/ (fma -1.0 (fmin alpha beta) (* -1.0 i)) (fmax alpha beta)))
(/ (* (+ (fmax alpha beta) i) (/ i t_0)) (- t_0 -1.0))))))double code(double alpha, double beta, double i) {
double t_0 = fma(2.0, i, fmax(alpha, beta));
double tmp;
if (fmax(alpha, beta) <= 5.5e+143) {
tmp = 0.0625;
} else {
tmp = (-1.0 * (fma(-1.0, fmin(alpha, beta), (-1.0 * i)) / fmax(alpha, beta))) * (((fmax(alpha, beta) + i) * (i / t_0)) / (t_0 - -1.0));
}
return tmp;
}
function code(alpha, beta, i) t_0 = fma(2.0, i, fmax(alpha, beta)) tmp = 0.0 if (fmax(alpha, beta) <= 5.5e+143) tmp = 0.0625; else tmp = Float64(Float64(-1.0 * Float64(fma(-1.0, fmin(alpha, beta), Float64(-1.0 * i)) / fmax(alpha, beta))) * Float64(Float64(Float64(fmax(alpha, beta) + i) * Float64(i / t_0)) / Float64(t_0 - -1.0))); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(2.0 * i + N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Max[alpha, beta], $MachinePrecision], 5.5e+143], 0.0625, N[(N[(-1.0 * N[(N[(-1.0 * N[Min[alpha, beta], $MachinePrecision] + N[(-1.0 * i), $MachinePrecision]), $MachinePrecision] / N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[Max[alpha, beta], $MachinePrecision] + i), $MachinePrecision] * N[(i / t$95$0), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := \mathsf{fma}\left(2, i, \mathsf{max}\left(\alpha, \beta\right)\right)\\
\mathbf{if}\;\mathsf{max}\left(\alpha, \beta\right) \leq 5.5 \cdot 10^{+143}:\\
\;\;\;\;0.0625\\
\mathbf{else}:\\
\;\;\;\;\left(-1 \cdot \frac{\mathsf{fma}\left(-1, \mathsf{min}\left(\alpha, \beta\right), -1 \cdot i\right)}{\mathsf{max}\left(\alpha, \beta\right)}\right) \cdot \frac{\left(\mathsf{max}\left(\alpha, \beta\right) + i\right) \cdot \frac{i}{t\_0}}{t\_0 - -1}\\
\end{array}
if beta < 5.4999999999999997e143Initial program 16.5%
Taylor expanded in i around inf
Applied rewrites70.6%
if 5.4999999999999997e143 < beta Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
Taylor expanded in alpha around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f64N/A
lower-+.f64N/A
lower-*.f6440.3%
Applied rewrites40.3%
Taylor expanded in alpha around 0
Applied rewrites36.5%
Taylor expanded in alpha around 0
Applied rewrites40.2%
Taylor expanded in alpha around 0
Applied rewrites36.4%
Taylor expanded in alpha around 0
Applied rewrites36.2%
Taylor expanded in beta around -inf
lower-*.f64N/A
lower-/.f64N/A
lower-fma.f64N/A
lower-*.f6416.7%
Applied rewrites16.7%
(FPCore (alpha beta i)
:precision binary64
(if (<= (fmax alpha beta) 5.5e+143)
0.0625
(*
(/
(* -1.0 (fma -1.0 (fmin alpha beta) (* -1.0 i)))
(- (fma 2.0 i (+ (fmax alpha beta) (fmin alpha beta))) 1.0))
(/ i (+ 1.0 (+ (fmin alpha beta) (fmax alpha beta)))))))double code(double alpha, double beta, double i) {
double tmp;
if (fmax(alpha, beta) <= 5.5e+143) {
tmp = 0.0625;
} else {
tmp = ((-1.0 * fma(-1.0, fmin(alpha, beta), (-1.0 * i))) / (fma(2.0, i, (fmax(alpha, beta) + fmin(alpha, beta))) - 1.0)) * (i / (1.0 + (fmin(alpha, beta) + fmax(alpha, beta))));
}
return tmp;
}
function code(alpha, beta, i) tmp = 0.0 if (fmax(alpha, beta) <= 5.5e+143) tmp = 0.0625; else tmp = Float64(Float64(Float64(-1.0 * fma(-1.0, fmin(alpha, beta), Float64(-1.0 * i))) / Float64(fma(2.0, i, Float64(fmax(alpha, beta) + fmin(alpha, beta))) - 1.0)) * Float64(i / Float64(1.0 + Float64(fmin(alpha, beta) + fmax(alpha, beta))))); end return tmp end
code[alpha_, beta_, i_] := If[LessEqual[N[Max[alpha, beta], $MachinePrecision], 5.5e+143], 0.0625, N[(N[(N[(-1.0 * N[(-1.0 * N[Min[alpha, beta], $MachinePrecision] + N[(-1.0 * i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(2.0 * i + N[(N[Max[alpha, beta], $MachinePrecision] + N[Min[alpha, beta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] * N[(i / N[(1.0 + N[(N[Min[alpha, beta], $MachinePrecision] + N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\mathsf{max}\left(\alpha, \beta\right) \leq 5.5 \cdot 10^{+143}:\\
\;\;\;\;0.0625\\
\mathbf{else}:\\
\;\;\;\;\frac{-1 \cdot \mathsf{fma}\left(-1, \mathsf{min}\left(\alpha, \beta\right), -1 \cdot i\right)}{\mathsf{fma}\left(2, i, \mathsf{max}\left(\alpha, \beta\right) + \mathsf{min}\left(\alpha, \beta\right)\right) - 1} \cdot \frac{i}{1 + \left(\mathsf{min}\left(\alpha, \beta\right) + \mathsf{max}\left(\alpha, \beta\right)\right)}\\
\end{array}
if beta < 5.4999999999999997e143Initial program 16.5%
Taylor expanded in i around inf
Applied rewrites70.6%
if 5.4999999999999997e143 < beta Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
Taylor expanded in i around 0
lower-/.f64N/A
lower-+.f64N/A
lower-+.f6416.0%
Applied rewrites16.0%
Taylor expanded in beta around -inf
lower-*.f64N/A
lower-fma.f64N/A
lower-*.f6419.2%
Applied rewrites19.2%
(FPCore (alpha beta i)
:precision binary64
(if (<= (fmax alpha beta) 5.5e+143)
0.0625
(*
(* -1.0 (/ (fma -1.0 (fmin alpha beta) (* -1.0 i)) (fmax alpha beta)))
(/ i (+ 1.0 (+ (fmin alpha beta) (fmax alpha beta)))))))double code(double alpha, double beta, double i) {
double tmp;
/* Below the magnitude cutoff the rewritten program is approximated by the
   constant 0.0625 (see the accuracy trace in this report). */
if (fmax(alpha, beta) <= 5.5e+143) {
tmp = 0.0625;
} else {
/* Large-magnitude regime: rearranged rational form using fma to reduce
   rounding error; divides by fmax(alpha, beta) rather than the squared
   denominator of the original expression. */
tmp = (-1.0 * (fma(-1.0, fmin(alpha, beta), (-1.0 * i)) / fmax(alpha, beta))) * (i / (1.0 + (fmin(alpha, beta) + fmax(alpha, beta))));
}
return tmp;
}
# Same contract as the other variants in this report: constant approximation
# below the magnitude cutoff, fma-based rearrangement above it.
function code(alpha, beta, i)
    hi = fmax(alpha, beta)
    lo = fmin(alpha, beta)
    hi <= 5.5e+143 && return 0.0625
    left = Float64(-1.0 * Float64(fma(-1.0, lo, Float64(-1.0 * i)) / hi))
    right = Float64(i / Float64(1.0 + Float64(lo + hi)))
    return Float64(left * right)
end
(* Constant 0.0625 below the cutoff; otherwise a rearranged form dividing by
   Max[alpha, beta].  N[..., $MachinePrecision] emulates per-op rounding. *)
code[alpha_, beta_, i_] := If[LessEqual[N[Max[alpha, beta], $MachinePrecision], 5.5e+143], 0.0625, N[(N[(-1.0 * N[(N[(-1.0 * N[Min[alpha, beta], $MachinePrecision] + N[(-1.0 * i), $MachinePrecision]), $MachinePrecision] / N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(i / N[(1.0 + N[(N[Min[alpha, beta], $MachinePrecision] + N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\mathsf{max}\left(\alpha, \beta\right) \leq 5.5 \cdot 10^{+143}:\\
\;\;\;\;0.0625\\
\mathbf{else}:\\
\;\;\;\;\left(-1 \cdot \frac{\mathsf{fma}\left(-1, \mathsf{min}\left(\alpha, \beta\right), -1 \cdot i\right)}{\mathsf{max}\left(\alpha, \beta\right)}\right) \cdot \frac{i}{1 + \left(\mathsf{min}\left(\alpha, \beta\right) + \mathsf{max}\left(\alpha, \beta\right)\right)}\\
\end{array}
if beta < 5.4999999999999997e143
Initial program 16.5%
Taylor expanded in i around inf
Applied rewrites 70.6%
if 5.4999999999999997e143 < beta
Initial program 16.5%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
Applied rewrites42.6%
Taylor expanded in i around 0
lower-/.f64N/A
lower-+.f64N/A
lower-+.f6416.0%
Applied rewrites16.0%
Taylor expanded in beta around -inf
lower-*.f64N/A
lower-/.f64N/A
lower-fma.f64N/A
lower-*.f6416.4%
Applied rewrites16.4%
(FPCore (alpha beta i) :precision binary64 (- (+ 0.0625 (* 0.125 (/ (fmax alpha beta) i))) (* 0.125 (/ (+ (fmin alpha beta) (fmax alpha beta)) i))))
double code(double alpha, double beta, double i) {
return (0.0625 + (0.125 * (fmax(alpha, beta) / i))) - (0.125 * ((fmin(alpha, beta) + fmax(alpha, beta)) / i));
}
! NaN-aware fmax/fmin wrappers matching the C99 library contract: when
! exactly one argument is NaN, the other argument is returned.  The Fortran
! intrinsics max/min make no such promise, so each procedure tests its
! operands with the x /= x NaN idiom via merge().  The generic names fmax
! and fmin dispatch over all real(8)/real(4) argument combinations.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! max of two real(8) values; returns the non-NaN operand when one is NaN.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is true only for NaN: pick y if x is NaN, else x if y is NaN,
! else the ordinary maximum.
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! max of two real(4) values; same NaN handling as fmax88.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! mixed-kind max; the real(4) operand is promoted to real(8) with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! mixed-kind max with the real(4) operand first.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! min counterparts: identical NaN handling, min in place of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Asymptotic form of the jcobi/4 expression; algebraically equal to
! 0.0625 - 0.125*fmin(alpha, beta)/i, kept in the rearranged evaluation
! order produced by the accuracy rewrite.
! Fixes: added implicit none (the original relied on implicit typing being
! benign) and qualified the use statement with only:.
real(8) function code(alpha, beta, i)
use fmin_fmax_functions, only: fmax, fmin
implicit none
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = (0.0625d0 + (0.125d0 * (fmax(alpha, beta) / i))) - (0.125d0 * ((fmin(alpha, beta) + fmax(alpha, beta)) / i))
end function
/**
 * Asymptotic form of the jcobi/4 expression; algebraically equal to
 * 0.0625 - 0.125 * fmin(alpha, beta) / i, evaluated in the rearranged
 * order produced by the accuracy rewrite.
 */
public static double code(double alpha, double beta, double i) {
    double hi = fmax(alpha, beta);
    double lo = fmin(alpha, beta);
    return (0.0625 + (0.125 * (hi / i))) - (0.125 * ((lo + hi) / i));
}
def code(alpha, beta, i):
    """Asymptotic jcobi/4 form; algebraically 0.0625 - 0.125*fmin(alpha, beta)/i."""
    hi = fmax(alpha, beta)
    lo = fmin(alpha, beta)
    return (0.0625 + (0.125 * (hi / i))) - (0.125 * ((lo + hi) / i))
# Asymptotic jcobi/4 form; algebraically 0.0625 - 0.125 * fmin(alpha, beta) / i,
# with explicit Float64 rounding after each operation as in the original.
function code(alpha, beta, i)
    hi = fmax(alpha, beta)
    lo = fmin(alpha, beta)
    left = Float64(0.0625 + Float64(0.125 * Float64(hi / i)))
    right = Float64(0.125 * Float64(Float64(lo + hi) / i))
    return Float64(left - right)
end
% Asymptotic jcobi/4 form; algebraically 0.0625 - 0.125*min(alpha, beta)/i,
% evaluated in the rearranged order produced by the accuracy rewrite.
function tmp = code(alpha, beta, i)
    hi = max(alpha, beta);
    lo = min(alpha, beta);
    tmp = (0.0625 + (0.125 * (hi / i))) - (0.125 * ((lo + hi) / i));
end
(* Asymptotic jcobi/4 form; algebraically 0.0625 - 0.125*Min[alpha, beta]/i.
   N[..., $MachinePrecision] emulates per-operation rounding. *)
code[alpha_, beta_, i_] := N[(N[(0.0625 + N[(0.125 * N[(N[Max[alpha, beta], $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(0.125 * N[(N[(N[Min[alpha, beta], $MachinePrecision] + N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\left(0.0625 + 0.125 \cdot \frac{\mathsf{max}\left(\alpha, \beta\right)}{i}\right) - 0.125 \cdot \frac{\mathsf{min}\left(\alpha, \beta\right) + \mathsf{max}\left(\alpha, \beta\right)}{i}
Initial program 16.5%
Taylor expanded in i around inf
lower--.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6477.2%
Applied rewrites77.2%
Taylor expanded in alpha around 0
lower-*.f64N/A
lower-/.f6472.9%
Applied rewrites72.9%
(FPCore (alpha beta i)
:precision binary64
(if (<= (fmax alpha beta) 1.1e+249)
0.0625
(fma
(/ (+ (fmin alpha beta) (fmax alpha beta)) i)
-0.125
(* 0.125 (/ (fmax alpha beta) i)))))double code(double alpha, double beta, double i) {
double tmp;
/* Below the magnitude cutoff the rewritten program is approximated by the
   constant 0.0625 (see the accuracy trace in this report). */
if (fmax(alpha, beta) <= 1.1e+249) {
tmp = 0.0625;
} else {
/* Large-magnitude regime: single fma combining the two 0.125-scaled
   quotients of the asymptotic form. */
tmp = fma(((fmin(alpha, beta) + fmax(alpha, beta)) / i), -0.125, (0.125 * (fmax(alpha, beta) / i)));
}
return tmp;
}
# Constant approximation below the cutoff; above it, a single fma combines
# the two 0.125-scaled quotients of the asymptotic form.
function code(alpha, beta, i)
    hi = fmax(alpha, beta)
    lo = fmin(alpha, beta)
    if hi <= 1.1e+249
        return 0.0625
    end
    return fma(Float64(Float64(lo + hi) / i), -0.125, Float64(0.125 * Float64(hi / i)))
end
(* Constant 0.0625 below the cutoff; otherwise the fma-style combination of
   the two 0.125-scaled quotients.  N[...] emulates per-op rounding. *)
code[alpha_, beta_, i_] := If[LessEqual[N[Max[alpha, beta], $MachinePrecision], 1.1e+249], 0.0625, N[(N[(N[(N[Min[alpha, beta], $MachinePrecision] + N[Max[alpha, beta], $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision] * -0.125 + N[(0.125 * N[(N[Max[alpha, beta], $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\mathsf{max}\left(\alpha, \beta\right) \leq 1.1 \cdot 10^{+249}:\\
\;\;\;\;0.0625\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{\mathsf{min}\left(\alpha, \beta\right) + \mathsf{max}\left(\alpha, \beta\right)}{i}, -0.125, 0.125 \cdot \frac{\mathsf{max}\left(\alpha, \beta\right)}{i}\right)\\
\end{array}
if beta < 1.0999999999999999e249
Initial program 16.5%
Taylor expanded in i around inf
Applied rewrites 70.6%
if 1.0999999999999999e249 < beta
Initial program 16.5%
Taylor expanded in i around inf
lower--.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6477.2%
Applied rewrites77.2%
Taylor expanded in alpha around inf
lower-*.f64N/A
lower-/.f646.5%
Applied rewrites6.5%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
metadata-eval6.5%
lift-*.f64N/A
*-commutativeN/A
lower-*.f646.5%
Applied rewrites6.5%
Taylor expanded in beta around inf
lower-*.f64N/A
lower-/.f646.1%
Applied rewrites6.1%
(FPCore (alpha beta i) :precision binary64 0.0625)
/* Constant approximation of the jcobi/4 expression; the rewrite collapsed
 * the whole program to 0.0625, so all three arguments are unused. */
double code(double alpha, double beta, double i) {
    (void)alpha;
    (void)beta;
    (void)i;
    return 0.0625;
}
! NaN-aware fmax/fmin wrappers matching the C99 library contract: when
! exactly one argument is NaN, the other argument is returned.  The Fortran
! intrinsics max/min make no such promise, so each procedure tests its
! operands with the x /= x NaN idiom via merge().  The generic names fmax
! and fmin dispatch over all real(8)/real(4) argument combinations.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! max of two real(8) values; returns the non-NaN operand when one is NaN.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is true only for NaN: pick y if x is NaN, else x if y is NaN,
! else the ordinary maximum.
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! max of two real(4) values; same NaN handling as fmax88.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! mixed-kind max; the real(4) operand is promoted to real(8) with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! mixed-kind max with the real(4) operand first.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! min counterparts: identical NaN handling, min in place of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Constant approximation of the jcobi/4 expression; the rewrite collapsed
! the whole program to 0.0625, so all three arguments are unused.
! Fix: added implicit none (the original relied on implicit typing rules).
real(8) function code(alpha, beta, i)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = 0.0625d0
end function
/**
 * Constant approximation of the jcobi/4 expression; the rewrite collapsed
 * the whole program to 0.0625, so all three arguments are ignored.
 */
public static double code(double alpha, double beta, double i) {
    final double APPROXIMATION = 0.0625;
    return APPROXIMATION;
}
def code(alpha, beta, i):
    """Constant approximation of jcobi/4; all three arguments are ignored."""
    return 0.0625
# Constant approximation of jcobi/4; all three arguments are ignored.
function code(alpha, beta, i)
    return 0.0625
end
% Constant approximation of jcobi/4; the inputs are unused.
function tmp = code(alpha, beta, i)
    tmp = 0.0625;
end
(* Constant approximation of jcobi/4; the arguments are ignored. *)
code[alpha_, beta_, i_] := 0.0625
0.0625
Initial program 16.5%
Taylor expanded in i around inf
Applied rewrites 70.6%
herbie shell --seed 2025183
(FPCore (alpha beta i)
:name "Octave 3.8, jcobi/4"
:precision binary64
:pre (and (and (> alpha -1.0) (> beta -1.0)) (> i 1.0))
(/ (/ (* (* i (+ (+ alpha beta) i)) (+ (* beta alpha) (* i (+ (+ alpha beta) i)))) (* (+ (+ alpha beta) (* 2.0 i)) (+ (+ alpha beta) (* 2.0 i)))) (- (* (+ (+ alpha beta) (* 2.0 i)) (+ (+ alpha beta) (* 2.0 i))) 1.0)))