
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (* i (+ (+ alpha beta) i)))
(t_1 (+ (+ alpha beta) (* 2.0 i)))
(t_2 (* t_1 t_1)))
(/ (/ (* t_0 (+ (* beta alpha) t_0)) t_2) (- t_2 1.0))))
double code(double alpha, double beta, double i) {
double t_0 = i * ((alpha + beta) + i);
double t_1 = (alpha + beta) + (2.0 * i);
double t_2 = t_1 * t_1;
return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
! NaN-aware generic fmin/fmax, mirroring the semantics of C's fmin/fmax:
! when exactly one argument is NaN, the other argument is returned.
! The generic interfaces cover every real(8)/real(4) argument combination;
! mixed-kind variants promote the real(4) argument with dble() and
! return real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! x /= x is the portable NaN test; the nested merge picks y when x is NaN,
! x when y is NaN, and max(x, y) otherwise.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-handling pattern as the fmax variants, using min instead.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Baseline Herbie program (binary64):
!   t0 = i*((alpha+beta) + i);  t2 = ((alpha+beta) + 2i)^2
!   result = (t0*(beta*alpha + t0) / t2) / (t2 - 1)
real(8) function code(alpha, beta, i)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
t_0 = i * ((alpha + beta) + i)
t_1 = (alpha + beta) + (2.0d0 * i)
t_2 = t_1 * t_1
code = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0d0)
end function
/** Baseline Herbie program: (t0*(beta*alpha + t0)/t2) / (t2 - 1),
 *  with t0 = i*((alpha+beta)+i) and t2 = ((alpha+beta)+2i)^2. */
public static double code(double alpha, double beta, double i) {
    double sumAB = alpha + beta;
    double t0 = i * (sumAB + i);
    double root = sumAB + (2.0 * i);
    double denom = root * root;
    return ((t0 * ((beta * alpha) + t0)) / denom) / (denom - 1.0);
}
def code(alpha, beta, i): t_0 = i * ((alpha + beta) + i) t_1 = (alpha + beta) + (2.0 * i) t_2 = t_1 * t_1 return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0)
# Baseline Herbie program (binary64). The original fused every statement
# onto one line without separators, which Julia cannot parse; this restores
# one statement per line with the same operations.
function code(alpha, beta, i)
    t_0 = Float64(i * Float64(Float64(alpha + beta) + i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    t_2 = Float64(t_1 * t_1)
    return Float64(Float64(Float64(t_0 * Float64(Float64(beta * alpha) + t_0)) / t_2) / Float64(t_2 - 1.0))
end
% Baseline Herbie program (binary64). The original fused the function
% declaration and its statements onto one line; this restores the standard
% statement-per-line layout with identical operations.
function tmp = code(alpha, beta, i)
    t_0 = i * ((alpha + beta) + i);
    t_1 = (alpha + beta) + (2.0 * i);
    t_2 = t_1 * t_1;
    tmp = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[(N[(beta * alpha), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision] / N[(t$95$2 - 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_2 := t\_1 \cdot t\_1\\
\frac{\frac{t\_0 \cdot \left(\beta \cdot \alpha + t\_0\right)}{t\_2}}{t\_2 - 1}
\end{array}
\end{array}
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (* i (+ (+ alpha beta) i)))
(t_1 (+ (+ alpha beta) (* 2.0 i)))
(t_2 (* t_1 t_1)))
(/ (/ (* t_0 (+ (* beta alpha) t_0)) t_2) (- t_2 1.0))))
/* Baseline Herbie program (duplicate listing): (t0*(beta*alpha + t0)/t2) / (t2 - 1),
 * with t0 = i*((alpha+beta)+i) and t2 = ((alpha+beta)+2i)^2. */
double code(double alpha, double beta, double i) {
double t_0 = i * ((alpha + beta) + i);
double t_1 = (alpha + beta) + (2.0 * i);
double t_2 = t_1 * t_1;
return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
! NaN-aware generic fmin/fmax wrappers (duplicate listing): x /= x tests
! for NaN; if one argument is NaN the other is returned, otherwise the
! intrinsic max/min is used. Mixed-kind variants promote to real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Baseline Herbie program (duplicate listing):
!   (t0*(beta*alpha + t0) / t2) / (t2 - 1), t0 = i*((alpha+beta)+i),
!   t2 = ((alpha+beta)+2i)^2.
real(8) function code(alpha, beta, i)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
t_0 = i * ((alpha + beta) + i)
t_1 = (alpha + beta) + (2.0d0 * i)
t_2 = t_1 * t_1
code = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0d0)
end function
/** Baseline Herbie program (duplicate listing): (t0*(beta*alpha + t0)/t2) / (t2 - 1),
 *  with t0 = i*((alpha+beta)+i) and t2 = ((alpha+beta)+2i)^2. */
public static double code(double alpha, double beta, double i) {
double t_0 = i * ((alpha + beta) + i);
double t_1 = (alpha + beta) + (2.0 * i);
double t_2 = t_1 * t_1;
return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
def code(alpha, beta, i):
    """Baseline Herbie program (duplicate listing).

    (t0*(beta*alpha + t0)/t2) / (t2 - 1), with t0 = i*((alpha+beta)+i)
    and t2 = ((alpha+beta)+2i)^2. The original had all statements fused
    onto one line (invalid Python); layout restored.
    """
    t_0 = i * ((alpha + beta) + i)
    t_1 = (alpha + beta) + (2.0 * i)
    t_2 = t_1 * t_1
    return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0)
# Baseline Herbie program (duplicate listing). Statements were fused onto
# one line without separators (a Julia parse error); layout restored.
function code(alpha, beta, i)
    t_0 = Float64(i * Float64(Float64(alpha + beta) + i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    t_2 = Float64(t_1 * t_1)
    return Float64(Float64(Float64(t_0 * Float64(Float64(beta * alpha) + t_0)) / t_2) / Float64(t_2 - 1.0))
end
% Baseline Herbie program (duplicate listing). Declaration and statements
% were fused onto one line; standard layout restored.
function tmp = code(alpha, beta, i)
    t_0 = i * ((alpha + beta) + i);
    t_1 = (alpha + beta) + (2.0 * i);
    t_2 = t_1 * t_1;
    tmp = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[(N[(beta * alpha), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision] / N[(t$95$2 - 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_2 := t\_1 \cdot t\_1\\
\frac{\frac{t\_0 \cdot \left(\beta \cdot \alpha + t\_0\right)}{t\_2}}{t\_2 - 1}
\end{array}
\end{array}
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ beta alpha) i))
(t_1 (fma 2.0 i (+ beta alpha)))
(t_2 (- t_1 1.0)))
(if (<= i 7.5e+125)
(*
(/ (* t_0 (/ i t_1)) (- t_1 -1.0))
(/ (/ (fma t_0 i (* beta alpha)) t_1) t_2))
(* 0.25 (* (/ t_0 (fma 2.0 i (- (+ beta alpha) -1.0))) (/ i t_2))))))
double code(double alpha, double beta, double i) {
double t_0 = (beta + alpha) + i;
double t_1 = fma(2.0, i, (beta + alpha));
double t_2 = t_1 - 1.0;
double tmp;
if (i <= 7.5e+125) {
tmp = ((t_0 * (i / t_1)) / (t_1 - -1.0)) * ((fma(t_0, i, (beta * alpha)) / t_1) / t_2);
} else {
tmp = 0.25 * ((t_0 / fma(2.0, i, ((beta + alpha) - -1.0))) * (i / t_2));
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(Float64(beta + alpha) + i) t_1 = fma(2.0, i, Float64(beta + alpha)) t_2 = Float64(t_1 - 1.0) tmp = 0.0 if (i <= 7.5e+125) tmp = Float64(Float64(Float64(t_0 * Float64(i / t_1)) / Float64(t_1 - -1.0)) * Float64(Float64(fma(t_0, i, Float64(beta * alpha)) / t_1) / t_2)); else tmp = Float64(0.25 * Float64(Float64(t_0 / fma(2.0, i, Float64(Float64(beta + alpha) - -1.0))) * Float64(i / t_2))); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(beta + alpha), $MachinePrecision] + i), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 * i + N[(beta + alpha), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 - 1.0), $MachinePrecision]}, If[LessEqual[i, 7.5e+125], N[(N[(N[(t$95$0 * N[(i / t$95$1), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 - -1.0), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 * i + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / t$95$2), $MachinePrecision]), $MachinePrecision], N[(0.25 * N[(N[(t$95$0 / N[(2.0 * i + N[(N[(beta + alpha), $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(i / t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\beta + \alpha\right) + i\\
t_1 := \mathsf{fma}\left(2, i, \beta + \alpha\right)\\
t_2 := t\_1 - 1\\
\mathbf{if}\;i \leq 7.5 \cdot 10^{+125}:\\
\;\;\;\;\frac{t\_0 \cdot \frac{i}{t\_1}}{t\_1 - -1} \cdot \frac{\frac{\mathsf{fma}\left(t\_0, i, \beta \cdot \alpha\right)}{t\_1}}{t\_2}\\
\mathbf{else}:\\
\;\;\;\;0.25 \cdot \left(\frac{t\_0}{\mathsf{fma}\left(2, i, \left(\beta + \alpha\right) - -1\right)} \cdot \frac{i}{t\_2}\right)\\
\end{array}
\end{array}
if i < 7.5000000000000006e125Initial program 17.2%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
lift-*.f64N/A
times-fracN/A
lift--.f64N/A
lift-*.f64N/A
difference-of-sqr-1N/A
Applied rewrites43.5%
if 7.5000000000000006e125 < i Initial program 17.2%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lift-*.f64N/A
*-commutativeN/A
Applied rewrites38.7%
Taylor expanded in i around inf
Applied rewrites32.8%
lift-/.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
difference-of-sqr--1N/A
metadata-evalN/A
sub-flipN/A
lift-fma.f64N/A
lift-+.f64N/A
lift--.f64N/A
times-fracN/A
lower-*.f64N/A
Applied rewrites71.9%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))
(t_1 (* t_0 t_0))
(t_2 (* i (+ (+ alpha beta) i))))
(if (<= (/ (/ (* t_2 (+ (* beta alpha) t_2)) t_1) (- t_1 1.0)) INFINITY)
(*
(/
(fma (+ beta i) i (* beta alpha))
(* (fma 2.0 i beta) (fma 2.0 i beta)))
(/ (* (+ beta i) i) (fma (fma 2.0 i beta) (fma 2.0 i beta) -1.0)))
(-
(/ (fma 0.0625 i (* (+ beta alpha) 0.125)) i)
(* 0.125 (/ (+ alpha beta) i))))))
/* Herbie alternative. The guard re-evaluates the naive expression; since
 * x <= INFINITY holds for every non-NaN double (including +/-inf), the
 * else branch is taken only when the naive form produces NaN, in which
 * case an i -> inf limit form is used instead. The main branch is an
 * fma-based factoring derived from Taylor expansion in alpha around 0. */
double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
double t_1 = t_0 * t_0;
double t_2 = i * ((alpha + beta) + i);
double tmp;
if ((((t_2 * ((beta * alpha) + t_2)) / t_1) / (t_1 - 1.0)) <= ((double) INFINITY)) {
tmp = (fma((beta + i), i, (beta * alpha)) / (fma(2.0, i, beta) * fma(2.0, i, beta))) * (((beta + i) * i) / fma(fma(2.0, i, beta), fma(2.0, i, beta), -1.0));
} else {
tmp = (fma(0.0625, i, ((beta + alpha) * 0.125)) / i) - (0.125 * ((alpha + beta) / i));
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) t_1 = Float64(t_0 * t_0) t_2 = Float64(i * Float64(Float64(alpha + beta) + i)) tmp = 0.0 if (Float64(Float64(Float64(t_2 * Float64(Float64(beta * alpha) + t_2)) / t_1) / Float64(t_1 - 1.0)) <= Inf) tmp = Float64(Float64(fma(Float64(beta + i), i, Float64(beta * alpha)) / Float64(fma(2.0, i, beta) * fma(2.0, i, beta))) * Float64(Float64(Float64(beta + i) * i) / fma(fma(2.0, i, beta), fma(2.0, i, beta), -1.0))); else tmp = Float64(Float64(fma(0.0625, i, Float64(Float64(beta + alpha) * 0.125)) / i) - Float64(0.125 * Float64(Float64(alpha + beta) / i))); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(t$95$2 * N[(N[(beta * alpha), $MachinePrecision] + t$95$2), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(t$95$1 - 1.0), $MachinePrecision]), $MachinePrecision], Infinity], N[(N[(N[(N[(beta + i), $MachinePrecision] * i + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] / N[(N[(2.0 * i + beta), $MachinePrecision] * N[(2.0 * i + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(beta + i), $MachinePrecision] * i), $MachinePrecision] / N[(N[(2.0 * i + beta), $MachinePrecision] * N[(2.0 * i + beta), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(0.0625 * i + N[(N[(beta + alpha), $MachinePrecision] * 0.125), $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision] - N[(0.125 * N[(N[(alpha + beta), $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_1 := t\_0 \cdot t\_0\\
t_2 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
\mathbf{if}\;\frac{\frac{t\_2 \cdot \left(\beta \cdot \alpha + t\_2\right)}{t\_1}}{t\_1 - 1} \leq \infty:\\
\;\;\;\;\frac{\mathsf{fma}\left(\beta + i, i, \beta \cdot \alpha\right)}{\mathsf{fma}\left(2, i, \beta\right) \cdot \mathsf{fma}\left(2, i, \beta\right)} \cdot \frac{\left(\beta + i\right) \cdot i}{\mathsf{fma}\left(\mathsf{fma}\left(2, i, \beta\right), \mathsf{fma}\left(2, i, \beta\right), -1\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.0625, i, \left(\beta + \alpha\right) \cdot 0.125\right)}{i} - 0.125 \cdot \frac{\alpha + \beta}{i}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (*.f64 i (+.f64 (+.f64 alpha beta) i)) (+.f64 (*.f64 beta alpha) (*.f64 i (+.f64 (+.f64 alpha beta) i)))) (*.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)))) (-.f64 (*.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) #s(literal 1 binary64))) < +inf.0Initial program 17.2%
Taylor expanded in alpha around 0
Applied rewrites16.2%
Taylor expanded in alpha around 0
Applied rewrites17.4%
Taylor expanded in alpha around 0
Applied rewrites17.5%
Taylor expanded in alpha around 0
Applied rewrites17.6%
Taylor expanded in alpha around 0
Applied rewrites16.2%
Taylor expanded in alpha around 0
Applied rewrites15.7%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
Applied rewrites34.2%
if +inf.0 < (/.f64 (/.f64 (*.f64 (*.f64 i (+.f64 (+.f64 alpha beta) i)) (+.f64 (*.f64 beta alpha) (*.f64 i (+.f64 (+.f64 alpha beta) i)))) (*.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)))) (-.f64 (*.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) #s(literal 1 binary64))) Initial program 17.2%
Taylor expanded in i around inf
lower--.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6477.9
Applied rewrites77.9%
lift-+.f64N/A
lift-*.f64N/A
lift-/.f64N/A
associate-*r/N/A
add-to-fractionN/A
lower-/.f64N/A
lift-fma.f64N/A
lift-*.f64N/A
distribute-lft-outN/A
lift-+.f64N/A
associate-*r*N/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6477.9
lift-+.f64N/A
+-commutativeN/A
lift-+.f6477.9
Applied rewrites77.9%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (+ (+ alpha beta) (* 2.0 i)))
(t_1 (* t_0 t_0))
(t_2 (* i (+ (+ alpha beta) i))))
(if (<= (/ (/ (* t_2 (+ (* beta alpha) t_2)) t_1) (- t_1 1.0)) INFINITY)
(*
(* (+ beta i) i)
(/
(/
(fma (+ beta i) i (* beta alpha))
(* (fma 2.0 i beta) (fma 2.0 i beta)))
(fma (fma 2.0 i beta) (fma 2.0 i beta) -1.0)))
(-
(/ (fma 0.0625 i (* (+ beta alpha) 0.125)) i)
(* 0.125 (/ (+ alpha beta) i))))))
/* Herbie alternative (variant of the previous one with the product
 * re-associated). The <= INFINITY guard is false only when the naive
 * expression evaluates to NaN; that case falls back to an i -> inf
 * limit form. */
double code(double alpha, double beta, double i) {
double t_0 = (alpha + beta) + (2.0 * i);
double t_1 = t_0 * t_0;
double t_2 = i * ((alpha + beta) + i);
double tmp;
if ((((t_2 * ((beta * alpha) + t_2)) / t_1) / (t_1 - 1.0)) <= ((double) INFINITY)) {
tmp = ((beta + i) * i) * ((fma((beta + i), i, (beta * alpha)) / (fma(2.0, i, beta) * fma(2.0, i, beta))) / fma(fma(2.0, i, beta), fma(2.0, i, beta), -1.0));
} else {
tmp = (fma(0.0625, i, ((beta + alpha) * 0.125)) / i) - (0.125 * ((alpha + beta) / i));
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * i)) t_1 = Float64(t_0 * t_0) t_2 = Float64(i * Float64(Float64(alpha + beta) + i)) tmp = 0.0 if (Float64(Float64(Float64(t_2 * Float64(Float64(beta * alpha) + t_2)) / t_1) / Float64(t_1 - 1.0)) <= Inf) tmp = Float64(Float64(Float64(beta + i) * i) * Float64(Float64(fma(Float64(beta + i), i, Float64(beta * alpha)) / Float64(fma(2.0, i, beta) * fma(2.0, i, beta))) / fma(fma(2.0, i, beta), fma(2.0, i, beta), -1.0))); else tmp = Float64(Float64(fma(0.0625, i, Float64(Float64(beta + alpha) * 0.125)) / i) - Float64(0.125 * Float64(Float64(alpha + beta) / i))); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[(t$95$2 * N[(N[(beta * alpha), $MachinePrecision] + t$95$2), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(t$95$1 - 1.0), $MachinePrecision]), $MachinePrecision], Infinity], N[(N[(N[(beta + i), $MachinePrecision] * i), $MachinePrecision] * N[(N[(N[(N[(beta + i), $MachinePrecision] * i + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] / N[(N[(2.0 * i + beta), $MachinePrecision] * N[(2.0 * i + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(2.0 * i + beta), $MachinePrecision] * N[(2.0 * i + beta), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(0.0625 * i + N[(N[(beta + alpha), $MachinePrecision] * 0.125), $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision] - N[(0.125 * N[(N[(alpha + beta), $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_1 := t\_0 \cdot t\_0\\
t_2 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
\mathbf{if}\;\frac{\frac{t\_2 \cdot \left(\beta \cdot \alpha + t\_2\right)}{t\_1}}{t\_1 - 1} \leq \infty:\\
\;\;\;\;\left(\left(\beta + i\right) \cdot i\right) \cdot \frac{\frac{\mathsf{fma}\left(\beta + i, i, \beta \cdot \alpha\right)}{\mathsf{fma}\left(2, i, \beta\right) \cdot \mathsf{fma}\left(2, i, \beta\right)}}{\mathsf{fma}\left(\mathsf{fma}\left(2, i, \beta\right), \mathsf{fma}\left(2, i, \beta\right), -1\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.0625, i, \left(\beta + \alpha\right) \cdot 0.125\right)}{i} - 0.125 \cdot \frac{\alpha + \beta}{i}\\
\end{array}
\end{array}
if (/.f64 (/.f64 (*.f64 (*.f64 i (+.f64 (+.f64 alpha beta) i)) (+.f64 (*.f64 beta alpha) (*.f64 i (+.f64 (+.f64 alpha beta) i)))) (*.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)))) (-.f64 (*.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) #s(literal 1 binary64))) < +inf.0Initial program 17.2%
Taylor expanded in alpha around 0
Applied rewrites16.2%
Taylor expanded in alpha around 0
Applied rewrites17.4%
Taylor expanded in alpha around 0
Applied rewrites17.5%
Taylor expanded in alpha around 0
Applied rewrites17.6%
Taylor expanded in alpha around 0
Applied rewrites16.2%
Taylor expanded in alpha around 0
Applied rewrites15.7%
lift-/.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
Applied rewrites33.3%
if +inf.0 < (/.f64 (/.f64 (*.f64 (*.f64 i (+.f64 (+.f64 alpha beta) i)) (+.f64 (*.f64 beta alpha) (*.f64 i (+.f64 (+.f64 alpha beta) i)))) (*.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)))) (-.f64 (*.f64 (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i)) (+.f64 (+.f64 alpha beta) (*.f64 #s(literal 2 binary64) i))) #s(literal 1 binary64))) Initial program 17.2%
Taylor expanded in i around inf
lower--.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6477.9
Applied rewrites77.9%
lift-+.f64N/A
lift-*.f64N/A
lift-/.f64N/A
associate-*r/N/A
add-to-fractionN/A
lower-/.f64N/A
lift-fma.f64N/A
lift-*.f64N/A
distribute-lft-outN/A
lift-+.f64N/A
associate-*r*N/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6477.9
lift-+.f64N/A
+-commutativeN/A
lift-+.f6477.9
Applied rewrites77.9%
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (- (fma 2.0 i (+ beta alpha)) 1.0)))
(if (<= beta 3.9e+137)
(*
0.25
(*
(/ (+ (+ beta alpha) i) (fma 2.0 i (- (+ beta alpha) -1.0)))
(/ i t_0)))
(/ (/ (* i (+ alpha i)) beta) t_0))))
double code(double alpha, double beta, double i) {
double t_0 = fma(2.0, i, (beta + alpha)) - 1.0;
double tmp;
if (beta <= 3.9e+137) {
tmp = 0.25 * ((((beta + alpha) + i) / fma(2.0, i, ((beta + alpha) - -1.0))) * (i / t_0));
} else {
tmp = ((i * (alpha + i)) / beta) / t_0;
}
return tmp;
}
function code(alpha, beta, i) t_0 = Float64(fma(2.0, i, Float64(beta + alpha)) - 1.0) tmp = 0.0 if (beta <= 3.9e+137) tmp = Float64(0.25 * Float64(Float64(Float64(Float64(beta + alpha) + i) / fma(2.0, i, Float64(Float64(beta + alpha) - -1.0))) * Float64(i / t_0))); else tmp = Float64(Float64(Float64(i * Float64(alpha + i)) / beta) / t_0); end return tmp end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(N[(2.0 * i + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]}, If[LessEqual[beta, 3.9e+137], N[(0.25 * N[(N[(N[(N[(beta + alpha), $MachinePrecision] + i), $MachinePrecision] / N[(2.0 * i + N[(N[(beta + alpha), $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(i / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(i * N[(alpha + i), $MachinePrecision]), $MachinePrecision] / beta), $MachinePrecision] / t$95$0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(2, i, \beta + \alpha\right) - 1\\
\mathbf{if}\;\beta \leq 3.9 \cdot 10^{+137}:\\
\;\;\;\;0.25 \cdot \left(\frac{\left(\beta + \alpha\right) + i}{\mathsf{fma}\left(2, i, \left(\beta + \alpha\right) - -1\right)} \cdot \frac{i}{t\_0}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{i \cdot \left(\alpha + i\right)}{\beta}}{t\_0}\\
\end{array}
\end{array}
if beta < 3.90000000000000029e137Initial program 17.2%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lift-*.f64N/A
*-commutativeN/A
Applied rewrites38.7%
Taylor expanded in i around inf
Applied rewrites32.8%
lift-/.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
difference-of-sqr--1N/A
metadata-evalN/A
sub-flipN/A
lift-fma.f64N/A
lift-+.f64N/A
lift--.f64N/A
times-fracN/A
lower-*.f64N/A
Applied rewrites71.9%
if 3.90000000000000029e137 < beta Initial program 17.2%
Taylor expanded in beta around -inf
lower-*.f64N/A
lower-*.f64N/A
lower-fma.f64N/A
lower-*.f6413.8
Applied rewrites13.8%
lift-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
difference-of-squares-revN/A
associate-/r*N/A
Applied rewrites17.5%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6412.7
Applied rewrites12.7%
(FPCore (alpha beta i) :precision binary64 (if (<= beta 3.9e+137) 0.0625 (/ (/ (* i (+ alpha i)) beta) (- (fma 2.0 i (+ beta alpha)) 1.0))))
double code(double alpha, double beta, double i) {
double tmp;
if (beta <= 3.9e+137) {
tmp = 0.0625;
} else {
tmp = ((i * (alpha + i)) / beta) / (fma(2.0, i, (beta + alpha)) - 1.0);
}
return tmp;
}
function code(alpha, beta, i) tmp = 0.0 if (beta <= 3.9e+137) tmp = 0.0625; else tmp = Float64(Float64(Float64(i * Float64(alpha + i)) / beta) / Float64(fma(2.0, i, Float64(beta + alpha)) - 1.0)); end return tmp end
code[alpha_, beta_, i_] := If[LessEqual[beta, 3.9e+137], 0.0625, N[(N[(N[(i * N[(alpha + i), $MachinePrecision]), $MachinePrecision] / beta), $MachinePrecision] / N[(N[(2.0 * i + N[(beta + alpha), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 3.9 \cdot 10^{+137}:\\
\;\;\;\;0.0625\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{i \cdot \left(\alpha + i\right)}{\beta}}{\mathsf{fma}\left(2, i, \beta + \alpha\right) - 1}\\
\end{array}
\end{array}
if beta < 3.90000000000000029e137Initial program 17.2%
Taylor expanded in i around inf
Applied rewrites71.0%
if 3.90000000000000029e137 < beta Initial program 17.2%
Taylor expanded in beta around -inf
lower-*.f64N/A
lower-*.f64N/A
lower-fma.f64N/A
lower-*.f6413.8
Applied rewrites13.8%
lift-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
difference-of-squares-revN/A
associate-/r*N/A
Applied rewrites17.5%
Taylor expanded in beta around inf
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6412.7
Applied rewrites12.7%
(FPCore (alpha beta i) :precision binary64 (- (+ 0.0625 (* 0.125 (/ beta i))) (* 0.125 (/ (+ alpha beta) i))))
/* Herbie alternative from the i -> inf Taylor expansion:
 * 0.0625 + 0.125*(beta/i) - 0.125*((alpha+beta)/i). */
double code(double alpha, double beta, double i) {
    double beta_term = 0.125 * (beta / i);
    double sum_term = 0.125 * ((alpha + beta) / i);
    return (0.0625 + beta_term) - sum_term;
}
! NaN-aware generic fmin/fmax wrappers (duplicate listing): x /= x tests
! for NaN; if one argument is NaN the other is returned, otherwise the
! intrinsic max/min is used. Mixed-kind variants promote to real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative from the i -> inf Taylor expansion:
!   0.0625 + 0.125*(beta/i) - 0.125*((alpha+beta)/i)
real(8) function code(alpha, beta, i)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = (0.0625d0 + (0.125d0 * (beta / i))) - (0.125d0 * ((alpha + beta) / i))
end function
/** Herbie alternative from the i -> inf Taylor expansion:
 *  0.0625 + 0.125*(beta/i) - 0.125*((alpha+beta)/i). */
public static double code(double alpha, double beta, double i) {
    double lead = 0.0625 + (0.125 * (beta / i));
    return lead - (0.125 * ((alpha + beta) / i));
}
def code(alpha, beta, i):
    """Herbie alternative (i -> inf Taylor expansion):
    0.0625 + 0.125*(beta/i) - 0.125*((alpha+beta)/i)."""
    return (0.0625 + (0.125 * (beta / i))) - (0.125 * ((alpha + beta) / i))
function code(alpha, beta, i) return Float64(Float64(0.0625 + Float64(0.125 * Float64(beta / i))) - Float64(0.125 * Float64(Float64(alpha + beta) / i))) end
function tmp = code(alpha, beta, i) tmp = (0.0625 + (0.125 * (beta / i))) - (0.125 * ((alpha + beta) / i)); end
code[alpha_, beta_, i_] := N[(N[(0.0625 + N[(0.125 * N[(beta / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(0.125 * N[(N[(alpha + beta), $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(0.0625 + 0.125 \cdot \frac{\beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i}
\end{array}
Initial program 17.2%
Taylor expanded in i around inf
lower--.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6477.9
Applied rewrites77.9%
Taylor expanded in alpha around 0
lower-*.f64N/A
lower-/.f6473.9
Applied rewrites73.9%
(FPCore (alpha beta i) :precision binary64 (if (<= beta 1.9e+233) 0.0625 (- (* beta (/ 0.125 i)) (* 0.125 (/ (+ alpha beta) i)))))
/* Herbie alternative: constant 1/16 for beta up to 1.9e233; otherwise the
 * i -> inf form with beta factored out of the leading quotient. */
double code(double alpha, double beta, double i) {
    if (beta <= 1.9e+233) {
        return 0.0625;
    }
    return (beta * (0.125 / i)) - (0.125 * ((alpha + beta) / i));
}
! NaN-aware generic fmin/fmax wrappers (duplicate listing): x /= x tests
! for NaN; if one argument is NaN the other is returned, otherwise the
! intrinsic max/min is used. Mixed-kind variants promote to real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative: constant 1/16 for beta up to 1.9d233; otherwise the
! i -> inf form with beta factored out of the leading quotient.
real(8) function code(alpha, beta, i)
use fmin_fmax_functions
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: tmp
if (beta <= 1.9d+233) then
tmp = 0.0625d0
else
tmp = (beta * (0.125d0 / i)) - (0.125d0 * ((alpha + beta) / i))
end if
code = tmp
end function
/**
 * Herbie-generated approximation with a regime split on beta: the
 * constant 0.0625 below the threshold, otherwise a rearranged quotient.
 *
 * @param alpha first Jacobi parameter
 * @param beta  second Jacobi parameter (selects the regime)
 * @param i     index variable
 * @return the approximated value
 */
public static double code(double alpha, double beta, double i) {
    return (beta <= 1.9e+233)
            ? 0.0625
            : (beta * (0.125 / i)) - (0.125 * ((alpha + beta) / i));
}
def code(alpha, beta, i): tmp = 0 if beta <= 1.9e+233: tmp = 0.0625 else: tmp = (beta * (0.125 / i)) - (0.125 * ((alpha + beta) / i)) return tmp
function code(alpha, beta, i) tmp = 0.0 if (beta <= 1.9e+233) tmp = 0.0625; else tmp = Float64(Float64(beta * Float64(0.125 / i)) - Float64(0.125 * Float64(Float64(alpha + beta) / i))); end return tmp end
function tmp_2 = code(alpha, beta, i) tmp = 0.0; if (beta <= 1.9e+233) tmp = 0.0625; else tmp = (beta * (0.125 / i)) - (0.125 * ((alpha + beta) / i)); end tmp_2 = tmp; end
code[alpha_, beta_, i_] := If[LessEqual[beta, 1.9e+233], 0.0625, N[(N[(beta * N[(0.125 / i), $MachinePrecision]), $MachinePrecision] - N[(0.125 * N[(N[(alpha + beta), $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 1.9 \cdot 10^{+233}:\\
\;\;\;\;0.0625\\
\mathbf{else}:\\
\;\;\;\;\beta \cdot \frac{0.125}{i} - 0.125 \cdot \frac{\alpha + \beta}{i}\\
\end{array}
\end{array}
if beta < 1.8999999999999999e233:
  Initial program: 17.2% accuracy
  Taylor expanded in i around inf
  Applied rewrites: 71.0% accuracy
if 1.8999999999999999e233 < beta:
  Initial program: 17.2% accuracy
Taylor expanded in i around inf
lower--.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6477.9
Applied rewrites77.9%
Taylor expanded in beta around inf
lower-*.f64N/A
lower-fma.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-fma.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f6474.3
Applied rewrites74.3%
Taylor expanded in beta around inf
lower-/.f645.7
Applied rewrites5.7%
(FPCore (alpha beta i) :precision binary64 0.0625)
/* Herbie-simplified approximation: over the analyzed input regime the whole
 * expression collapses to the constant 1/16. The parameters are retained
 * (and explicitly voided) solely to preserve the original call signature. */
double code(double alpha, double beta, double i) {
    (void) alpha;
    (void) beta;
    (void) i;
    return 0.0625;
}
! NaN-aware fmax/fmin generics covering every real(4)/real(8) argument
! kind combination. The merge-based bodies implement: if x is NaN return y,
! else if y is NaN return x, else return max/min of the two (mixed-kind
! variants promote the real(4) argument to real(8) with dble).
! NOTE(review): x /= x is the NaN test; ieee_arithmetic's ieee_is_nan would
! be the modern alternative.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8)/real(8) maximum; skips a NaN argument in favor of the other.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(4)/real(4) maximum with the same NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind maximum; y is promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind maximum; x is promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! real(8)/real(8) minimum; skips a NaN argument in favor of the other.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(4)/real(4) minimum with the same NaN handling.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! Mixed-kind minimum; y is promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind minimum; x is promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie-simplified approximation: over the analyzed input regime the
! expression collapses to the constant 1/16. The dummy arguments are kept
! so existing callers continue to work.
! Fix: added implicit none (the original relied on implicit typing rules).
real(8) function code(alpha, beta, i)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = 0.0625d0
end function
/**
 * Herbie-simplified approximation: constant 1/16 regardless of inputs.
 * The parameters are kept only to preserve the original signature.
 */
public static double code(double alpha, double beta, double i) {
    final double simplified = 0.0625;
    return simplified;
}
def code(alpha, beta, i): return 0.0625
function code(alpha, beta, i) return 0.0625 end
function tmp = code(alpha, beta, i) tmp = 0.0625; end
code[alpha_, beta_, i_] := 0.0625
\begin{array}{l}
\\
0.0625
\end{array}
Initial program: 17.2% accuracy
Taylor expanded in i around inf
Applied rewrites: 71.0% accuracy
herbie shell --seed 2025143
(FPCore (alpha beta i)
:name "Octave 3.8, jcobi/4"
:precision binary64
:pre (and (and (> alpha -1.0) (> beta -1.0)) (> i 1.0))
(/ (/ (* (* i (+ (+ alpha beta) i)) (+ (* beta alpha) (* i (+ (+ alpha beta) i)))) (* (+ (+ alpha beta) (* 2.0 i)) (+ (+ alpha beta) (* 2.0 i)))) (- (* (+ (+ alpha beta) (* 2.0 i)) (+ (+ alpha beta) (* 2.0 i))) 1.0)))