
(FPCore (x) :precision binary64 (/ 10.0 (- 1.0 (* x x))))
double code(double x) {
    /* Evaluate 10 / (1 - x^2) directly in binary64 arithmetic. */
    double denom = 1.0 - x * x;
    return 10.0 / denom;
}
real(8) function code(x)
    ! Evaluate 10 / (1 - x**2) directly in double precision.
    real(8), intent (in) :: x
    real(8) :: denom
    denom = 1.0d0 - x * x
    code = 10.0d0 / denom
end function
public static double code(double x) {
    // 10 / (1 - x^2), evaluated directly in double precision.
    final double square = x * x;
    return 10.0 / (1.0 - square);
}
def code(x):
    """Return 10 / (1 - x*x), evaluated directly in binary64."""
    denominator = 1.0 - x * x
    return 10.0 / denominator
function code(x)
    # 10 / (1 - x^2); each intermediate is explicitly rounded to Float64
    # to mirror binary64 evaluation step by step.
    sq = Float64(x * x)
    den = Float64(1.0 - sq)
    return Float64(10.0 / den)
end
function tmp = code(x)
    % 10 / (1 - x^2), evaluated directly in double precision.
    den = 1.0 - (x * x);
    tmp = 10.0 / den;
end
(* 10/(1 - x^2); each operation is rounded to $MachinePrecision to mimic binary64 evaluation. *)
code[x_] := N[(10.0 / N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{1 - x \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ 10.0 (- 1.0 (* x x))))
double code(double x) {
// Direct evaluation of 10 / (1 - x*x) in double precision.
return 10.0 / (1.0 - (x * x));
}
! Direct evaluation of 10 / (1 - x*x) in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0 / (1.0d0 - (x * x))
end function
public static double code(double x) {
// Direct evaluation of 10 / (1 - x*x) in double precision.
return 10.0 / (1.0 - (x * x));
}
# Direct evaluation of 10 / (1 - x*x) in float (binary64).
def code(x): return 10.0 / (1.0 - (x * x))
# Direct evaluation of 10 / (1 - x*x), rounding each step to Float64.
function code(x) return Float64(10.0 / Float64(1.0 - Float64(x * x))) end
% Direct evaluation of 10 / (1 - x*x) in double precision.
function tmp = code(x) tmp = 10.0 / (1.0 - (x * x)); end
(* Direct evaluation of 10/(1 - x*x), rounded to $MachinePrecision at each step. *)
code[x_] := N[(10.0 / N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{1 - x \cdot x}
\end{array}
(FPCore (x) :precision binary64 (/ -10.0 (fma x x -1.0)))
double code(double x) {
// Rewritten as -10 / fma(x, x, -1): fma computes x*x - 1 with a single
// rounding, which (per the report) raises sampled accuracy from 87.4% to 99.6%.
return -10.0 / fma(x, x, -1.0);
}
# -10 / fma(x, x, -1): single-rounding x*x - 1; per the report this rewrite
# improves sampled accuracy over the direct form.
function code(x) return Float64(-10.0 / fma(x, x, -1.0)) end
(* Mathematica rendering of -10 / fma(x, x, -1); the fused multiply-add is written as x*x + -1. *)
code[x_] := N[(-10.0 / N[(x * x + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-10}{\mathsf{fma}\left(x, x, -1\right)}
\end{array}
Initial program 87.4%
Applied rewrites 99.6%
(FPCore (x) :precision binary64 (/ 10.0 (- 1.0 x)))
double code(double x) {
// Approximation 10 / (1 - x) from the report's Taylor-expansion step;
// not equivalent to 10 / (1 - x*x) in general.
return 10.0 / (1.0 - x);
}
! Approximation 10 / (1 - x) from the report's Taylor-expansion step;
! not equivalent to 10 / (1 - x*x) in general.
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0 / (1.0d0 - x)
end function
public static double code(double x) {
// Approximation 10 / (1 - x) from the report's Taylor-expansion step;
// not equivalent to 10 / (1 - x*x) in general.
return 10.0 / (1.0 - x);
}
# Approximation 10 / (1 - x) from the report's Taylor-expansion step;
# not equivalent to 10 / (1 - x*x) in general.
def code(x): return 10.0 / (1.0 - x)
# Approximation 10 / (1 - x) from the report's Taylor-expansion step.
function code(x) return Float64(10.0 / Float64(1.0 - x)) end
% Approximation 10 / (1 - x) from the report's Taylor-expansion step.
function tmp = code(x) tmp = 10.0 / (1.0 - x); end
(* Approximation 10/(1 - x) from the report's Taylor-expansion step. *)
code[x_] := N[(10.0 / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{1 - x}
\end{array}
Initial program 87.8%
Applied rewrites 99.6%
lift-/.f64 N/A
metadata-eval N/A
distribute-neg-frac N/A
lift-fma.f64 N/A
difference-of-sqr--1 N/A
associate-/r* N/A
distribute-neg-frac2 N/A
lower-/.f64 N/A
lower-/.f64 N/A
lower-+.f64 N/A
flip-- N/A
metadata-eval N/A
difference-of-sqr-1 N/A
difference-of-sqr--1 N/A
lift-fma.f64 N/A
distribute-neg-frac N/A
lift-fma.f64 N/A
lift-*.f64 N/A
+-commutative N/A
distribute-neg-in N/A
metadata-eval N/A
sub-neg N/A
lift-*.f64 N/A
metadata-eval N/A
+-commutative N/A
Applied rewrites 99.4%
Taylor expanded in x around 0
Applied rewrites 18.8%
herbie shell --seed 2024223
(FPCore (x)
:name "ENA, Section 1.4, Mentioned, B"
:precision binary64
:pre (and (<= 0.999 x) (<= x 1.001))
(/ 10.0 (- 1.0 (* x x))))