
; Initial program: 10 / (1 - x^2) in binary64 (cancels badly when x is near +/-1).
(FPCore (x) :precision binary64 (/ 10.0 (- 1.0 (* x x))))
/* Evaluate 10 / (1 - x^2) directly in binary64. */
double code(double x) {
	double denom = 1.0 - x * x;
	return 10.0 / denom;
}
! Evaluate 10 / (1 - x^2) directly in double precision.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: denom
denom = 1.0d0 - x * x
code = 10.0d0 / denom
end function
// Evaluate 10 / (1 - x*x) directly in double precision.
public static double code(double x) {
    final double denom = 1.0 - x * x;
    return 10.0 / denom;
}
def code(x):
    """Evaluate 10 / (1 - x*x) directly in double precision."""
    denom = 1.0 - x * x
    return 10.0 / denom
# Evaluate 10 / (1 - x*x) directly, rounding each step to Float64.
function code(x)
    denom = Float64(1.0 - Float64(x * x))
    return Float64(10.0 / denom)
end
% Evaluate 10 / (1 - x*x) directly in double precision.
function tmp = code(x)
	denom = 1.0 - x * x;
	tmp = 10.0 / denom;
end
(* 10 / (1 - x^2); every intermediate is rounded to $MachinePrecision, mirroring binary64 evaluation. *)
code[x_] := N[(10.0 / N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{1 - x \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 (verbatim repeat of the initial program): 10 / (1 - x^2) in binary64.
(FPCore (x) :precision binary64 (/ 10.0 (- 1.0 (* x x))))
/* Alternative 1: identical to the initial program, 10 / (1 - x^2). */
double code(double x) {
return 10.0 / (1.0 - (x * x));
}
! Alternative 1: identical to the initial program, 10 / (1 - x^2).
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0 / (1.0d0 - (x * x))
end function
// Alternative 1: identical to the initial program, 10 / (1 - x*x).
public static double code(double x) {
return 10.0 / (1.0 - (x * x));
}
# Alternative 1: identical to the initial program, 10 / (1 - x*x).
def code(x): return 10.0 / (1.0 - (x * x))
# Alternative 1: identical to the initial program, 10 / (1 - x*x) in Float64.
function code(x) return Float64(10.0 / Float64(1.0 - Float64(x * x))) end
% Alternative 1: identical to the initial program, 10 / (1 - x*x).
function tmp = code(x) tmp = 10.0 / (1.0 - (x * x)); end
(* Alternative 1: identical to the initial program; intermediates rounded to $MachinePrecision. *)
code[x_] := N[(10.0 / N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{1 - x \cdot x}
\end{array}
; Alternative 2: -10 / fma(x, x, -1); the fused multiply-add avoids the cancellation in 1 - x^2.
(FPCore (x) :precision binary64 (/ -10.0 (fma x x -1.0)))
double code(double x) {
return -10.0 / fma(x, x, -1.0);
}
# Evaluate -10 / fma(x, x, -1): same value as 10 / (1 - x^2) with a fused denominator.
function code(x)
    denom = fma(x, x, -1.0)
    return Float64(-10.0 / denom)
end
(* Alternative 2: -10 / (x*x - 1); NOTE(review): this rendering loses the single-rounding fma of the FPCore form. *)
code[x_] := N[(-10.0 / N[(x * x + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-10}{\mathsf{fma}\left(x, x, -1\right)}
\end{array}
Initial program 87.5%
sqr-neg 87.5%
remove-double-neg 87.5%
distribute-neg-frac 87.5%
distribute-frac-neg2 87.5%
metadata-eval 87.5%
neg-sub0 87.5%
associate--r- 87.5%
metadata-eval 87.5%
+-commutative 87.5%
sqr-neg 87.5%
fma-define 99.5%
Simplified 99.5%
; Alternative 3 (verbatim repeat of the initial program): 10 / (1 - x^2) in binary64.
(FPCore (x) :precision binary64 (/ 10.0 (- 1.0 (* x x))))
/* Alternative 3: identical to the initial program, 10 / (1 - x^2). */
double code(double x) {
return 10.0 / (1.0 - (x * x));
}
! Alternative 3: identical to the initial program, 10 / (1 - x^2).
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0 / (1.0d0 - (x * x))
end function
// Alternative 3: identical to the initial program, 10 / (1 - x*x).
public static double code(double x) {
return 10.0 / (1.0 - (x * x));
}
# Alternative 3: identical to the initial program, 10 / (1 - x*x).
def code(x): return 10.0 / (1.0 - (x * x))
# Alternative 3: identical to the initial program, 10 / (1 - x*x) in Float64.
function code(x) return Float64(10.0 / Float64(1.0 - Float64(x * x))) end
% Alternative 3: identical to the initial program, 10 / (1 - x*x).
function tmp = code(x) tmp = 10.0 / (1.0 - (x * x)); end
(* Alternative 3: identical to the initial program; intermediates rounded to $MachinePrecision. *)
code[x_] := N[(10.0 / N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{1 - x \cdot x}
\end{array}
Initial program 87.5%
; Alternative 4: Taylor expansion around 0 collapses the whole expression to the constant 10.
(FPCore (x) :precision binary64 10.0)
/* Constant Taylor approximation around x = 0: always returns 10. */
double code(double x) {
	(void) x; /* input intentionally unused */
	return 10.0;
}
! Constant Taylor approximation around x = 0: always returns 10; x is unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0
end function
// Constant Taylor approximation around x = 0: always returns 10; x is unused.
public static double code(double x) {
    final double constantValue = 10.0;
    return constantValue;
}
def code(x):
    """Constant Taylor approximation around x = 0: always 10.0 (x is unused)."""
    return 10.0
# Constant Taylor approximation around x = 0: always 10.0 (x is unused).
function code(x)
    return 10.0
end
% Constant Taylor approximation around x = 0: always 10.0 (x is unused).
function tmp = code(x)
	tmp = 10.0;
end
(* Constant Taylor approximation around x = 0: always 10.0; the pattern x_ is unused. *)
code[x_] := 10.0
\begin{array}{l}
\\
10
\end{array}
Initial program 87.5%
sqr-neg 87.5%
remove-double-neg 87.5%
distribute-neg-frac 87.5%
distribute-frac-neg2 87.5%
metadata-eval 87.5%
neg-sub0 87.5%
associate--r- 87.5%
metadata-eval 87.5%
+-commutative 87.5%
sqr-neg 87.5%
fma-define 99.5%
Simplified 99.5%
Taylor expanded in x around 0 9.6%
herbie shell --seed 2024089
; Reproduction input: the initial program with its metadata.
; The precondition restricts sampling to 0.999 <= x <= 1.001, i.e. the
; neighborhood of the pole at x = 1 where 1 - x^2 cancels.
(FPCore (x)
:name "ENA, Section 1.4, Mentioned, B"
:precision binary64
:pre (and (<= 0.999 x) (<= x 1.001))
(/ 10.0 (- 1.0 (* x x))))