
(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
public static double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
def code(x): return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
function code(x) return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0))) end
function tmp = code(x) tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0)); end
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}
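Why this form is inaccurate: for large |x| each of the three terms is on the order of 1/x, while the true sum is only about 2/x^3, so the leading digits cancel and the rounding error of the individual divisions can dominate the result. Below is a minimal C sketch of the effect (not part of the report; the helper names and the sample point 1e8 are chosen here for illustration, and the reference uses the exact rewrite 2/(x*(x*x-1)) that appears as the last alternative):

```c
#include <stdio.h>

/* The original expression, exactly as emitted above. */
static double naive(double x) {
    return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}

/* Exact algebraic rewrite of the same expression (last alternative). */
static double factored(double x) {
    return 2.0 / (x * ((x * x) - 1.0));
}

int main(void) {
    double x = 1.0e8; /* arbitrary large sample point */
    /* Each term is ~1e-8 but the true sum is ~2e-24, so the rounding
       error of a single division already has the answer's magnitude. */
    printf("naive:    %.17g\n", naive(x));
    printf("factored: %.17g\n", factored(x));
    return 0;
}
```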
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
public static double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
def code(x): return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
function code(x) return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0))) end
function tmp = code(x) tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0)); end
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}
(FPCore (x) :precision binary64 (/ (/ (/ (+ 2.0 (/ 2.0 (* x x))) x) x) x))
double code(double x) {
return (((2.0 + (2.0 / (x * x))) / x) / x) / x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (((2.0d0 + (2.0d0 / (x * x))) / x) / x) / x
end function
public static double code(double x) {
return (((2.0 + (2.0 / (x * x))) / x) / x) / x;
}
def code(x): return (((2.0 + (2.0 / (x * x))) / x) / x) / x
function code(x) return Float64(Float64(Float64(Float64(2.0 + Float64(2.0 / Float64(x * x))) / x) / x) / x) end
function tmp = code(x) tmp = (((2.0 + (2.0 / (x * x))) / x) / x) / x; end
code[x_] := N[(N[(N[(N[(2.0 + N[(2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] / x), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\frac{\frac{\frac{2 + \frac{2}{x \cdot x}}{x}}{x}}{x}
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 69.2% |
| Taylor expanded in x around inf | |
| lower-/.f64 | N/A |
| lower-+.f64 | N/A |
| associate-*r/ | N/A |
| metadata-eval | N/A |
| lower-/.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| cube-mult | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 99.2 |
| Applied rewrites | 99.2% |
| Applied rewrites | 99.6% |
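The "Taylor expanded in x around inf" step is a series expansion in powers of 1/x: the exact sum simplifies to 2/(x^3 - x) (see the worked identity under the last alternative), and truncating the expansion after the x^{-5} term yields exactly the alternative above. Worked out here for reference, not part of the report:

\begin{array}{l}
\frac{2}{x^3 - x} = \frac{2}{x^3} \cdot \frac{1}{1 - 1/x^2} = \frac{2}{x^3} + \frac{2}{x^5} + \frac{2}{x^7} + \cdots, \qquad \frac{2}{x^3} + \frac{2}{x^5} = \frac{2 + \frac{2}{x \cdot x}}{x \cdot x \cdot x}
\end{array}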
(FPCore (x) :precision binary64 (/ 2.0 (* (fma x x x) (+ x -1.0))))
double code(double x) {
return 2.0 / (fma(x, x, x) * (x + -1.0));
}
function code(x) return Float64(2.0 / Float64(fma(x, x, x) * Float64(x + -1.0))) end
code[x_] := N[(2.0 / N[(N[(x * x + x), $MachinePrecision] * N[(x + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{2}{\mathsf{fma}\left(x, x, x\right) \cdot \left(x + -1\right)}
\end{array}
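Here fma(x, x, x) evaluates x*x + x with a single rounding, and (x^2 + x)(x - 1) = x^3 - x, so the denominator is the same cubic as in the factored alternative below. A minimal C99 sketch of the equivalence (the sample point is chosen here for illustration, not taken from the report; link with -lm if needed):

```c
#include <stdio.h>
#include <math.h> /* fma() is C99 */

int main(void) {
    double x = 1.0e8;
    /* fma(x, x, x) computes x*x + x with one rounding;
       (x*x + x) * (x - 1) equals x*x*x - x algebraically. */
    double with_fma = 2.0 / (fma(x, x, x) * (x + -1.0));
    double factored = 2.0 / (x * ((x * x) - 1.0));
    printf("fma form:      %.17g\n", with_fma);
    printf("factored form: %.17g\n", factored);
    return 0;
}
```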
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 69.1% |
| lift-+.f64 | N/A |
| +-commutative | N/A |
| lift-/.f64 | N/A |
| lift--.f64 | N/A |
| lift-/.f64 | N/A |
| lift-/.f64 | N/A |
| frac-sub | N/A |
| frac-add | N/A |
| lower-/.f64 | N/A |
| Applied rewrites | 20.1% |
| Taylor expanded in x around 0 | |
| Applied rewrites | 99.2% |
| lift-*.f64 | N/A |
| lift-+.f64 | N/A |
| metadata-eval | N/A |
| sub-neg | N/A |
| *-lft-identity | N/A |
| lift-+.f64 | N/A |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| associate-*r* | N/A |
| *-lft-identity | N/A |
| +-commutative | N/A |
| distribute-rgt-in | N/A |
| *-lft-identity | N/A |
| lower-fma.f64 | N/A |
| sub-neg | N/A |
| metadata-eval | N/A |
| lift-+.f64 | 99.2 |
| Applied rewrites | 99.2% |
(FPCore (x) :precision binary64 (/ 2.0 (* x (- (* x x) 1.0))))
double code(double x) {
return 2.0 / (x * ((x * x) - 1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (x * ((x * x) - 1.0d0))
end function
public static double code(double x) {
return 2.0 / (x * ((x * x) - 1.0));
}
def code(x): return 2.0 / (x * ((x * x) - 1.0))
function code(x) return Float64(2.0 / Float64(x * Float64(Float64(x * x) - 1.0))) end
function tmp = code(x) tmp = 2.0 / (x * ((x * x) - 1.0)); end
code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{2}{x \cdot \left(x \cdot x - 1\right)}
\end{array}
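Unlike the series candidates, this last alternative is an exact algebraic rewrite of the input; combining the fractions over a common denominator (worked out here for reference, not part of the report) gives:

\begin{array}{l}
\frac{1}{x + 1} + \frac{1}{x - 1} = \frac{2x}{x^2 - 1}, \qquad \frac{2x}{x^2 - 1} - \frac{2}{x} = \frac{2x^2 - 2\left(x^2 - 1\right)}{x \cdot \left(x^2 - 1\right)} = \frac{2}{x \cdot \left(x^2 - 1\right)}
\end{array}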
To reproduce these results, run:
herbie shell --seed 2024222
and paste the FPCore below at the shell prompt:
(FPCore (x)
:name "3frac (problem 3.3.3)"
:precision binary64
:pre (> (fabs x) 1.0)
:alt
(! :herbie-platform default (/ 2 (* x (- (* x x) 1))))
(+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
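(In Herbie's input format, the :pre property restricts sampling to inputs with |x| > 1, and the :alt property records a known alternative implementation that Herbie compares its own results against.)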