
(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
public static double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
def code(x): return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
function code(x) return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0))) end
function tmp = code(x) tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0)); end
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}
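For large |x| the three terms are each on the order of 1/x but cancel to a sum of order 2/x^3, so the naive evaluation loses most of its significant bits. Exact algebra collapses the expression to a single fraction (this is the developer target given in the input at the end of this report):

\begin{array}{l}
\\
\frac{1}{x + 1} - \frac{2}{x} + \frac{1}{x - 1} = \frac{2 \cdot x}{x^2 - 1} - \frac{2}{x} = \frac{2}{x \cdot \left(x^2 - 1\right)}
\end{array}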
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| Alternative 1 | 99.5% | |
| Alternative 2 | 99.4% | |
| Alternative 3 | 99.0% | |
| Alternative 4 | 76.1% | |
| Alternative 5 | 72.4% | |
| Alternative 6 | 5.0% | |
| Alternative 7 | 3.4% | |
Initial program (73.8% accurate):

(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
public static double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
def code(x): return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
function code(x) return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0))) end
function tmp = code(x) tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0)); end
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}
Alternative 1 (99.5% accurate):

(FPCore (x) :precision binary64 (* (fma 2.0 (pow x -2.0) 2.0) (pow x -3.0)))
double code(double x) {
return fma(2.0, pow(x, -2.0), 2.0) * pow(x, -3.0);
}
function code(x) return Float64(fma(2.0, (x ^ -2.0), 2.0) * (x ^ -3.0)) end
code[x_] := N[(N[(2.0 * N[Power[x, -2.0], $MachinePrecision] + 2.0), $MachinePrecision] * N[Power[x, -3.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(2, {x}^{-2}, 2\right) \cdot {x}^{-3}
\end{array}
Derivation:
Initial program 73.8%
Taylor expanded in x around inf 98.6%
associate-*r/ 98.6%
metadata-eval 98.6%
Simplified 98.6%
div-inv 98.6%
+-commutative 98.6%
div-inv 98.6%
fma-define 98.6%
pow-flip 98.6%
metadata-eval 98.6%
pow-flip 99.5%
metadata-eval 99.5%
Applied egg-rr 99.5%
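To see the payoff concretely, here is a minimal Python sketch (not part of the Herbie output; it spells the fma out as a separate multiply and add, since math.fma is only available from Python 3.13) comparing the naive form and Alternative 1 against an exact rational reference:

```python
from fractions import Fraction

def naive(x):
    return (1.0 / (x + 1.0) - 2.0 / x) + 1.0 / (x - 1.0)

def alt1(x):
    # fma(2.0, x^-2, 2.0) * x^-3, with the fma spelled out as a
    # separate multiply and add (math.fma needs Python >= 3.13)
    return (2.0 * x ** -2.0 + 2.0) * x ** -3.0

def exact(x):
    q = Fraction(x)               # exact rational value of the float x
    return 2 / (q * (q * q - 1))  # developer target 2/(x*(x^2-1)), exactly

for x in (1.0e4, 1.0e6, 1.0e8):
    ref = float(exact(x))
    print(f"x={x:g}  naive rel err: {abs(naive(x) - ref) / abs(ref):.1e}  "
          f"alt1 rel err: {abs(alt1(x) - ref) / abs(ref):.1e}")
```

As |x| grows, the naive form's relative error climbs toward 1 while Alternative 1 stays near machine epsilon.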
Alternative 2 (99.4% accurate):

(FPCore (x) :precision binary64 (/ (/ (+ 2.0 (/ 2.0 (pow x 2.0))) (pow x 2.0)) x))
double code(double x) {
return ((2.0 + (2.0 / pow(x, 2.0))) / pow(x, 2.0)) / x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((2.0d0 + (2.0d0 / (x ** 2.0d0))) / (x ** 2.0d0)) / x
end function
public static double code(double x) {
return ((2.0 + (2.0 / Math.pow(x, 2.0))) / Math.pow(x, 2.0)) / x;
}
def code(x): return ((2.0 + (2.0 / math.pow(x, 2.0))) / math.pow(x, 2.0)) / x
function code(x) return Float64(Float64(Float64(2.0 + Float64(2.0 / (x ^ 2.0))) / (x ^ 2.0)) / x) end
function tmp = code(x) tmp = ((2.0 + (2.0 / (x ^ 2.0))) / (x ^ 2.0)) / x; end
code[x_] := N[(N[(N[(2.0 + N[(2.0 / N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2 + \frac{2}{{x}^{2}}}{{x}^{2}}}{x}
\end{array}
Derivation:
Initial program 73.8%
Taylor expanded in x around inf 98.6%
associate-*r/ 98.6%
metadata-eval 98.6%
Simplified 98.6%
*-un-lft-identity 98.6%
cube-mult 98.6%
unpow2 98.6%
times-frac 99.4%
+-commutative 99.4%
div-inv 99.4%
fma-define 99.4%
pow-flip 99.4%
metadata-eval 99.4%
Applied egg-rr 99.4%
associate-*l/ 99.4%
*-lft-identity 99.4%
Simplified 99.4%
Taylor expanded in x around inf 99.4%
associate-*r/ 99.4%
metadata-eval 99.4%
Simplified 99.4%
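Alternative 2 evaluates the same two-term series as Alternative 1, just with nested divisions in place of the fma:

\begin{array}{l}
\\
\frac{\frac{2 + \frac{2}{x^2}}{x^2}}{x} = \frac{2}{x^3} + \frac{2}{x^5}
\end{array}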
Alternative 3 (99.0% accurate):

(FPCore (x) :precision binary64 (* 2.0 (pow x -3.0)))
double code(double x) {
return 2.0 * pow(x, -3.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 * (x ** (-3.0d0))
end function
public static double code(double x) {
return 2.0 * Math.pow(x, -3.0);
}
def code(x): return 2.0 * math.pow(x, -3.0)
function code(x) return Float64(2.0 * (x ^ -3.0)) end
function tmp = code(x) tmp = 2.0 * (x ^ -3.0); end
code[x_] := N[(2.0 * N[Power[x, -3.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot {x}^{-3}
\end{array}
Derivation:
Initial program 73.8%
Taylor expanded in x around inf 98.1%
div-inv 98.1%
pow-flip 99.0%
metadata-eval 99.0%
Applied egg-rr 99.0%
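Alternative 3 keeps only the leading term of the series around infinity, dropping the 2/x^5 correction that Alternatives 1 and 2 retain; expanding the exact value shows what is discarded:

\begin{array}{l}
\\
\frac{2}{x \cdot \left(x^2 - 1\right)} = \frac{2}{x^3} \cdot \frac{1}{1 - x^{-2}} = \frac{2}{x^3} + \frac{2}{x^5} + \frac{2}{x^7} + \cdots
\end{array}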
Alternative 4 (76.1% accurate):

(FPCore (x) :precision binary64 (/ (/ 1.0 x) (* x (+ x -1.0))))
double code(double x) {
return (1.0 / x) / (x * (x + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / x) / (x * (x + (-1.0d0)))
end function
public static double code(double x) {
return (1.0 / x) / (x * (x + -1.0));
}
def code(x): return (1.0 / x) / (x * (x + -1.0))
function code(x) return Float64(Float64(1.0 / x) / Float64(x * Float64(x + -1.0))) end
function tmp = code(x) tmp = (1.0 / x) / (x * (x + -1.0)); end
code[x_] := N[(N[(1.0 / x), $MachinePrecision] / N[(x * N[(x + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1}{x}}{x \cdot \left(x + -1\right)}
\end{array}
Derivation:
Initial program 73.8%
Taylor expanded in x around inf 72.7%
associate-*r/ 72.7%
neg-mul-1 72.7%
distribute-neg-in 72.7%
metadata-eval 72.7%
unsub-neg 72.7%
Simplified 72.7%
+-commutative 72.7%
frac-add 72.7%
*-un-lft-identity 72.7%
sub-neg 72.7%
metadata-eval 72.7%
sub-neg 72.7%
neg-mul-1 72.7%
div-inv 72.7%
sub-neg 72.7%
metadata-eval 72.7%
Applied egg-rr 72.7%
Taylor expanded in x around 0 76.1%
Final simplification 76.1%
Alternative 5 (72.4% accurate):

(FPCore (x) :precision binary64 (+ (/ -1.0 x) (/ 1.0 (+ x -1.0))))
double code(double x) {
return (-1.0 / x) + (1.0 / (x + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((-1.0d0) / x) + (1.0d0 / (x + (-1.0d0)))
end function
public static double code(double x) {
return (-1.0 / x) + (1.0 / (x + -1.0));
}
def code(x): return (-1.0 / x) + (1.0 / (x + -1.0))
function code(x) return Float64(Float64(-1.0 / x) + Float64(1.0 / Float64(x + -1.0))) end
function tmp = code(x) tmp = (-1.0 / x) + (1.0 / (x + -1.0)); end
code[x_] := N[(N[(-1.0 / x), $MachinePrecision] + N[(1.0 / N[(x + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x} + \frac{1}{x + -1}
\end{array}
Derivation:
Initial program 73.8%
Taylor expanded in x around inf 72.4%
Final simplification 72.4%
Alternative 6 (5.0% accurate):

(FPCore (x) :precision binary64 (/ -2.0 x))
double code(double x) {
return -2.0 / x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-2.0d0) / x
end function
public static double code(double x) {
return -2.0 / x;
}
def code(x): return -2.0 / x
function code(x) return Float64(-2.0 / x) end
function tmp = code(x) tmp = -2.0 / x; end
code[x_] := N[(-2.0 / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{-2}{x}
\end{array}
Derivation:
Initial program 73.8%
Taylor expanded in x around 0 5.0%
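Alternative 6 comes from the expansion around 0: for small |x| the factor x^2 - 1 is close to -1, so the exact value approaches -2/x:

\begin{array}{l}
\\
\frac{2}{x \cdot \left(x^2 - 1\right)} = \frac{-2}{x} \cdot \frac{1}{1 - x^2} = \frac{-2}{x} - 2 \cdot x - 2 \cdot x^3 - \cdots
\end{array}

Since the precondition restricts inputs to |x| > 1, this approximation rarely applies to sampled points, which is consistent with its 5.0% accuracy.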
Alternative 7 (3.4% accurate):

(FPCore (x) :precision binary64 1.0)
double code(double x) {
return 1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
public static double code(double x) {
return 1.0;
}
def code(x): return 1.0
function code(x) return 1.0 end
function tmp = code(x) tmp = 1.0; end
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Derivation:
Initial program 73.8%
Taylor expanded in x around 0 3.4%
Taylor expanded in x around inf 3.4%
Developer target:

(FPCore (x) :precision binary64 (/ 2.0 (* x (- (* x x) 1.0))))
double code(double x) {
return 2.0 / (x * ((x * x) - 1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (x * ((x * x) - 1.0d0))
end function
public static double code(double x) {
return 2.0 / (x * ((x * x) - 1.0));
}
def code(x): return 2.0 / (x * ((x * x) - 1.0))
function code(x) return Float64(2.0 / Float64(x * Float64(Float64(x * x) - 1.0))) end
function tmp = code(x) tmp = 2.0 / (x * ((x * x) - 1.0)); end
code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{x \cdot \left(x \cdot x - 1\right)}
\end{array}
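The target is algebraically identical to the original expression, so any difference in output is pure rounding. A quick sketch (again not part of the report) that confirms the identity in exact rational arithmetic at a few points satisfying the |x| > 1 precondition:

```python
from fractions import Fraction

def original_exact(q):
    return (1 / (q + 1) - 2 / q) + 1 / (q - 1)

def target_exact(q):
    return 2 / (q * (q * q - 1))

# spot-check on rationals with |x| > 1 (the FPCore precondition)
for q in (Fraction(3, 2), Fraction(-7, 3), Fraction(1001, 10)):
    assert original_exact(q) == target_exact(q)
print("identity holds at all sampled points")
```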
Reproduce:

herbie shell --seed 2024096
(FPCore (x)
:name "3frac (problem 3.3.3)"
:precision binary64
:pre (> (fabs x) 1.0)
:alt
(/ 2.0 (* x (- (* x x) 1.0)))
(+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))