
; Herbie input program, rendered below in several languages and LaTeX:
; 1/(x+1) - 2/x + 1/(x-1), evaluated in binary64.
(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
/* C */
double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
// Java
public static double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
# Python
def code(x): return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
# Julia (each operation explicitly rounded to Float64)
function code(x) return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0))) end
% MATLAB
function tmp = code(x) tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0)); end
(* Mathematica *)
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative: identical to the input program,
; 1/(x+1) - 2/x + 1/(x-1) in binary64.
(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
/* C */
double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
// Java
public static double code(double x) {
return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
# Python
def code(x): return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
# Julia (each operation explicitly rounded to Float64)
function code(x) return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0))) end
% MATLAB
function tmp = code(x) tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0)); end
(* Mathematica *)
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}
; Alternative: (2 - 2*(-1/(x*x))) * x^-3, i.e. (2 + 2/x^2) / x^3.
(FPCore (x) :precision binary64 (* (- 2.0 (* 2.0 (/ -1.0 (* x x)))) (pow x -3.0)))
/* C */
double code(double x) {
return (2.0 - (2.0 * (-1.0 / (x * x)))) * pow(x, -3.0);
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = (2.0d0 - (2.0d0 * ((-1.0d0) / (x * x)))) * (x ** (-3.0d0))
end function
// Java
public static double code(double x) {
return (2.0 - (2.0 * (-1.0 / (x * x)))) * Math.pow(x, -3.0);
}
# Python
def code(x): return (2.0 - (2.0 * (-1.0 / (x * x)))) * math.pow(x, -3.0)
# Julia (each operation explicitly rounded to Float64)
function code(x) return Float64(Float64(2.0 - Float64(2.0 * Float64(-1.0 / Float64(x * x)))) * (x ^ -3.0)) end
% MATLAB
function tmp = code(x) tmp = (2.0 - (2.0 * (-1.0 / (x * x)))) * (x ^ -3.0); end
(* Mathematica *)
code[x_] := N[(N[(2.0 - N[(2.0 * N[(-1.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Power[x, -3.0], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\left(2 - 2 \cdot \frac{-1}{x \cdot x}\right) \cdot {x}^{-3}
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
Taylor expanded in x around inf 99.4%
associate-*r/ 99.4%
metadata-eval 99.4%
Simplified 99.4%
div-inv 99.4%
+-commutative 99.4%
div-inv 99.4%
fma-define 99.4%
pow-flip 99.4%
metadata-eval 99.4%
pow-flip 100.0%
metadata-eval 100.0%
Applied egg-rr 100.0%
fma-undefine 100.0%
Applied egg-rr 100.0%
metadata-eval 100.0%
pow-prod-up 100.0%
inv-pow 100.0%
inv-pow 100.0%
frac-2neg 100.0%
metadata-eval 100.0%
frac-times 100.0%
metadata-eval 100.0%
Applied egg-rr 100.0%
Final simplification 100.0%
; Alternative: 2 * x^-3.
(FPCore (x) :precision binary64 (* 2.0 (pow x -3.0)))
/* C */
double code(double x) {
return 2.0 * pow(x, -3.0);
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 * (x ** (-3.0d0))
end function
// Java
public static double code(double x) {
return 2.0 * Math.pow(x, -3.0);
}
# Python
def code(x): return 2.0 * math.pow(x, -3.0)
# Julia
function code(x) return Float64(2.0 * (x ^ -3.0)) end
% MATLAB
function tmp = code(x) tmp = 2.0 * (x ^ -3.0); end
(* Mathematica *)
code[x_] := N[(2.0 * N[Power[x, -3.0], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
2 \cdot {x}^{-3}
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
Taylor expanded in x around inf 99.4%
associate-*r/ 99.4%
metadata-eval 99.4%
Simplified 99.4%
div-inv 99.4%
+-commutative 99.4%
div-inv 99.4%
fma-define 99.4%
pow-flip 99.4%
metadata-eval 99.4%
pow-flip 100.0%
metadata-eval 100.0%
Applied egg-rr 100.0%
Taylor expanded in x around inf 99.4%
; Alternative: -2 / (x * ((-1 + x) * (-1 - x))), an exact
; common-denominator form of the input (equals 2 / (x^3 - x)).
(FPCore (x) :precision binary64 (/ -2.0 (* x (* (+ -1.0 x) (- -1.0 x)))))
/* C */
double code(double x) {
return -2.0 / (x * ((-1.0 + x) * (-1.0 - x)));
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = (-2.0d0) / (x * (((-1.0d0) + x) * ((-1.0d0) - x)))
end function
// Java
public static double code(double x) {
return -2.0 / (x * ((-1.0 + x) * (-1.0 - x)));
}
# Python
def code(x): return -2.0 / (x * ((-1.0 + x) * (-1.0 - x)))
# Julia
function code(x) return Float64(-2.0 / Float64(x * Float64(Float64(-1.0 + x) * Float64(-1.0 - x)))) end
% MATLAB
function tmp = code(x) tmp = -2.0 / (x * ((-1.0 + x) * (-1.0 - x))); end
(* Mathematica *)
code[x_] := N[(-2.0 / N[(x * N[(N[(-1.0 + x), $MachinePrecision] * N[(-1.0 - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\frac{-2}{x \cdot \left(\left(-1 + x\right) \cdot \left(-1 - x\right)\right)}
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
+-commutative 65.7%
associate-+l- 65.6%
Applied egg-rr 65.6%
frac-sub 17.1%
*-un-lft-identity 17.1%
Applied egg-rr 17.1%
*-rgt-identity 17.1%
associate--l+ 17.1%
*-commutative 17.1%
Simplified 17.1%
frac-sub 19.1%
fmm-def 19.1%
associate--r- 19.1%
metadata-eval 19.1%
Applied egg-rr 19.1%
fmm-undef 19.1%
+-lft-identity 19.1%
Simplified 19.1%
Taylor expanded in x around 0 99.4%
Final simplification 99.4%
; Alternative: (1 + 1/x)/x + (-1/x).
(FPCore (x) :precision binary64 (+ (/ (+ 1.0 (/ 1.0 x)) x) (/ -1.0 x)))
/* C */
double code(double x) {
return ((1.0 + (1.0 / x)) / x) + (-1.0 / x);
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = ((1.0d0 + (1.0d0 / x)) / x) + ((-1.0d0) / x)
end function
// Java
public static double code(double x) {
return ((1.0 + (1.0 / x)) / x) + (-1.0 / x);
}
# Python
def code(x): return ((1.0 + (1.0 / x)) / x) + (-1.0 / x)
# Julia
function code(x) return Float64(Float64(Float64(1.0 + Float64(1.0 / x)) / x) + Float64(-1.0 / x)) end
% MATLAB
function tmp = code(x) tmp = ((1.0 + (1.0 / x)) / x) + (-1.0 / x); end
(* Mathematica *)
code[x_] := N[(N[(N[(1.0 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\frac{1 + \frac{1}{x}}{x} + \frac{-1}{x}
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
Taylor expanded in x around inf 65.1%
Taylor expanded in x around inf 64.8%
; Alternative: -1/x + 1/(-1 + x).
(FPCore (x) :precision binary64 (+ (/ -1.0 x) (/ 1.0 (+ -1.0 x))))
/* C */
double code(double x) {
return (-1.0 / x) + (1.0 / (-1.0 + x));
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = ((-1.0d0) / x) + (1.0d0 / ((-1.0d0) + x))
end function
// Java
public static double code(double x) {
return (-1.0 / x) + (1.0 / (-1.0 + x));
}
# Python
def code(x): return (-1.0 / x) + (1.0 / (-1.0 + x))
# Julia
function code(x) return Float64(Float64(-1.0 / x) + Float64(1.0 / Float64(-1.0 + x))) end
% MATLAB
function tmp = code(x) tmp = (-1.0 / x) + (1.0 / (-1.0 + x)); end
(* Mathematica *)
code[x_] := N[(N[(-1.0 / x), $MachinePrecision] + N[(1.0 / N[(-1.0 + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\frac{-1}{x} + \frac{1}{-1 + x}
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
Taylor expanded in x around inf 64.8%
Final simplification 64.8%
; Alternative: -2/x - (-2/x).
; NOTE(review): this cancels to 0 for every finite nonzero x
; (and is NaN at x = 0 and at +/-inf).
(FPCore (x) :precision binary64 (- (/ -2.0 x) (/ -2.0 x)))
/* C */
double code(double x) {
return (-2.0 / x) - (-2.0 / x);
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = ((-2.0d0) / x) - ((-2.0d0) / x)
end function
// Java
public static double code(double x) {
return (-2.0 / x) - (-2.0 / x);
}
# Python
def code(x): return (-2.0 / x) - (-2.0 / x)
# Julia
function code(x) return Float64(Float64(-2.0 / x) - Float64(-2.0 / x)) end
% MATLAB
function tmp = code(x) tmp = (-2.0 / x) - (-2.0 / x); end
(* Mathematica *)
code[x_] := N[(N[(-2.0 / x), $MachinePrecision] - N[(-2.0 / x), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\frac{-2}{x} - \frac{-2}{x}
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
+-commutative 65.7%
associate-+l- 65.6%
Applied egg-rr 65.6%
frac-sub 17.1%
*-un-lft-identity 17.1%
Applied egg-rr 17.1%
*-rgt-identity 17.1%
associate--l+ 17.1%
*-commutative 17.1%
Simplified 17.1%
Taylor expanded in x around inf 64.5%
; Alternative: -1 + (x - 2)/x.
(FPCore (x) :precision binary64 (+ -1.0 (/ (- x 2.0) x)))
/* C */
double code(double x) {
return -1.0 + ((x - 2.0) / x);
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) + ((x - 2.0d0) / x)
end function
// Java
public static double code(double x) {
return -1.0 + ((x - 2.0) / x);
}
# Python
def code(x): return -1.0 + ((x - 2.0) / x)
# Julia
function code(x) return Float64(-1.0 + Float64(Float64(x - 2.0) / x)) end
% MATLAB
function tmp = code(x) tmp = -1.0 + ((x - 2.0) / x); end
(* Mathematica *)
code[x_] := N[(-1.0 + N[(N[(x - 2.0), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
-1 + \frac{x - 2}{x}
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
Taylor expanded in x around 0 3.4%
Taylor expanded in x around 0 64.4%
; Alternative: -1/x.
(FPCore (x) :precision binary64 (/ -1.0 x))
/* C */
double code(double x) {
return -1.0 / x;
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) / x
end function
// Java
public static double code(double x) {
return -1.0 / x;
}
# Python
def code(x): return -1.0 / x
# Julia
function code(x) return Float64(-1.0 / x) end
% MATLAB
function tmp = code(x) tmp = -1.0 / x; end
(* Mathematica *)
code[x_] := N[(-1.0 / x), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\frac{-1}{x}
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
Taylor expanded in x around 0 3.4%
Taylor expanded in x around inf 3.4%
Taylor expanded in x around 0 5.0%
; Alternative: -2/x.
(FPCore (x) :precision binary64 (/ -2.0 x))
/* C */
double code(double x) {
return -2.0 / x;
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = (-2.0d0) / x
end function
// Java
public static double code(double x) {
return -2.0 / x;
}
# Python
def code(x): return -2.0 / x
# Julia
function code(x) return Float64(-2.0 / x) end
% MATLAB
function tmp = code(x) tmp = -2.0 / x; end
(* Mathematica *)
code[x_] := N[(-2.0 / x), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\frac{-2}{x}
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
Taylor expanded in x around 0 5.0%
; Alternative: the constant 1.0 (ignores x).
(FPCore (x) :precision binary64 1.0)
/* C */
double code(double x) {
return 1.0;
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
// Java
public static double code(double x) {
return 1.0;
}
# Python
def code(x): return 1.0
# Julia
function code(x) return 1.0 end
% MATLAB
function tmp = code(x) tmp = 1.0; end
(* Mathematica *)
code[x_] := 1.0
% LaTeX rendering
\begin{array}{l}
\\
1
\end{array}
Initial program 65.7%
+-commutative 65.7%
associate-+r- 65.6%
sub-neg 65.6%
remove-double-neg 65.6%
neg-sub0 65.6%
associate-+l- 65.6%
neg-sub0 65.6%
distribute-neg-frac2 65.6%
distribute-frac-neg2 65.6%
associate-+r+ 65.7%
+-commutative 65.7%
remove-double-neg 65.7%
distribute-neg-frac2 65.7%
sub0-neg 65.7%
associate-+l- 65.7%
neg-sub0 65.7%
Simplified 65.7%
Taylor expanded in x around 0 3.4%
Taylor expanded in x around inf 3.4%
; Alternative: 2 / (x * (x*x - 1)), an exact common-denominator form of
; the input; matches the :alt target given in the problem specification.
(FPCore (x) :precision binary64 (/ 2.0 (* x (- (* x x) 1.0))))
/* C */
double code(double x) {
return 2.0 / (x * ((x * x) - 1.0));
}
! Fortran (real(8))
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (x * ((x * x) - 1.0d0))
end function
// Java
public static double code(double x) {
return 2.0 / (x * ((x * x) - 1.0));
}
# Python
def code(x): return 2.0 / (x * ((x * x) - 1.0))
# Julia
function code(x) return Float64(2.0 / Float64(x * Float64(Float64(x * x) - 1.0))) end
% MATLAB
function tmp = code(x) tmp = 2.0 / (x * ((x * x) - 1.0)); end
(* Mathematica *)
code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering
\begin{array}{l}
\\
\frac{2}{x \cdot \left(x \cdot x - 1\right)}
\end{array}
herbie shell --seed 2024151
; Problem specification fed to the Herbie shell (seed on the line above):
; "3frac (problem 3.3.3)", binary64, precondition |x| > 1, with a
; target (:alt) rewrite 2 / (x * (x*x - 1)).
(FPCore (x)
:name "3frac (problem 3.3.3)"
:precision binary64
:pre (> (fabs x) 1.0)
:alt
(! :herbie-platform default (/ 2 (* x (- (* x x) 1))))
(+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))