
Initial program: 66.7% accurate

FPCore:
(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))

C:
double code(double x) {
    return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function

Java:
public static double code(double x) {
    return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}

Python:
def code(x): return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))

Julia:
function code(x) return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0))) end

MATLAB:
function tmp = code(x) tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0)); end

Wolfram:
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

TeX:
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
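
For reference (this worked derivation is ours, not part of the report), combining the three fractions over the common denominator x(x + 1)(x - 1) shows the rational function that the alternatives below are rearranging:

\begin{align*}
\frac{1}{x + 1} - \frac{2}{x} + \frac{1}{x - 1}
&= \frac{x(x - 1) - 2\left(x^2 - 1\right) + x(x + 1)}{x\left(x^2 - 1\right)} \\
&= \frac{2}{x\left(x^2 - 1\right)}
= \frac{-2}{x\,(1 - x)(1 + x)}.
\end{align*}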
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| Alternative 1 | 99.8% | |
| Alternative 2 | 99.6% | |
| Alternative 3 | 99.8% | |
| Alternative 4 | 66.2% | |
| Alternative 5 | 5.1% | |
Alternative 1: 99.8% accurate

FPCore:
(FPCore (x) :precision binary64 (/ (/ -2.0 (* (- 1.0 x) (+ 1.0 x))) x))

C:
double code(double x) {
    return (-2.0 / ((1.0 - x) * (1.0 + x))) / x;
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((-2.0d0) / ((1.0d0 - x) * (1.0d0 + x))) / x
end function

Java:
public static double code(double x) {
    return (-2.0 / ((1.0 - x) * (1.0 + x))) / x;
}

Python:
def code(x): return (-2.0 / ((1.0 - x) * (1.0 + x))) / x

Julia:
function code(x) return Float64(Float64(-2.0 / Float64(Float64(1.0 - x) * Float64(1.0 + x))) / x) end

MATLAB:
function tmp = code(x) tmp = (-2.0 / ((1.0 - x) * (1.0 + x))) / x; end

Wolfram:
code[x_] := N[(N[(-2.0 / N[(N[(1.0 - x), $MachinePrecision] * N[(1.0 + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]

TeX:
\frac{\frac{-2}{\left(1 - x\right) \cdot \left(1 + x\right)}}{x}
Derivation:
Initial program: 66.7%
sub-neg: 66.7%
distribute-neg-frac: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
associate-/r*: 66.7%
metadata-eval: 66.7%
neg-mul-1: 66.7%
+-commutative: 66.7%
associate-+l+: 66.6%
+-commutative: 66.6%
neg-mul-1: 66.6%
metadata-eval: 66.6%
associate-/r*: 66.6%
metadata-eval: 66.6%
metadata-eval: 66.6%
+-commutative: 66.6%
+-commutative: 66.6%
Simplified: 66.6%
+-commutative: 66.6%
frac-add: 17.8%
frac-add: 17.3%
*-un-lft-identity: 17.3%
*-commutative: 17.3%
neg-mul-1: 17.3%
distribute-neg-in: 17.3%
metadata-eval: 17.3%
+-commutative: 17.3%
+-commutative: 17.3%
Applied egg-rr: 17.3%
Taylor expanded in x around 0: 99.6%
associate-/r*: 99.8%
div-inv: 99.8%
Applied egg-rr: 99.8%
associate-*r/: 99.8%
*-rgt-identity: 99.8%
*-commutative: 99.8%
Simplified: 99.8%
Final simplification: 99.8%
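
As a quick sanity check (ours, not part of the Herbie report; the function names are invented for the example), the following Python snippet compares the initial program and Alternative 1 against an exact rational reference at a large input. The three fractions in the initial program each carry about 16 significant digits and then cancel almost completely, so the result is mostly rounding noise, while Alternative 1 stays accurate:

from fractions import Fraction

def initial(x):
    # initial program: the ~1e-8 terms cancel down to ~1e-24
    return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))

def alt1(x):
    # Alternative 1: no cancellation, only benign roundings
    return (-2.0 / ((1.0 - x) * (1.0 + x))) / x

def exact(x):
    # reference: 2 / (x (x^2 - 1)) in exact rational arithmetic
    q = Fraction(x)
    return float(2 / (q * (q * q - 1)))

x = 1e8
print(initial(x))  # rounding noise on the order of 1e-24; relative error near 100%
print(alt1(x))     # agrees with the reference to nearly full double precision
print(exact(x))    # correctly rounded true value, about 2e-24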
Alternative 2: 99.6% accurate

FPCore:
(FPCore (x) :precision binary64 (/ -2.0 (* x (* (- 1.0 x) (+ 1.0 x)))))

C:
double code(double x) {
    return -2.0 / (x * ((1.0 - x) * (1.0 + x)));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = (-2.0d0) / (x * ((1.0d0 - x) * (1.0d0 + x)))
end function

Java:
public static double code(double x) {
    return -2.0 / (x * ((1.0 - x) * (1.0 + x)));
}

Python:
def code(x): return -2.0 / (x * ((1.0 - x) * (1.0 + x)))

Julia:
function code(x) return Float64(-2.0 / Float64(x * Float64(Float64(1.0 - x) * Float64(1.0 + x)))) end

MATLAB:
function tmp = code(x) tmp = -2.0 / (x * ((1.0 - x) * (1.0 + x))); end

Wolfram:
code[x_] := N[(-2.0 / N[(x * N[(N[(1.0 - x), $MachinePrecision] * N[(1.0 + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

TeX:
\frac{-2}{x \cdot \left(\left(1 - x\right) \cdot \left(1 + x\right)\right)}
Derivation:
Initial program: 66.7%
sub-neg: 66.7%
distribute-neg-frac: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
associate-/r*: 66.7%
metadata-eval: 66.7%
neg-mul-1: 66.7%
+-commutative: 66.7%
associate-+l+: 66.6%
+-commutative: 66.6%
neg-mul-1: 66.6%
metadata-eval: 66.6%
associate-/r*: 66.6%
metadata-eval: 66.6%
metadata-eval: 66.6%
+-commutative: 66.6%
+-commutative: 66.6%
Simplified: 66.6%
+-commutative: 66.6%
frac-add: 17.8%
frac-add: 17.3%
*-un-lft-identity: 17.3%
*-commutative: 17.3%
neg-mul-1: 17.3%
distribute-neg-in: 17.3%
metadata-eval: 17.3%
+-commutative: 17.3%
+-commutative: 17.3%
Applied egg-rr: 17.3%
Taylor expanded in x around 0: 99.6%
Final simplification: 99.6%
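
Alternative 2 differs from Alternative 1 only in where the division by x happens, and the grouping matters at the extremes of the domain. A small experiment (ours, not Herbie's) shows that the fully multiplied denominator x * ((1 - x) * (1 + x)) can overflow to infinity while Alternative 1's staged division still returns a nonzero subnormal result; this is one plausible contributor to the small accuracy gap between the two:

x = 1e105
alt1 = (-2.0 / ((1.0 - x) * (1.0 + x))) / x   # intermediate (1-x)*(1+x) is ~ -1e210, still finite; result ~2e-315
alt2 = -2.0 / (x * ((1.0 - x) * (1.0 + x)))   # denominator ~ -1e315 overflows to -inf, so the quotient is 0.0
print(alt1, alt2)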
Alternative 3: 99.8% accurate

FPCore:
(FPCore (x) :precision binary64 (/ (/ -2.0 x) (* (- 1.0 x) (+ 1.0 x))))

C:
double code(double x) {
    return (-2.0 / x) / ((1.0 - x) * (1.0 + x));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((-2.0d0) / x) / ((1.0d0 - x) * (1.0d0 + x))
end function

Java:
public static double code(double x) {
    return (-2.0 / x) / ((1.0 - x) * (1.0 + x));
}

Python:
def code(x): return (-2.0 / x) / ((1.0 - x) * (1.0 + x))

Julia:
function code(x) return Float64(Float64(-2.0 / x) / Float64(Float64(1.0 - x) * Float64(1.0 + x))) end

MATLAB:
function tmp = code(x) tmp = (-2.0 / x) / ((1.0 - x) * (1.0 + x)); end

Wolfram:
code[x_] := N[(N[(-2.0 / x), $MachinePrecision] / N[(N[(1.0 - x), $MachinePrecision] * N[(1.0 + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

TeX:
\frac{\frac{-2}{x}}{\left(1 - x\right) \cdot \left(1 + x\right)}
Derivation:
Initial program: 66.7%
sub-neg: 66.7%
distribute-neg-frac: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
associate-/r*: 66.7%
metadata-eval: 66.7%
neg-mul-1: 66.7%
+-commutative: 66.7%
associate-+l+: 66.6%
+-commutative: 66.6%
neg-mul-1: 66.6%
metadata-eval: 66.6%
associate-/r*: 66.6%
metadata-eval: 66.6%
metadata-eval: 66.6%
+-commutative: 66.6%
+-commutative: 66.6%
Simplified: 66.6%
+-commutative: 66.6%
frac-add: 17.8%
frac-add: 17.3%
*-un-lft-identity: 17.3%
*-commutative: 17.3%
neg-mul-1: 17.3%
distribute-neg-in: 17.3%
metadata-eval: 17.3%
+-commutative: 17.3%
+-commutative: 17.3%
Applied egg-rr: 17.3%
Taylor expanded in x around 0: 99.6%
associate-/r*: 99.8%
div-inv: 99.8%
Applied egg-rr: 99.8%
*-commutative: 99.8%
*-commutative: 99.8%
Simplified: 99.8%
associate-*r/: 99.8%
associate-*l/: 99.8%
metadata-eval: 99.8%
+-commutative: 99.8%
Applied egg-rr: 99.8%
Final simplification: 99.8%
Alternative 4: 66.2% accurate

FPCore:
(FPCore (x) :precision binary64 (+ (/ -2.0 x) (/ 2.0 x)))

C:
double code(double x) {
    return (-2.0 / x) + (2.0 / x);
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((-2.0d0) / x) + (2.0d0 / x)
end function

Java:
public static double code(double x) {
    return (-2.0 / x) + (2.0 / x);
}

Python:
def code(x): return (-2.0 / x) + (2.0 / x)

Julia:
function code(x) return Float64(Float64(-2.0 / x) + Float64(2.0 / x)) end

MATLAB:
function tmp = code(x) tmp = (-2.0 / x) + (2.0 / x); end

Wolfram:
code[x_] := N[(N[(-2.0 / x), $MachinePrecision] + N[(2.0 / x), $MachinePrecision]), $MachinePrecision]

TeX:
\frac{-2}{x} + \frac{2}{x}
Derivation:
Initial program: 66.7%
sub-neg: 66.7%
distribute-neg-frac: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
associate-/r*: 66.7%
metadata-eval: 66.7%
neg-mul-1: 66.7%
+-commutative: 66.7%
associate-+l+: 66.6%
+-commutative: 66.6%
neg-mul-1: 66.6%
metadata-eval: 66.6%
associate-/r*: 66.6%
metadata-eval: 66.6%
metadata-eval: 66.6%
+-commutative: 66.6%
+-commutative: 66.6%
Simplified: 66.6%
Taylor expanded in x around inf: 66.2%
Final simplification: 66.2%
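
Alternative 4 is what remains when the series at infinity is truncated after the 1/x terms; as the expansion below (our annotation, not Herbie's) makes explicit, those terms cancel exactly, so the rewrite misses the true 2/x^3 tail:

\frac{1}{x + 1} - \frac{2}{x} + \frac{1}{x - 1}
= \left(\frac{1}{x} - \frac{1}{x^2} + \cdots\right) - \frac{2}{x} + \left(\frac{1}{x} + \frac{1}{x^2} + \cdots\right)
= \frac{2}{x^3} + \frac{2}{x^5} + \cdots

In binary64, (-2.0 / x) + (2.0 / x) evaluates to exactly 0.0 whenever 2.0 / x is finite, because correctly rounded division is sign-symmetric; that is consistent with the drop to 66.2% accuracy.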
Alternative 5: 5.1% accurate

FPCore:
(FPCore (x) :precision binary64 (/ -2.0 x))

C:
double code(double x) {
    return -2.0 / x;
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = (-2.0d0) / x
end function

Java:
public static double code(double x) {
    return -2.0 / x;
}

Python:
def code(x): return -2.0 / x

Julia:
function code(x) return Float64(-2.0 / x) end

MATLAB:
function tmp = code(x) tmp = -2.0 / x; end

Wolfram:
code[x_] := N[(-2.0 / x), $MachinePrecision]

TeX:
\frac{-2}{x}
Derivation:
Initial program: 66.7%
sub-neg: 66.7%
distribute-neg-frac: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
metadata-eval: 66.7%
associate-/r*: 66.7%
metadata-eval: 66.7%
neg-mul-1: 66.7%
+-commutative: 66.7%
associate-+l+: 66.6%
+-commutative: 66.6%
neg-mul-1: 66.6%
metadata-eval: 66.6%
associate-/r*: 66.6%
metadata-eval: 66.6%
metadata-eval: 66.6%
+-commutative: 66.6%
+-commutative: 66.6%
Simplified: 66.6%
Taylor expanded in x around 0: 5.1%
Final simplification: 5.1%
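
Alternative 5 keeps only the leading term of the Taylor series at 0 (the expansion below is our annotation). Since the precondition in the reproduction block restricts sampling to |x| > 1, where this series does not even converge, the 5.1% accuracy is unsurprising:

\frac{1}{x + 1} - \frac{2}{x} + \frac{1}{x - 1}
= -\frac{2}{x} - 2x - 2x^3 - \cdots \qquad \left(0 < |x| < 1\right)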
Developer target (the :herbie-target from the input):

FPCore:
(FPCore (x) :precision binary64 (/ 2.0 (* x (- (* x x) 1.0))))

C:
double code(double x) {
    return 2.0 / (x * ((x * x) - 1.0));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = 2.0d0 / (x * ((x * x) - 1.0d0))
end function

Java:
public static double code(double x) {
    return 2.0 / (x * ((x * x) - 1.0));
}

Python:
def code(x): return 2.0 / (x * ((x * x) - 1.0))

Julia:
function code(x) return Float64(2.0 / Float64(x * Float64(Float64(x * x) - 1.0))) end

MATLAB:
function tmp = code(x) tmp = 2.0 / (x * ((x * x) - 1.0)); end

Wolfram:
code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

TeX:
\frac{2}{x \cdot \left(x \cdot x - 1\right)}
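
For the record, the developer target is the same rational function as Alternatives 1 through 3, written with the opposite sign arrangement:

\frac{2}{x\left(x^2 - 1\right)} = \frac{-2}{x\,(1 - x)(1 + x)},

since (1 - x)(1 + x) = -(x^2 - 1).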
Reproduce:

herbie shell --seed 2024013
(FPCore (x)
:name "3frac (problem 3.3.3)"
:precision binary64
:pre (> (fabs x) 1.0)
:herbie-target
(/ 2.0 (* x (- (* x x) 1.0)))
(+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
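
To rerun the analysis, start herbie shell with the seed above and enter this FPCore at the prompt; the :pre clause restricts sampling to |x| > 1, and :herbie-target records the developer-supplied reference implementation.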