
Initial program (81.6% accurate):

FPCore:
(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))

C:
double code(double x) {
    return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function

Java:
public static double code(double x) {
    return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}

Python:
def code(x): return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))

Julia:
function code(x)
    return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0)))
end

MATLAB:
function tmp = code(x)
    tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
end

Wolfram:
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

TeX:
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
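To see where the initial program loses accuracy, here is a small sketch (mine, not part of the report) that evaluates it in binary64 and against exact rational arithmetic; the stress point x = 1e8 is an illustrative choice. For large |x| the three terms are each O(1/x) while their true sum is O(1/x^3), so the subtractions cancel nearly all significant bits.

from fractions import Fraction

def naive(x):
    # Literal transcription of the initial program above.
    return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))

def exact(x):
    # Same formula over exact rationals; Fraction(x) is the exact
    # value of the double x, so nothing rounds until we compare.
    x = Fraction(x)
    return (1 / (x + 1) - 2 / x) + 1 / (x - 1)

x = 1e8
ref = exact(x)
print(naive(x))                                      # mostly rounding noise
print(float(ref))                                    # true value, about 2e-24
print(float(abs((Fraction(naive(x)) - ref) / ref)))  # large relative error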
Sampling outcomes in binary64 precision: (the outcome table was not preserved)
Herbie found 5 alternatives:

| Alternative | Accuracy | Speedup |
|---|---|---|
| Alternative 1 | 99.8% | |
| Alternative 2 | 99.7% | |
| Alternative 3 | 79.2% | |
| Alternative 4 | 48.3% | |
| Alternative 5 | 3.4% | |

(Accuracy figures are taken from each derivation's final step; the speedup values were not preserved.)
Alternative 1 (99.8% accurate):

FPCore:
(FPCore (x) :precision binary64 (/ (/ -2.0 (- (* x x) x)) (fma x -1.0 -1.0)))

C:
double code(double x) {
    return (-2.0 / ((x * x) - x)) / fma(x, -1.0, -1.0);
}

Julia:
function code(x)
    return Float64(Float64(-2.0 / Float64(Float64(x * x) - x)) / fma(x, -1.0, -1.0))
end

Wolfram:
code[x_] := N[(N[(-2.0 / N[(N[(x * x), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision] / N[(x * -1.0 + -1.0), $MachinePrecision]), $MachinePrecision]

TeX:
\frac{\frac{-2}{x \cdot x - x}}{\mathsf{fma}\left(x, -1, -1\right)}

(This alternative is rendered only in languages with a native fma.)
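In Alternative 1, fma(x, -1.0, -1.0) computes -x - 1 with a single rounding. A hedged Python sketch of this form: math.fma exists only in Python 3.13+, so the fallback below is an assumption for older interpreters, and it is harmless here because x * -1.0 introduces no rounding of its own.

import math

# Use the real fused multiply-add when available; a * b + c is
# bit-identical for b = -1.0 since the product is exact.
fma = getattr(math, "fma", lambda a, b, c: a * b + c)

def alt1(x):
    return (-2.0 / ((x * x) - x)) / fma(x, -1.0, -1.0)

print(alt1(1e8))  # about 2e-24, with no catastrophic cancellation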
Derivation of Alternative 1:

Initial program: 81.6%
associate-+l-: 81.6%
sub-neg: 81.6%
neg-mul-1: 81.6%
metadata-eval: 81.6%
cancel-sign-sub-inv: 81.6%
+-commutative: 81.6%
*-lft-identity: 81.6%
sub-neg: 81.6%
metadata-eval: 81.6%
Simplified: 81.6%
frac-2neg: 81.6%
metadata-eval: 81.6%
frac-sub: 57.5%
frac-sub: 59.5%
Applied egg-rr: 59.2%
Simplified: 59.5%
Taylor expanded in x around 0: 99.7%
expm1-log1p-u: 73.1%
expm1-udef: 54.8%
Applied egg-rr: 54.8%
expm1-def: 73.1%
expm1-log1p: 99.7%
associate-/r*: 99.8%
Simplified: 99.8%
Final simplification: 99.8%
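The rewrite chain above amounts to putting the three fractions over the common denominator x(x + 1)(x - 1). Written out (my own algebra, consistent with the forms Herbie reaches):

\frac{1}{x + 1} - \frac{2}{x} + \frac{1}{x - 1}
  = \frac{x(x - 1) - 2(x + 1)(x - 1) + x(x + 1)}{x(x + 1)(x - 1)}
  = \frac{2}{x^{3} - x}

Alternative 1's denominator, (x^2 - x) \cdot \mathsf{fma}(x, -1, -1) = -(x^2 - x)(x + 1) = -(x^3 - x), so dividing -2 by it recovers 2/(x^3 - x).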
Alternative 2 (99.7% accurate):

FPCore:
(FPCore (x) :precision binary64 (/ -2.0 (* (- (* x x) x) (- -1.0 x))))

C:
double code(double x) {
    return -2.0 / (((x * x) - x) * (-1.0 - x));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = (-2.0d0) / (((x * x) - x) * ((-1.0d0) - x))
end function

Java:
public static double code(double x) {
    return -2.0 / (((x * x) - x) * (-1.0 - x));
}

Python:
def code(x): return -2.0 / (((x * x) - x) * (-1.0 - x))

Julia:
function code(x)
    return Float64(-2.0 / Float64(Float64(Float64(x * x) - x) * Float64(-1.0 - x)))
end

MATLAB:
function tmp = code(x)
    tmp = -2.0 / (((x * x) - x) * (-1.0 - x));
end

Wolfram:
code[x_] := N[(-2.0 / N[(N[(N[(x * x), $MachinePrecision] - x), $MachinePrecision] * N[(-1.0 - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

TeX:
\frac{-2}{\left(x \cdot x - x\right) \cdot \left(-1 - x\right)}
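A quick sanity check (mine, not from the report) that Alternative 2 is algebraically identical to the original away from the poles at x in {-1, 0, 1}, using exact rationals so no rounding can mask a mismatch; the sample points are arbitrary.

from fractions import Fraction

def original(x):
    return (1 / (x + 1) - 2 / x) + 1 / (x - 1)

def alt2(x):
    return -2 / ((x * x - x) * (-1 - x))

# Arbitrary rational sample points away from the poles.
for x in (Fraction(3, 7), Fraction(-5, 2), Fraction(12345, 678)):
    assert original(x) == alt2(x)
print("identical as exact rationals")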
Derivation of Alternative 2:

Initial program: 81.6%
associate-+l-: 81.6%
sub-neg: 81.6%
neg-mul-1: 81.6%
metadata-eval: 81.6%
cancel-sign-sub-inv: 81.6%
+-commutative: 81.6%
*-lft-identity: 81.6%
sub-neg: 81.6%
metadata-eval: 81.6%
Simplified: 81.6%
frac-2neg: 81.6%
metadata-eval: 81.6%
frac-sub: 57.5%
frac-sub: 59.5%
Applied egg-rr: 59.2%
Simplified: 59.5%
Taylor expanded in x around 0: 99.7%
pow1: 99.7%
Applied egg-rr: 99.7%
unpow1: 99.7%
fma-udef: 99.7%
*-commutative: 99.7%
+-commutative: 99.7%
neg-mul-1: 99.7%
unsub-neg: 99.7%
Simplified: 99.7%
Final simplification: 99.7%
Alternative 3 (79.2% accurate):

FPCore:
(FPCore (x) :precision binary64 (- 1.0 (- (/ 2.0 x) -1.0)))

C:
double code(double x) {
    return 1.0 - ((2.0 / x) - -1.0);
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = 1.0d0 - ((2.0d0 / x) - (-1.0d0))
end function

Java:
public static double code(double x) {
    return 1.0 - ((2.0 / x) - -1.0);
}

Python:
def code(x): return 1.0 - ((2.0 / x) - -1.0)

Julia:
function code(x)
    return Float64(1.0 - Float64(Float64(2.0 / x) - -1.0))
end

MATLAB:
function tmp = code(x)
    tmp = 1.0 - ((2.0 / x) - -1.0);
end

Wolfram:
code[x_] := N[(1.0 - N[(N[(2.0 / x), $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]

TeX:
1 - \left(\frac{2}{x} - -1\right)
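An aside (my own observation, not in the report): 1 - (2/x - -1) simplifies algebraically to -2/x, the same expression as Alternative 4, yet Herbie scores this form 79.2% against 48.3%. A plausible reason shows up at large |x|, where the true value 2/(x^3 - x) is far smaller than -2/x:

def alt3(x):
    return 1.0 - ((2.0 / x) - -1.0)

def alt4(x):
    return -2.0 / x

x = 1e20  # illustrative; 2/x is below half an ulp of 1.0 here
print(alt3(x))  # 0.0: 2/x is absorbed into 1.0, then cancelled away
print(alt4(x))  # -2e-20
# The true value 2/(x**3 - x) is about 2e-60 at this input, so 0.0
# is the closer answer, consistent with the higher score.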
Derivation of Alternative 3:

Initial program: 81.6%
associate-+l-: 81.6%
sub-neg: 81.6%
neg-mul-1: 81.6%
metadata-eval: 81.6%
cancel-sign-sub-inv: 81.6%
+-commutative: 81.6%
*-lft-identity: 81.6%
sub-neg: 81.6%
metadata-eval: 81.6%
Simplified: 81.6%
Taylor expanded in x around 0: 47.7%
Taylor expanded in x around 0: 79.2%
Final simplification: 79.2%
Alternative 4 (48.3% accurate):

FPCore:
(FPCore (x) :precision binary64 (/ -2.0 x))

C:
double code(double x) {
    return -2.0 / x;
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = (-2.0d0) / x
end function

Java:
public static double code(double x) {
    return -2.0 / x;
}

Python:
def code(x): return -2.0 / x

Julia:
function code(x)
    return Float64(-2.0 / x)
end

MATLAB:
function tmp = code(x)
    tmp = -2.0 / x;
end

Wolfram:
code[x_] := N[(-2.0 / x), $MachinePrecision]

TeX:
\frac{-2}{x}
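The derivation below attributes this form to a Taylor expansion around 0. Concretely (my own expansion, consistent with the target at the end of the report):

\frac{2}{x^{3} - x} = \frac{-2}{x} \cdot \frac{1}{1 - x^{2}} = \frac{-2}{x}\left(1 + x^{2} + x^{4} + \cdots\right) \approx \frac{-2}{x} \quad \text{for } |x| \ll 1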
Derivation of Alternative 4:

Initial program: 81.6%
associate-+l-: 81.6%
sub-neg: 81.6%
neg-mul-1: 81.6%
metadata-eval: 81.6%
cancel-sign-sub-inv: 81.6%
+-commutative: 81.6%
*-lft-identity: 81.6%
sub-neg: 81.6%
metadata-eval: 81.6%
Simplified: 81.6%
Taylor expanded in x around 0: 48.3%
Final simplification: 48.3%
Alternative 5 (3.4% accurate):

FPCore:
(FPCore (x) :precision binary64 -1.0)

C:
double code(double x) {
    return -1.0;
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = -1.0d0
end function

Java:
public static double code(double x) {
    return -1.0;
}

Python:
def code(x): return -1.0

Julia:
function code(x)
    return -1.0
end

MATLAB:
function tmp = code(x)
    tmp = -1.0;
end

Wolfram:
code[x_] := -1.0

TeX:
-1
Derivation of Alternative 5:

Initial program: 81.6%
associate-+l-: 81.6%
sub-neg: 81.6%
neg-mul-1: 81.6%
metadata-eval: 81.6%
cancel-sign-sub-inv: 81.6%
+-commutative: 81.6%
*-lft-identity: 81.6%
sub-neg: 81.6%
metadata-eval: 81.6%
Simplified: 81.6%
Taylor expanded in x around 0: 47.7%
Taylor expanded in x around inf: 3.4%
Final simplification: 3.4%
Developer target (from :herbie-target):

FPCore:
(FPCore (x) :precision binary64 (/ 2.0 (* x (- (* x x) 1.0))))

C:
double code(double x) {
    return 2.0 / (x * ((x * x) - 1.0));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = 2.0d0 / (x * ((x * x) - 1.0d0))
end function

Java:
public static double code(double x) {
    return 2.0 / (x * ((x * x) - 1.0));
}

Python:
def code(x): return 2.0 / (x * ((x * x) - 1.0))

Julia:
function code(x)
    return Float64(2.0 / Float64(x * Float64(Float64(x * x) - 1.0)))
end

MATLAB:
function tmp = code(x)
    tmp = 2.0 / (x * ((x * x) - 1.0));
end

Wolfram:
code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

TeX:
\frac{2}{x \cdot \left(x \cdot x - 1\right)}
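For comparison with the naive evaluation sketched earlier, here is the target form at the same illustrative stress point (again my own check, not report output); being a single product and division, it stays within a few ulps of the exact value.

from fractions import Fraction

def target(x):
    return 2.0 / (x * ((x * x) - 1.0))

x = 1e8
ref = Fraction(2) / (Fraction(x) * (Fraction(x) ** 2 - 1))
print(target(x))   # about 2e-24
print(float(ref))  # agrees to roughly machine precision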
Reproduce:

herbie shell --seed 2023229
(FPCore (x)
  :name "3frac (problem 3.3.3)"
  :precision binary64
  :herbie-target
  (/ 2.0 (* x (- (* x x) 1.0)))
  (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))