
(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
/* 1/(x+1) - 2/x + 1/(x-1), evaluated term by term in binary64. */
double code(double x) {
    double lhs = 1.0 / (x + 1.0);
    double mid = 2.0 / x;
    double rhs = 1.0 / (x - 1.0);
    return (lhs - mid) + rhs;
}
! 1/(x+1) - 2/x + 1/(x-1), evaluated term by term in real(8).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: lhs, mid, rhs
lhs = 1.0d0 / (x + 1.0d0)
mid = 2.0d0 / x
rhs = 1.0d0 / (x - 1.0d0)
code = (lhs - mid) + rhs
end function
/** Evaluates 1/(x+1) - 2/x + 1/(x-1) term by term in double precision. */
public static double code(double x) {
    double lhs = 1.0 / (x + 1.0);
    double mid = 2.0 / x;
    double rhs = 1.0 / (x - 1.0);
    return (lhs - mid) + rhs;
}
def code(x):
    """Evaluate 1/(x+1) - 2/x + 1/(x-1) term by term."""
    lhs = 1.0 / (x + 1.0)
    mid = 2.0 / x
    rhs = 1.0 / (x - 1.0)
    return (lhs - mid) + rhs
function code(x)
    # 1/(x+1) - 2/x + 1/(x-1); each step explicitly rounded to Float64.
    lhs = Float64(1.0 / Float64(x + 1.0))
    mid = Float64(2.0 / x)
    rhs = Float64(1.0 / Float64(x - 1.0))
    return Float64(Float64(lhs - mid) + rhs)
end
function tmp = code(x)
    % 1/(x+1) - 2/x + 1/(x-1), evaluated term by term.
    lhs = 1.0 / (x + 1.0);
    mid = 2.0 / x;
    rhs = 1.0 / (x - 1.0);
    tmp = (lhs - mid) + rhs;
end
(* 1/(x+1) - 2/x + 1/(x-1); every intermediate is rounded with N[..., $MachinePrecision] to mimic binary64. *)
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
/* Alternative 1: 1/(x+1) - 2/x + 1/(x-1), term by term in binary64. */
double code(double x) {
    double lhs = 1.0 / (x + 1.0);
    double mid = 2.0 / x;
    double rhs = 1.0 / (x - 1.0);
    return (lhs - mid) + rhs;
}
! Alternative 1: 1/(x+1) - 2/x + 1/(x-1), term by term in real(8).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: lhs, mid, rhs
lhs = 1.0d0 / (x + 1.0d0)
mid = 2.0d0 / x
rhs = 1.0d0 / (x - 1.0d0)
code = (lhs - mid) + rhs
end function
/** Alternative 1: 1/(x+1) - 2/x + 1/(x-1), term by term. */
public static double code(double x) {
    double lhs = 1.0 / (x + 1.0);
    double mid = 2.0 / x;
    double rhs = 1.0 / (x - 1.0);
    return (lhs - mid) + rhs;
}
def code(x):
    """Alternative 1: 1/(x+1) - 2/x + 1/(x-1), term by term."""
    lhs = 1.0 / (x + 1.0)
    mid = 2.0 / x
    rhs = 1.0 / (x - 1.0)
    return (lhs - mid) + rhs
function code(x)
    # Alternative 1: 1/(x+1) - 2/x + 1/(x-1), rounded to Float64 per step.
    lhs = Float64(1.0 / Float64(x + 1.0))
    mid = Float64(2.0 / x)
    rhs = Float64(1.0 / Float64(x - 1.0))
    return Float64(Float64(lhs - mid) + rhs)
end
function tmp = code(x)
    % Alternative 1: 1/(x+1) - 2/x + 1/(x-1), term by term.
    lhs = 1.0 / (x + 1.0);
    mid = 2.0 / x;
    rhs = 1.0 / (x - 1.0);
    tmp = (lhs - mid) + rhs;
end
(* Alternative 1: 1/(x+1) - 2/x + 1/(x-1); each intermediate rounded via N[..., $MachinePrecision]. *)
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}
(FPCore (x) :precision binary64 (/ -2.0 (* (* x (- 1.0 x)) (+ x 1.0))))
/* Single-division rewrite: -2 / (x * (1 - x) * (x + 1)). */
double code(double x) {
    double denom = (x * (1.0 - x)) * (x + 1.0);
    return -2.0 / denom;
}
! Single-division rewrite: -2 / (x * (1 - x) * (x + 1)).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: denom
denom = (x * (1.0d0 - x)) * (x + 1.0d0)
code = (-2.0d0) / denom
end function
/** Single-division rewrite: -2 / (x * (1 - x) * (x + 1)). */
public static double code(double x) {
    double denom = (x * (1.0 - x)) * (x + 1.0);
    return -2.0 / denom;
}
def code(x):
    """Single-division rewrite: -2 / (x * (1 - x) * (x + 1))."""
    denom = (x * (1.0 - x)) * (x + 1.0)
    return -2.0 / denom
function code(x)
    # Single-division rewrite: -2 / (x * (1 - x) * (x + 1)).
    denom = Float64(Float64(x * Float64(1.0 - x)) * Float64(x + 1.0))
    return Float64(-2.0 / denom)
end
function tmp = code(x)
    % Single-division rewrite: -2 / (x * (1 - x) * (x + 1)).
    denom = (x * (1.0 - x)) * (x + 1.0);
    tmp = -2.0 / denom;
end
(* Single-division rewrite: -2 / (x (1 - x) (x + 1)), rounded per step with N[..., $MachinePrecision]. *)
code[x_] := N[(-2.0 / N[(N[(x * N[(1.0 - x), $MachinePrecision]), $MachinePrecision] * N[(x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-2}{\left(x \cdot \left(1 - x\right)\right) \cdot \left(x + 1\right)}
\end{array}
Initial program 89.3%
remove-double-neg89.3%
sub-neg89.3%
sub-neg89.3%
distribute-neg-frac89.3%
metadata-eval89.3%
metadata-eval89.3%
metadata-eval89.3%
associate-/r*89.3%
metadata-eval89.3%
neg-mul-189.3%
associate--l+89.3%
+-commutative89.3%
distribute-neg-frac89.3%
metadata-eval89.3%
metadata-eval89.3%
metadata-eval89.3%
associate-/r*89.3%
metadata-eval89.3%
neg-mul-189.3%
sub0-neg89.3%
associate-+l-89.3%
neg-sub089.3%
Simplified89.3%
frac-sub62.5%
div-inv61.1%
*-rgt-identity61.1%
Applied egg-rr61.1%
Taylor expanded in x around 0 53.1%
un-div-inv53.1%
frac-add58.8%
*-un-lft-identity58.8%
+-commutative58.8%
+-commutative58.8%
Applied egg-rr58.8%
Taylor expanded in x around 0 99.8%
Final simplification99.8%
(FPCore (x) :precision binary64 (if (or (<= x -1.0) (not (<= x 1.0))) (- (/ -2.0 x) (/ -2.0 x)) (- (* -2.0 x) (/ 2.0 x))))
/* Piecewise approximation of 1/(x+1) - 2/x + 1/(x-1) (generated):
 * outside (-1, 1] the branch (-2/x) - (-2/x) cancels to 0 for finite
 * nonzero x; inside, the Taylor form -2x - 2/x is used. */
double code(double x) {
    if (x <= -1.0 || !(x <= 1.0)) {
        double inv = -2.0 / x;
        return inv - inv;
    }
    return (-2.0 * x) - (2.0 / x);
}
! Piecewise approximation of 1/(x+1) - 2/x + 1/(x-1); the outer branch
! (-2/x) - (-2/x) cancels to zero for finite nonzero x (as generated).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: inv
if ((x <= (-1.0d0)) .or. (.not. (x <= 1.0d0))) then
  inv = (-2.0d0) / x
  code = inv - inv
else
  code = ((-2.0d0) * x) - (2.0d0 / x)
end if
end function
/**
 * Piecewise approximation of 1/(x+1) - 2/x + 1/(x-1); outside (-1, 1]
 * the generated branch (-2/x) - (-2/x) cancels to zero for finite x.
 */
public static double code(double x) {
    boolean outside = (x <= -1.0) || !(x <= 1.0);
    if (outside) {
        double inv = -2.0 / x;
        return inv - inv;
    }
    return (-2.0 * x) - (2.0 / x);
}
def code(x):
    """Piecewise approximation of 1/(x+1) - 2/x + 1/(x-1).

    Reformatted: the original was collapsed onto a single line, which is
    not valid Python syntax. Logic is unchanged.
    """
    tmp = 0
    if (x <= -1.0) or not (x <= 1.0):
        # Cancels to 0.0 for finite nonzero x (Taylor form, as generated).
        tmp = (-2.0 / x) - (-2.0 / x)
    else:
        tmp = (-2.0 * x) - (2.0 / x)
    return tmp
function code(x)
    # Piecewise approximation of 1/(x+1) - 2/x + 1/(x-1).
    # Reformatted: the one-line original lacked statement separators.
    tmp = 0.0
    if (x <= -1.0) || !(x <= 1.0)
        tmp = Float64(Float64(-2.0 / x) - Float64(-2.0 / x))
    else
        tmp = Float64(Float64(-2.0 * x) - Float64(2.0 / x))
    end
    return tmp
end
function tmp_2 = code(x)
    % Piecewise approximation of 1/(x+1) - 2/x + 1/(x-1).
    % Reformatted: the one-line original lacked separators after "if".
    tmp = 0.0;
    if (x <= -1.0) || ~(x <= 1.0)
        tmp = (-2.0 / x) - (-2.0 / x);
    else
        tmp = (-2.0 * x) - (2.0 / x);
    end
    tmp_2 = tmp;
end
(* Piecewise approximation of 1/(x+1) - 2/x + 1/(x-1); for x <= -1 or x > 1 the generated branch (-2/x) - (-2/x) cancels to zero. *)
code[x_] := If[Or[LessEqual[x, -1.0], N[Not[LessEqual[x, 1.0]], $MachinePrecision]], N[(N[(-2.0 / x), $MachinePrecision] - N[(-2.0 / x), $MachinePrecision]), $MachinePrecision], N[(N[(-2.0 * x), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1 \lor \neg \left(x \leq 1\right):\\
\;\;\;\;\frac{-2}{x} - \frac{-2}{x}\\
\mathbf{else}:\\
\;\;\;\;-2 \cdot x - \frac{2}{x}\\
\end{array}
\end{array}
if x < -1 or 1 < x Initial program 78.2%
remove-double-neg78.2%
sub-neg78.2%
sub-neg78.2%
distribute-neg-frac78.2%
metadata-eval78.2%
metadata-eval78.2%
metadata-eval78.2%
associate-/r*78.2%
metadata-eval78.2%
neg-mul-178.2%
associate--l+78.2%
+-commutative78.2%
distribute-neg-frac78.2%
metadata-eval78.2%
metadata-eval78.2%
metadata-eval78.2%
associate-/r*78.2%
metadata-eval78.2%
neg-mul-178.2%
sub0-neg78.2%
associate-+l-78.2%
neg-sub078.2%
Simplified78.2%
+-commutative78.2%
associate-+l-78.1%
Applied egg-rr78.1%
Taylor expanded in x around inf 75.5%
if -1 < x < 1 Initial program 100.0%
remove-double-neg100.0%
sub-neg100.0%
sub-neg100.0%
distribute-neg-frac100.0%
metadata-eval100.0%
metadata-eval100.0%
metadata-eval100.0%
associate-/r*100.0%
metadata-eval100.0%
neg-mul-1100.0%
associate--l+100.0%
+-commutative100.0%
distribute-neg-frac100.0%
metadata-eval100.0%
metadata-eval100.0%
metadata-eval100.0%
associate-/r*100.0%
metadata-eval100.0%
neg-mul-1100.0%
sub0-neg100.0%
associate-+l-100.0%
neg-sub0100.0%
Simplified100.0%
Taylor expanded in x around 0 99.4%
associate-*r/99.4%
metadata-eval99.4%
Simplified99.4%
Final simplification87.7%
(FPCore (x) :precision binary64 (if (<= x -0.65) (+ (/ 1.0 (+ x 1.0)) (/ -1.0 x)) (if (<= x 1.0) (- (* -2.0 x) (/ 2.0 x)) (- (/ -2.0 x) (/ -2.0 x)))))
/* Three-branch approximation of 1/(x+1) - 2/x + 1/(x-1) (generated):
 * x <= -0.65     : partial-fraction form 1/(x+1) - 1/x
 * -0.65 < x <= 1 : Taylor form -2x - 2/x
 * x > 1          : (-2/x) - (-2/x), zero for finite x */
double code(double x) {
    if (x <= -0.65) {
        return (1.0 / (x + 1.0)) + (-1.0 / x);
    }
    if (x <= 1.0) {
        return (-2.0 * x) - (2.0 / x);
    }
    double inv = -2.0 / x;
    return inv - inv;
}
! Three-branch approximation of 1/(x+1) - 2/x + 1/(x-1) (generated).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: inv
if (x <= (-0.65d0)) then
  ! partial-fraction form near the x = -1 pole
  code = (1.0d0 / (x + 1.0d0)) + ((-1.0d0) / x)
else if (x <= 1.0d0) then
  code = ((-2.0d0) * x) - (2.0d0 / x)
else
  ! cancels to zero for finite x, as generated
  inv = (-2.0d0) / x
  code = inv - inv
end if
end function
/**
 * Three-branch approximation of 1/(x+1) - 2/x + 1/(x-1):
 * partial fractions for x <= -0.65, Taylor form up to 1, zero beyond.
 */
public static double code(double x) {
    if (x <= -0.65) {
        return (1.0 / (x + 1.0)) + (-1.0 / x);
    }
    if (x <= 1.0) {
        return (-2.0 * x) - (2.0 / x);
    }
    double inv = -2.0 / x;
    return inv - inv;
}
def code(x):
    """Three-branch approximation of 1/(x+1) - 2/x + 1/(x-1).

    Reformatted: the original was collapsed onto a single line, which is
    not valid Python syntax. Logic is unchanged.
    """
    tmp = 0
    if x <= -0.65:
        # partial-fraction form near the x = -1 pole
        tmp = (1.0 / (x + 1.0)) + (-1.0 / x)
    elif x <= 1.0:
        tmp = (-2.0 * x) - (2.0 / x)
    else:
        # cancels to 0.0 for finite x, as generated
        tmp = (-2.0 / x) - (-2.0 / x)
    return tmp
function code(x)
    # Three-branch approximation of 1/(x+1) - 2/x + 1/(x-1).
    # Reformatted: the one-line original lacked statement separators.
    tmp = 0.0
    if x <= -0.65
        tmp = Float64(Float64(1.0 / Float64(x + 1.0)) + Float64(-1.0 / x))
    elseif x <= 1.0
        tmp = Float64(Float64(-2.0 * x) - Float64(2.0 / x))
    else
        tmp = Float64(Float64(-2.0 / x) - Float64(-2.0 / x))
    end
    return tmp
end
function tmp_2 = code(x)
    % Three-branch approximation of 1/(x+1) - 2/x + 1/(x-1).
    % Reformatted: the one-line original lacked separators after "if".
    tmp = 0.0;
    if x <= -0.65
        tmp = (1.0 / (x + 1.0)) + (-1.0 / x);
    elseif x <= 1.0
        tmp = (-2.0 * x) - (2.0 / x);
    else
        tmp = (-2.0 / x) - (-2.0 / x);
    end
    tmp_2 = tmp;
end
(* Three-branch approximation of 1/(x+1) - 2/x + 1/(x-1): partial fractions for x <= -0.65, Taylor form for x <= 1, cancelling zero beyond. *)
code[x_] := If[LessEqual[x, -0.65], N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 1.0], N[(N[(-2.0 * x), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision], N[(N[(-2.0 / x), $MachinePrecision] - N[(-2.0 / x), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.65:\\
\;\;\;\;\frac{1}{x + 1} + \frac{-1}{x}\\
\mathbf{elif}\;x \leq 1:\\
\;\;\;\;-2 \cdot x - \frac{2}{x}\\
\mathbf{else}:\\
\;\;\;\;\frac{-2}{x} - \frac{-2}{x}\\
\end{array}
\end{array}
if x < -0.650000000000000022 Initial program 74.9%
remove-double-neg74.9%
sub-neg74.9%
sub-neg74.9%
distribute-neg-frac74.9%
metadata-eval74.9%
metadata-eval74.9%
metadata-eval74.9%
associate-/r*74.9%
metadata-eval74.9%
neg-mul-174.9%
associate--l+74.9%
+-commutative74.9%
distribute-neg-frac74.9%
metadata-eval74.9%
metadata-eval74.9%
metadata-eval74.9%
associate-/r*74.9%
metadata-eval74.9%
neg-mul-174.9%
sub0-neg74.9%
associate-+l-74.9%
neg-sub074.9%
Simplified74.9%
frac-sub22.6%
div-inv19.9%
*-rgt-identity19.9%
Applied egg-rr19.9%
Taylor expanded in x around 0 19.9%
Taylor expanded in x around inf 72.9%
if -0.650000000000000022 < x < 1 Initial program 100.0%
remove-double-neg100.0%
sub-neg100.0%
sub-neg100.0%
distribute-neg-frac100.0%
metadata-eval100.0%
metadata-eval100.0%
metadata-eval100.0%
associate-/r*100.0%
metadata-eval100.0%
neg-mul-1100.0%
associate--l+100.0%
+-commutative100.0%
distribute-neg-frac100.0%
metadata-eval100.0%
metadata-eval100.0%
metadata-eval100.0%
associate-/r*100.0%
metadata-eval100.0%
neg-mul-1100.0%
sub0-neg100.0%
associate-+l-100.0%
neg-sub0100.0%
Simplified100.0%
Taylor expanded in x around 0 99.4%
associate-*r/99.4%
metadata-eval99.4%
Simplified99.4%
if 1 < x Initial program 81.8%
remove-double-neg81.8%
sub-neg81.8%
sub-neg81.8%
distribute-neg-frac81.8%
metadata-eval81.8%
metadata-eval81.8%
metadata-eval81.8%
associate-/r*81.8%
metadata-eval81.8%
neg-mul-181.8%
associate--l+81.8%
+-commutative81.8%
distribute-neg-frac81.8%
metadata-eval81.8%
metadata-eval81.8%
metadata-eval81.8%
associate-/r*81.8%
metadata-eval81.8%
neg-mul-181.8%
sub0-neg81.8%
associate-+l-81.8%
neg-sub081.8%
Simplified81.8%
+-commutative81.8%
associate-+l-81.7%
Applied egg-rr81.7%
Taylor expanded in x around inf 79.0%
Final simplification87.9%
(FPCore (x) :precision binary64 (/ -2.0 x))
/* Globally simplified approximation: -2/x (as generated). */
double code(double x) {
    double result = -2.0 / x;
    return result;
}
! Globally simplified approximation: -2/x (as generated).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: result
result = (-2.0d0) / x
code = result
end function
/** Globally simplified approximation: -2/x (as generated). */
public static double code(double x) {
    double result = -2.0 / x;
    return result;
}
def code(x):
    """Globally simplified approximation: -2/x (as generated)."""
    return -2.0 / x
function code(x)
    # Globally simplified approximation: -2/x (as generated).
    return Float64(-2.0 / x)
end
function tmp = code(x)
    % Globally simplified approximation: -2/x (as generated).
    tmp = -2.0 / x;
end
(* Globally simplified approximation: -2/x, rounded with N[..., $MachinePrecision]. *)
code[x_] := N[(-2.0 / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{-2}{x}
\end{array}
Initial program 89.3%
remove-double-neg89.3%
sub-neg89.3%
sub-neg89.3%
distribute-neg-frac89.3%
metadata-eval89.3%
metadata-eval89.3%
metadata-eval89.3%
associate-/r*89.3%
metadata-eval89.3%
neg-mul-189.3%
associate--l+89.3%
+-commutative89.3%
distribute-neg-frac89.3%
metadata-eval89.3%
metadata-eval89.3%
metadata-eval89.3%
associate-/r*89.3%
metadata-eval89.3%
neg-mul-189.3%
sub0-neg89.3%
associate-+l-89.3%
neg-sub089.3%
Simplified89.3%
Taylor expanded in x around 0 53.3%
Final simplification53.3%
(FPCore (x) :precision binary64 (/ 2.0 (* x (- (* x x) 1.0))))
/* Target form: 2 / (x * (x^2 - 1)), a single division. */
double code(double x) {
    double denom = x * ((x * x) - 1.0);
    return 2.0 / denom;
}
! Target form: 2 / (x * (x**2 - 1)), a single division.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: denom
denom = x * ((x * x) - 1.0d0)
code = 2.0d0 / denom
end function
/** Target form: 2 / (x * (x^2 - 1)), a single division. */
public static double code(double x) {
    double denom = x * ((x * x) - 1.0);
    return 2.0 / denom;
}
def code(x):
    """Target form: 2 / (x * (x**2 - 1)), a single division."""
    denom = x * ((x * x) - 1.0)
    return 2.0 / denom
function code(x)
    # Target form: 2 / (x * (x^2 - 1)), a single division.
    denom = Float64(x * Float64(Float64(x * x) - 1.0))
    return Float64(2.0 / denom)
end
function tmp = code(x)
    % Target form: 2 / (x * (x^2 - 1)), a single division.
    denom = x * ((x * x) - 1.0);
    tmp = 2.0 / denom;
end
(* Target form: 2 / (x (x^2 - 1)), rounded per step with N[..., $MachinePrecision]. *)
code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{x \cdot \left(x \cdot x - 1\right)}
\end{array}
herbie shell --seed 2023313
(FPCore (x)
:name "3frac (problem 3.3.3)"
:precision binary64
:herbie-target
(/ 2.0 (* x (- (* x x) 1.0)))
(+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))