
(FPCore (x) :precision binary64 (- (/ 1.0 (+ x 1.0)) (/ 1.0 x)))
/* Naive form of f(x) = 1/(x+1) - 1/x in binary64 (initial program). */
double code(double x) {
    double left = 1.0 / (x + 1.0);
    double right = 1.0 / x;
    return left - right;
}
! Naive form of f(x) = 1/(x+1) - 1/x in double precision (initial program).
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = (1.0d0 / (x + 1.0d0)) - (1.0d0 / x)
end function
/** Naive form of f(x) = 1/(x+1) - 1/x in double precision (initial program). */
public static double code(double x) {
    final double left = 1.0 / (x + 1.0);
    final double right = 1.0 / x;
    return left - right;
}
def code(x):
    """Naive form of f(x) = 1/(x+1) - 1/x (initial program)."""
    left = 1.0 / (x + 1.0)
    right = 1.0 / x
    return left - right
# Naive form of f(x) = 1/(x+1) - 1/x, every step rounded to Float64.
function code(x)
    left = Float64(1.0 / Float64(x + 1.0))
    right = Float64(1.0 / x)
    return Float64(left - right)
end
function tmp = code(x)
    % Naive form of f(x) = 1/(x+1) - 1/x (initial program).
    % Reformatted: the declaration and body must be on separate lines.
    tmp = (1.0 / (x + 1.0)) - (1.0 / x);
end
(* Naive form of f(x) = 1/(x+1) - 1/x; each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(1.0 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + 1} - \frac{1}{x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (/ 1.0 (+ x 1.0)) (/ 1.0 x)))
/* Alternative: identical to the initial program 1/(x+1) - 1/x. */
double code(double x) {
return (1.0 / (x + 1.0)) - (1.0 / x);
}
! Alternative: identical to the initial program 1/(x+1) - 1/x.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / (x + 1.0d0)) - (1.0d0 / x)
end function
/** Alternative: identical to the initial program 1/(x+1) - 1/x. */
public static double code(double x) {
return (1.0 / (x + 1.0)) - (1.0 / x);
}
# Alternative: identical to the initial program 1/(x+1) - 1/x.
def code(x): return (1.0 / (x + 1.0)) - (1.0 / x)
# Alternative: identical to the initial program 1/(x+1) - 1/x.
function code(x) return Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(1.0 / x)) end
% Alternative: identical to the initial program 1/(x+1) - 1/x.
function tmp = code(x) tmp = (1.0 / (x + 1.0)) - (1.0 / x); end
(* Alternative: identical to the initial program 1/(x+1) - 1/x. *)
code[x_] := N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(1.0 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + 1} - \frac{1}{x}
\end{array}
(FPCore (x) :precision binary64 (/ (/ -1.0 x) (+ x 1.0)))
/* Alternative: f rewritten as (-1/x) / (x+1), avoiding the subtraction. */
double code(double x) {
    double neg_inv = -1.0 / x;
    return neg_inv / (x + 1.0);
}
! Alternative: f rewritten as (-1/x) / (x+1), avoiding the subtraction.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = ((-1.0d0) / x) / (x + 1.0d0)
end function
/** Alternative: f rewritten as (-1/x) / (x+1), avoiding the subtraction. */
public static double code(double x) {
    final double negInv = -1.0 / x;
    return negInv / (x + 1.0);
}
def code(x):
    """Alternative: f rewritten as (-1/x) / (x + 1)."""
    neg_inv = -1.0 / x
    return neg_inv / (x + 1.0)
# Alternative: f rewritten as (-1/x) / (x + 1), Float64 throughout.
function code(x)
    neg_inv = Float64(-1.0 / x)
    return Float64(neg_inv / Float64(x + 1.0))
end
function tmp = code(x)
    % Alternative: f rewritten as (-1/x) / (x + 1).
    % Reformatted: the declaration and body must be on separate lines.
    tmp = (-1.0 / x) / (x + 1.0);
end
(* Alternative: f rewritten as (-1/x) / (x + 1), machine precision at each step. *)
code[x_] := N[(N[(-1.0 / x), $MachinePrecision] / N[(x + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{-1}{x}}{x + 1}
\end{array}
Initial program 76.6%
frac-sub 77.3%
div-inv 77.3%
*-un-lft-identity 77.3%
*-rgt-identity 77.3%
+-commutative 77.3%
metadata-eval 77.3%
frac-times 77.4%
clear-num 77.4%
associate-*l/ 77.4%
*-un-lft-identity 77.4%
div-inv 77.4%
metadata-eval 77.4%
*-rgt-identity 77.4%
+-commutative 77.4%
Applied egg-rr 77.4%
Taylor expanded in x around 0 99.9%
associate-*r/ 99.9%
div-inv 99.9%
Applied egg-rr 99.9%
Final simplification 99.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (+ (/ -1.0 x) (/ 1.0 (+ x 1.0)))))
(if (<= t_0 -2e+28)
(/ -1.0 x)
(if (<= t_0 0.0) (/ (/ -1.0 x) x) (+ (/ -1.0 x) 1.0)))))
/* Branch-selected Herbie rewrite of 1/(x+1) - 1/x: the naive value t_0
 * picks which series/regime expression is returned. */
double code(double x) {
    double neg_inv = -1.0 / x;               /* same value reused in every branch */
    double t_0 = neg_inv + 1.0 / (x + 1.0);
    double result;
    if (t_0 <= -2e+28) {
        result = neg_inv;
    } else if (t_0 <= 0.0) {
        result = neg_inv / x;
    } else {
        result = neg_inv + 1.0;
    }
    return result;
}
! Branch-selected Herbie rewrite of 1/(x+1) - 1/x: the naive value t_0
! picks which regime expression is returned.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = ((-1.0d0) / x) + (1.0d0 / (x + 1.0d0))
if (t_0 <= (-2d+28)) then
tmp = (-1.0d0) / x
else if (t_0 <= 0.0d0) then
tmp = ((-1.0d0) / x) / x
else
tmp = ((-1.0d0) / x) + 1.0d0
end if
code = tmp
end function
/** Branch-selected Herbie rewrite of 1/(x+1) - 1/x, chosen by the naive value t0. */
public static double code(double x) {
    final double negInv = -1.0 / x;
    final double t0 = negInv + 1.0 / (x + 1.0);
    if (t0 <= -2e+28) {
        return negInv;
    }
    return (t0 <= 0.0) ? negInv / x : negInv + 1.0;
}
def code(x):
    """Branch-selected Herbie rewrite of 1/(x+1) - 1/x.

    Reformatted onto multiple lines: the original collapsed one-liner was
    not valid Python syntax.
    """
    t_0 = (-1.0 / x) + (1.0 / (x + 1.0))
    tmp = 0
    if t_0 <= -2e+28:
        tmp = -1.0 / x
    elif t_0 <= 0.0:
        tmp = (-1.0 / x) / x
    else:
        tmp = (-1.0 / x) + 1.0
    return tmp
# Branch-selected Herbie rewrite of 1/(x+1) - 1/x.
# Reformatted: the collapsed one-liner lacked statement separators and did not parse.
function code(x)
    t_0 = Float64(Float64(-1.0 / x) + Float64(1.0 / Float64(x + 1.0)))
    tmp = 0.0
    if (t_0 <= -2e+28)
        tmp = Float64(-1.0 / x)
    elseif (t_0 <= 0.0)
        tmp = Float64(Float64(-1.0 / x) / x)
    else
        tmp = Float64(Float64(-1.0 / x) + 1.0)
    end
    return tmp
end
function tmp_2 = code(x)
    % Branch-selected Herbie rewrite of 1/(x+1) - 1/x.
    % Reformatted: declaration and statements must be on separate lines.
    t_0 = (-1.0 / x) + (1.0 / (x + 1.0));
    tmp = 0.0;
    if (t_0 <= -2e+28)
        tmp = -1.0 / x;
    elseif (t_0 <= 0.0)
        tmp = (-1.0 / x) / x;
    else
        tmp = (-1.0 / x) + 1.0;
    end
    tmp_2 = tmp;
end
(* Branch-selected Herbie rewrite of 1/(x+1) - 1/x; t$95$0 is the naive value used to pick the regime. *)
code[x_] := Block[{t$95$0 = N[(N[(-1.0 / x), $MachinePrecision] + N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2e+28], N[(-1.0 / x), $MachinePrecision], If[LessEqual[t$95$0, 0.0], N[(N[(-1.0 / x), $MachinePrecision] / x), $MachinePrecision], N[(N[(-1.0 / x), $MachinePrecision] + 1.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{-1}{x} + \frac{1}{x + 1}\\
\mathbf{if}\;t\_0 \leq -2 \cdot 10^{+28}:\\
\;\;\;\;\frac{-1}{x}\\
\mathbf{elif}\;t\_0 \leq 0:\\
\;\;\;\;\frac{\frac{-1}{x}}{x}\\
\mathbf{else}:\\
\;\;\;\;\frac{-1}{x} + 1\\
\end{array}
\end{array}
if (-.f64 (/.f64 #s(literal 1 binary64) (+.f64 x #s(literal 1 binary64))) (/.f64 #s(literal 1 binary64) x)) < -1.99999999999999992e28: Initial program 100.0%
Taylor expanded in x around 0 100.0%
if -1.99999999999999992e28 < (-.f64 (/.f64 #s(literal 1 binary64) (+.f64 x #s(literal 1 binary64))) (/.f64 #s(literal 1 binary64) x)) < 0.0Initial program 53.9%
Taylor expanded in x around inf 96.1%
unpow296.1%
associate-/r*97.1%
*-lft-identity97.1%
associate-*l/96.9%
metadata-eval96.9%
distribute-neg-frac96.9%
distribute-rgt-neg-out96.9%
unpow-196.9%
unpow-196.9%
pow-sqr97.3%
metadata-eval97.3%
Simplified97.3%
add-sqr-sqrt45.4%
sqrt-unprod49.7%
sqr-neg49.7%
sqrt-unprod49.4%
pow249.4%
sqrt-pow149.4%
metadata-eval49.4%
inv-pow49.4%
pow249.4%
un-div-inv49.4%
Applied egg-rr97.1%
if 0.0 < (-.f64 (/.f64 #s(literal 1 binary64) (+.f64 x #s(literal 1 binary64))) (/.f64 #s(literal 1 binary64) x)) Initial program 100.0%
Taylor expanded in x around 0 100.0%
Final simplification98.6%
(FPCore (x)
:precision binary64
(let* ((t_0 (+ (/ -1.0 x) (/ 1.0 (+ x 1.0)))))
(if (<= t_0 -2e+28)
(/ -1.0 x)
(if (<= t_0 0.0) (/ -1.0 (* x x)) (+ (/ -1.0 x) 1.0)))))
/* Branch-selected Herbie rewrite of 1/(x+1) - 1/x; the middle regime uses
 * the Taylor form -1/(x*x). */
double code(double x) {
    double neg_inv = -1.0 / x;
    double t_0 = neg_inv + 1.0 / (x + 1.0);
    double result;
    if (t_0 <= -2e+28) {
        result = neg_inv;
    } else if (t_0 <= 0.0) {
        result = -1.0 / (x * x);
    } else {
        result = neg_inv + 1.0;
    }
    return result;
}
! Branch-selected Herbie rewrite of 1/(x+1) - 1/x; middle regime uses the
! Taylor form -1/(x*x).
real(8) function code(x)
implicit none
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = ((-1.0d0) / x) + (1.0d0 / (x + 1.0d0))
if (t_0 <= (-2d+28)) then
tmp = (-1.0d0) / x
else if (t_0 <= 0.0d0) then
tmp = (-1.0d0) / (x * x)
else
tmp = ((-1.0d0) / x) + 1.0d0
end if
code = tmp
end function
/** Branch-selected Herbie rewrite of 1/(x+1) - 1/x; middle regime is -1/(x*x). */
public static double code(double x) {
    final double negInv = -1.0 / x;
    final double t0 = negInv + 1.0 / (x + 1.0);
    if (t0 <= -2e+28) {
        return negInv;
    }
    return (t0 <= 0.0) ? -1.0 / (x * x) : negInv + 1.0;
}
def code(x):
    """Branch-selected Herbie rewrite of 1/(x+1) - 1/x; middle regime -1/(x*x).

    Reformatted onto multiple lines: the original collapsed one-liner was
    not valid Python syntax.
    """
    t_0 = (-1.0 / x) + (1.0 / (x + 1.0))
    tmp = 0
    if t_0 <= -2e+28:
        tmp = -1.0 / x
    elif t_0 <= 0.0:
        tmp = -1.0 / (x * x)
    else:
        tmp = (-1.0 / x) + 1.0
    return tmp
# Branch-selected Herbie rewrite of 1/(x+1) - 1/x; middle regime -1/(x*x).
# Reformatted: the collapsed one-liner lacked statement separators and did not parse.
function code(x)
    t_0 = Float64(Float64(-1.0 / x) + Float64(1.0 / Float64(x + 1.0)))
    tmp = 0.0
    if (t_0 <= -2e+28)
        tmp = Float64(-1.0 / x)
    elseif (t_0 <= 0.0)
        tmp = Float64(-1.0 / Float64(x * x))
    else
        tmp = Float64(Float64(-1.0 / x) + 1.0)
    end
    return tmp
end
function tmp_2 = code(x)
    % Branch-selected Herbie rewrite of 1/(x+1) - 1/x; middle regime -1/(x*x).
    % Reformatted: declaration and statements must be on separate lines.
    t_0 = (-1.0 / x) + (1.0 / (x + 1.0));
    tmp = 0.0;
    if (t_0 <= -2e+28)
        tmp = -1.0 / x;
    elseif (t_0 <= 0.0)
        tmp = -1.0 / (x * x);
    else
        tmp = (-1.0 / x) + 1.0;
    end
    tmp_2 = tmp;
end
(* Branch-selected Herbie rewrite of 1/(x+1) - 1/x; middle regime is -1/(x*x). *)
code[x_] := Block[{t$95$0 = N[(N[(-1.0 / x), $MachinePrecision] + N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2e+28], N[(-1.0 / x), $MachinePrecision], If[LessEqual[t$95$0, 0.0], N[(-1.0 / N[(x * x), $MachinePrecision]), $MachinePrecision], N[(N[(-1.0 / x), $MachinePrecision] + 1.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{-1}{x} + \frac{1}{x + 1}\\
\mathbf{if}\;t\_0 \leq -2 \cdot 10^{+28}:\\
\;\;\;\;\frac{-1}{x}\\
\mathbf{elif}\;t\_0 \leq 0:\\
\;\;\;\;\frac{-1}{x \cdot x}\\
\mathbf{else}:\\
\;\;\;\;\frac{-1}{x} + 1\\
\end{array}
\end{array}
if (-.f64 (/.f64 #s(literal 1 binary64) (+.f64 x #s(literal 1 binary64))) (/.f64 #s(literal 1 binary64) x)) < -1.99999999999999992e28Initial program 100.0%
Taylor expanded in x around 0 100.0%
if -1.99999999999999992e28 < (-.f64 (/.f64 #s(literal 1 binary64) (+.f64 x #s(literal 1 binary64))) (/.f64 #s(literal 1 binary64) x)) < 0.0Initial program 53.9%
clear-num53.9%
frac-sub55.4%
*-commutative55.4%
*-un-lft-identity55.4%
*-un-lft-identity55.4%
div-inv55.4%
metadata-eval55.4%
*-rgt-identity55.4%
+-commutative55.4%
*-commutative55.4%
div-inv55.4%
metadata-eval55.4%
*-rgt-identity55.4%
+-commutative55.4%
Applied egg-rr55.4%
Taylor expanded in x around 0 98.8%
Taylor expanded in x around inf 96.1%
if 0.0 < (-.f64 (/.f64 #s(literal 1 binary64) (+.f64 x #s(literal 1 binary64))) (/.f64 #s(literal 1 binary64) x)) Initial program 100.0%
Taylor expanded in x around 0 100.0%
Final simplification98.0%
(FPCore (x) :precision binary64 (if (<= x -1.0) 0.0 (if (<= x 4.5e+102) (/ -1.0 x) 0.0)))
/* Piecewise approximation: 0 for x <= -1 or x > 4.5e102, otherwise -1/x.
 * Branch order preserved so NaN inputs still fall through to the -1/x case... wait,
 * order preserved exactly as the original chain. */
double code(double x) {
    if (x <= -1.0) {
        return 0.0;
    }
    if (x <= 4.5e+102) {
        return -1.0 / x;
    }
    return 0.0;
}
! Piecewise approximation: 0 for x <= -1 or x > 4.5e102, otherwise -1/x.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-1.0d0)) then
tmp = 0.0d0
else if (x <= 4.5d+102) then
tmp = (-1.0d0) / x
else
tmp = 0.0d0
end if
code = tmp
end function
/** Piecewise approximation: 0 for x <= -1 or x > 4.5e102, otherwise -1/x. */
public static double code(double x) {
    if (x <= -1.0) {
        return 0.0;
    }
    if (x <= 4.5e+102) {
        return -1.0 / x;
    }
    return 0.0;
}
def code(x):
    """Piecewise approximation: 0 for x <= -1 or x > 4.5e102, otherwise -1/x.

    Reformatted onto multiple lines: the original collapsed one-liner was
    not valid Python syntax.
    """
    tmp = 0
    if x <= -1.0:
        tmp = 0.0
    elif x <= 4.5e+102:
        tmp = -1.0 / x
    else:
        tmp = 0.0
    return tmp
# Piecewise approximation: 0 for x <= -1 or x > 4.5e102, otherwise -1/x.
# Reformatted: the collapsed one-liner lacked statement separators and did not parse.
function code(x)
    tmp = 0.0
    if (x <= -1.0)
        tmp = 0.0
    elseif (x <= 4.5e+102)
        tmp = Float64(-1.0 / x)
    else
        tmp = 0.0
    end
    return tmp
end
function tmp_2 = code(x)
    % Piecewise approximation: 0 for x <= -1 or x > 4.5e102, otherwise -1/x.
    % Reformatted: declaration and statements must be on separate lines.
    tmp = 0.0;
    if (x <= -1.0)
        tmp = 0.0;
    elseif (x <= 4.5e+102)
        tmp = -1.0 / x;
    else
        tmp = 0.0;
    end
    tmp_2 = tmp;
end
(* Piecewise approximation: 0 for x <= -1 or x > 4.5e102, otherwise -1/x. *)
code[x_] := If[LessEqual[x, -1.0], 0.0, If[LessEqual[x, 4.5e+102], N[(-1.0 / x), $MachinePrecision], 0.0]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1:\\
\;\;\;\;0\\
\mathbf{elif}\;x \leq 4.5 \cdot 10^{+102}:\\
\;\;\;\;\frac{-1}{x}\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if x < -1 or 4.50000000000000021e102 < x Initial program 65.7%
Taylor expanded in x around inf 62.9%
Taylor expanded in x around 0 62.9%
if -1 < x < 4.50000000000000021e102Initial program 83.8%
Taylor expanded in x around 0 82.9%
(FPCore (x) :precision binary64 (/ -1.0 (* x (+ x 1.0))))
/* Alternative: f combined over a common denominator, -1 / (x*(x+1)). */
double code(double x) {
    double denom = x * (x + 1.0);
    return -1.0 / denom;
}
! Alternative: f combined over a common denominator, -1 / (x*(x+1)).
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = (-1.0d0) / (x * (x + 1.0d0))
end function
/** Alternative: f combined over a common denominator, -1 / (x*(x+1)). */
public static double code(double x) {
    final double denom = x * (x + 1.0);
    return -1.0 / denom;
}
def code(x):
    """Alternative: f combined over a common denominator, -1 / (x*(x+1))."""
    denom = x * (x + 1.0)
    return -1.0 / denom
# Alternative: f combined over a common denominator, -1 / (x*(x+1)).
function code(x)
    denom = Float64(x * Float64(x + 1.0))
    return Float64(-1.0 / denom)
end
function tmp = code(x)
    % Alternative: f combined over a common denominator, -1 / (x*(x+1)).
    % Reformatted: the declaration and body must be on separate lines.
    tmp = -1.0 / (x * (x + 1.0));
end
(* Alternative: f combined over a common denominator, -1 / (x*(x+1)). *)
code[x_] := N[(-1.0 / N[(x * N[(x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{x \cdot \left(x + 1\right)}
\end{array}
Initial program 76.6%
clear-num76.6%
frac-sub77.3%
*-commutative77.3%
*-un-lft-identity77.3%
*-un-lft-identity77.3%
div-inv77.3%
metadata-eval77.3%
*-rgt-identity77.3%
+-commutative77.3%
*-commutative77.3%
div-inv77.3%
metadata-eval77.3%
*-rgt-identity77.3%
+-commutative77.3%
Applied egg-rr77.3%
Taylor expanded in x around 0 99.4%
Final simplification99.4%
(FPCore (x) :precision binary64 0.0)
/* Alternative: the series expansion collapsed to the constant 0. */
double code(double x) {
    (void)x; /* input intentionally unused */
    return 0.0;
}
! Alternative: the series expansion collapsed to the constant 0.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = 0.0d0
end function
/** Alternative: the series expansion collapsed to the constant 0. */
public static double code(double x) {
    final double zero = 0.0;
    return zero;
}
def code(x):
    """Alternative: the series expansion collapsed to the constant 0."""
    return 0.0
# Alternative: the series expansion collapsed to the constant 0.
code(x) = 0.0
function tmp = code(x)
    % Alternative: the series expansion collapsed to the constant 0.
    % Reformatted: the declaration and body must be on separate lines.
    tmp = 0.0;
end
(* Alternative: the series expansion collapsed to the constant 0. *)
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 76.6%
Taylor expanded in x around inf 26.6%
Taylor expanded in x around 0 26.6%
(FPCore (x) :precision binary64 (/ (/ -1.0 x) (+ x 1.0)))
/* Repeats the (-1/x)/(x+1) variant shown earlier in the report. */
double code(double x) {
return (-1.0 / x) / (x + 1.0);
}
! Repeats the (-1/x)/(x+1) variant shown earlier in the report.
real(8) function code(x)
real(8), intent (in) :: x
code = ((-1.0d0) / x) / (x + 1.0d0)
end function
/** Repeats the (-1/x)/(x+1) variant shown earlier in the report. */
public static double code(double x) {
return (-1.0 / x) / (x + 1.0);
}
# Repeats the (-1/x)/(x+1) variant shown earlier in the report.
def code(x): return (-1.0 / x) / (x + 1.0)
# Repeats the (-1/x)/(x+1) variant shown earlier in the report.
function code(x) return Float64(Float64(-1.0 / x) / Float64(x + 1.0)) end
% Repeats the (-1/x)/(x+1) variant shown earlier in the report.
function tmp = code(x) tmp = (-1.0 / x) / (x + 1.0); end
(* Repeats the (-1/x)/(x+1) variant shown earlier in the report. *)
code[x_] := N[(N[(-1.0 / x), $MachinePrecision] / N[(x + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{-1}{x}}{x + 1}
\end{array}
(FPCore (x) :precision binary64 (/ 1.0 (* x (- -1.0 x))))
/* Alternative: f rewritten as 1 / (x * (-1 - x)). */
double code(double x) {
    double denom = x * (-1.0 - x);
    return 1.0 / denom;
}
! Alternative: f rewritten as 1 / (x * (-1 - x)).
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = 1.0d0 / (x * ((-1.0d0) - x))
end function
/** Alternative: f rewritten as 1 / (x * (-1 - x)). */
public static double code(double x) {
    final double denom = x * (-1.0 - x);
    return 1.0 / denom;
}
def code(x):
    """Alternative: f rewritten as 1 / (x * (-1 - x))."""
    denom = x * (-1.0 - x)
    return 1.0 / denom
# Alternative: f rewritten as 1 / (x * (-1 - x)).
function code(x)
    denom = Float64(x * Float64(-1.0 - x))
    return Float64(1.0 / denom)
end
function tmp = code(x)
    % Alternative: f rewritten as 1 / (x * (-1 - x)).
    % Reformatted: the declaration and body must be on separate lines.
    tmp = 1.0 / (x * (-1.0 - x));
end
(* Alternative: f rewritten as 1 / (x * (-1 - x)). *)
code[x_] := N[(1.0 / N[(x * N[(-1.0 - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x \cdot \left(-1 - x\right)}
\end{array}
herbie shell --seed 2024191
(FPCore (x)
:name "2frac (problem 3.3.1)"
:precision binary64
:alt
(! :herbie-platform default (/ (/ -1 x) (+ x 1)))
:alt
(! :herbie-platform default (/ 1 (* x (- -1 x))))
(- (/ 1.0 (+ x 1.0)) (/ 1.0 x)))