Average Error: 59.9 → 0.3
Time: 10.1s
Precision: binary64
\[-0.026 < x \land x < 0.026\]
\[\frac{1}{x} - \frac{1}{\tan x} \]
\[0.0021164021164021165 \cdot {x}^{5} + \left(x \cdot 0.3333333333333333 + 0.022222222222222223 \cdot {x}^{3}\right) \]
; Original program: 1/x - 1/tan(x) in binary64.
(FPCore (x) :precision binary64 (- (/ 1.0 x) (/ 1.0 (tan x))))
; Herbie's rewrite: degree-5 Taylor polynomial of 1/x - 1/tan(x)
; about x = 0, i.e. x/3 + x^3/45 + 2*x^5/945 (1/45 = 0.0222...,
; 2/945 = 0.0021164...), for the precondition |x| < 0.026.
(FPCore (x)
 :precision binary64
 (+
  (* 0.0021164021164021165 (pow x 5.0))
  (+ (* x 0.3333333333333333) (* 0.022222222222222223 (pow x 3.0)))))
double code(double x) {
	return (1.0 / x) - (1.0 / tan(x));
}
double code(double x) {
	return (0.0021164021164021165 * pow(x, 5.0)) + ((x * 0.3333333333333333) + (0.022222222222222223 * pow(x, 3.0)));
}
! Original program: 1/x - 1/tan(x). Both terms diverge as x -> 0,
! so the subtraction cancels catastrophically near zero.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: inv_x, inv_tan
    inv_x = 1.0d0 / x
    inv_tan = 1.0d0 / tan(x)
    code = inv_x - inv_tan
end function
! Herbie's rewrite on |x| < 0.026: Taylor polynomial
! x/3 + x^3/45 + 2*x^5/945, summed in the generated order.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: cubic, quintic
    cubic = 0.022222222222222223d0 * (x ** 3.0d0)
    quintic = 0.0021164021164021165d0 * (x ** 5.0d0)
    code = quintic + ((x * 0.3333333333333333d0) + cubic)
end function
/**
 * Original program: 1/x - 1/tan(x).
 * Loses nearly all significant bits for |x| near 0, where the two
 * diverging terms cancel.
 */
public static double code(double x) {
	final double reciprocal = 1.0 / x;
	final double cotangent = 1.0 / Math.tan(x);
	return reciprocal - cotangent;
}
/**
 * Herbie's rewrite of 1/x - 1/tan(x) on |x| < 0.026:
 * the Taylor polynomial x/3 + x^3/45 + 2*x^5/945,
 * summed in exactly the generated order.
 */
public static double code(double x) {
	final double linear = x * 0.3333333333333333;
	final double cubic = 0.022222222222222223 * Math.pow(x, 3.0);
	final double quintic = 0.0021164021164021165 * Math.pow(x, 5.0);
	return quintic + (linear + cubic);
}
def code(x):
	"""Original program 1/x - 1/tan(x); ill-conditioned near x = 0."""
	reciprocal = 1.0 / x
	cotangent = 1.0 / math.tan(x)
	return reciprocal - cotangent
def code(x):
	"""Herbie's rewrite on |x| < 0.026: Taylor polynomial
	x/3 + x^3/45 + 2*x^5/945, summed in the generated order."""
	linear = x * 0.3333333333333333
	cubic = 0.022222222222222223 * math.pow(x, 3.0)
	quintic = 0.0021164021164021165 * math.pow(x, 5.0)
	return quintic + (linear + cubic)
# Original program 1/x - 1/tan(x); cancels catastrophically near x = 0.
# Each operation is wrapped in Float64() exactly as Herbie emitted it.
function code(x)
	inv_x = Float64(1.0 / x)
	inv_tan = Float64(1.0 / tan(x))
	return Float64(inv_x - inv_tan)
end
# Herbie's rewrite on |x| < 0.026: Taylor polynomial
# x/3 + x^3/45 + 2*x^5/945. The Float64() wrapping and the
# summation order reproduce the generated expression exactly.
function code(x)
	linear = Float64(x * 0.3333333333333333)
	cubic = Float64(0.022222222222222223 * (x ^ 3.0))
	quintic = Float64(0.0021164021164021165 * (x ^ 5.0))
	return Float64(quintic + Float64(linear + cubic))
end
% Original program 1/x - 1/tan(x); inaccurate near x = 0 due to
% cancellation between the two diverging terms.
function tmp = code(x)
	inv_x = 1.0 / x;
	inv_tan = 1.0 / tan(x);
	tmp = inv_x - inv_tan;
end
% Herbie's rewrite on |x| < 0.026: Taylor polynomial
% x/3 + x^3/45 + 2*x^5/945, summed in the generated order.
function tmp = code(x)
	linear = x * 0.3333333333333333;
	cubic = 0.022222222222222223 * (x ^ 3.0);
	quintic = 0.0021164021164021165 * (x ^ 5.0);
	tmp = quintic + (linear + cubic);
end
(* Original program 1/x - 1/Tan[x], each step rounded to $MachinePrecision; inaccurate for |x| near 0. *)
code[x_] := N[(N[(1.0 / x), $MachinePrecision] - N[(1.0 / N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
(* Herbie's rewrite on |x| < 0.026: Taylor polynomial x/3 + x^3/45 + 2*x^5/945, evaluated at $MachinePrecision. *)
code[x_] := N[(N[(0.0021164021164021165 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision] + N[(N[(x * 0.3333333333333333), $MachinePrecision] + N[(0.022222222222222223 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\frac{1}{x} - \frac{1}{\tan x}
0.0021164021164021165 \cdot {x}^{5} + \left(x \cdot 0.3333333333333333 + 0.022222222222222223 \cdot {x}^{3}\right)

Error

Bits error versus x

Try it out

Your Program's Arguments

Results

Enter valid numbers for all inputs

Target

Original: 59.9
Target: 0.1
Herbie: 0.3
\[\begin{array}{l} \mathbf{if}\;\left|x\right| < 0.026:\\ \;\;\;\;\frac{x}{3} \cdot \left(1 + \frac{x \cdot x}{15}\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{1}{x} - \frac{1}{\tan x}\\ \end{array} \]

Derivation

  1. Initial program — error: 59.9

    \[\frac{1}{x} - \frac{1}{\tan x} \]
  2. Taylor expanded in x around 0 — error: 0.3

    \[\leadsto \color{blue}{0.0021164021164021165 \cdot {x}^{5} + \left(0.022222222222222223 \cdot {x}^{3} + 0.3333333333333333 \cdot x\right)} \]
  3. Applied egg-rr — error: 1.5

    \[\leadsto 0.0021164021164021165 \cdot {x}^{5} + \color{blue}{\mathsf{fma}\left({\left(\sqrt[3]{x}\right)}^{2}, \sqrt[3]{x} \cdot 0.3333333333333333, 0.022222222222222223 \cdot {x}^{3}\right)} \]
  4. Applied egg-rr — error: 0.3

    \[\leadsto 0.0021164021164021165 \cdot {x}^{5} + \color{blue}{\left(x \cdot 0.3333333333333333 + 0.022222222222222223 \cdot {x}^{3}\right)} \]
  5. Final simplification — error: 0.3

    \[\leadsto 0.0021164021164021165 \cdot {x}^{5} + \left(x \cdot 0.3333333333333333 + 0.022222222222222223 \cdot {x}^{3}\right) \]

Reproduce

herbie shell --seed 2022150 
; Input FPCore for reproducing this report: the original expression,
; the precondition -0.026 < x < 0.026, and the hand-derived
; :herbie-target used for the "Target" accuracy row.
(FPCore (x)
  :name "invcot (example 3.9)"
  :precision binary64
  :pre (and (< -0.026 x) (< x 0.026))

  :herbie-target
  (if (< (fabs x) 0.026) (* (/ x 3.0) (+ 1.0 (/ (* x x) 15.0))) (- (/ 1.0 x) (/ 1.0 (tan x))))

  (- (/ 1.0 x) (/ 1.0 (tan x))))