| Alternative 1 | |
|---|---|
| Accuracy | 99.4% |
| Cost | 20032 |
\[\mathsf{fma}\left(0.16666666666666666, x \cdot x, -0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)
\]
; Original program in FPCore: (x - sin(x)) / tan(x), binary64.
(FPCore (x) :precision binary64 (/ (- x (sin x)) (tan x)))
(FPCore (x)
:precision binary64
(+
(* 0.16666666666666666 (* x x))
(+
(* -0.00023644179894179894 (pow x 8.0))
(+
(* -0.0007275132275132275 (pow x 6.0))
(* -0.06388888888888888 (pow x 4.0))))))

double code(double x) {
return (x - sin(x)) / tan(x);
}
double code(double x) {
return (0.16666666666666666 * (x * x)) + ((-0.00023644179894179894 * pow(x, 8.0)) + ((-0.0007275132275132275 * pow(x, 6.0)) + (-0.06388888888888888 * pow(x, 4.0))));
}
! Original double-precision program: evaluates (x - sin(x)) / tan(x).
real(8) function code(x)
real(8), intent (in) :: x
code = (x - sin(x)) / tan(x)
end function
! Herbie-generated degree-8 Taylor polynomial of (x - sin(x)) / tan(x)
! about x = 0; terms are summed in the order produced by the tool.
real(8) function code(x)
real(8), intent (in) :: x
code = (0.16666666666666666d0 * (x * x)) + (((-0.00023644179894179894d0) * (x ** 8.0d0)) + (((-0.0007275132275132275d0) * (x ** 6.0d0)) + ((-0.06388888888888888d0) * (x ** 4.0d0))))
end function
public static double code(double x) {
	// Original expression: (x - sin(x)) / tan(x), evaluated in binary64.
	double numerator = x - Math.sin(x);
	double denominator = Math.tan(x);
	return numerator / denominator;
}
public static double code(double x) {
	// Degree-8 Taylor polynomial of (x - sin(x)) / tan(x) about x = 0.
	// Operation order matches the generated expression exactly, so the
	// double result is bit-identical to the original form.
	double x2 = x * x;
	double term8 = -0.00023644179894179894 * Math.pow(x, 8.0);
	double term6 = -0.0007275132275132275 * Math.pow(x, 6.0);
	double term4 = -0.06388888888888888 * Math.pow(x, 4.0);
	return 0.16666666666666666 * x2 + (term8 + (term6 + term4));
}
def code(x):
    """Original program: (x - sin(x)) / tan(x) in double precision."""
    numerator = x - math.sin(x)
    denominator = math.tan(x)
    return numerator / denominator
def code(x):
    """Degree-8 Taylor polynomial of (x - sin(x)) / tan(x) about x = 0.

    The terms are combined in exactly the order of the generated
    expression, so the float result is bit-identical to the original.
    """
    x2 = x * x
    t8 = -0.00023644179894179894 * math.pow(x, 8.0)
    t6 = -0.0007275132275132275 * math.pow(x, 6.0)
    t4 = -0.06388888888888888 * math.pow(x, 4.0)
    return 0.16666666666666666 * x2 + (t8 + (t6 + t4))
function code(x)
    # Original program: (x - sin(x)) / tan(x), each step rounded to Float64.
    numerator = Float64(x - sin(x))
    return Float64(numerator / tan(x))
end
function code(x)
    # Degree-8 Taylor polynomial of (x - sin(x)) / tan(x) about x = 0.
    # Every Float64() rounding point and the summation order match the
    # generated expression exactly, so the result is bit-identical.
    x2 = Float64(x * x)
    t8 = Float64(-0.00023644179894179894 * (x ^ 8.0))
    t6 = Float64(-0.0007275132275132275 * (x ^ 6.0))
    t4 = Float64(-0.06388888888888888 * (x ^ 4.0))
    return Float64(Float64(0.16666666666666666 * x2) + Float64(t8 + Float64(t6 + t4)))
end
function tmp = code(x)
	% Original program: (x - sin(x)) / tan(x) in double precision.
	numerator = x - sin(x);
	tmp = numerator / tan(x);
end
function tmp = code(x)
	% Degree-8 Taylor polynomial of (x - sin(x)) / tan(x) about x = 0,
	% with terms combined in the same order as the generated expression.
	x2 = x * x;
	t8 = -0.00023644179894179894 * (x ^ 8.0);
	t6 = -0.0007275132275132275 * (x ^ 6.0);
	t4 = -0.06388888888888888 * (x ^ 4.0);
	tmp = 0.16666666666666666 * x2 + (t8 + (t6 + t4));
end
(* Original program: (x - Sin[x]) / Tan[x], with every intermediate rounded to $MachinePrecision. *)
code[x_] := N[(N[(x - N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[Tan[x], $MachinePrecision]), $MachinePrecision]
(* Degree-8 Taylor polynomial of (x - Sin[x]) / Tan[x] about x = 0; each operation is rounded to $MachinePrecision in the order generated. *)
code[x_] := N[(N[(0.16666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(N[(-0.00023644179894179894 * N[Power[x, 8.0], $MachinePrecision]), $MachinePrecision] + N[(N[(-0.0007275132275132275 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(-0.06388888888888888 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\frac{x - \sin x}{\tan x}
0.16666666666666666 \cdot \left(x \cdot x\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
Results
| Original | 52.9% |
|---|---|
| Target | 98.6% |
| Herbie | 99.6% |
Initial program 52.9%
Taylor expanded in x around 0 99.6%
Applied egg-rr 99.6%
[Start]99.6 | \[ 0.16666666666666666 \cdot {x}^{2} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
|---|---|
add-log-exp [=>]52.5 | \[ \color{blue}{\log \left(e^{0.16666666666666666 \cdot {x}^{2}}\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
*-un-lft-identity [=>]52.5 | \[ \log \color{blue}{\left(1 \cdot e^{0.16666666666666666 \cdot {x}^{2}}\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
log-prod [=>]52.5 | \[ \color{blue}{\left(\log 1 + \log \left(e^{0.16666666666666666 \cdot {x}^{2}}\right)\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
metadata-eval [=>]52.5 | \[ \left(\color{blue}{0} + \log \left(e^{0.16666666666666666 \cdot {x}^{2}}\right)\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
add-log-exp [<=]99.6 | \[ \left(0 + \color{blue}{0.16666666666666666 \cdot {x}^{2}}\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
unpow2 [=>]99.6 | \[ \left(0 + 0.16666666666666666 \cdot \color{blue}{\left(x \cdot x\right)}\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
Simplified 99.6%
[Start]99.6 | \[ \left(0 + 0.16666666666666666 \cdot \left(x \cdot x\right)\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
|---|---|
+-lft-identity [=>]99.6 | \[ \color{blue}{0.16666666666666666 \cdot \left(x \cdot x\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
Final simplification 99.6%
| Alternative 1 | |
|---|---|
| Accuracy | 99.4% |
| Cost | 20032 |
| Alternative 2 | |
|---|---|
| Accuracy | 99.4% |
| Cost | 13760 |
| Alternative 3 | |
|---|---|
| Accuracy | 99.3% |
| Cost | 13312 |
| Alternative 4 | |
|---|---|
| Accuracy | 99.3% |
| Cost | 7040 |
| Alternative 5 | |
|---|---|
| Accuracy | 98.6% |
| Cost | 320 |
| Alternative 6 | |
|---|---|
| Accuracy | 98.7% |
| Cost | 320 |
herbie shell --seed 2023147
; Herbie shell input for "ENA, Section 1.4, Exercise 4a":
; improve (x - sin(x)) / tan(x) in binary64 on the interval [-1, 1].
; The reference (target) implementation is the leading Taylor term x^2 / 6.
(FPCore (x)
:name "ENA, Section 1.4, Exercise 4a"
:precision binary64
:pre (and (<= -1.0 x) (<= x 1.0))
:herbie-target
(* 0.16666666666666666 (* x x))
(/ (- x (sin x)) (tan x)))