| Alternative 1 | |
|---|---|
| Accuracy | 99.6% |
| Cost | 20480 |
\[\left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right) + 0.16666666666666666 \cdot \left(x \cdot x\right)
\]
(FPCore (x) :precision binary64 (/ (- x (sin x)) (tan x)))
(FPCore (x)
:precision binary64
(+
(* x (* x 0.16666666666666666))
(+
(* -0.00023644179894179894 (pow x 8.0))
(+
(* -0.0007275132275132275 (pow x 6.0))
(* -0.06388888888888888 (pow x 4.0))))))

double code(double x) {
	// Original expression: (x - sin(x)) / tan(x).
	return (x - sin(x)) / tan(x);
}
double code(double x) {
return (x * (x * 0.16666666666666666)) + ((-0.00023644179894179894 * pow(x, 8.0)) + ((-0.0007275132275132275 * pow(x, 6.0)) + (-0.06388888888888888 * pow(x, 4.0))));
}
real(8) function code(x)
    ! Original expression: (x - sin(x)) / tan(x) in double precision.
    real(8), intent (in) :: x
    real(8) :: s, t
    s = sin(x)
    t = tan(x)
    code = (x - s) / t
end function
real(8) function code(x)
    ! Degree-8 Taylor polynomial of (x - sin(x)) / tan(x) about x = 0.
    ! Terms are combined in exactly the original order (smallest first).
    real(8), intent (in) :: x
    real(8) :: quadratic, term8, term6, term4
    quadratic = x * (x * 0.16666666666666666d0)
    term8 = (-0.00023644179894179894d0) * (x ** 8.0d0)
    term6 = (-0.0007275132275132275d0) * (x ** 6.0d0)
    term4 = (-0.06388888888888888d0) * (x ** 4.0d0)
    code = quadratic + (term8 + (term6 + term4))
end function
public static double code(double x) {
	// Original expression: (x - sin(x)) / tan(x).
	double numerator = x - Math.sin(x);
	double denominator = Math.tan(x);
	return numerator / denominator;
}
public static double code(double x) {
	// Degree-8 Taylor polynomial of (x - sin(x)) / tan(x) about x = 0.
	// Terms are summed in exactly the original order, so the result is
	// bit-identical to the single-expression form.
	double quadratic = x * (x * 0.16666666666666666);
	double degree8 = -0.00023644179894179894 * Math.pow(x, 8.0);
	double degree6 = -0.0007275132275132275 * Math.pow(x, 6.0);
	double degree4 = -0.06388888888888888 * Math.pow(x, 4.0);
	return quadratic + (degree8 + (degree6 + degree4));
}
def code(x):
    """Evaluate the original expression (x - sin(x)) / tan(x)."""
    numerator = x - math.sin(x)
    return numerator / math.tan(x)
def code(x):
    """Degree-8 Taylor polynomial of (x - sin(x)) / tan(x) about x = 0.

    Terms are combined in exactly the original order (smallest first),
    so the result is bit-identical to the single-expression form.
    """
    quadratic = x * (x * 0.16666666666666666)
    term8 = -0.00023644179894179894 * math.pow(x, 8.0)
    term6 = -0.0007275132275132275 * math.pow(x, 6.0)
    term4 = -0.06388888888888888 * math.pow(x, 4.0)
    return quadratic + (term8 + (term6 + term4))
function code(x)
    # Original expression: (x - sin(x)) / tan(x), with the same Float64
    # rounding points as the one-line form.
    numerator = Float64(x - sin(x))
    return Float64(numerator / tan(x))
end
function code(x)
    # Degree-8 Taylor polynomial of (x - sin(x)) / tan(x) about x = 0.
    # Every Float64(...) rounding point of the original is preserved.
    quadratic = Float64(x * Float64(x * 0.16666666666666666))
    term8 = Float64(-0.00023644179894179894 * (x ^ 8.0))
    term6 = Float64(-0.0007275132275132275 * (x ^ 6.0))
    term4 = Float64(-0.06388888888888888 * (x ^ 4.0))
    return Float64(quadratic + Float64(term8 + Float64(term6 + term4)))
end
function tmp = code(x)
	% Original expression: (x - sin(x)) / tan(x).
	numerator = x - sin(x);
	tmp = numerator / tan(x);
end
function tmp = code(x)
	% Degree-8 Taylor polynomial of (x - sin(x)) / tan(x) about x = 0.
	% Terms are summed in exactly the original order (smallest first).
	quadratic = x * (x * 0.16666666666666666);
	term8 = -0.00023644179894179894 * (x ^ 8.0);
	term6 = -0.0007275132275132275 * (x ^ 6.0);
	term4 = -0.06388888888888888 * (x ^ 4.0);
	tmp = quadratic + (term8 + (term6 + term4));
end
(* Original expression: (x - Sin[x]) / Tan[x], with each subexpression
   rounded to machine precision via N[..., $MachinePrecision] to mimic
   binary64 evaluation order. *)
code[x_] := N[(N[(x - N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[Tan[x], $MachinePrecision]), $MachinePrecision]
(* Degree-8 Taylor polynomial of (x - Sin[x]) / Tan[x] about x = 0; every
   intermediate is rounded via N[..., $MachinePrecision] to mimic binary64
   evaluation, summing the smallest-magnitude terms first. *)
code[x_] := N[(N[(x * N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + N[(N[(-0.00023644179894179894 * N[Power[x, 8.0], $MachinePrecision]), $MachinePrecision] + N[(N[(-0.0007275132275132275 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(-0.06388888888888888 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\frac{x - \sin x}{\tan x}
x \cdot \left(x \cdot 0.16666666666666666\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
Results
| Original | 53.5% |
|---|---|
| Target | 98.7% |
| Herbie | 99.6% |
Initial program 53.5%
Taylor expanded in x around 0 99.6%
Applied egg-rr 53.1%
[Start]99.6 | \[ 0.16666666666666666 \cdot {x}^{2} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
|---|---|
expm1-log1p-u [=>]99.6 | \[ \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(0.16666666666666666 \cdot {x}^{2}\right)\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
expm1-udef [=>]53.1 | \[ \color{blue}{\left(e^{\mathsf{log1p}\left(0.16666666666666666 \cdot {x}^{2}\right)} - 1\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
sub-neg [=>]53.1 | \[ \color{blue}{\left(e^{\mathsf{log1p}\left(0.16666666666666666 \cdot {x}^{2}\right)} + \left(-1\right)\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
unpow2 [=>]53.1 | \[ \left(e^{\mathsf{log1p}\left(0.16666666666666666 \cdot \color{blue}{\left(x \cdot x\right)}\right)} + \left(-1\right)\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
metadata-eval [=>]53.1 | \[ \left(e^{\mathsf{log1p}\left(0.16666666666666666 \cdot \left(x \cdot x\right)\right)} + \color{blue}{-1}\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
Simplified 99.6%
[Start]53.1 | \[ \left(e^{\mathsf{log1p}\left(0.16666666666666666 \cdot \left(x \cdot x\right)\right)} + -1\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
|---|---|
metadata-eval [<=]53.1 | \[ \left(e^{\mathsf{log1p}\left(0.16666666666666666 \cdot \left(x \cdot x\right)\right)} + \color{blue}{\left(-1\right)}\right) + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
sub-neg [<=]53.1 | \[ \color{blue}{\left(e^{\mathsf{log1p}\left(0.16666666666666666 \cdot \left(x \cdot x\right)\right)} - 1\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
expm1-def [=>]99.6 | \[ \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(0.16666666666666666 \cdot \left(x \cdot x\right)\right)\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
expm1-log1p [=>]99.6 | \[ \color{blue}{0.16666666666666666 \cdot \left(x \cdot x\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
Taylor expanded in x around 0 99.6%
Simplified 99.6%
[Start]99.6 | \[ 0.16666666666666666 \cdot {x}^{2} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
|---|---|
unpow2 [=>]99.6 | \[ 0.16666666666666666 \cdot \color{blue}{\left(x \cdot x\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
associate-*r* [=>]99.6 | \[ \color{blue}{\left(0.16666666666666666 \cdot x\right) \cdot x} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
*-commutative [=>]99.6 | \[ \color{blue}{x \cdot \left(0.16666666666666666 \cdot x\right)} + \left(-0.00023644179894179894 \cdot {x}^{8} + \left(-0.0007275132275132275 \cdot {x}^{6} + -0.06388888888888888 \cdot {x}^{4}\right)\right)
\] |
Final simplification 99.6%
| Alternative 1 | |
|---|---|
| Accuracy | 99.6% |
| Cost | 20480 |
| Alternative 2 | |
|---|---|
| Accuracy | 99.5% |
| Cost | 13760 |
| Alternative 3 | |
|---|---|
| Accuracy | 99.3% |
| Cost | 704 |
| Alternative 4 | |
|---|---|
| Accuracy | 98.7% |
| Cost | 320 |
| Alternative 5 | |
|---|---|
| Accuracy | 98.8% |
| Cost | 320 |
herbie shell --seed 2023137
(FPCore (x)
:name "ENA, Section 1.4, Exercise 4a"
:precision binary64
:pre (and (<= -1.0 x) (<= x 1.0))
:herbie-target
(* 0.16666666666666666 (* x x))
(/ (- x (sin x)) (tan x)))