ENA, Section 1.4, Exercise 4a

Percentage Accurate: 52.8% → 99.5%
Time: 15.7s
Alternatives: 5
Speedup: 41.0×

Specification

?
\[-1 \leq x \land x \leq 1\]
\[\begin{array}{l} \\ \frac{x - \sin x}{\tan x} \end{array} \]
(FPCore (x) :precision binary64 (/ (- x (sin x)) (tan x)))
double code(double x) {
	return (x - sin(x)) / tan(x);
}
! Computes (x - sin(x)) / tan(x) in double precision (binary64).
real(8) function code(x)
    real(8), intent (in) :: x
    code = (x - sin(x)) / tan(x)
end function
/** Computes (x - Math.sin(x)) / Math.tan(x) in double (binary64) arithmetic. */
public static double code(double x) {
	return (x - Math.sin(x)) / Math.tan(x);
}
def code(x):
	"""Evaluate (x - sin x) / tan x in binary64, same operation order as the original."""
	numerator = x - math.sin(x)
	denominator = math.tan(x)
	return numerator / denominator
# Computes (x - sin(x)) / tan(x), rounding intermediate results to Float64 (binary64).
function code(x)
	return Float64(Float64(x - sin(x)) / tan(x))
end
% Computes (x - sin(x)) / tan(x) in double precision (binary64).
function tmp = code(x)
	tmp = (x - sin(x)) / tan(x);
end
code[x_] := N[(N[(x - N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{x - \sin x}{\tan x}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 5 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 52.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{x - \sin x}{\tan x} \end{array} \]
(FPCore (x) :precision binary64 (/ (- x (sin x)) (tan x)))
double code(double x) {
	return (x - sin(x)) / tan(x);
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (x - sin(x)) / tan(x)
end function
public static double code(double x) {
	return (x - Math.sin(x)) / Math.tan(x);
}
def code(x):
	"""Initial program: (x - sin x) / tan x in binary64, original operation order."""
	numerator = x - math.sin(x)
	denominator = math.tan(x)
	return numerator / denominator
function code(x)
	return Float64(Float64(x - sin(x)) / tan(x))
end
function tmp = code(x)
	tmp = (x - sin(x)) / tan(x);
end
code[x_] := N[(N[(x - N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{x - \sin x}{\tan x}
\end{array}

Alternative 1: 99.5% accurate, 8.9× speedup?

\[\begin{array}{l} \\ \left(x \cdot x\right) \cdot \left(0.16666666666666666 + x \cdot \left(x \cdot \left(-0.06388888888888888 + x \cdot \left(x \cdot \left(-0.0007275132275132275 + \left(x \cdot x\right) \cdot -0.00023644179894179894\right)\right)\right)\right)\right) \end{array} \]
(FPCore (x)
 :precision binary64
 (*
  (* x x)
  (+
   0.16666666666666666
   (*
    x
    (*
     x
     (+
      -0.06388888888888888
      (*
       x
       (*
        x
        (+ -0.0007275132275132275 (* (* x x) -0.00023644179894179894))))))))))
double code(double x) {
	/* Even polynomial approximation of (x - sin x)/tan x near 0.
	   Every product and sum matches the original nesting, so the
	   binary64 result is bit-identical. */
	const double sq = x * x;
	const double t0 = -0.0007275132275132275 + sq * -0.00023644179894179894;
	const double t1 = -0.06388888888888888 + x * (x * t0);
	const double t2 = 0.16666666666666666 + x * (x * t1);
	return sq * t2;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (x * x) * (0.16666666666666666d0 + (x * (x * ((-0.06388888888888888d0) + (x * (x * ((-0.0007275132275132275d0) + ((x * x) * (-0.00023644179894179894d0)))))))))
end function
public static double code(double x) {
	return (x * x) * (0.16666666666666666 + (x * (x * (-0.06388888888888888 + (x * (x * (-0.0007275132275132275 + ((x * x) * -0.00023644179894179894))))))));
}
def code(x):
	"""Even polynomial for (x - sin x)/tan x near 0; same nesting order, bit-identical."""
	sq = x * x
	t0 = -0.0007275132275132275 + sq * -0.00023644179894179894
	t1 = -0.06388888888888888 + x * (x * t0)
	t2 = 0.16666666666666666 + x * (x * t1)
	return sq * t2
function code(x)
	return Float64(Float64(x * x) * Float64(0.16666666666666666 + Float64(x * Float64(x * Float64(-0.06388888888888888 + Float64(x * Float64(x * Float64(-0.0007275132275132275 + Float64(Float64(x * x) * -0.00023644179894179894)))))))))
end
function tmp = code(x)
	tmp = (x * x) * (0.16666666666666666 + (x * (x * (-0.06388888888888888 + (x * (x * (-0.0007275132275132275 + ((x * x) * -0.00023644179894179894))))))));
end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(0.16666666666666666 + N[(x * N[(x * N[(-0.06388888888888888 + N[(x * N[(x * N[(-0.0007275132275132275 + N[(N[(x * x), $MachinePrecision] * -0.00023644179894179894), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot x\right) \cdot \left(0.16666666666666666 + x \cdot \left(x \cdot \left(-0.06388888888888888 + x \cdot \left(x \cdot \left(-0.0007275132275132275 + \left(x \cdot x\right) \cdot -0.00023644179894179894\right)\right)\right)\right)\right)
\end{array}
Derivation
  1. Initial program 55.1%

    \[\frac{x - \sin x}{\tan x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(\frac{1}{6} + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) - \frac{23}{360}\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\left(\frac{1}{6} + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) - \frac{23}{360}\right)\right)}\right) \]
    2. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\color{blue}{\frac{1}{6}} + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) - \frac{23}{360}\right)\right)\right) \]
    3. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{1}{6}} + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) - \frac{23}{360}\right)\right)\right) \]
    4. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \color{blue}{\left({x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) - \frac{23}{360}\right)\right)}\right)\right) \]
    5. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \left(\left(x \cdot x\right) \cdot \left(\color{blue}{{x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right)} - \frac{23}{360}\right)\right)\right)\right) \]
    6. associate-*l*N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \left(x \cdot \color{blue}{\left(x \cdot \left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) - \frac{23}{360}\right)\right)}\right)\right)\right) \]
    7. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \color{blue}{\left(x \cdot \left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) - \frac{23}{360}\right)\right)}\right)\right)\right) \]
    8. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{\left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) - \frac{23}{360}\right)}\right)\right)\right)\right) \]
    9. sub-negN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) + \color{blue}{\left(\mathsf{neg}\left(\frac{23}{360}\right)\right)}\right)\right)\right)\right)\right) \]
    10. metadata-evalN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right) + \frac{-23}{360}\right)\right)\right)\right)\right) \]
    11. +-commutativeN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \left(\frac{-23}{360} + \color{blue}{{x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right)}\right)\right)\right)\right)\right) \]
    12. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-23}{360}, \color{blue}{\left({x}^{2} \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right)\right)}\right)\right)\right)\right)\right) \]
    13. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-23}{360}, \left(\left(x \cdot x\right) \cdot \left(\color{blue}{\frac{-143}{604800} \cdot {x}^{2}} - \frac{11}{15120}\right)\right)\right)\right)\right)\right)\right) \]
    14. associate-*l*N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-23}{360}, \left(x \cdot \color{blue}{\left(x \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right)\right)}\right)\right)\right)\right)\right)\right) \]
    15. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(x, \color{blue}{\left(x \cdot \left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right)\right)}\right)\right)\right)\right)\right)\right) \]
    16. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{-143}{604800} \cdot {x}^{2} - \frac{11}{15120}\right)}\right)\right)\right)\right)\right)\right)\right) \]
    17. sub-negN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \left(\frac{-143}{604800} \cdot {x}^{2} + \color{blue}{\left(\mathsf{neg}\left(\frac{11}{15120}\right)\right)}\right)\right)\right)\right)\right)\right)\right)\right) \]
    18. metadata-evalN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \left(\frac{-143}{604800} \cdot {x}^{2} + \frac{-11}{15120}\right)\right)\right)\right)\right)\right)\right)\right) \]
    19. +-commutativeN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \left(\frac{-11}{15120} + \color{blue}{\frac{-143}{604800} \cdot {x}^{2}}\right)\right)\right)\right)\right)\right)\right)\right) \]
    20. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{-11}{15120}, \color{blue}{\left(\frac{-143}{604800} \cdot {x}^{2}\right)}\right)\right)\right)\right)\right)\right)\right)\right) \]
  5. Simplified99.6%

    \[\leadsto \color{blue}{\left(x \cdot x\right) \cdot \left(0.16666666666666666 + x \cdot \left(x \cdot \left(-0.06388888888888888 + x \cdot \left(x \cdot \left(-0.0007275132275132275 + \left(x \cdot x\right) \cdot -0.00023644179894179894\right)\right)\right)\right)\right)} \]
  6. Add Preprocessing

Alternative 2: 99.5% accurate, 12.1× speedup?

\[\begin{array}{l} \\ x \cdot \left(x \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot \left(-0.06388888888888888 + \left(x \cdot x\right) \cdot -0.0007275132275132275\right)\right)\right) \end{array} \]
(FPCore (x)
 :precision binary64
 (*
  x
  (*
   x
   (+
    0.16666666666666666
    (* (* x x) (+ -0.06388888888888888 (* (* x x) -0.0007275132275132275)))))))
double code(double x) {
	/* Odd polynomial x^3*(1/6 + ...) for (x - sin x)/tan x near 0;
	   nesting order preserved, so the binary64 result is bit-identical. */
	const double sq = x * x;
	const double inner = -0.06388888888888888 + sq * -0.0007275132275132275;
	const double poly = 0.16666666666666666 + sq * inner;
	return x * (x * poly);
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = x * (x * (0.16666666666666666d0 + ((x * x) * ((-0.06388888888888888d0) + ((x * x) * (-0.0007275132275132275d0))))))
end function
public static double code(double x) {
	return x * (x * (0.16666666666666666 + ((x * x) * (-0.06388888888888888 + ((x * x) * -0.0007275132275132275)))));
}
def code(x):
	"""Polynomial for (x - sin x)/tan x near 0; same nesting order, bit-identical."""
	sq = x * x
	inner = -0.06388888888888888 + sq * -0.0007275132275132275
	poly = 0.16666666666666666 + sq * inner
	return x * (x * poly)
function code(x)
	return Float64(x * Float64(x * Float64(0.16666666666666666 + Float64(Float64(x * x) * Float64(-0.06388888888888888 + Float64(Float64(x * x) * -0.0007275132275132275))))))
end
function tmp = code(x)
	tmp = x * (x * (0.16666666666666666 + ((x * x) * (-0.06388888888888888 + ((x * x) * -0.0007275132275132275)))));
end
code[x_] := N[(x * N[(x * N[(0.16666666666666666 + N[(N[(x * x), $MachinePrecision] * N[(-0.06388888888888888 + N[(N[(x * x), $MachinePrecision] * -0.0007275132275132275), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \left(x \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot \left(-0.06388888888888888 + \left(x \cdot x\right) \cdot -0.0007275132275132275\right)\right)\right)
\end{array}
Derivation
  1. Initial program 55.1%

    \[\frac{x - \sin x}{\tan x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(\frac{1}{6} + {x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\left(\frac{1}{6} + {x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)}\right) \]
    2. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\color{blue}{\frac{1}{6}} + {x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)\right) \]
    3. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{1}{6}} + {x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)\right) \]
    4. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \color{blue}{\left({x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)}\right)\right) \]
    5. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)}\right)\right)\right) \]
    6. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\color{blue}{\frac{-11}{15120} \cdot {x}^{2}} - \frac{23}{360}\right)\right)\right)\right) \]
    7. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{-11}{15120} \cdot {x}^{2}} - \frac{23}{360}\right)\right)\right)\right) \]
    8. sub-negN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\frac{-11}{15120} \cdot {x}^{2} + \color{blue}{\left(\mathsf{neg}\left(\frac{23}{360}\right)\right)}\right)\right)\right)\right) \]
    9. metadata-evalN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\frac{-11}{15120} \cdot {x}^{2} + \frac{-23}{360}\right)\right)\right)\right) \]
    10. +-commutativeN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\frac{-23}{360} + \color{blue}{\frac{-11}{15120} \cdot {x}^{2}}\right)\right)\right)\right) \]
    11. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \color{blue}{\left(\frac{-11}{15120} \cdot {x}^{2}\right)}\right)\right)\right)\right) \]
    12. *-commutativeN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \left({x}^{2} \cdot \color{blue}{\frac{-11}{15120}}\right)\right)\right)\right)\right) \]
    13. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\frac{-11}{15120}}\right)\right)\right)\right)\right) \]
    14. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(\left(x \cdot x\right), \frac{-11}{15120}\right)\right)\right)\right)\right) \]
    15. *-lowering-*.f6499.6%

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \frac{-11}{15120}\right)\right)\right)\right)\right) \]
  5. Simplified99.6%

    \[\leadsto \color{blue}{\left(x \cdot x\right) \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot \left(-0.06388888888888888 + \left(x \cdot x\right) \cdot -0.0007275132275132275\right)\right)} \]
  6. Step-by-step derivation
    1. associate-*l*N/A

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left(\frac{1}{6} + \left(x \cdot x\right) \cdot \left(\frac{-23}{360} + \left(x \cdot x\right) \cdot \frac{-11}{15120}\right)\right)\right)} \]
    2. *-commutativeN/A

      \[\leadsto \left(x \cdot \left(\frac{1}{6} + \left(x \cdot x\right) \cdot \left(\frac{-23}{360} + \left(x \cdot x\right) \cdot \frac{-11}{15120}\right)\right)\right) \cdot \color{blue}{x} \]
    3. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\left(x \cdot \left(\frac{1}{6} + \left(x \cdot x\right) \cdot \left(\frac{-23}{360} + \left(x \cdot x\right) \cdot \frac{-11}{15120}\right)\right)\right), \color{blue}{x}\right) \]
    4. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \left(\frac{1}{6} + \left(x \cdot x\right) \cdot \left(\frac{-23}{360} + \left(x \cdot x\right) \cdot \frac{-11}{15120}\right)\right)\right), x\right) \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \left(\left(x \cdot x\right) \cdot \left(\frac{-23}{360} + \left(x \cdot x\right) \cdot \frac{-11}{15120}\right)\right)\right)\right), x\right) \]
    6. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\frac{-23}{360} + \left(x \cdot x\right) \cdot \frac{-11}{15120}\right)\right)\right)\right), x\right) \]
    7. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\frac{-23}{360} + \left(x \cdot x\right) \cdot \frac{-11}{15120}\right)\right)\right)\right), x\right) \]
    8. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \left(\left(x \cdot x\right) \cdot \frac{-11}{15120}\right)\right)\right)\right)\right), x\right) \]
    9. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(\left(x \cdot x\right), \frac{-11}{15120}\right)\right)\right)\right)\right), x\right) \]
    10. *-lowering-*.f6499.6%

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \frac{-11}{15120}\right)\right)\right)\right)\right), x\right) \]
  7. Applied egg-rr99.6%

    \[\leadsto \color{blue}{\left(x \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot \left(-0.06388888888888888 + \left(x \cdot x\right) \cdot -0.0007275132275132275\right)\right)\right) \cdot x} \]
  8. Final simplification99.6%

    \[\leadsto x \cdot \left(x \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot \left(-0.06388888888888888 + \left(x \cdot x\right) \cdot -0.0007275132275132275\right)\right)\right) \]
  9. Add Preprocessing

Alternative 3: 99.4% accurate, 12.1× speedup?

\[\begin{array}{l} \\ \left(x \cdot x\right) \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot \left(-0.06388888888888888 + \left(x \cdot x\right) \cdot -0.0007275132275132275\right)\right) \end{array} \]
(FPCore (x)
 :precision binary64
 (*
  (* x x)
  (+
   0.16666666666666666
   (* (* x x) (+ -0.06388888888888888 (* (* x x) -0.0007275132275132275))))))
double code(double x) {
	/* Even polynomial in x^2 for (x - sin x)/tan x near 0;
	   nesting order preserved, so the binary64 result is bit-identical. */
	const double sq = x * x;
	const double inner = -0.06388888888888888 + sq * -0.0007275132275132275;
	return sq * (0.16666666666666666 + sq * inner);
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (x * x) * (0.16666666666666666d0 + ((x * x) * ((-0.06388888888888888d0) + ((x * x) * (-0.0007275132275132275d0)))))
end function
public static double code(double x) {
	return (x * x) * (0.16666666666666666 + ((x * x) * (-0.06388888888888888 + ((x * x) * -0.0007275132275132275))));
}
def code(x):
	"""Even polynomial in x*x for (x - sin x)/tan x near 0; same order, bit-identical."""
	sq = x * x
	inner = -0.06388888888888888 + sq * -0.0007275132275132275
	return sq * (0.16666666666666666 + sq * inner)
function code(x)
	return Float64(Float64(x * x) * Float64(0.16666666666666666 + Float64(Float64(x * x) * Float64(-0.06388888888888888 + Float64(Float64(x * x) * -0.0007275132275132275)))))
end
function tmp = code(x)
	tmp = (x * x) * (0.16666666666666666 + ((x * x) * (-0.06388888888888888 + ((x * x) * -0.0007275132275132275))));
end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(0.16666666666666666 + N[(N[(x * x), $MachinePrecision] * N[(-0.06388888888888888 + N[(N[(x * x), $MachinePrecision] * -0.0007275132275132275), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot x\right) \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot \left(-0.06388888888888888 + \left(x \cdot x\right) \cdot -0.0007275132275132275\right)\right)
\end{array}
Derivation
  1. Initial program 55.1%

    \[\frac{x - \sin x}{\tan x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(\frac{1}{6} + {x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\left(\frac{1}{6} + {x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)}\right) \]
    2. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\color{blue}{\frac{1}{6}} + {x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)\right) \]
    3. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{1}{6}} + {x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)\right) \]
    4. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \color{blue}{\left({x}^{2} \cdot \left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)\right)}\right)\right) \]
    5. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\left(\frac{-11}{15120} \cdot {x}^{2} - \frac{23}{360}\right)}\right)\right)\right) \]
    6. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\color{blue}{\frac{-11}{15120} \cdot {x}^{2}} - \frac{23}{360}\right)\right)\right)\right) \]
    7. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{-11}{15120} \cdot {x}^{2}} - \frac{23}{360}\right)\right)\right)\right) \]
    8. sub-negN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\frac{-11}{15120} \cdot {x}^{2} + \color{blue}{\left(\mathsf{neg}\left(\frac{23}{360}\right)\right)}\right)\right)\right)\right) \]
    9. metadata-evalN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\frac{-11}{15120} \cdot {x}^{2} + \frac{-23}{360}\right)\right)\right)\right) \]
    10. +-commutativeN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\frac{-23}{360} + \color{blue}{\frac{-11}{15120} \cdot {x}^{2}}\right)\right)\right)\right) \]
    11. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \color{blue}{\left(\frac{-11}{15120} \cdot {x}^{2}\right)}\right)\right)\right)\right) \]
    12. *-commutativeN/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \left({x}^{2} \cdot \color{blue}{\frac{-11}{15120}}\right)\right)\right)\right)\right) \]
    13. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\frac{-11}{15120}}\right)\right)\right)\right)\right) \]
    14. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(\left(x \cdot x\right), \frac{-11}{15120}\right)\right)\right)\right)\right) \]
    15. *-lowering-*.f6499.6%

      \[\leadsto \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{-23}{360}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \frac{-11}{15120}\right)\right)\right)\right)\right) \]
  5. Simplified99.6%

    \[\leadsto \color{blue}{\left(x \cdot x\right) \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot \left(-0.06388888888888888 + \left(x \cdot x\right) \cdot -0.0007275132275132275\right)\right)} \]
  6. Add Preprocessing

Alternative 4: 99.3% accurate, 18.6× speedup?

\[\begin{array}{l} \\ x \cdot \left(x \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot -0.06388888888888888\right)\right) \end{array} \]
(FPCore (x)
 :precision binary64
 (* x (* x (+ 0.16666666666666666 (* (* x x) -0.06388888888888888)))))
double code(double x) {
	/* Two-term polynomial for (x - sin x)/tan x near 0;
	   nesting order preserved, so the binary64 result is bit-identical. */
	const double sq = x * x;
	const double poly = 0.16666666666666666 + sq * -0.06388888888888888;
	return x * (x * poly);
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = x * (x * (0.16666666666666666d0 + ((x * x) * (-0.06388888888888888d0))))
end function
public static double code(double x) {
	return x * (x * (0.16666666666666666 + ((x * x) * -0.06388888888888888)));
}
def code(x):
	"""Two-term polynomial for (x - sin x)/tan x near 0; same order, bit-identical."""
	sq = x * x
	poly = 0.16666666666666666 + sq * -0.06388888888888888
	return x * (x * poly)
function code(x)
	return Float64(x * Float64(x * Float64(0.16666666666666666 + Float64(Float64(x * x) * -0.06388888888888888))))
end
function tmp = code(x)
	tmp = x * (x * (0.16666666666666666 + ((x * x) * -0.06388888888888888)));
end
code[x_] := N[(x * N[(x * N[(0.16666666666666666 + N[(N[(x * x), $MachinePrecision] * -0.06388888888888888), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \left(x \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot -0.06388888888888888\right)\right)
\end{array}
Derivation
  1. Initial program 55.1%

    \[\frac{x - \sin x}{\tan x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(\frac{1}{6} + \frac{-23}{360} \cdot {x}^{2}\right)} \]
  4. Step-by-step derivation
    1. unpow2N/A

      \[\leadsto \left(x \cdot x\right) \cdot \left(\color{blue}{\frac{1}{6}} + \frac{-23}{360} \cdot {x}^{2}\right) \]
    2. associate-*l*N/A

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left(\frac{1}{6} + \frac{-23}{360} \cdot {x}^{2}\right)\right)} \]
    3. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(x \cdot \left(\frac{1}{6} + \frac{-23}{360} \cdot {x}^{2}\right)\right)}\right) \]
    4. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{6} + \frac{-23}{360} \cdot {x}^{2}\right)}\right)\right) \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \color{blue}{\left(\frac{-23}{360} \cdot {x}^{2}\right)}\right)\right)\right) \]
    6. *-commutativeN/A

      \[\leadsto \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \left({x}^{2} \cdot \color{blue}{\frac{-23}{360}}\right)\right)\right)\right) \]
    7. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\frac{-23}{360}}\right)\right)\right)\right) \]
    8. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\left(x \cdot x\right), \frac{-23}{360}\right)\right)\right)\right) \]
    9. *-lowering-*.f6499.4%

      \[\leadsto \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \frac{-23}{360}\right)\right)\right)\right) \]
  5. Simplified99.4%

    \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(0.16666666666666666 + \left(x \cdot x\right) \cdot -0.06388888888888888\right)\right)} \]
  6. Add Preprocessing

Alternative 5: 98.7% accurate, 41.0× speedup?

\[\begin{array}{l} \\ \left(x \cdot x\right) \cdot 0.16666666666666666 \end{array} \]
(FPCore (x) :precision binary64 (* (* x x) 0.16666666666666666))
double code(double x) {
	/* Leading Taylor term x^2/6 of (x - sin x)/tan x around 0;
	   same operation order as the original, bit-identical. */
	const double sq = x * x;
	return sq * 0.16666666666666666;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (x * x) * 0.16666666666666666d0
end function
public static double code(double x) {
	return (x * x) * 0.16666666666666666;
}
def code(x):
	"""Leading Taylor term x*x/6 of (x - sin x)/tan x; same order, bit-identical."""
	sq = x * x
	return sq * 0.16666666666666666
function code(x)
	return Float64(Float64(x * x) * 0.16666666666666666)
end
function tmp = code(x)
	tmp = (x * x) * 0.16666666666666666;
end
code[x_] := N[(N[(x * x), $MachinePrecision] * 0.16666666666666666), $MachinePrecision]
\begin{array}{l}

\\
\left(x \cdot x\right) \cdot 0.16666666666666666
\end{array}
Derivation
  1. Initial program 55.1%

    \[\frac{x - \sin x}{\tan x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{\frac{1}{6} \cdot {x}^{2}} \]
  4. Step-by-step derivation
    1. *-lowering-*.f64N/A

      \[\leadsto \mathsf{*.f64}\left(\frac{1}{6}, \color{blue}{\left({x}^{2}\right)}\right) \]
    2. unpow2N/A

      \[\leadsto \mathsf{*.f64}\left(\frac{1}{6}, \left(x \cdot \color{blue}{x}\right)\right) \]
    3. *-lowering-*.f6498.9%

      \[\leadsto \mathsf{*.f64}\left(\frac{1}{6}, \mathsf{*.f64}\left(x, \color{blue}{x}\right)\right) \]
  5. Simplified98.9%

    \[\leadsto \color{blue}{0.16666666666666666 \cdot \left(x \cdot x\right)} \]
  6. Final simplification98.9%

    \[\leadsto \left(x \cdot x\right) \cdot 0.16666666666666666 \]
  7. Add Preprocessing

Developer Target 1: 98.7% accurate, 41.0× speedup?

\[\begin{array}{l} \\ 0.16666666666666666 \cdot \left(x \cdot x\right) \end{array} \]
(FPCore (x) :precision binary64 (* 0.16666666666666666 (* x x)))
double code(double x) {
	/* Developer target: (1/6) * x^2, constant-first multiplication
	   preserved so the binary64 result is bit-identical. */
	const double sq = x * x;
	return 0.16666666666666666 * sq;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = 0.16666666666666666d0 * (x * x)
end function
public static double code(double x) {
	return 0.16666666666666666 * (x * x);
}
def code(x):
	"""Developer target: (1/6) * x * x with constant-first order preserved, bit-identical."""
	sq = x * x
	return 0.16666666666666666 * sq
function code(x)
	return Float64(0.16666666666666666 * Float64(x * x))
end
function tmp = code(x)
	tmp = 0.16666666666666666 * (x * x);
end
code[x_] := N[(0.16666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
0.16666666666666666 \cdot \left(x \cdot x\right)
\end{array}

Reproduce

?
herbie shell --seed 2024192 
(FPCore (x)
  :name "ENA, Section 1.4, Exercise 4a"
  :precision binary64
  :pre (and (<= -1.0 x) (<= x 1.0))

  :alt
  (! :herbie-platform default (* 1/6 (* x x)))

  (/ (- x (sin x)) (tan x)))