tanhf (example 3.4)

Percentage Accurate: 52.6% → 100.0%
Time: 7.4s
Alternatives: 8
Speedup: 25.6×

Specification

?
\[\begin{array}{l} \\ \frac{1 - \cos x}{\sin x} \end{array} \]
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (sin x)))
double code(double x) {
	/* (1 - cos x) / sin x; the numerator cancels catastrophically near x = 0 (report: 52.6% accurate). */
	return (1.0 - cos(x)) / sin(x);
}
real(8) function code(x)
    ! (1 - cos x) / sin x in double precision; inaccurate near x = 0 due to cancellation.
    real(8), intent (in) :: x
    code = (1.0d0 - cos(x)) / sin(x)
end function
public static double code(double x) {
	// (1 - cos x) / sin x; loses accuracy near x = 0 due to cancellation in the numerator.
	return (1.0 - Math.cos(x)) / Math.sin(x);
}
def code(x):
	"""Double-precision evaluation of (1 - cos x) / sin x (naive form; cancels near x = 0)."""
	numer = 1.0 - math.cos(x)
	denom = math.sin(x)
	return numer / denom
function code(x)
	# (1 - cos x) / sin x, with each intermediate rounded to Float64.
	return Float64(Float64(1.0 - cos(x)) / sin(x))
end
function tmp = code(x)
	% (1 - cos x) / sin x; inaccurate near x = 0 due to cancellation.
	tmp = (1.0 - cos(x)) / sin(x);
end
(* (1 - cos x) / sin x evaluated at machine precision; inaccurate near x = 0. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[Sin[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1 - \cos x}{\sin x}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 52.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{1 - \cos x}{\sin x} \end{array} \]
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (sin x)))
double code(double x) {
	/* Initial program: (1 - cos x) / sin x; cancels catastrophically near x = 0 (52.6% accurate). */
	return (1.0 - cos(x)) / sin(x);
}
real(8) function code(x)
    ! Initial program: (1 - cos x) / sin x; inaccurate near x = 0 due to cancellation.
    real(8), intent (in) :: x
    code = (1.0d0 - cos(x)) / sin(x)
end function
public static double code(double x) {
	// Initial program: (1 - cos x) / sin x; loses accuracy near x = 0 due to cancellation.
	return (1.0 - Math.cos(x)) / Math.sin(x);
}
def code(x):
	"""Initial program: (1 - cos x) / sin x, evaluated naively in double precision."""
	numerator = 1.0 - math.cos(x)
	return numerator / math.sin(x)
function code(x)
	# Initial program: (1 - cos x) / sin x, rounded to Float64 at each step.
	return Float64(Float64(1.0 - cos(x)) / sin(x))
end
function tmp = code(x)
	% Initial program: (1 - cos x) / sin x; inaccurate near x = 0.
	tmp = (1.0 - cos(x)) / sin(x);
end
(* Initial program: (1 - cos x) / sin x at machine precision. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[Sin[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1 - \cos x}{\sin x}
\end{array}

Alternative 1: 100.0% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \tan \left(\frac{x}{2}\right) \end{array} \]
(FPCore (x) :precision binary64 (tan (/ x 2.0)))
double code(double x) {
	/* tan(x/2): half-angle rewrite of (1 - cos x) / sin x; avoids the cancellation (100% accurate). */
	return tan((x / 2.0));
}
real(8) function code(x)
    ! tan(x/2): half-angle rewrite of (1 - cos x) / sin x, avoiding cancellation.
    real(8), intent (in) :: x
    code = tan((x / 2.0d0))
end function
public static double code(double x) {
	// tan(x/2): half-angle rewrite of (1 - cos x) / sin x, avoiding cancellation.
	return Math.tan((x / 2.0));
}
def code(x):
	"""tan(x/2): half-angle rewrite of (1 - cos x) / sin x, avoiding cancellation."""
	half_angle = x / 2.0
	return math.tan(half_angle)
function code(x)
	# tan(x/2): half-angle rewrite of (1 - cos x) / sin x.
	return tan(Float64(x / 2.0))
end
function tmp = code(x)
	% tan(x/2): half-angle rewrite of (1 - cos x) / sin x.
	tmp = tan((x / 2.0));
end
(* tan(x/2): half-angle rewrite of (1 - cos x) / sin x at machine precision. *)
code[x_] := N[Tan[N[(x / 2.0), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\tan \left(\frac{x}{2}\right)
\end{array}
Derivation
  1. Initial program 60.9%

    \[\frac{1 - \cos x}{\sin x} \]
  2. Step-by-step derivation
    1. hang-p0-tanN/A

      \[\leadsto \tan \left(\frac{x}{2}\right) \]
    2. tan-lowering-tan.f64N/A

      \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
    3. /-lowering-/.f64100.0%

      \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
  3. Simplified100.0%

    \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
  4. Add Preprocessing
  5. Add Preprocessing

Alternative 2: 53.5% accurate, 7.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 3.2:\\ \;\;\;\;\frac{x}{2} + x \cdot \left(\left(x \cdot x\right) \cdot \left(0.041666666666666664 + x \cdot \left(x \cdot \left(0.004166666666666667 + \left(x \cdot x\right) \cdot 0.00042162698412698415\right)\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x 3.2)
   (+
    (/ x 2.0)
    (*
     x
     (*
      (* x x)
      (+
       0.041666666666666664
       (*
        x
        (* x (+ 0.004166666666666667 (* (* x x) 0.00042162698412698415))))))))
   1.0))
/* Piecewise tan(x/2): degree-7 Taylor polynomial for x <= 3.2, constant 1 otherwise. */
double code(double x) {
	double tmp;
	if (x <= 3.2) {
		/* x/2 + x^3/24 + x^5/240 + (17/40320) x^7 — Taylor series of tan(x/2) around 0. */
		tmp = (x / 2.0) + (x * ((x * x) * (0.041666666666666664 + (x * (x * (0.004166666666666667 + ((x * x) * 0.00042162698412698415)))))));
	} else {
		/* NOTE(review): crude constant fallback; report marks this regime low-accuracy. */
		tmp = 1.0;
	}
	return tmp;
}
real(8) function code(x)
    ! Piecewise tan(x/2): degree-7 Taylor polynomial for x <= 3.2, constant 1 otherwise.
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 3.2d0) then
        ! x/2 + x^3/24 + x^5/240 + (17/40320) x^7
        tmp = (x / 2.0d0) + (x * ((x * x) * (0.041666666666666664d0 + (x * (x * (0.004166666666666667d0 + ((x * x) * 0.00042162698412698415d0)))))))
    else
        tmp = 1.0d0
    end if
    code = tmp
end function
// Piecewise tan(x/2): degree-7 Taylor polynomial for x <= 3.2, constant 1 otherwise.
public static double code(double x) {
	double tmp;
	if (x <= 3.2) {
		// x/2 + x^3/24 + x^5/240 + (17/40320) x^7 — Taylor series of tan(x/2) around 0.
		tmp = (x / 2.0) + (x * ((x * x) * (0.041666666666666664 + (x * (x * (0.004166666666666667 + ((x * x) * 0.00042162698412698415)))))));
	} else {
		tmp = 1.0;
	}
	return tmp;
}
def code(x):
	"""Piecewise tan(x/2): degree-7 Taylor polynomial for x <= 3.2, constant 1 otherwise."""
	if x <= 3.2:
		# x/2 + x^3/24 + x^5/240 + (17/40320) x^7 — Taylor series of tan(x/2) around 0.
		return (x / 2.0) + (x * ((x * x) * (0.041666666666666664 + (x * (x * (0.004166666666666667 + ((x * x) * 0.00042162698412698415)))))))
	return 1.0
function code(x)
	# Piecewise tan(x/2): degree-7 Taylor polynomial for x <= 3.2, constant 1 otherwise.
	tmp = 0.0
	if (x <= 3.2)
		tmp = Float64(Float64(x / 2.0) + Float64(x * Float64(Float64(x * x) * Float64(0.041666666666666664 + Float64(x * Float64(x * Float64(0.004166666666666667 + Float64(Float64(x * x) * 0.00042162698412698415))))))));
	else
		tmp = 1.0;
	end
	return tmp
end
function tmp_2 = code(x)
	% Piecewise tan(x/2): degree-7 Taylor polynomial for x <= 3.2, constant 1 otherwise.
	tmp = 0.0;
	if (x <= 3.2)
		tmp = (x / 2.0) + (x * ((x * x) * (0.041666666666666664 + (x * (x * (0.004166666666666667 + ((x * x) * 0.00042162698412698415)))))));
	else
		tmp = 1.0;
	end
	tmp_2 = tmp;
end
(* Piecewise tan(x/2): degree-7 Taylor polynomial for x <= 3.2, constant 1 otherwise. *)
code[x_] := If[LessEqual[x, 3.2], N[(N[(x / 2.0), $MachinePrecision] + N[(x * N[(N[(x * x), $MachinePrecision] * N[(0.041666666666666664 + N[(x * N[(x * N[(0.004166666666666667 + N[(N[(x * x), $MachinePrecision] * 0.00042162698412698415), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 3.2:\\
\;\;\;\;\frac{x}{2} + x \cdot \left(\left(x \cdot x\right) \cdot \left(0.041666666666666664 + x \cdot \left(x \cdot \left(0.004166666666666667 + \left(x \cdot x\right) \cdot 0.00042162698412698415\right)\right)\right)\right)\\

\mathbf{else}:\\
\;\;\;\;1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 3.2000000000000002

    1. Initial program 44.1%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(\frac{1}{2} + {x}^{2} \cdot \left(\frac{1}{24} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)} \]
    6. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{2} + {x}^{2} \cdot \left(\frac{1}{24} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)}\right) \]
      2. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{24} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)}\right)\right) \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\left(\frac{1}{24} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)}\right)\right)\right) \]
      4. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\color{blue}{\frac{1}{24}} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)\right)\right) \]
      5. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{1}{24}} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)\right)\right) \]
      6. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)}\right)\right)\right)\right) \]
      7. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)}\right)\right)\right)\right)\right) \]
      8. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\color{blue}{\frac{1}{240}} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)\right)\right)\right) \]
      9. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{1}{240}} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)\right)\right)\right) \]
      10. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \color{blue}{\left(\frac{17}{40320} \cdot {x}^{2}\right)}\right)\right)\right)\right)\right)\right) \]
      11. *-commutativeN/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \left({x}^{2} \cdot \color{blue}{\frac{17}{40320}}\right)\right)\right)\right)\right)\right)\right) \]
      12. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\frac{17}{40320}}\right)\right)\right)\right)\right)\right)\right) \]
      13. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \mathsf{*.f64}\left(\left(x \cdot x\right), \frac{17}{40320}\right)\right)\right)\right)\right)\right)\right) \]
      14. *-lowering-*.f6461.4%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \frac{17}{40320}\right)\right)\right)\right)\right)\right)\right) \]
    7. Simplified61.4%

      \[\leadsto \color{blue}{x \cdot \left(0.5 + \left(x \cdot x\right) \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot \left(0.004166666666666667 + \left(x \cdot x\right) \cdot 0.00042162698412698415\right)\right)\right)} \]
    8. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \frac{1}{2} \cdot x + \color{blue}{\left(\left(x \cdot x\right) \cdot \left(\frac{1}{24} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)\right) \cdot x} \]
      2. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot x + x \cdot \color{blue}{\left(\left(x \cdot x\right) \cdot \left(\frac{1}{24} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)\right)} \]
      3. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\left(\frac{1}{2} \cdot x\right), \color{blue}{\left(x \cdot \left(\left(x \cdot x\right) \cdot \left(\frac{1}{24} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)\right)\right)}\right) \]
      4. metadata-evalN/A

        \[\leadsto \mathsf{+.f64}\left(\left(\frac{1}{2} \cdot x\right), \left(x \cdot \left(\left(x \cdot x\right) \cdot \left(\frac{1}{24} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)\right)\right)\right) \]
      5. associate-/r/N/A

        \[\leadsto \mathsf{+.f64}\left(\left(\frac{1}{\frac{2}{x}}\right), \left(\color{blue}{x} \cdot \left(\left(x \cdot x\right) \cdot \left(\frac{1}{24} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)\right)\right)\right) \]
      6. clear-numN/A

        \[\leadsto \mathsf{+.f64}\left(\left(\frac{x}{2}\right), \left(\color{blue}{x} \cdot \left(\left(x \cdot x\right) \cdot \left(\frac{1}{24} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)\right)\right)\right) \]
      7. /-lowering-/.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \left(\color{blue}{x} \cdot \left(\left(x \cdot x\right) \cdot \left(\frac{1}{24} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)\right)\right)\right) \]
      8. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \color{blue}{\left(\left(x \cdot x\right) \cdot \left(\frac{1}{24} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)\right)}\right)\right) \]
      9. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(\left(x \cdot x\right), \color{blue}{\left(\frac{1}{24} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)}\right)\right)\right) \]
      10. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{1}{24}} + \left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)\right)\right)\right) \]
      11. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \color{blue}{\left(\left(x \cdot x\right) \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)}\right)\right)\right)\right) \]
      12. associate-*l*N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \left(x \cdot \color{blue}{\left(x \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)}\right)\right)\right)\right)\right) \]
      13. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(x, \color{blue}{\left(x \cdot \left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)\right)}\right)\right)\right)\right)\right) \]
      14. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{240} + \left(x \cdot x\right) \cdot \frac{17}{40320}\right)}\right)\right)\right)\right)\right)\right) \]
      15. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{240}, \color{blue}{\left(\left(x \cdot x\right) \cdot \frac{17}{40320}\right)}\right)\right)\right)\right)\right)\right)\right) \]
      16. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{240}, \mathsf{*.f64}\left(\left(x \cdot x\right), \color{blue}{\frac{17}{40320}}\right)\right)\right)\right)\right)\right)\right)\right) \]
      17. *-lowering-*.f6461.4%

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{240}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \frac{17}{40320}\right)\right)\right)\right)\right)\right)\right)\right) \]
    9. Applied egg-rr61.4%

      \[\leadsto \color{blue}{\frac{x}{2} + x \cdot \left(\left(x \cdot x\right) \cdot \left(0.041666666666666664 + x \cdot \left(x \cdot \left(0.004166666666666667 + \left(x \cdot x\right) \cdot 0.00042162698412698415\right)\right)\right)\right)} \]

    if 3.2000000000000002 < x

    1. Initial program 98.5%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Applied egg-rr10.3%

      \[\leadsto \color{blue}{{1}^{-0.5}} \]
    6. Step-by-step derivation
      1. pow-base-110.3%

        \[\leadsto 1 \]
    7. Applied egg-rr10.3%

      \[\leadsto \color{blue}{1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 3: 53.5% accurate, 7.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 3.2:\\ \;\;\;\;x \cdot \left(0.5 + \left(x \cdot x\right) \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot \left(0.004166666666666667 + \left(x \cdot x\right) \cdot 0.00042162698412698415\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x 3.2)
   (*
    x
    (+
     0.5
     (*
      (* x x)
      (+
       0.041666666666666664
       (*
        (* x x)
        (+ 0.004166666666666667 (* (* x x) 0.00042162698412698415)))))))
   1.0))
/* Piecewise tan(x/2): Horner-form degree-7 Taylor polynomial for x <= 3.2, constant 1 otherwise. */
double code(double x) {
	double tmp;
	if (x <= 3.2) {
		/* x (1/2 + x^2 (1/24 + x^2 (1/240 + (17/40320) x^2))) */
		tmp = x * (0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * (0.004166666666666667 + ((x * x) * 0.00042162698412698415))))));
	} else {
		tmp = 1.0;
	}
	return tmp;
}
real(8) function code(x)
    ! Piecewise tan(x/2): Horner-form degree-7 Taylor polynomial for x <= 3.2, else 1.
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 3.2d0) then
        ! x (1/2 + x^2 (1/24 + x^2 (1/240 + (17/40320) x^2)))
        tmp = x * (0.5d0 + ((x * x) * (0.041666666666666664d0 + ((x * x) * (0.004166666666666667d0 + ((x * x) * 0.00042162698412698415d0))))))
    else
        tmp = 1.0d0
    end if
    code = tmp
end function
// Piecewise tan(x/2): Horner-form degree-7 Taylor polynomial for x <= 3.2, constant 1 otherwise.
public static double code(double x) {
	double tmp;
	if (x <= 3.2) {
		// x (1/2 + x^2 (1/24 + x^2 (1/240 + (17/40320) x^2)))
		tmp = x * (0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * (0.004166666666666667 + ((x * x) * 0.00042162698412698415))))));
	} else {
		tmp = 1.0;
	}
	return tmp;
}
def code(x):
	"""Piecewise tan(x/2): Horner-form degree-7 Taylor polynomial for x <= 3.2, else 1."""
	if x <= 3.2:
		# x (1/2 + x^2 (1/24 + x^2 (1/240 + (17/40320) x^2)))
		return x * (0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * (0.004166666666666667 + ((x * x) * 0.00042162698412698415))))))
	return 1.0
function code(x)
	# Piecewise tan(x/2): Horner-form degree-7 Taylor polynomial for x <= 3.2, else 1.
	tmp = 0.0
	if (x <= 3.2)
		tmp = Float64(x * Float64(0.5 + Float64(Float64(x * x) * Float64(0.041666666666666664 + Float64(Float64(x * x) * Float64(0.004166666666666667 + Float64(Float64(x * x) * 0.00042162698412698415)))))));
	else
		tmp = 1.0;
	end
	return tmp
end
function tmp_2 = code(x)
	% Piecewise tan(x/2): Horner-form degree-7 Taylor polynomial for x <= 3.2, else 1.
	tmp = 0.0;
	if (x <= 3.2)
		tmp = x * (0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * (0.004166666666666667 + ((x * x) * 0.00042162698412698415))))));
	else
		tmp = 1.0;
	end
	tmp_2 = tmp;
end
(* Piecewise tan(x/2): Horner-form degree-7 Taylor polynomial for x <= 3.2, else 1. *)
code[x_] := If[LessEqual[x, 3.2], N[(x * N[(0.5 + N[(N[(x * x), $MachinePrecision] * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * N[(0.004166666666666667 + N[(N[(x * x), $MachinePrecision] * 0.00042162698412698415), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 3.2:\\
\;\;\;\;x \cdot \left(0.5 + \left(x \cdot x\right) \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot \left(0.004166666666666667 + \left(x \cdot x\right) \cdot 0.00042162698412698415\right)\right)\right)\\

\mathbf{else}:\\
\;\;\;\;1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 3.2000000000000002

    1. Initial program 44.1%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(\frac{1}{2} + {x}^{2} \cdot \left(\frac{1}{24} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)} \]
    6. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{2} + {x}^{2} \cdot \left(\frac{1}{24} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)}\right) \]
      2. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{24} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)}\right)\right) \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\left(\frac{1}{24} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)}\right)\right)\right) \]
      4. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\color{blue}{\frac{1}{24}} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)\right)\right) \]
      5. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{1}{24}} + {x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)\right)\right) \]
      6. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)\right)}\right)\right)\right)\right) \]
      7. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\left(\frac{1}{240} + \frac{17}{40320} \cdot {x}^{2}\right)}\right)\right)\right)\right)\right) \]
      8. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\left(x \cdot x\right), \left(\color{blue}{\frac{1}{240}} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)\right)\right)\right) \]
      9. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \left(\color{blue}{\frac{1}{240}} + \frac{17}{40320} \cdot {x}^{2}\right)\right)\right)\right)\right)\right) \]
      10. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \color{blue}{\left(\frac{17}{40320} \cdot {x}^{2}\right)}\right)\right)\right)\right)\right)\right) \]
      11. *-commutativeN/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \left({x}^{2} \cdot \color{blue}{\frac{17}{40320}}\right)\right)\right)\right)\right)\right)\right) \]
      12. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\frac{17}{40320}}\right)\right)\right)\right)\right)\right)\right) \]
      13. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \mathsf{*.f64}\left(\left(x \cdot x\right), \frac{17}{40320}\right)\right)\right)\right)\right)\right)\right) \]
      14. *-lowering-*.f6461.4%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \mathsf{+.f64}\left(\frac{1}{240}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \frac{17}{40320}\right)\right)\right)\right)\right)\right)\right) \]
    7. Simplified61.4%

      \[\leadsto \color{blue}{x \cdot \left(0.5 + \left(x \cdot x\right) \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot \left(0.004166666666666667 + \left(x \cdot x\right) \cdot 0.00042162698412698415\right)\right)\right)} \]

    if 3.2000000000000002 < x

    1. Initial program 98.5%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Applied egg-rr10.3%

      \[\leadsto \color{blue}{{1}^{-0.5}} \]
    6. Step-by-step derivation
      1. pow-base-110.3%

        \[\leadsto 1 \]
    7. Applied egg-rr10.3%

      \[\leadsto \color{blue}{1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 4: 53.4% accurate, 10.2× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 3.2:\\ \;\;\;\;x \cdot \left(0.5 + x \cdot \left(x \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot 0.004166666666666667\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x 3.2)
   (*
    x
    (+
     0.5
     (* x (* x (+ 0.041666666666666664 (* (* x x) 0.004166666666666667))))))
   1.0))
/* Piecewise tan(x/2): degree-5 Taylor polynomial for x <= 3.2, constant 1 otherwise. */
double code(double x) {
	double tmp;
	if (x <= 3.2) {
		/* x (1/2 + x^2 (1/24 + x^2/240)) — shorter truncation than Alternatives 2/3. */
		tmp = x * (0.5 + (x * (x * (0.041666666666666664 + ((x * x) * 0.004166666666666667)))));
	} else {
		tmp = 1.0;
	}
	return tmp;
}
real(8) function code(x)
    ! Piecewise tan(x/2): degree-5 Taylor polynomial for x <= 3.2, else 1.
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 3.2d0) then
        ! x (1/2 + x^2 (1/24 + x^2/240))
        tmp = x * (0.5d0 + (x * (x * (0.041666666666666664d0 + ((x * x) * 0.004166666666666667d0)))))
    else
        tmp = 1.0d0
    end if
    code = tmp
end function
// Piecewise tan(x/2): degree-5 Taylor polynomial for x <= 3.2, constant 1 otherwise.
public static double code(double x) {
	double tmp;
	if (x <= 3.2) {
		// x (1/2 + x^2 (1/24 + x^2/240))
		tmp = x * (0.5 + (x * (x * (0.041666666666666664 + ((x * x) * 0.004166666666666667)))));
	} else {
		tmp = 1.0;
	}
	return tmp;
}
def code(x):
	"""Piecewise tan(x/2): degree-5 Taylor polynomial for x <= 3.2, constant 1 otherwise."""
	if x <= 3.2:
		# x (1/2 + x^2 (1/24 + x^2/240))
		return x * (0.5 + (x * (x * (0.041666666666666664 + ((x * x) * 0.004166666666666667)))))
	return 1.0
function code(x)
	# Piecewise tan(x/2): degree-5 Taylor polynomial for x <= 3.2, else 1.
	tmp = 0.0
	if (x <= 3.2)
		tmp = Float64(x * Float64(0.5 + Float64(x * Float64(x * Float64(0.041666666666666664 + Float64(Float64(x * x) * 0.004166666666666667))))));
	else
		tmp = 1.0;
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= 3.2)
		tmp = x * (0.5 + (x * (x * (0.041666666666666664 + ((x * x) * 0.004166666666666667)))));
	else
		tmp = 1.0;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 3.2], N[(x * N[(0.5 + N[(x * N[(x * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * 0.004166666666666667), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 3.2:\\
\;\;\;\;x \cdot \left(0.5 + x \cdot \left(x \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot 0.004166666666666667\right)\right)\right)\\

\mathbf{else}:\\
\;\;\;\;1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 3.2000000000000002

    1. Initial program 44.1%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64 — 100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(\frac{1}{2} + {x}^{2} \cdot \left(\frac{1}{24} + \frac{1}{240} \cdot {x}^{2}\right)\right)} \]
    6. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{2} + {x}^{2} \cdot \left(\frac{1}{24} + \frac{1}{240} \cdot {x}^{2}\right)\right)}\right) \]
      2. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{24} + \frac{1}{240} \cdot {x}^{2}\right)\right)}\right)\right) \]
      3. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \left(\left(x \cdot x\right) \cdot \left(\color{blue}{\frac{1}{24}} + \frac{1}{240} \cdot {x}^{2}\right)\right)\right)\right) \]
      4. associate-*l*N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \left(x \cdot \color{blue}{\left(x \cdot \left(\frac{1}{24} + \frac{1}{240} \cdot {x}^{2}\right)\right)}\right)\right)\right) \]
      5. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \color{blue}{\left(x \cdot \left(\frac{1}{24} + \frac{1}{240} \cdot {x}^{2}\right)\right)}\right)\right)\right) \]
      6. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{24} + \frac{1}{240} \cdot {x}^{2}\right)}\right)\right)\right)\right) \]
      7. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{24}, \color{blue}{\left(\frac{1}{240} \cdot {x}^{2}\right)}\right)\right)\right)\right)\right) \]
      8. *-commutativeN/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{24}, \left({x}^{2} \cdot \color{blue}{\frac{1}{240}}\right)\right)\right)\right)\right)\right) \]
      9. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\left({x}^{2}\right), \color{blue}{\frac{1}{240}}\right)\right)\right)\right)\right)\right) \]
      10. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\left(x \cdot x\right), \frac{1}{240}\right)\right)\right)\right)\right)\right) \]
      11. *-lowering-*.f64 — 61.3%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{24}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, x\right), \frac{1}{240}\right)\right)\right)\right)\right)\right) \]
    7. Simplified61.3%

      \[\leadsto \color{blue}{x \cdot \left(0.5 + x \cdot \left(x \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot 0.004166666666666667\right)\right)\right)} \]

    if 3.2000000000000002 < x

    1. Initial program 98.5%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64 — 100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Applied egg-rr10.3%

      \[\leadsto \color{blue}{{1}^{-0.5}} \]
    6. Step-by-step derivation
      1. pow-base-1 — 10.3%

        \[\leadsto 1 \]
    7. Applied egg-rr10.3%

      \[\leadsto \color{blue}{1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 5: 53.3% accurate, 12.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 3.2:\\ \;\;\;\;\frac{x}{2} + x \cdot \left(x \cdot \left(x \cdot 0.041666666666666664\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x 3.2) (+ (/ x 2.0) (* x (* x (* x 0.041666666666666664)))) 1.0))
/* Herbie alternative 5: x/2 plus a cubic correction (x^3/24) below the
 * regime split at x = 3.2, constant 1.0 above it. */
double code(double x) {
	if (x <= 3.2) {
		const double cubic = x * (x * (x * 0.041666666666666664));
		return x / 2.0 + cubic;
	}
	return 1.0;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 3.2d0) then
        tmp = (x / 2.0d0) + (x * (x * (x * 0.041666666666666664d0)))
    else
        tmp = 1.0d0
    end if
    code = tmp
end function
// Herbie alternative 5: x/2 plus a cubic correction (x^3/24) below the
// regime split at x = 3.2, constant 1.0 above it.
public static double code(double x) {
	double tmp;
	if (x <= 3.2) {
		tmp = (x / 2.0) + (x * (x * (x * 0.041666666666666664)));
	} else {
		tmp = 1.0;
	}
	return tmp;
}
def code(x):
	# Herbie alternative 5: x/2 plus a cubic correction (x**3 / 24) below
	# the regime split at 3.2, constant 1.0 above it.
	if x <= 3.2:
		cubic = x * (x * (x * 0.041666666666666664))
		return x / 2.0 + cubic
	return 1.0
function code(x)
	tmp = 0.0
	if (x <= 3.2)
		tmp = Float64(Float64(x / 2.0) + Float64(x * Float64(x * Float64(x * 0.041666666666666664))));
	else
		tmp = 1.0;
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= 3.2)
		tmp = (x / 2.0) + (x * (x * (x * 0.041666666666666664)));
	else
		tmp = 1.0;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 3.2], N[(N[(x / 2.0), $MachinePrecision] + N[(x * N[(x * N[(x * 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 3.2:\\
\;\;\;\;\frac{x}{2} + x \cdot \left(x \cdot \left(x \cdot 0.041666666666666664\right)\right)\\

\mathbf{else}:\\
\;\;\;\;1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 3.2000000000000002

    1. Initial program 44.1%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64 — 100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(\frac{1}{2} + \frac{1}{24} \cdot {x}^{2}\right)} \]
    6. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{2} + \frac{1}{24} \cdot {x}^{2}\right)}\right) \]
      2. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left(\frac{1}{24} \cdot {x}^{2}\right)}\right)\right) \]
      3. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \left(\frac{1}{24} \cdot \left(x \cdot \color{blue}{x}\right)\right)\right)\right) \]
      4. associate-*r*N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \left(\left(\frac{1}{24} \cdot x\right) \cdot \color{blue}{x}\right)\right)\right) \]
      5. *-commutativeN/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \left(x \cdot \color{blue}{\left(\frac{1}{24} \cdot x\right)}\right)\right)\right) \]
      6. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{24} \cdot x\right)}\right)\right)\right) \]
      7. *-commutativeN/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \left(x \cdot \color{blue}{\frac{1}{24}}\right)\right)\right)\right) \]
      8. *-lowering-*.f64 — 61.1%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{\frac{1}{24}}\right)\right)\right)\right) \]
    7. Simplified61.1%

      \[\leadsto \color{blue}{x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664\right)\right)} \]
    8. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \frac{1}{2} \cdot x + \color{blue}{\left(x \cdot \left(x \cdot \frac{1}{24}\right)\right) \cdot x} \]
      2. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot x + x \cdot \color{blue}{\left(x \cdot \left(x \cdot \frac{1}{24}\right)\right)} \]
      3. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\left(\frac{1}{2} \cdot x\right), \color{blue}{\left(x \cdot \left(x \cdot \left(x \cdot \frac{1}{24}\right)\right)\right)}\right) \]
      4. metadata-evalN/A

        \[\leadsto \mathsf{+.f64}\left(\left(\frac{1}{2} \cdot x\right), \left(x \cdot \left(x \cdot \left(x \cdot \frac{1}{24}\right)\right)\right)\right) \]
      5. associate-/r/N/A

        \[\leadsto \mathsf{+.f64}\left(\left(\frac{1}{\frac{2}{x}}\right), \left(\color{blue}{x} \cdot \left(x \cdot \left(x \cdot \frac{1}{24}\right)\right)\right)\right) \]
      6. clear-numN/A

        \[\leadsto \mathsf{+.f64}\left(\left(\frac{x}{2}\right), \left(\color{blue}{x} \cdot \left(x \cdot \left(x \cdot \frac{1}{24}\right)\right)\right)\right) \]
      7. /-lowering-/.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \left(\color{blue}{x} \cdot \left(x \cdot \left(x \cdot \frac{1}{24}\right)\right)\right)\right) \]
      8. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \color{blue}{\left(x \cdot \left(x \cdot \frac{1}{24}\right)\right)}\right)\right) \]
      9. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{\left(x \cdot \frac{1}{24}\right)}\right)\right)\right) \]
      10. *-lowering-*.f64 — 61.1%

        \[\leadsto \mathsf{+.f64}\left(\mathsf{/.f64}\left(x, 2\right), \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{\frac{1}{24}}\right)\right)\right)\right) \]
    9. Applied egg-rr61.1%

      \[\leadsto \color{blue}{\frac{x}{2} + x \cdot \left(x \cdot \left(x \cdot 0.041666666666666664\right)\right)} \]

    if 3.2000000000000002 < x

    1. Initial program 98.5%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64 — 100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Applied egg-rr10.3%

      \[\leadsto \color{blue}{{1}^{-0.5}} \]
    6. Step-by-step derivation
      1. pow-base-1 — 10.3%

        \[\leadsto 1 \]
    7. Applied egg-rr10.3%

      \[\leadsto \color{blue}{1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 6: 53.3% accurate, 14.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 3.2:\\ \;\;\;\;x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x 3.2) (* x (+ 0.5 (* x (* x 0.041666666666666664)))) 1.0))
/* Herbie alternative 6: cubic Horner-form approximation of tan(x/2) below
 * the regime split at x = 3.2, constant 1.0 above it. The negated guard
 * keeps NaN on the constant branch, exactly as the original if/else did. */
double code(double x) {
	if (!(x <= 3.2)) {
		return 1.0;
	}
	return x * (0.5 + x * (x * 0.041666666666666664));
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 3.2d0) then
        tmp = x * (0.5d0 + (x * (x * 0.041666666666666664d0)))
    else
        tmp = 1.0d0
    end if
    code = tmp
end function
// Herbie alternative 6: cubic Horner-form approximation of tan(x/2) below
// the regime split at x = 3.2, constant 1.0 above it.
public static double code(double x) {
	double tmp;
	if (x <= 3.2) {
		tmp = x * (0.5 + (x * (x * 0.041666666666666664)));
	} else {
		tmp = 1.0;
	}
	return tmp;
}
def code(x):
	# Herbie alternative 6: cubic Horner-form approximation of tan(x/2)
	# below the regime split at 3.2, constant 1.0 above it.
	if x <= 3.2:
		return x * (0.5 + x * (x * 0.041666666666666664))
	return 1.0
function code(x)
	tmp = 0.0
	if (x <= 3.2)
		tmp = Float64(x * Float64(0.5 + Float64(x * Float64(x * 0.041666666666666664))));
	else
		tmp = 1.0;
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= 3.2)
		tmp = x * (0.5 + (x * (x * 0.041666666666666664)));
	else
		tmp = 1.0;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 3.2], N[(x * N[(0.5 + N[(x * N[(x * 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 3.2:\\
\;\;\;\;x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664\right)\right)\\

\mathbf{else}:\\
\;\;\;\;1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 3.2000000000000002

    1. Initial program 44.1%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64 — 100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(\frac{1}{2} + \frac{1}{24} \cdot {x}^{2}\right)} \]
    6. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{2} + \frac{1}{24} \cdot {x}^{2}\right)}\right) \]
      2. +-lowering-+.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left(\frac{1}{24} \cdot {x}^{2}\right)}\right)\right) \]
      3. unpow2N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \left(\frac{1}{24} \cdot \left(x \cdot \color{blue}{x}\right)\right)\right)\right) \]
      4. associate-*r*N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \left(\left(\frac{1}{24} \cdot x\right) \cdot \color{blue}{x}\right)\right)\right) \]
      5. *-commutativeN/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \left(x \cdot \color{blue}{\left(\frac{1}{24} \cdot x\right)}\right)\right)\right) \]
      6. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{24} \cdot x\right)}\right)\right)\right) \]
      7. *-commutativeN/A

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \left(x \cdot \color{blue}{\frac{1}{24}}\right)\right)\right)\right) \]
      8. *-lowering-*.f64 — 61.1%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{\frac{1}{24}}\right)\right)\right)\right) \]
    7. Simplified61.1%

      \[\leadsto \color{blue}{x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664\right)\right)} \]

    if 3.2000000000000002 < x

    1. Initial program 98.5%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64 — 100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Applied egg-rr10.3%

      \[\leadsto \color{blue}{{1}^{-0.5}} \]
    6. Step-by-step derivation
      1. pow-base-1 — 10.3%

        \[\leadsto 1 \]
    7. Applied egg-rr10.3%

      \[\leadsto \color{blue}{1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 7: 53.2% accurate, 25.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1.4:\\ \;\;\;\;\frac{x}{2}\\ \mathbf{else}:\\ \;\;\;\;1\\ \end{array} \end{array} \]
(FPCore (x) :precision binary64 (if (<= x 1.4) (/ x 2.0) 1.0))
/* Herbie alternative 7: linear term of tan(x/2) (i.e. x/2) below the
 * regime split at x = 1.4, constant 1.0 above it. */
double code(double x) {
	return (x <= 1.4) ? x / 2.0 : 1.0;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 1.4d0) then
        tmp = x / 2.0d0
    else
        tmp = 1.0d0
    end if
    code = tmp
end function
// Herbie alternative 7: linear term of tan(x/2) (i.e. x/2) below the
// regime split at x = 1.4, constant 1.0 above it.
public static double code(double x) {
	double tmp;
	if (x <= 1.4) {
		tmp = x / 2.0;
	} else {
		tmp = 1.0;
	}
	return tmp;
}
def code(x):
	# Herbie alternative 7: linear term of tan(x/2) below the regime split
	# at 1.4, constant 1.0 above it.
	return x / 2.0 if x <= 1.4 else 1.0
function code(x)
	tmp = 0.0
	if (x <= 1.4)
		tmp = Float64(x / 2.0);
	else
		tmp = 1.0;
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= 1.4)
		tmp = x / 2.0;
	else
		tmp = 1.0;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 1.4], N[(x / 2.0), $MachinePrecision], 1.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.4:\\
\;\;\;\;\frac{x}{2}\\

\mathbf{else}:\\
\;\;\;\;1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.3999999999999999

    1. Initial program 44.1%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64 — 100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot x} \]
    6. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto x \cdot \color{blue}{\frac{1}{2}} \]
      2. metadata-evalN/A

        \[\leadsto x \cdot \frac{1}{\color{blue}{2}} \]
      3. associate-/l*N/A

        \[\leadsto \frac{x \cdot 1}{\color{blue}{2}} \]
      4. *-rgt-identityN/A

        \[\leadsto \frac{x}{2} \]
      5. /-lowering-/.f64 — 60.7%

        \[\leadsto \mathsf{/.f64}\left(x, \color{blue}{2}\right) \]
    7. Simplified60.7%

      \[\leadsto \color{blue}{\frac{x}{2}} \]

    if 1.3999999999999999 < x

    1. Initial program 98.5%

      \[\frac{1 - \cos x}{\sin x} \]
    2. Step-by-step derivation
      1. hang-p0-tanN/A

        \[\leadsto \tan \left(\frac{x}{2}\right) \]
      2. tan-lowering-tan.f64N/A

        \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
      3. /-lowering-/.f64 — 100.0%

        \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
    4. Add Preprocessing
    5. Applied egg-rr10.3%

      \[\leadsto \color{blue}{{1}^{-0.5}} \]
    6. Step-by-step derivation
      1. pow-base-1 — 10.3%

        \[\leadsto 1 \]
    7. Applied egg-rr10.3%

      \[\leadsto \color{blue}{1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 8: 7.0% accurate, 205.0× speedup?

\[\begin{array}{l} \\ 1 \end{array} \]
(FPCore (x) :precision binary64 1.0)
/* Herbie alternative 8: constant approximation; the argument is unused. */
double code(double x) {
	return 1.0;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = 1.0d0
end function
public static double code(double x) {
	return 1.0;
}
def code(x):
	# Herbie alternative 8: constant approximation; the argument is unused.
	return 1.0
function code(x)
	return 1.0
end
function tmp = code(x)
	tmp = 1.0;
end
code[x_] := 1.0
\begin{array}{l}

\\
1
\end{array}
Derivation
  1. Initial program 60.9%

    \[\frac{1 - \cos x}{\sin x} \]
  2. Step-by-step derivation
    1. hang-p0-tanN/A

      \[\leadsto \tan \left(\frac{x}{2}\right) \]
    2. tan-lowering-tan.f64N/A

      \[\leadsto \mathsf{tan.f64}\left(\left(\frac{x}{2}\right)\right) \]
    3. /-lowering-/.f64 — 100.0%

      \[\leadsto \mathsf{tan.f64}\left(\mathsf{/.f64}\left(x, 2\right)\right) \]
  3. Simplified100.0%

    \[\leadsto \color{blue}{\tan \left(\frac{x}{2}\right)} \]
  4. Add Preprocessing
  5. Applied egg-rr7.6%

    \[\leadsto \color{blue}{{1}^{-0.5}} \]
  6. Step-by-step derivation
    1. pow-base-1 — 7.6%

      \[\leadsto 1 \]
  7. Applied egg-rr7.6%

    \[\leadsto \color{blue}{1} \]
  8. Add Preprocessing

Developer Target 1: 100.0% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \tan \left(\frac{x}{2}\right) \end{array} \]
(FPCore (x) :precision binary64 (tan (/ x 2.0)))
double code(double x) {
	return tan((x / 2.0));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = tan((x / 2.0d0))
end function
// Developer target: tan(x/2) is the exact rewrite of (1 - cos x) / sin x
// (half-angle identity).
public static double code(double x) {
	return Math.tan((x / 2.0));
}
def code(x):
	# Developer target: tan(x/2), the exact half-angle rewrite of
	# (1 - cos x) / sin x.
	half = x / 2.0
	return math.tan(half)
function code(x)
	return tan(Float64(x / 2.0))
end
function tmp = code(x)
	tmp = tan((x / 2.0));
end
code[x_] := N[Tan[N[(x / 2.0), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\tan \left(\frac{x}{2}\right)
\end{array}

Reproduce

?
herbie shell --seed 2024161 
(FPCore (x)
  :name "tanhf (example 3.4)"
  :precision binary64

  :alt
  (! :herbie-platform default (tan (/ x 2)))

  (/ (- 1.0 (cos x)) (sin x)))