ENA, Section 1.4, Exercise 4d

Percentage Accurate: 61.4% → 99.4%
Time: 8.5s
Alternatives: 8
Speedup: 1.0×

Specification

?
\[\left(0 \leq x \land x \leq 1000000000\right) \land \left(-1 \leq \varepsilon \land \varepsilon \leq 1\right)\]
\[\begin{array}{l} \\ x - \sqrt{x \cdot x - \varepsilon} \end{array} \]
(FPCore (x eps) :precision binary64 (- x (sqrt (- (* x x) eps))))
double code(double x, double eps) {
	return x - sqrt(((x * x) - eps));
}
real(8) function code(x, eps)
    ! Naive form: x - sqrt(x^2 - eps), as written in the specification.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: radicand
    radicand = (x * x) - eps
    code = x - sqrt(radicand)
end function
public static double code(double x, double eps) {
	// Naive form: x - sqrt(x^2 - eps), as written in the specification.
	final double radicand = x * x - eps;
	return x - Math.sqrt(radicand);
}
def code(x, eps):
	# Naive form: x - sqrt(x^2 - eps), as written in the specification.
	radicand = x * x - eps
	return x - math.sqrt(radicand)
function code(x, eps)
	# Naive form: x - sqrt(x^2 - eps), rounded to Float64 at each step.
	radicand = Float64(Float64(x * x) - eps)
	return Float64(x - sqrt(radicand))
end
function tmp = code(x, eps)
	% Naive form: x - sqrt(x^2 - eps), as written in the specification.
	radicand = (x * x) - eps;
	tmp = x - sqrt(radicand);
end
(* Naive form: x - sqrt(x^2 - eps), every intermediate rounded at $MachinePrecision. *)
code[x_, eps_] := N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x - \sqrt{x \cdot x - \varepsilon}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 61.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ x - \sqrt{x \cdot x - \varepsilon} \end{array} \]
(FPCore (x eps) :precision binary64 (- x (sqrt (- (* x x) eps))))
double code(double x, double eps) {
	return x - sqrt(((x * x) - eps));
}
real(8) function code(x, eps)
    ! Initial program: the naive difference x - sqrt(x*x - eps).
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: s
    s = sqrt((x * x) - eps)
    code = x - s
end function
public static double code(double x, double eps) {
	// Initial program: the naive difference x - sqrt(x*x - eps).
	final double s = Math.sqrt(x * x - eps);
	return x - s;
}
def code(x, eps):
	# Initial program: the naive difference x - sqrt(x*x - eps).
	s = math.sqrt(x * x - eps)
	return x - s
function code(x, eps)
	# Initial program: the naive difference x - sqrt(x*x - eps).
	s = sqrt(Float64(Float64(x * x) - eps))
	return Float64(x - s)
end
function tmp = code(x, eps)
	% Initial program: the naive difference x - sqrt(x*x - eps).
	s = sqrt((x * x) - eps);
	tmp = x - s;
end
(* Initial program: naive x - sqrt(x^2 - eps), rounded at $MachinePrecision. *)
code[x_, eps_] := N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x - \sqrt{x \cdot x - \varepsilon}
\end{array}

Alternative 1: 99.4% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -1 \cdot 10^{-154}:\\ \;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (if (<= (- x (sqrt (- (* x x) eps))) -1e-154)
   (/ eps (+ x (hypot x (sqrt (- eps)))))
   (/
    eps
    (fma
     -0.125
     (/ eps (* (* x x) (/ x eps)))
     (+ (* x 2.0) (* -0.5 (/ eps x)))))))
double code(double x, double eps) {
	double tmp;
	if ((x - sqrt(((x * x) - eps))) <= -1e-154) {
		tmp = eps / (x + hypot(x, sqrt(-eps)));
	} else {
		tmp = eps / fma(-0.125, (eps / ((x * x) * (x / eps))), ((x * 2.0) + (-0.5 * (eps / x))));
	}
	return tmp;
}
function code(x, eps)
	# Herbie alternative 1: rationalized eps / (x + sqrt(x^2 - eps)).
	# hypot branch for clearly negative results, fma series branch otherwise.
	naive = Float64(x - sqrt(Float64(Float64(x * x) - eps)))
	if naive <= -1e-154
		return Float64(eps / Float64(x + hypot(x, sqrt(Float64(-eps)))))
	else
		series = fma(-0.125, Float64(eps / Float64(Float64(x * x) * Float64(x / eps))), Float64(Float64(x * 2.0) + Float64(-0.5 * Float64(eps / x))))
		return Float64(eps / series)
	end
end
(* Herbie alternative 1: rationalized eps / (x + sqrt(x^2 - eps)); hypot-style branch when the naive difference is clearly negative, fma-series branch otherwise. *)
code[x_, eps_] := If[LessEqual[N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], -1e-154], N[(eps / N[(x + N[Sqrt[x ^ 2 + N[Sqrt[(-eps)], $MachinePrecision] ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(eps / N[(-0.125 * N[(eps / N[(N[(x * x), $MachinePrecision] * N[(x / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(x * 2.0), $MachinePrecision] + N[(-0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -1 \cdot 10^{-154}:\\
\;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -9.9999999999999997e-155

    1. Initial program 99.0%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--98.9%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv98.6%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt98.5%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg98.5%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt98.5%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def98.5%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr98.5%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/98.4%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity98.4%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-99.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses99.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity99.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified99.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]

    if -9.9999999999999997e-155 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.1%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--6.1%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv6.1%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt6.1%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg6.1%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr2.6%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/2.6%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity2.6%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-50.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses50.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity50.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified50.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    6. Taylor expanded in x around inf 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{-0.125 \cdot \frac{{\varepsilon}^{2} \cdot {\left(\sqrt{-1}\right)}^{4}}{{x}^{3}} + \left(0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)}} \]
    7. Step-by-step derivation
      1. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(-0.125, \frac{{\varepsilon}^{2} \cdot {\left(\sqrt{-1}\right)}^{4}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)}} \]
      2. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot {\left(\sqrt{-1}\right)}^{4}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      3. metadata-eval0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot {\left(\sqrt{-1}\right)}^{\color{blue}{\left(2 \cdot 2\right)}}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      4. pow-sqr0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot {\left(\sqrt{-1}\right)}^{2}\right)}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      5. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      6. rem-square-sqrt0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\color{blue}{-1} \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      7. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \left(-1 \cdot \color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)}\right)}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      8. rem-square-sqrt0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \left(-1 \cdot \color{blue}{-1}\right)}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      9. metadata-eval0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \color{blue}{1}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      10. *-rgt-identity0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\color{blue}{\varepsilon \cdot \varepsilon}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      11. associate-/l*0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \color{blue}{\frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      12. +-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}\right)} \]
      13. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)} \]
      14. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}\right)} \]
      15. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)\right)} \]
    8. Simplified94.6%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)\right)}} \]
    9. Step-by-step derivation
      1. fma-udef94.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{x \cdot 2 + \frac{\varepsilon}{x} \cdot -0.5}\right)} \]
      2. *-commutative94.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, x \cdot 2 + \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
    10. Applied egg-rr94.6%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
    11. Step-by-step derivation
      1. div-inv94.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{{x}^{3} \cdot \frac{1}{\varepsilon}}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
      2. unpow394.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{\left(\left(x \cdot x\right) \cdot x\right)} \cdot \frac{1}{\varepsilon}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
      3. associate-*l*100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{\left(x \cdot x\right) \cdot \left(x \cdot \frac{1}{\varepsilon}\right)}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
      4. div-inv100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \color{blue}{\frac{x}{\varepsilon}}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
    12. Applied egg-rr100.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -1 \cdot 10^{-154}:\\ \;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)}\\ \end{array} \]

Alternative 2: 99.1% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x - \sqrt{x \cdot x - \varepsilon}\\ \mathbf{if}\;t_0 \leq -1 \cdot 10^{-154}:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (- x (sqrt (- (* x x) eps)))))
   (if (<= t_0 -1e-154)
     t_0
     (/
      eps
      (fma
       -0.125
       (/ eps (* (* x x) (/ x eps)))
       (+ (* x 2.0) (* -0.5 (/ eps x))))))))
double code(double x, double eps) {
	double t_0 = x - sqrt(((x * x) - eps));
	double tmp;
	if (t_0 <= -1e-154) {
		tmp = t_0;
	} else {
		tmp = eps / fma(-0.125, (eps / ((x * x) * (x / eps))), ((x * 2.0) + (-0.5 * (eps / x))));
	}
	return tmp;
}
function code(x, eps)
	# Herbie alternative 2: keep the naive difference when clearly negative,
	# otherwise switch to the series-based fma form.
	t_0 = Float64(x - sqrt(Float64(Float64(x * x) - eps)))
	if t_0 <= -1e-154
		return t_0
	else
		return Float64(eps / fma(-0.125, Float64(eps / Float64(Float64(x * x) * Float64(x / eps))), Float64(Float64(x * 2.0) + Float64(-0.5 * Float64(eps / x)))))
	end
end
(* Herbie alternative 2: reuse the naive difference when it is clearly negative; otherwise use the fma-series form. *)
code[x_, eps_] := Block[{t$95$0 = N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -1e-154], t$95$0, N[(eps / N[(-0.125 * N[(eps / N[(N[(x * x), $MachinePrecision] * N[(x / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(x * 2.0), $MachinePrecision] + N[(-0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x - \sqrt{x \cdot x - \varepsilon}\\
\mathbf{if}\;t_0 \leq -1 \cdot 10^{-154}:\\
\;\;\;\;t_0\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -9.9999999999999997e-155

    1. Initial program 99.0%

      \[x - \sqrt{x \cdot x - \varepsilon} \]

    if -9.9999999999999997e-155 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.1%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--6.1%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv6.1%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt6.1%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg6.1%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr2.6%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/2.6%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity2.6%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-50.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses50.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity50.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified50.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    6. Taylor expanded in x around inf 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{-0.125 \cdot \frac{{\varepsilon}^{2} \cdot {\left(\sqrt{-1}\right)}^{4}}{{x}^{3}} + \left(0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)}} \]
    7. Step-by-step derivation
      1. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(-0.125, \frac{{\varepsilon}^{2} \cdot {\left(\sqrt{-1}\right)}^{4}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)}} \]
      2. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot {\left(\sqrt{-1}\right)}^{4}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      3. metadata-eval0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot {\left(\sqrt{-1}\right)}^{\color{blue}{\left(2 \cdot 2\right)}}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      4. pow-sqr0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot {\left(\sqrt{-1}\right)}^{2}\right)}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      5. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      6. rem-square-sqrt0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\color{blue}{-1} \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      7. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \left(-1 \cdot \color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)}\right)}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      8. rem-square-sqrt0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \left(-1 \cdot \color{blue}{-1}\right)}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      9. metadata-eval0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot \color{blue}{1}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      10. *-rgt-identity0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\color{blue}{\varepsilon \cdot \varepsilon}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      11. associate-/l*0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \color{blue}{\frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x\right)} \]
      12. +-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}\right)} \]
      13. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)} \]
      14. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}\right)} \]
      15. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)\right)} \]
    8. Simplified94.6%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)\right)}} \]
    9. Step-by-step derivation
      1. fma-udef94.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{x \cdot 2 + \frac{\varepsilon}{x} \cdot -0.5}\right)} \]
      2. *-commutative94.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, x \cdot 2 + \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
    10. Applied egg-rr94.6%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
    11. Step-by-step derivation
      1. div-inv94.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{{x}^{3} \cdot \frac{1}{\varepsilon}}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
      2. unpow394.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{\left(\left(x \cdot x\right) \cdot x\right)} \cdot \frac{1}{\varepsilon}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
      3. associate-*l*100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{\left(x \cdot x\right) \cdot \left(x \cdot \frac{1}{\varepsilon}\right)}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
      4. div-inv100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \color{blue}{\frac{x}{\varepsilon}}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
    12. Applied egg-rr100.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -1 \cdot 10^{-154}:\\ \;\;\;\;x - \sqrt{x \cdot x - \varepsilon}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}, x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}\right)}\\ \end{array} \]

Alternative 3: 99.0% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x - \sqrt{x \cdot x - \varepsilon}\\ \mathbf{if}\;t_0 \leq -1 \cdot 10^{-154}:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (- x (sqrt (- (* x x) eps)))))
   (if (<= t_0 -1e-154) t_0 (/ eps (+ (* x 2.0) (* -0.5 (/ eps x)))))))
double code(double x, double eps) {
	double t_0 = x - sqrt(((x * x) - eps));
	double tmp;
	if (t_0 <= -1e-154) {
		tmp = t_0;
	} else {
		tmp = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	}
	return tmp;
}
real(8) function code(x, eps)
    ! Herbie alternative 3: keep the naive difference when it is clearly
    ! negative; otherwise use the two-term series eps / (2x - eps/(2x)).
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    t_0 = x - sqrt(((x * x) - eps))
    if (t_0 <= (-1d-154)) then
        code = t_0
    else
        code = eps / ((x * 2.0d0) + ((-0.5d0) * (eps / x)))
    end if
end function
public static double code(double x, double eps) {
	// Herbie alternative 3: keep the naive difference when it is clearly
	// negative; otherwise use the two-term series eps / (2x - eps/(2x)).
	final double t_0 = x - Math.sqrt(x * x - eps);
	if (t_0 <= -1e-154) {
		return t_0;
	}
	return eps / (x * 2.0 + -0.5 * (eps / x));
}
def code(x, eps):
	# Herbie alternative 3: keep the naive difference when it is clearly
	# negative; otherwise use the two-term series eps / (2x - eps/(2x)).
	t_0 = x - math.sqrt(x * x - eps)
	if t_0 <= -1e-154:
		return t_0
	return eps / (x * 2.0 + -0.5 * (eps / x))
function code(x, eps)
	# Herbie alternative 3: keep the naive difference when it is clearly
	# negative; otherwise use the two-term series eps / (2x - eps/(2x)).
	t_0 = Float64(x - sqrt(Float64(Float64(x * x) - eps)))
	if t_0 <= -1e-154
		return t_0
	else
		return Float64(eps / Float64(Float64(x * 2.0) + Float64(-0.5 * Float64(eps / x))))
	end
end
function tmp_2 = code(x, eps)
	% Accurate evaluation of x - sqrt(x*x - eps): direct form when clearly
	% negative, cancellation-free series form otherwise.
	direct = x - sqrt((x * x) - eps);
	if (direct <= -1e-154)
		tmp_2 = direct;
	else
		tmp_2 = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	end
end
(* x - sqrt(x*x - eps): direct form when clearly negative, otherwise a cancellation-free series form of the conjugate. *)
code[x_, eps_] := Block[{t$95$0 = N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -1e-154], t$95$0, N[(eps / N[(N[(x * 2.0), $MachinePrecision] + N[(-0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x - \sqrt{x \cdot x - \varepsilon}\\
\mathbf{if}\;t_0 \leq -1 \cdot 10^{-154}:\\
\;\;\;\;t_0\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -9.9999999999999997e-155

    1. Initial program 99.0%

      \[x - \sqrt{x \cdot x - \varepsilon} \]

    if -9.9999999999999997e-155 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.1%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--6.1%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv6.1%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt6.1%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg6.1%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr2.6%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/2.6%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity2.6%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-50.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses50.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity50.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified50.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    6. Taylor expanded in x around inf 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x}} \]
    7. Step-by-step derivation
      1. +-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
      2. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
      3. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
      4. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
      5. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot \varepsilon\right)}}{x}\right)} \]
      6. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot \varepsilon\right)}{x}\right)} \]
      7. rem-square-sqrt100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{-1} \cdot \varepsilon\right)}{x}\right)} \]
      8. associate-*r*100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{\left(0.5 \cdot -1\right) \cdot \varepsilon}}{x}\right)} \]
      9. metadata-eval100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5} \cdot \varepsilon}{x}\right)} \]
      10. associate-*r/100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
      11. *-commutative100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{\varepsilon}{x} \cdot -0.5}\right)} \]
    8. Simplified100.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}} \]
    9. Step-by-step derivation
      1. fma-udef94.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{x \cdot 2 + \frac{\varepsilon}{x} \cdot -0.5}\right)} \]
      2. *-commutative94.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, x \cdot 2 + \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
    10. Applied egg-rr100.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (99.5% accurate)

    \[\leadsto \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -1 \cdot 10^{-154}:\\ \;\;\;\;x - \sqrt{x \cdot x - \varepsilon}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\ \end{array} \]

Alternative 4: 86.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1.05 \cdot 10^{-124}:\\ \;\;\;\;x - \sqrt{-\varepsilon}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\ \end{array} \end{array} \]
; x - sqrt(x*x - eps): for tiny x the square underflows, so fall back to
; x - sqrt(-eps); otherwise use the cancellation-free series form.
(FPCore (x eps)
 :precision binary64
 (if (<= x 1.05e-124)
   (- x (sqrt (- eps)))
   (/ eps (+ (* x 2.0) (* -0.5 (/ eps x))))))
double code(double x, double eps) {
	double tmp;
	if (x <= 1.05e-124) {
		tmp = x - sqrt(-eps);
	} else {
		tmp = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	}
	return tmp;
}
real(8) function code(x, eps)
    ! For tiny x the square x*x underflows; use the limit x - sqrt(-eps).
    ! Otherwise evaluate the cancellation-free series form.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    if (x <= 1.05d-124) then
        code = x - sqrt(-eps)
    else
        code = eps / ((x * 2.0d0) + ((-0.5d0) * (eps / x)))
    end if
end function
public static double code(double x, double eps) {
	// For tiny x the square x*x underflows; use the limit x - sqrt(-eps).
	// Otherwise evaluate the cancellation-free series form.
	return (x <= 1.05e-124)
			? x - Math.sqrt(-eps)
			: eps / ((x * 2.0) + (-0.5 * (eps / x)));
}
def code(x, eps):
	"""Approximate x - sqrt(x*x - eps).

	For tiny x the square underflows, so use the limit x - sqrt(-eps);
	otherwise use the cancellation-free series form.
	"""
	if x <= 1.05e-124:
		return x - math.sqrt(-eps)
	return eps / ((x * 2.0) + (-0.5 * (eps / x)))
function code(x, eps)
	# For tiny x the square underflows; use the limit x - sqrt(-eps).
	# Otherwise evaluate the cancellation-free series form.
	if x <= 1.05e-124
		return Float64(x - sqrt(Float64(-eps)))
	end
	return Float64(eps / Float64(Float64(x * 2.0) + Float64(-0.5 * Float64(eps / x))))
end
function tmp_2 = code(x, eps)
	% For tiny x the square underflows; use the limit x - sqrt(-eps).
	% Otherwise evaluate the cancellation-free series form.
	if (x <= 1.05e-124)
		tmp_2 = x - sqrt(-eps);
	else
		tmp_2 = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	end
end
(* Tiny x: x*x underflows, use x - sqrt(-eps); otherwise the cancellation-free series form. *)
code[x_, eps_] := If[LessEqual[x, 1.05e-124], N[(x - N[Sqrt[(-eps)], $MachinePrecision]), $MachinePrecision], N[(eps / N[(N[(x * 2.0), $MachinePrecision] + N[(-0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.05 \cdot 10^{-124}:\\
\;\;\;\;x - \sqrt{-\varepsilon}\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.05e-124

    1. Initial program 98.4%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Taylor expanded in x around 0 98.1%

      \[\leadsto x - \sqrt{\color{blue}{-1 \cdot \varepsilon}} \]
    3. Step-by-step derivation
      1. neg-mul-198.1%

        \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]
    4. Simplified98.1%

      \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]

    if 1.05e-124 < x

    1. Initial program 23.2%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--23.2%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv23.1%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt23.2%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg23.2%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt20.3%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def20.3%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr20.3%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/20.3%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity20.3%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-59.9%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses59.9%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity59.9%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified59.9%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    6. Taylor expanded in x around inf 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x}} \]
    7. Step-by-step derivation
      1. +-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
      2. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
      3. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
      4. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
      5. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot \varepsilon\right)}}{x}\right)} \]
      6. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot \varepsilon\right)}{x}\right)} \]
      7. rem-square-sqrt84.1%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{-1} \cdot \varepsilon\right)}{x}\right)} \]
      8. associate-*r*84.1%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{\left(0.5 \cdot -1\right) \cdot \varepsilon}}{x}\right)} \]
      9. metadata-eval84.1%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5} \cdot \varepsilon}{x}\right)} \]
      10. associate-*r/84.1%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
      11. *-commutative84.1%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{\varepsilon}{x} \cdot -0.5}\right)} \]
    8. Simplified84.1%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}} \]
    9. Step-by-step derivation
      1. fma-udef79.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{x \cdot 2 + \frac{\varepsilon}{x} \cdot -0.5}\right)} \]
      2. *-commutative79.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, x \cdot 2 + \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
    10. Applied egg-rr84.1%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (89.9% accurate)

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.05 \cdot 10^{-124}:\\ \;\;\;\;x - \sqrt{-\varepsilon}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\ \end{array} \]

Alternative 5: 45.7% accurate, 9.7× speedup?

\[\begin{array}{l} \\ \frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}} \end{array} \]
; Series form of eps / (x + sqrt(x*x - eps)), valid for large x.
(FPCore (x eps) :precision binary64 (/ eps (+ (* x 2.0) (* -0.5 (/ eps x)))))
double code(double x, double eps) {
	/* Series form of eps / (x + sqrt(x*x - eps)) for large x. */
	const double denom = (x * 2.0) + (-0.5 * (eps / x));
	return eps / denom;
}
real(8) function code(x, eps)
    ! Series form of eps / (x + sqrt(x*x - eps)) for large x.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: denom
    denom = (x * 2.0d0) + ((-0.5d0) * (eps / x))
    code = eps / denom
end function
public static double code(double x, double eps) {
	// Series form of eps / (x + sqrt(x*x - eps)) for large x.
	final double denom = (x * 2.0) + (-0.5 * (eps / x));
	return eps / denom;
}
def code(x, eps):
	"""Series form of eps / (x + sqrt(x*x - eps)) for large x."""
	denom = (x * 2.0) + (-0.5 * (eps / x))
	return eps / denom
function code(x, eps)
	# Series form of eps / (x + sqrt(x*x - eps)) for large x.
	denom = Float64(Float64(x * 2.0) + Float64(-0.5 * Float64(eps / x)))
	return Float64(eps / denom)
end
function tmp = code(x, eps)
	% Series form of eps / (x + sqrt(x*x - eps)) for large x.
	denom = (x * 2.0) + (-0.5 * (eps / x));
	tmp = eps / denom;
end
(* Series form of eps / (x + sqrt(x*x - eps)) for large x. *)
code[x_, eps_] := N[(eps / N[(N[(x * 2.0), $MachinePrecision] + N[(-0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}
\end{array}
Derivation
  1. Initial program 54.4%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Step-by-step derivation
    1. flip--54.3%

      \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    2. div-inv54.1%

      \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    3. add-sqr-sqrt54.1%

      \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
    4. sub-neg54.1%

      \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
    5. add-sqr-sqrt52.4%

      \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
    6. hypot-def52.4%

      \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  3. Applied egg-rr52.4%

    \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  4. Step-by-step derivation
    1. associate-*r/52.4%

      \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    2. *-rgt-identity52.4%

      \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    3. associate--r-75.8%

      \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    4. +-inverses75.8%

      \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. +-lft-identity75.8%

      \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
  5. Simplified75.8%

    \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  6. Taylor expanded in x around inf 0.0%

    \[\leadsto \frac{\varepsilon}{\color{blue}{0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x}} \]
  7. Step-by-step derivation
    1. +-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
    2. *-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
    3. fma-def0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
    4. associate-*r/0.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
    5. *-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot \varepsilon\right)}}{x}\right)} \]
    6. unpow20.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot \varepsilon\right)}{x}\right)} \]
    7. rem-square-sqrt52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{-1} \cdot \varepsilon\right)}{x}\right)} \]
    8. associate-*r*52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{\left(0.5 \cdot -1\right) \cdot \varepsilon}}{x}\right)} \]
    9. metadata-eval52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5} \cdot \varepsilon}{x}\right)} \]
    10. associate-*r/52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
    11. *-commutative52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{\varepsilon}{x} \cdot -0.5}\right)} \]
  8. Simplified52.2%

    \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}} \]
  9. Step-by-step derivation
    1. fma-udef48.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \color{blue}{x \cdot 2 + \frac{\varepsilon}{x} \cdot -0.5}\right)} \]
    2. *-commutative48.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, x \cdot 2 + \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
  10. Applied egg-rr52.2%

    \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
  11. Final simplification (52.2% accurate)

    \[\leadsto \frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}} \]

Alternative 6: 44.9% accurate, 21.4× speedup?

\[\begin{array}{l} \\ \frac{\varepsilon}{x} \cdot 0.5 \end{array} \]
; Leading Taylor term eps/(2x) of x - sqrt(x*x - eps) for large x.
(FPCore (x eps) :precision binary64 (* (/ eps x) 0.5))
double code(double x, double eps) {
	/* Leading Taylor term eps/(2x) of x - sqrt(x*x - eps). */
	const double ratio = eps / x;
	return 0.5 * ratio;
}
real(8) function code(x, eps)
    ! Leading Taylor term eps/(2x) of x - sqrt(x*x - eps).
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = 0.5d0 * (eps / x)
end function
public static double code(double x, double eps) {
	// Leading Taylor term eps/(2x) of x - sqrt(x*x - eps).
	final double ratio = eps / x;
	return 0.5 * ratio;
}
def code(x, eps):
	"""Leading Taylor term eps/(2*x) of x - sqrt(x*x - eps)."""
	return 0.5 * (eps / x)
function code(x, eps)
	# Leading Taylor term eps/(2x) of x - sqrt(x*x - eps).
	ratio = Float64(eps / x)
	return Float64(0.5 * ratio)
end
function tmp = code(x, eps)
	% Leading Taylor term eps/(2*x) of x - sqrt(x*x - eps).
	tmp = 0.5 * (eps / x);
end
(* Leading Taylor term eps/(2x) of x - sqrt(x*x - eps). *)
code[x_, eps_] := N[(N[(eps / x), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}

\\
\frac{\varepsilon}{x} \cdot 0.5
\end{array}
Derivation
  1. Initial program 54.4%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Taylor expanded in x around inf 51.6%

    \[\leadsto \color{blue}{0.5 \cdot \frac{\varepsilon}{x}} \]
  3. Final simplification (51.6% accurate)

    \[\leadsto \frac{\varepsilon}{x} \cdot 0.5 \]

Alternative 7: 5.3% accurate, 35.7× speedup?

\[\begin{array}{l} \\ x \cdot -2 \end{array} \]
; Degenerate Taylor expansion x * -2 of the target; eps is unused.
(FPCore (x eps) :precision binary64 (* x -2.0))
double code(double x, double eps) {
	/* Degenerate Taylor expansion of x - sqrt(x*x - eps); eps is unused. */
	(void) eps;
	return -2.0 * x;
}
real(8) function code(x, eps)
    ! Degenerate Taylor expansion of x - sqrt(x*x - eps); eps is unused.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (-2.0d0) * x
end function
public static double code(double x, double eps) {
	// Degenerate Taylor expansion of x - sqrt(x*x - eps); eps is unused.
	return -2.0 * x;
}
def code(x, eps):
	"""Degenerate Taylor expansion of x - sqrt(x*x - eps); eps is unused."""
	return -2.0 * x
function code(x, eps)
	# Degenerate Taylor expansion of x - sqrt(x*x - eps); eps is unused.
	return Float64(-2.0 * x)
end
function tmp = code(x, eps)
	% Degenerate Taylor expansion of x - sqrt(x*x - eps); eps is unused.
	tmp = -2.0 * x;
end
(* Degenerate Taylor expansion of x - sqrt(x*x - eps); eps is unused. *)
code[x_, eps_] := N[(x * -2.0), $MachinePrecision]
\begin{array}{l}

\\
x \cdot -2
\end{array}
Derivation
  1. Initial program 54.4%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Step-by-step derivation
    1. flip--54.3%

      \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    2. div-inv54.1%

      \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    3. add-sqr-sqrt54.1%

      \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
    4. sub-neg54.1%

      \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
    5. add-sqr-sqrt52.4%

      \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
    6. hypot-def52.4%

      \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  3. Applied egg-rr52.4%

    \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  4. Step-by-step derivation
    1. associate-*r/52.4%

      \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    2. *-rgt-identity52.4%

      \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    3. associate--r-75.8%

      \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    4. +-inverses75.8%

      \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. +-lft-identity75.8%

      \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
  5. Simplified75.8%

    \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  6. Taylor expanded in x around inf 0.0%

    \[\leadsto \frac{\varepsilon}{\color{blue}{0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x}} \]
  7. Step-by-step derivation
    1. +-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
    2. *-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
    3. fma-def0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
    4. associate-*r/0.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
    5. *-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot \varepsilon\right)}}{x}\right)} \]
    6. unpow20.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot \varepsilon\right)}{x}\right)} \]
    7. rem-square-sqrt52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{-1} \cdot \varepsilon\right)}{x}\right)} \]
    8. associate-*r*52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{\left(0.5 \cdot -1\right) \cdot \varepsilon}}{x}\right)} \]
    9. metadata-eval52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5} \cdot \varepsilon}{x}\right)} \]
    10. associate-*r/52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
    11. *-commutative52.2%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{\varepsilon}{x} \cdot -0.5}\right)} \]
  8. Simplified52.2%

    \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}} \]
  9. Taylor expanded in eps around inf 5.1%

    \[\leadsto \color{blue}{-2 \cdot x} \]
  10. Step-by-step derivation
    1. *-commutative5.1%

      \[\leadsto \color{blue}{x \cdot -2} \]
  11. Simplified5.1%

    \[\leadsto \color{blue}{x \cdot -2} \]
  12. Final simplification (5.1% accurate)

    \[\leadsto x \cdot -2 \]

Alternative 8: 3.5% accurate, 107.0× speedup?

\[\begin{array}{l} \\ x \end{array} \]
; Identity approximation of x - sqrt(x*x - eps); eps is ignored.
(FPCore (x eps) :precision binary64 x)
double code(double x, double eps) {
	/* Identity approximation of x - sqrt(x*x - eps); eps is ignored. */
	return x;
}
real(8) function code(x, eps)
    ! Identity approximation of x - sqrt(x*x - eps); eps is ignored.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = x
end function
public static double code(double x, double eps) {
	// Identity approximation of x - sqrt(x*x - eps); eps is ignored.
	return x;
}
def code(x, eps):
	"""Identity approximation of x - sqrt(x*x - eps); eps is ignored."""
	return x
function code(x, eps)
	# Identity approximation of x - sqrt(x*x - eps); eps is ignored.
	return x
end
function tmp = code(x, eps)
	% Identity approximation of x - sqrt(x*x - eps); eps is ignored.
	tmp = x;
end
(* Identity approximation of x - sqrt(x*x - eps); eps is ignored. *)
code[x_, eps_] := x
\begin{array}{l}

\\
x
\end{array}
Derivation
  1. Initial program 54.4%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Taylor expanded in x around 0 50.6%

    \[\leadsto x - \sqrt{\color{blue}{-1 \cdot \varepsilon}} \]
  3. Step-by-step derivation
    1. neg-mul-150.6%

      \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]
  4. Simplified50.6%

    \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]
  5. Taylor expanded in x around inf 3.6%

    \[\leadsto \color{blue}{x} \]
  6. Final simplification (3.6% accurate)

    \[\leadsto x \]

Developer target: 99.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \end{array} \]
; Conjugate form: x - sqrt(x*x - eps) == eps / (x + sqrt(x*x - eps)).
(FPCore (x eps) :precision binary64 (/ eps (+ x (sqrt (- (* x x) eps)))))
double code(double x, double eps) {
	return eps / (x + sqrt(((x * x) - eps)));
}
real(8) function code(x, eps)
    ! Conjugate form of x - sqrt(x*x - eps), avoiding cancellation.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: root
    root = sqrt((x * x) - eps)
    code = eps / (x + root)
end function
public static double code(double x, double eps) {
	// Conjugate form of x - sqrt(x*x - eps), avoiding cancellation.
	final double root = Math.sqrt((x * x) - eps);
	return eps / (x + root);
}
def code(x, eps):
	"""Conjugate form of x - sqrt(x*x - eps), avoiding cancellation."""
	root = math.sqrt((x * x) - eps)
	return eps / (x + root)
function code(x, eps)
	# Conjugate form of x - sqrt(x*x - eps), avoiding cancellation.
	root = sqrt(Float64(Float64(x * x) - eps))
	return Float64(eps / Float64(x + root))
end
function tmp = code(x, eps)
	% Conjugate form of x - sqrt(x*x - eps), avoiding cancellation.
	root = sqrt((x * x) - eps);
	tmp = eps / (x + root);
end
(* Conjugate form of x - sqrt(x*x - eps), avoiding cancellation. *)
code[x_, eps_] := N[(eps / N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\varepsilon}{x + \sqrt{x \cdot x - \varepsilon}}
\end{array}

Reproduce

?
herbie shell --seed 2023275 
; Herbie input for "ENA, Section 1.4, Exercise 4d" (reproduce with seed 2023275):
; precondition 0 <= x <= 1e9 and -1 <= eps <= 1; target is the conjugate form.
(FPCore (x eps)
  :name "ENA, Section 1.4, Exercise 4d"
  :precision binary64
  :pre (and (and (<= 0.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))

  :herbie-target
  (/ eps (+ x (sqrt (- (* x x) eps))))

  (- x (sqrt (- (* x x) eps))))