ENA, Section 1.4, Exercise 4d

Percentage Accurate: 68.5% → 98.9%
Time: 19.3s
Alternatives: 9
Speedup: 1.0×

Specification

?
\[\left(0 \leq x \land x \leq 1000000000\right) \land \left(-1 \leq \varepsilon \land \varepsilon \leq 1\right)\]
\[\begin{array}{l} \\ x - \sqrt{x \cdot x - \varepsilon} \end{array} \]
(FPCore (x eps) :precision binary64 (- x (sqrt (- (* x x) eps))))
double code(double x, double eps) {
	return x - sqrt(((x * x) - eps));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    ! Naive specification expression x - sqrt(x**2 - eps);
    ! the report measures this form at 68.5% average accuracy.
    code = x - sqrt(((x * x) - eps))
end function
public static double code(double x, double eps) {
	// Naive specification expression: x - sqrt(x*x - eps).
	final double radicand = (x * x) - eps;
	return x - Math.sqrt(radicand);
}
def code(x, eps):
	"""Naive specification expression: x - sqrt(x*x - eps)."""
	radicand = (x * x) - eps
	return x - math.sqrt(radicand)
function code(x, eps)
	# Naive specification expression x - sqrt(x*x - eps),
	# with each intermediate explicitly rounded to Float64.
	radicand = Float64(Float64(x * x) - eps)
	return Float64(x - sqrt(radicand))
end
function tmp = code(x, eps)
	% Naive specification expression: x - sqrt(x*x - eps).
	tmp = x - sqrt(((x * x) - eps));
end
(* Naive specification expression x - sqrt(x*x - eps); each step rounded at $MachinePrecision. *)
code[x_, eps_] := N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x - \sqrt{x \cdot x - \varepsilon}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 9 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 68.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ x - \sqrt{x \cdot x - \varepsilon} \end{array} \]
(FPCore (x eps) :precision binary64 (- x (sqrt (- (* x x) eps))))
double code(double x, double eps) {
	/* Initial program (68.5% accurate per report): naive x - sqrt(x*x - eps). */
	return x - sqrt(((x * x) - eps));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    ! Initial program: naive x - sqrt(x**2 - eps).
    code = x - sqrt(((x * x) - eps))
end function
public static double code(double x, double eps) {
	// Initial program: naive x - sqrt(x*x - eps).
	return x - Math.sqrt(((x * x) - eps));
}
def code(x, eps):
	"""Initial program: naive x - sqrt(x*x - eps)."""
	return x - math.sqrt(((x * x) - eps))
function code(x, eps)
	# Initial program: naive x - sqrt(x*x - eps), rounded to Float64 per step.
	return Float64(x - sqrt(Float64(Float64(x * x) - eps)))
end
function tmp = code(x, eps)
	% Initial program: naive x - sqrt(x*x - eps).
	tmp = x - sqrt(((x * x) - eps));
end
(* Initial program: naive x - sqrt(x*x - eps) at $MachinePrecision. *)
code[x_, eps_] := N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x - \sqrt{x \cdot x - \varepsilon}
\end{array}

Alternative 1: 98.9% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\ \;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}, \frac{\varepsilon}{x} \cdot -0.5\right)\right)}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (if (<= (- x (sqrt (- (* x x) eps))) -5e-153)
   (/ eps (+ x (hypot x (sqrt (- eps)))))
   (/
    eps
    (fma
     x
     2.0
     (fma -0.125 (/ eps (* (* x x) (/ x eps))) (* (/ eps x) -0.5))))))
double code(double x, double eps) {
	double tmp;
	/* Alternative 1 (98.9% accurate). Regime split on the naive value:
	 * the <= -5e-153 branch fires when the result is significantly negative. */
	if ((x - sqrt(((x * x) - eps))) <= -5e-153) {
		/* Conjugate form eps / (x + sqrt(x^2 - eps)); the sqrt is expressed as
		 * hypot(x, sqrt(-eps)) per the derivation, avoiding cancellation. */
		tmp = eps / (x + hypot(x, sqrt(-eps)));
	} else {
		/* Three-term Taylor expansion of eps / (x + sqrt(x^2 - eps)) around
		 * x = inf, evaluated with fma. Do not reassociate: the fma nesting
		 * order is part of what the report measures. */
		tmp = eps / fma(x, 2.0, fma(-0.125, (eps / ((x * x) * (x / eps))), ((eps / x) * -0.5)));
	}
	return tmp;
}
function code(x, eps)
	tmp = 0.0
	# Alternative 1: regime split on the naive value of x - sqrt(x*x - eps).
	if (Float64(x - sqrt(Float64(Float64(x * x) - eps))) <= -5e-153)
		# Conjugate form eps / (x + sqrt(x^2 - eps)) via hypot(x, sqrt(-eps)).
		tmp = Float64(eps / Float64(x + hypot(x, sqrt(Float64(-eps)))));
	else
		# Taylor expansion of eps / (x + sqrt(x^2 - eps)) around x = inf, via fma.
		tmp = Float64(eps / fma(x, 2.0, fma(-0.125, Float64(eps / Float64(Float64(x * x) * Float64(x / eps))), Float64(Float64(eps / x) * -0.5))));
	end
	return tmp
end
(* Alternative 1: regime split at -5e-153; hypot branch vs. fma-based Taylor expansion. *)
code[x_, eps_] := If[LessEqual[N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], -5e-153], N[(eps / N[(x + N[Sqrt[x ^ 2 + N[Sqrt[(-eps)], $MachinePrecision] ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(eps / N[(x * 2.0 + N[(-0.125 * N[(eps / N[(N[(x * x), $MachinePrecision] * N[(x / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(eps / x), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\
\;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}, \frac{\varepsilon}{x} \cdot -0.5\right)\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -5.00000000000000033e-153

    1. Initial program 98.8%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--98.6%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv98.5%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt98.1%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg98.1%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt98.1%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def98.1%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr98.1%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/98.2%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity98.2%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-99.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses99.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity99.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified99.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]

    if -5.00000000000000033e-153 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.6%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--6.7%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv6.7%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt6.7%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg6.7%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr2.6%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/2.6%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity2.6%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-52.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses52.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity52.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified52.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    6. Taylor expanded in x around inf 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + \left(0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + -0.125 \cdot \frac{{\left(\sqrt{-1}\right)}^{4} \cdot {\varepsilon}^{2}}{{x}^{3}}\right)}} \]
    7. Step-by-step derivation
      1. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + \left(0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + -0.125 \cdot \frac{{\left(\sqrt{-1}\right)}^{4} \cdot {\varepsilon}^{2}}{{x}^{3}}\right)} \]
      2. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + -0.125 \cdot \frac{{\left(\sqrt{-1}\right)}^{4} \cdot {\varepsilon}^{2}}{{x}^{3}}\right)}} \]
      3. +-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.125 \cdot \frac{{\left(\sqrt{-1}\right)}^{4} \cdot {\varepsilon}^{2}}{{x}^{3}} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}\right)} \]
      4. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\mathsf{fma}\left(-0.125, \frac{{\left(\sqrt{-1}\right)}^{4} \cdot {\varepsilon}^{2}}{{x}^{3}}, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}\right)} \]
    8. Simplified94.2%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, \mathsf{fma}\left(-0.125, \frac{\varepsilon}{\frac{{x}^{3}}{\varepsilon}}, \frac{\varepsilon}{x} \cdot -0.5\right)\right)}} \]
    9. Step-by-step derivation
      1. div-inv94.2%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{{x}^{3} \cdot \frac{1}{\varepsilon}}}, \frac{\varepsilon}{x} \cdot -0.5\right)\right)} \]
      2. unpow394.2%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{\left(\left(x \cdot x\right) \cdot x\right)} \cdot \frac{1}{\varepsilon}}, \frac{\varepsilon}{x} \cdot -0.5\right)\right)} \]
      3. associate-*l*100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{\left(x \cdot x\right) \cdot \left(x \cdot \frac{1}{\varepsilon}\right)}}, \frac{\varepsilon}{x} \cdot -0.5\right)\right)} \]
      4. div-inv100.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \color{blue}{\frac{x}{\varepsilon}}}, \frac{\varepsilon}{x} \cdot -0.5\right)\right)} \]
    10. Applied egg-rr100.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \mathsf{fma}\left(-0.125, \frac{\varepsilon}{\color{blue}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}}, \frac{\varepsilon}{x} \cdot -0.5\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\ \;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \mathsf{fma}\left(-0.125, \frac{\varepsilon}{\left(x \cdot x\right) \cdot \frac{x}{\varepsilon}}, \frac{\varepsilon}{x} \cdot -0.5\right)\right)}\\ \end{array} \]

Alternative 2: 98.7% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\ \;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (if (<= (- x (sqrt (- (* x x) eps))) -5e-153)
   (/ eps (+ x (hypot x (sqrt (- eps)))))
   (/ eps (fma x 2.0 (* (/ eps x) -0.5)))))
double code(double x, double eps) {
	double tmp;
	/* Alternative 2 (98.7% accurate). Same regime split as Alternative 1. */
	if ((x - sqrt(((x * x) - eps))) <= -5e-153) {
		/* Conjugate form eps / (x + sqrt(x^2 - eps)) via hypot(x, sqrt(-eps)). */
		tmp = eps / (x + hypot(x, sqrt(-eps)));
	} else {
		/* Two-term Taylor expansion of eps / (x + sqrt(x^2 - eps)) around x = inf. */
		tmp = eps / fma(x, 2.0, ((eps / x) * -0.5));
	}
	return tmp;
}
function code(x, eps)
	tmp = 0.0
	# Alternative 2: regime split on the naive value of x - sqrt(x*x - eps).
	if (Float64(x - sqrt(Float64(Float64(x * x) - eps))) <= -5e-153)
		# Conjugate form eps / (x + sqrt(x^2 - eps)) via hypot(x, sqrt(-eps)).
		tmp = Float64(eps / Float64(x + hypot(x, sqrt(Float64(-eps)))));
	else
		# Two-term Taylor expansion of eps / (x + sqrt(x^2 - eps)) around x = inf.
		tmp = Float64(eps / fma(x, 2.0, Float64(Float64(eps / x) * -0.5)));
	end
	return tmp
end
(* Alternative 2: regime split at -5e-153; hypot branch vs. two-term Taylor expansion. *)
code[x_, eps_] := If[LessEqual[N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], -5e-153], N[(eps / N[(x + N[Sqrt[x ^ 2 + N[Sqrt[(-eps)], $MachinePrecision] ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(eps / N[(x * 2.0 + N[(N[(eps / x), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\
\;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -5.00000000000000033e-153

    1. Initial program 98.8%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--98.6%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv98.5%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt98.1%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg98.1%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt98.1%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def98.1%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr98.1%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/98.2%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity98.2%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-99.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses99.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity99.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified99.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]

    if -5.00000000000000033e-153 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.6%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--6.7%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv6.7%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt6.7%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg6.7%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr2.6%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/2.6%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity2.6%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-52.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses52.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity52.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified52.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    6. Taylor expanded in x around inf 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
    7. Step-by-step derivation
      1. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
      2. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
      3. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
      4. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\varepsilon \cdot \color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)}\right)}{x}\right)} \]
      5. rem-square-sqrt99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\varepsilon \cdot \color{blue}{-1}\right)}{x}\right)} \]
      6. *-commutative99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left(-1 \cdot \varepsilon\right)}}{x}\right)} \]
      7. associate-*r*99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{\left(0.5 \cdot -1\right) \cdot \varepsilon}}{x}\right)} \]
      8. metadata-eval99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5} \cdot \varepsilon}{x}\right)} \]
      9. associate-*r/99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
      10. *-commutative99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{\varepsilon}{x} \cdot -0.5}\right)} \]
    8. Simplified99.6%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\ \;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}\\ \end{array} \]

Alternative 3: 98.5% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\ \;\;\;\;x - \mathsf{hypot}\left(\sqrt{-\varepsilon}, x\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (if (<= (- x (sqrt (- (* x x) eps))) -5e-153)
   (- x (hypot (sqrt (- eps)) x))
   (/ eps (fma x 2.0 (* (/ eps x) -0.5)))))
double code(double x, double eps) {
	double tmp;
	/* Alternative 3 (98.5% accurate). Same regime split as Alternatives 1-2. */
	if ((x - sqrt(((x * x) - eps))) <= -5e-153) {
		/* sqrt(x^2 - eps) rewritten as hypot(sqrt(-eps), x); keeps the
		 * original subtraction shape rather than the conjugate form. */
		tmp = x - hypot(sqrt(-eps), x);
	} else {
		/* Two-term Taylor expansion of eps / (x + sqrt(x^2 - eps)) around x = inf. */
		tmp = eps / fma(x, 2.0, ((eps / x) * -0.5));
	}
	return tmp;
}
function code(x, eps)
	tmp = 0.0
	# Alternative 3: regime split on the naive value of x - sqrt(x*x - eps).
	if (Float64(x - sqrt(Float64(Float64(x * x) - eps))) <= -5e-153)
		# sqrt(x^2 - eps) rewritten as hypot(sqrt(-eps), x).
		tmp = Float64(x - hypot(sqrt(Float64(-eps)), x));
	else
		# Two-term Taylor expansion of eps / (x + sqrt(x^2 - eps)) around x = inf.
		tmp = Float64(eps / fma(x, 2.0, Float64(Float64(eps / x) * -0.5)));
	end
	return tmp
end
(* Alternative 3: regime split at -5e-153; hypot rewrite of the sqrt vs. two-term Taylor expansion. *)
code[x_, eps_] := If[LessEqual[N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], -5e-153], N[(x - N[Sqrt[N[Sqrt[(-eps)], $MachinePrecision] ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision], N[(eps / N[(x * 2.0 + N[(N[(eps / x), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\
\;\;\;\;x - \mathsf{hypot}\left(\sqrt{-\varepsilon}, x\right)\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -5.00000000000000033e-153

    1. Initial program 98.8%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. sub-neg98.8%

        \[\leadsto x - \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}} \]
      2. +-commutative98.8%

        \[\leadsto x - \sqrt{\color{blue}{\left(-\varepsilon\right) + x \cdot x}} \]
      3. add-sqr-sqrt98.8%

        \[\leadsto x - \sqrt{\color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}} + x \cdot x} \]
      4. hypot-def98.8%

        \[\leadsto x - \color{blue}{\mathsf{hypot}\left(\sqrt{-\varepsilon}, x\right)} \]
    3. Applied egg-rr98.8%

      \[\leadsto x - \color{blue}{\mathsf{hypot}\left(\sqrt{-\varepsilon}, x\right)} \]

    if -5.00000000000000033e-153 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.6%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--6.7%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv6.7%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt6.7%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg6.7%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr2.6%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/2.6%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity2.6%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-52.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses52.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity52.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified52.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    6. Taylor expanded in x around inf 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
    7. Step-by-step derivation
      1. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
      2. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
      3. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
      4. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\varepsilon \cdot \color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)}\right)}{x}\right)} \]
      5. rem-square-sqrt99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\varepsilon \cdot \color{blue}{-1}\right)}{x}\right)} \]
      6. *-commutative99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left(-1 \cdot \varepsilon\right)}}{x}\right)} \]
      7. associate-*r*99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{\left(0.5 \cdot -1\right) \cdot \varepsilon}}{x}\right)} \]
      8. metadata-eval99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5} \cdot \varepsilon}{x}\right)} \]
      9. associate-*r/99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
      10. *-commutative99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{\varepsilon}{x} \cdot -0.5}\right)} \]
    8. Simplified99.6%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\ \;\;\;\;x - \mathsf{hypot}\left(\sqrt{-\varepsilon}, x\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}\\ \end{array} \]

Alternative 4: 98.5% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x - \sqrt{x \cdot x - \varepsilon}\\ \mathbf{if}\;t_0 \leq -5 \cdot 10^{-153}:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (- x (sqrt (- (* x x) eps)))))
   (if (<= t_0 -5e-153) t_0 (/ eps (fma x 2.0 (* (/ eps x) -0.5))))))
double code(double x, double eps) {
	/* Alternative 4 (98.5% accurate, 0.5x speedup): the naive value is
	 * computed once and reused both as the regime test and as the result
	 * in the regime where it is already accurate. */
	double t_0 = x - sqrt(((x * x) - eps));
	double tmp;
	if (t_0 <= -5e-153) {
		tmp = t_0;
	} else {
		/* Two-term Taylor expansion of eps / (x + sqrt(x^2 - eps)) around x = inf. */
		tmp = eps / fma(x, 2.0, ((eps / x) * -0.5));
	}
	return tmp;
}
# x - sqrt(x*x - eps); fma-based rearrangement avoids cancellation
# except for large-magnitude negative results, which keep the direct form.
function code(x, eps)
	direct = Float64(x - sqrt(Float64(Float64(x * x) - eps)))
	if direct <= -5e-153
		return direct
	end
	return Float64(eps / fma(x, 2.0, Float64(Float64(eps / x) * -0.5)))
end
code[x_, eps_] := Block[{t$95$0 = N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -5e-153], t$95$0, N[(eps / N[(x * 2.0 + N[(N[(eps / x), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x - \sqrt{x \cdot x - \varepsilon}\\
\mathbf{if}\;t_0 \leq -5 \cdot 10^{-153}:\\
\;\;\;\;t_0\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -5.00000000000000033e-153

    1. Initial program 98.8%

      \[x - \sqrt{x \cdot x - \varepsilon} \]

    if -5.00000000000000033e-153 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.6%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Step-by-step derivation
      1. flip--6.7%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. div-inv6.7%

        \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      3. add-sqr-sqrt6.7%

        \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
      4. sub-neg6.7%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      5. add-sqr-sqrt2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      6. hypot-def2.6%

        \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    3. Applied egg-rr2.6%

      \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Step-by-step derivation
      1. associate-*r/2.6%

        \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. *-rgt-identity2.6%

        \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r-52.4%

        \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses52.4%

        \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. +-lft-identity52.4%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. Simplified52.4%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    6. Taylor expanded in x around inf 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
    7. Step-by-step derivation
      1. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
      2. fma-def0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
      3. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
      4. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\varepsilon \cdot \color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)}\right)}{x}\right)} \]
      5. rem-square-sqrt99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\varepsilon \cdot \color{blue}{-1}\right)}{x}\right)} \]
      6. *-commutative99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left(-1 \cdot \varepsilon\right)}}{x}\right)} \]
      7. associate-*r*99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{\left(0.5 \cdot -1\right) \cdot \varepsilon}}{x}\right)} \]
      8. metadata-eval99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5} \cdot \varepsilon}{x}\right)} \]
      9. associate-*r/99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
      10. *-commutative99.6%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{\varepsilon}{x} \cdot -0.5}\right)} \]
    8. Simplified99.6%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\ \;\;\;\;x - \sqrt{x \cdot x - \varepsilon}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}\\ \end{array} \]

Alternative 5: 98.0% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x - \sqrt{x \cdot x - \varepsilon}\\ \mathbf{if}\;t_0 \leq -5 \cdot 10^{-153}:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x} \cdot 0.5\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (- x (sqrt (- (* x x) eps)))))
   (if (<= t_0 -5e-153) t_0 (* (/ eps x) 0.5))))
double code(double x, double eps) {
	double t_0 = x - sqrt(((x * x) - eps));
	double tmp;
	if (t_0 <= -5e-153) {
		tmp = t_0;
	} else {
		tmp = (eps / x) * 0.5;
	}
	return tmp;
}
! x - sqrt(x*x - eps); uses the series eps/(2x) when cancellation looms.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: direct
    direct = x - sqrt(((x * x) - eps))
    if (direct <= (-5d-153)) then
        code = direct
    else
        code = (eps / x) * 0.5d0
    end if
end function
/** x - sqrt(x*x - eps); switches to the series eps/(2x) to avoid cancellation. */
public static double code(double x, double eps) {
	double direct = x - Math.sqrt(x * x - eps);
	return (direct <= -5e-153) ? direct : (eps / x) * 0.5;
}
def code(x, eps):
	"""x - sqrt(x*x - eps); uses the series eps/(2x) when the direct form cancels."""
	direct = x - math.sqrt(x * x - eps)
	if direct <= -5e-153:
		return direct
	return (eps / x) * 0.5
# x - sqrt(x*x - eps); series fallback eps/(2x) avoids cancellation.
function code(x, eps)
	direct = Float64(x - sqrt(Float64(Float64(x * x) - eps)))
	if direct <= -5e-153
		return direct
	end
	return Float64(Float64(eps / x) * 0.5)
end
% x - sqrt(x*x - eps); series fallback eps/(2x) avoids cancellation.
function tmp_2 = code(x, eps)
	direct = x - sqrt(((x * x) - eps));
	if (direct <= -5e-153)
		tmp_2 = direct;
	else
		tmp_2 = (eps / x) * 0.5;
	end
end
code[x_, eps_] := Block[{t$95$0 = N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -5e-153], t$95$0, N[(N[(eps / x), $MachinePrecision] * 0.5), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x - \sqrt{x \cdot x - \varepsilon}\\
\mathbf{if}\;t_0 \leq -5 \cdot 10^{-153}:\\
\;\;\;\;t_0\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{x} \cdot 0.5\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -5.00000000000000033e-153

    1. Initial program 98.8%

      \[x - \sqrt{x \cdot x - \varepsilon} \]

    if -5.00000000000000033e-153 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.6%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Taylor expanded in x around inf 98.8%

      \[\leadsto \color{blue}{0.5 \cdot \frac{\varepsilon}{x}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification98.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -5 \cdot 10^{-153}:\\ \;\;\;\;x - \sqrt{x \cdot x - \varepsilon}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x} \cdot 0.5\\ \end{array} \]

Alternative 6: 86.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 2.8 \cdot 10^{-94}:\\ \;\;\;\;x - \sqrt{-\varepsilon}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x} \cdot 0.5\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (if (<= x 2.8e-94) (- x (sqrt (- eps))) (* (/ eps x) 0.5)))
double code(double x, double eps) {
	double tmp;
	if (x <= 2.8e-94) {
		tmp = x - sqrt(-eps);
	} else {
		tmp = (eps / x) * 0.5;
	}
	return tmp;
}
! Regime split: tiny x uses x - sqrt(-eps); otherwise the series eps/(2x).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    if (x <= 2.8d-94) then
        code = x - sqrt(-eps)
    else
        code = (eps / x) * 0.5d0
    end if
end function
/** Regime split: tiny x uses x - sqrt(-eps); otherwise the series eps/(2x). */
public static double code(double x, double eps) {
	return (x <= 2.8e-94) ? (x - Math.sqrt(-eps)) : (eps / x) * 0.5;
}
def code(x, eps):
	"""Regime split: tiny x uses x - sqrt(-eps); otherwise the series eps/(2x)."""
	if x <= 2.8e-94:
		return x - math.sqrt(-eps)
	return (eps / x) * 0.5
# Regime split: tiny x uses x - sqrt(-eps); otherwise the series eps/(2x).
function code(x, eps)
	if x <= 2.8e-94
		return Float64(x - sqrt(Float64(-eps)))
	end
	return Float64(Float64(eps / x) * 0.5)
end
% Regime split: tiny x uses x - sqrt(-eps); otherwise the series eps/(2x).
function tmp_2 = code(x, eps)
	if (x <= 2.8e-94)
		tmp_2 = x - sqrt(-eps);
	else
		tmp_2 = (eps / x) * 0.5;
	end
end
code[x_, eps_] := If[LessEqual[x, 2.8e-94], N[(x - N[Sqrt[(-eps)], $MachinePrecision]), $MachinePrecision], N[(N[(eps / x), $MachinePrecision] * 0.5), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.8 \cdot 10^{-94}:\\
\;\;\;\;x - \sqrt{-\varepsilon}\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{x} \cdot 0.5\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 2.7999999999999998e-94

    1. Initial program 94.9%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Taylor expanded in x around 0 92.8%

      \[\leadsto x - \sqrt{\color{blue}{-1 \cdot \varepsilon}} \]
    3. Step-by-step derivation
      1. neg-mul-192.8%

        \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]
    4. Simplified92.8%

      \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]

    if 2.7999999999999998e-94 < x

    1. Initial program 22.7%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Taylor expanded in x around inf 83.9%

      \[\leadsto \color{blue}{0.5 \cdot \frac{\varepsilon}{x}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification89.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 2.8 \cdot 10^{-94}:\\ \;\;\;\;x - \sqrt{-\varepsilon}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x} \cdot 0.5\\ \end{array} \]

Alternative 7: 37.9% accurate, 21.4× speedup?

\[\begin{array}{l} \\ \frac{\varepsilon}{x} \cdot 0.5 \end{array} \]
(FPCore (x eps) :precision binary64 (* (/ eps x) 0.5))
/* First-order series of x - sqrt(x*x - eps) for large x: eps/(2x). */
double code(double x, double eps) {
	const double ratio = eps / x;
	return ratio * 0.5;
}
! First-order series approximation eps/(2x).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: ratio
    ratio = eps / x
    code = ratio * 0.5d0
end function
/** First-order series approximation eps/(2x). */
public static double code(double x, double eps) {
	double ratio = eps / x;
	return ratio * 0.5;
}
def code(x, eps):
	"""First-order series approximation eps/(2x)."""
	ratio = eps / x
	return ratio * 0.5
# First-order series approximation eps/(2x).
function code(x, eps)
	ratio = Float64(eps / x)
	return Float64(ratio * 0.5)
end
% First-order series approximation eps/(2x).
function tmp = code(x, eps)
	ratio = eps / x;
	tmp = ratio * 0.5;
end
code[x_, eps_] := N[(N[(eps / x), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}

\\
\frac{\varepsilon}{x} \cdot 0.5
\end{array}
Derivation
  1. Initial program 69.3%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Taylor expanded in x around inf 36.9%

    \[\leadsto \color{blue}{0.5 \cdot \frac{\varepsilon}{x}} \]
  3. Final simplification36.9%

    \[\leadsto \frac{\varepsilon}{x} \cdot 0.5 \]

Alternative 8: 5.5% accurate, 35.7× speedup?

\[\begin{array}{l} \\ x \cdot -2 \end{array} \]
(FPCore (x eps) :precision binary64 (* x -2.0))
/* Degenerate Taylor limit -2x; eps is intentionally unused. */
double code(double x, double eps) {
	(void) eps;
	return -2.0 * x;
}
! Degenerate Taylor limit -2x; eps is intentionally unused.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = -2.0d0 * x
end function
/** Degenerate Taylor limit -2x; eps is intentionally unused. */
public static double code(double x, double eps) {
	return -2.0 * x;
}
def code(x, eps):
	"""Degenerate Taylor limit -2x; eps is intentionally unused."""
	return -2.0 * x
# Degenerate Taylor limit -2x; eps is intentionally unused.
function code(x, eps)
	return Float64(-2.0 * x)
end
% Degenerate Taylor limit -2x; eps is intentionally unused.
function tmp = code(x, eps)
	tmp = -2.0 * x;
end
code[x_, eps_] := N[(x * -2.0), $MachinePrecision]
\begin{array}{l}

\\
x \cdot -2
\end{array}
Derivation
  1. Initial program 69.3%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Step-by-step derivation
    1. flip--69.2%

      \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    2. div-inv69.1%

      \[\leadsto \color{blue}{\left(x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    3. add-sqr-sqrt68.8%

      \[\leadsto \left(x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}\right) \cdot \frac{1}{x + \sqrt{x \cdot x - \varepsilon}} \]
    4. sub-neg68.8%

      \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
    5. add-sqr-sqrt67.5%

      \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
    6. hypot-def67.5%

      \[\leadsto \left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  3. Applied egg-rr67.5%

    \[\leadsto \color{blue}{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot \frac{1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  4. Step-by-step derivation
    1. associate-*r/67.6%

      \[\leadsto \color{blue}{\frac{\left(x \cdot x - \left(x \cdot x - \varepsilon\right)\right) \cdot 1}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    2. *-rgt-identity67.6%

      \[\leadsto \frac{\color{blue}{x \cdot x - \left(x \cdot x - \varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    3. associate--r-84.3%

      \[\leadsto \frac{\color{blue}{\left(x \cdot x - x \cdot x\right) + \varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    4. +-inverses84.3%

      \[\leadsto \frac{\color{blue}{0} + \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. +-lft-identity84.3%

      \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
  5. Simplified84.3%

    \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  6. Taylor expanded in x around inf 0.0%

    \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
  7. Step-by-step derivation
    1. *-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
    2. fma-def0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
    3. associate-*r/0.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
    4. unpow20.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\varepsilon \cdot \color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)}\right)}{x}\right)} \]
    5. rem-square-sqrt37.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\varepsilon \cdot \color{blue}{-1}\right)}{x}\right)} \]
    6. *-commutative37.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left(-1 \cdot \varepsilon\right)}}{x}\right)} \]
    7. associate-*r*37.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{\left(0.5 \cdot -1\right) \cdot \varepsilon}}{x}\right)} \]
    8. metadata-eval37.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5} \cdot \varepsilon}{x}\right)} \]
    9. associate-*r/37.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5 \cdot \frac{\varepsilon}{x}}\right)} \]
    10. *-commutative37.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{\varepsilon}{x} \cdot -0.5}\right)} \]
  8. Simplified37.7%

    \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, \frac{\varepsilon}{x} \cdot -0.5\right)}} \]
  9. Taylor expanded in eps around inf 5.7%

    \[\leadsto \color{blue}{-2 \cdot x} \]
  10. Step-by-step derivation
    1. *-commutative5.7%

      \[\leadsto \color{blue}{x \cdot -2} \]
  11. Simplified5.7%

    \[\leadsto \color{blue}{x \cdot -2} \]
  12. Final simplification5.7%

    \[\leadsto x \cdot -2 \]

Alternative 9: 4.2% accurate, 107.0× speedup?

\[\begin{array}{l} \\ 0 \end{array} \]
(FPCore (x eps) :precision binary64 0.0)
/* Constant-zero approximation: fastest, least accurate alternative.
 * Both arguments are intentionally unused. */
double code(double x, double eps) {
	(void) x;
	(void) eps;
	return 0.0;
}
! Constant-zero approximation: fastest, least accurate alternative.
! Both arguments are intentionally unused.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = 0.0d0
end function
/** Constant-zero approximation: fastest, least accurate alternative; both arguments unused. */
public static double code(double x, double eps) {
	return 0.0;
}
def code(x, eps):
	"""Constant-zero approximation: fastest, least accurate alternative; both arguments unused."""
	return 0.0
# Constant-zero approximation: fastest, least accurate alternative; both arguments unused.
function code(x, eps)
	return 0.0
end
% Constant-zero approximation: fastest, least accurate alternative; both arguments unused.
function tmp = code(x, eps)
	tmp = 0.0;
end
code[x_, eps_] := 0.0
\begin{array}{l}

\\
0
\end{array}
Derivation
  1. Initial program 69.3%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Step-by-step derivation
    1. sub-neg69.3%

      \[\leadsto \color{blue}{x + \left(-\sqrt{x \cdot x - \varepsilon}\right)} \]
    2. +-commutative69.3%

      \[\leadsto \color{blue}{\left(-\sqrt{x \cdot x - \varepsilon}\right) + x} \]
    3. add-sqr-sqrt68.6%

      \[\leadsto \left(-\color{blue}{\sqrt{\sqrt{x \cdot x - \varepsilon}} \cdot \sqrt{\sqrt{x \cdot x - \varepsilon}}}\right) + x \]
    4. distribute-rgt-neg-in68.6%

      \[\leadsto \color{blue}{\sqrt{\sqrt{x \cdot x - \varepsilon}} \cdot \left(-\sqrt{\sqrt{x \cdot x - \varepsilon}}\right)} + x \]
    5. fma-def68.5%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\sqrt{\sqrt{x \cdot x - \varepsilon}}, -\sqrt{\sqrt{x \cdot x - \varepsilon}}, x\right)} \]
    6. pow1/268.5%

      \[\leadsto \mathsf{fma}\left(\sqrt{\color{blue}{{\left(x \cdot x - \varepsilon\right)}^{0.5}}}, -\sqrt{\sqrt{x \cdot x - \varepsilon}}, x\right) \]
    7. sqrt-pow168.7%

      \[\leadsto \mathsf{fma}\left(\color{blue}{{\left(x \cdot x - \varepsilon\right)}^{\left(\frac{0.5}{2}\right)}}, -\sqrt{\sqrt{x \cdot x - \varepsilon}}, x\right) \]
    8. metadata-eval68.7%

      \[\leadsto \mathsf{fma}\left({\left(x \cdot x - \varepsilon\right)}^{\color{blue}{0.25}}, -\sqrt{\sqrt{x \cdot x - \varepsilon}}, x\right) \]
    9. pow1/268.7%

      \[\leadsto \mathsf{fma}\left({\left(x \cdot x - \varepsilon\right)}^{0.25}, -\sqrt{\color{blue}{{\left(x \cdot x - \varepsilon\right)}^{0.5}}}, x\right) \]
    10. sqrt-pow168.5%

      \[\leadsto \mathsf{fma}\left({\left(x \cdot x - \varepsilon\right)}^{0.25}, -\color{blue}{{\left(x \cdot x - \varepsilon\right)}^{\left(\frac{0.5}{2}\right)}}, x\right) \]
    11. metadata-eval68.5%

      \[\leadsto \mathsf{fma}\left({\left(x \cdot x - \varepsilon\right)}^{0.25}, -{\left(x \cdot x - \varepsilon\right)}^{\color{blue}{0.25}}, x\right) \]
  3. Applied egg-rr68.5%

    \[\leadsto \color{blue}{\mathsf{fma}\left({\left(x \cdot x - \varepsilon\right)}^{0.25}, -{\left(x \cdot x - \varepsilon\right)}^{0.25}, x\right)} \]
  4. Taylor expanded in x around inf 4.2%

    \[\leadsto \color{blue}{-1 \cdot x + x} \]
  5. Step-by-step derivation
    1. distribute-lft1-in4.2%

      \[\leadsto \color{blue}{\left(-1 + 1\right) \cdot x} \]
    2. metadata-eval4.2%

      \[\leadsto \color{blue}{0} \cdot x \]
    3. mul0-lft4.2%

      \[\leadsto \color{blue}{0} \]
  6. Simplified4.2%

    \[\leadsto \color{blue}{0} \]
  7. Final simplification4.2%

    \[\leadsto 0 \]

Developer target: 99.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \end{array} \]
(FPCore (x eps) :precision binary64 (/ eps (+ x (sqrt (- (* x x) eps)))))
double code(double x, double eps) {
	return eps / (x + sqrt(((x * x) - eps)));
}
! Cancellation-free rationalized form eps / (x + sqrt(x*x - eps)).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: root
    root = sqrt((x * x) - eps)
    code = eps / (x + root)
end function
/** Cancellation-free rationalized form eps / (x + sqrt(x*x - eps)). */
public static double code(double x, double eps) {
	double root = Math.sqrt(x * x - eps);
	return eps / (x + root);
}
def code(x, eps):
	"""Cancellation-free rationalized form eps / (x + sqrt(x*x - eps))."""
	root = math.sqrt(x * x - eps)
	return eps / (x + root)
# Cancellation-free rationalized form eps / (x + sqrt(x*x - eps)).
function code(x, eps)
	root = sqrt(Float64(Float64(x * x) - eps))
	return Float64(eps / Float64(x + root))
end
% Cancellation-free rationalized form eps / (x + sqrt(x*x - eps)).
function tmp = code(x, eps)
	root = sqrt((x * x) - eps);
	tmp = eps / (x + root);
end
code[x_, eps_] := N[(eps / N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\varepsilon}{x + \sqrt{x \cdot x - \varepsilon}}
\end{array}

Reproduce

?
herbie shell --seed 2023278 
(FPCore (x eps)
  :name "ENA, Section 1.4, Exercise 4d"
  :precision binary64
  :pre (and (and (<= 0.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))

  :herbie-target
  (/ eps (+ x (sqrt (- (* x x) eps))))

  (- x (sqrt (- (* x x) eps))))