ENA, Section 1.4, Exercise 4d

Percentage Accurate: 62.4% → 98.4%
Time: 7.9s
Alternatives: 7
Speedup: 1.0×

Specification

?
\[\left(0 \leq x \land x \leq 1000000000\right) \land \left(-1 \leq \varepsilon \land \varepsilon \leq 1\right)\]
\[\begin{array}{l} \\ x - \sqrt{x \cdot x - \varepsilon} \end{array} \]
(FPCore (x eps) :precision binary64 (- x (sqrt (- (* x x) eps))))
double code(double x, double eps) {
	return x - sqrt(((x * x) - eps));
}
real(8) function code(x, eps)
    ! Naive evaluation of x - sqrt(x*x - eps); cancels when eps is small.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: radicand
    radicand = (x * x) - eps
    code = x - sqrt(radicand)
end function
// Naive evaluation of x - sqrt(x*x - eps); cancels when eps is small vs. x*x.
public static double code(double x, double eps) {
	double radicand = (x * x) - eps;
	return x - Math.sqrt(radicand);
}
def code(x, eps):
	# Naive evaluation of x - sqrt(x^2 - eps); cancels when eps is small vs. x^2.
	radicand = x * x - eps
	return x - math.sqrt(radicand)
function code(x, eps)
	# Naive evaluation of x - sqrt(x^2 - eps); cancels when eps is small.
	radicand = Float64(Float64(x * x) - eps)
	return Float64(x - sqrt(radicand))
end
function tmp = code(x, eps)
	% Naive evaluation of x - sqrt(x^2 - eps); cancels when eps is small.
	radicand = (x * x) - eps;
	tmp = x - sqrt(radicand);
end
(* Naive machine-precision evaluation of x - Sqrt[x^2 - eps]; the report header gives 62.4% accuracy for this form. *)
code[x_, eps_] := N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x - \sqrt{x \cdot x - \varepsilon}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 7 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 62.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ x - \sqrt{x \cdot x - \varepsilon} \end{array} \]
(FPCore (x eps) :precision binary64 (- x (sqrt (- (* x x) eps))))
double code(double x, double eps) {
	return x - sqrt(((x * x) - eps));
}
real(8) function code(x, eps)
    ! Initial program: naive x - sqrt(x*x - eps); cancels when eps is small.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: radicand
    radicand = (x * x) - eps
    code = x - sqrt(radicand)
end function
// Initial program: naive x - sqrt(x*x - eps); cancels when eps is small.
public static double code(double x, double eps) {
	double radicand = (x * x) - eps;
	return x - Math.sqrt(radicand);
}
def code(x, eps):
	# Initial program: naive x - sqrt(x^2 - eps); cancels when eps is small.
	radicand = x * x - eps
	return x - math.sqrt(radicand)
function code(x, eps)
	# Initial program: naive x - sqrt(x^2 - eps); cancels when eps is small.
	radicand = Float64(Float64(x * x) - eps)
	return Float64(x - sqrt(radicand))
end
function tmp = code(x, eps)
	% Initial program: naive x - sqrt(x^2 - eps); cancels when eps is small.
	radicand = (x * x) - eps;
	tmp = x - sqrt(radicand);
end
(* Initial program: naive machine-precision evaluation of x - Sqrt[x^2 - eps]; report gives 62.4% accuracy. *)
code[x_, eps_] := N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x - \sqrt{x \cdot x - \varepsilon}
\end{array}

Alternative 1: 98.4% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -2 \cdot 10^{-151}:\\ \;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (if (<= (- x (sqrt (- (* x x) eps))) -2e-151)
   (/ eps (+ x (hypot x (sqrt (- eps)))))
   (/ eps (+ (* x 2.0) (* -0.5 (/ eps x))))))
double code(double x, double eps) {
	double tmp;
	if ((x - sqrt(((x * x) - eps))) <= -2e-151) {
		tmp = eps / (x + hypot(x, sqrt(-eps)));
	} else {
		tmp = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	}
	return tmp;
}
// Alternative 1: regime split on the naive value. Quotient (hypot) form when it
// is safely negative, series expansion in eps otherwise.
public static double code(double x, double eps) {
	if ((x - Math.sqrt((x * x) - eps)) <= -2e-151) {
		return eps / (x + Math.hypot(x, Math.sqrt(-eps)));
	}
	return eps / ((x * 2.0) + (-0.5 * (eps / x)));
}
def code(x, eps):
	# Regime split on the naive value: quotient (hypot) form when it is safely
	# negative, series expansion in eps otherwise.
	if x - math.sqrt(x * x - eps) <= -2e-151:
		return eps / (x + math.hypot(x, math.sqrt(-eps)))
	return eps / (x * 2.0 + -0.5 * (eps / x))
function code(x, eps)
	# Regime split on the naive value: quotient (hypot) form when it is safely
	# negative, series expansion in eps otherwise.
	naive = Float64(x - sqrt(Float64(Float64(x * x) - eps)))
	if naive <= -2e-151
		return Float64(eps / Float64(x + hypot(x, sqrt(Float64(-eps)))))
	end
	return Float64(eps / Float64(Float64(x * 2.0) + Float64(-0.5 * Float64(eps / x))))
end
function tmp_2 = code(x, eps)
	% Regime split on the naive value: quotient (hypot) form when it is safely
	% negative, series expansion in eps otherwise.
	if ((x - sqrt((x * x) - eps)) <= -2e-151)
		tmp_2 = eps / (x + hypot(x, sqrt(-eps)));
	else
		tmp_2 = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	end
end
(* Alternative 1: regime split on the naive value — quotient form via Sqrt[x^2 + Sqrt[-eps]^2] (hypot) when safely negative, series form in eps otherwise; report gives 98.4% accuracy. *)
code[x_, eps_] := If[LessEqual[N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], -2e-151], N[(eps / N[(x + N[Sqrt[x ^ 2 + N[Sqrt[(-eps)], $MachinePrecision] ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(eps / N[(N[(x * 2.0), $MachinePrecision] + N[(-0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x - \sqrt{x \cdot x - \varepsilon} \leq -2 \cdot 10^{-151}:\\
\;\;\;\;\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -1.9999999999999999e-151

    1. Initial program 99.3%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip--99.2%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. add-sqr-sqrt98.6%

        \[\leadsto \frac{x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}}{x + \sqrt{x \cdot x - \varepsilon}} \]
      3. div-sub98.6%

        \[\leadsto \color{blue}{\frac{x \cdot x}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      4. pow298.6%

        \[\leadsto \frac{\color{blue}{{x}^{2}}}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      5. sub-neg98.6%

        \[\leadsto \frac{{x}^{2}}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      6. add-sqr-sqrt98.6%

        \[\leadsto \frac{{x}^{2}}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      7. hypot-define98.6%

        \[\leadsto \frac{{x}^{2}}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      8. pow298.6%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{\color{blue}{{x}^{2}} - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      9. sub-neg98.6%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      10. add-sqr-sqrt98.6%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      11. hypot-define98.6%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Applied egg-rr98.6%

      \[\leadsto \color{blue}{\frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    5. Step-by-step derivation
      1. div-sub98.6%

        \[\leadsto \color{blue}{\frac{{x}^{2} - \left({x}^{2} - \varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. sub-neg98.6%

        \[\leadsto \frac{{x}^{2} - \color{blue}{\left({x}^{2} + \left(-\varepsilon\right)\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r+99.3%

        \[\leadsto \frac{\color{blue}{\left({x}^{2} - {x}^{2}\right) - \left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses99.3%

        \[\leadsto \frac{\color{blue}{0} - \left(-\varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. neg-sub099.3%

        \[\leadsto \frac{\color{blue}{-\left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      6. remove-double-neg99.3%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    6. Simplified99.3%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]

    if -1.9999999999999999e-151 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.7%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip--6.8%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. add-sqr-sqrt6.9%

        \[\leadsto \frac{x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}}{x + \sqrt{x \cdot x - \varepsilon}} \]
      3. div-sub6.8%

        \[\leadsto \color{blue}{\frac{x \cdot x}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      4. pow26.8%

        \[\leadsto \frac{\color{blue}{{x}^{2}}}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      5. sub-neg6.8%

        \[\leadsto \frac{{x}^{2}}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      6. add-sqr-sqrt2.1%

        \[\leadsto \frac{{x}^{2}}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      7. hypot-define2.1%

        \[\leadsto \frac{{x}^{2}}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      8. pow22.1%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{\color{blue}{{x}^{2}} - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      9. sub-neg2.1%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      10. add-sqr-sqrt2.1%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      11. hypot-define2.1%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Applied egg-rr2.1%

      \[\leadsto \color{blue}{\frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    5. Step-by-step derivation
      1. div-sub2.1%

        \[\leadsto \color{blue}{\frac{{x}^{2} - \left({x}^{2} - \varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. sub-neg2.1%

        \[\leadsto \frac{{x}^{2} - \color{blue}{\left({x}^{2} + \left(-\varepsilon\right)\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r+45.2%

        \[\leadsto \frac{\color{blue}{\left({x}^{2} - {x}^{2}\right) - \left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses45.2%

        \[\leadsto \frac{\color{blue}{0} - \left(-\varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. neg-sub045.2%

        \[\leadsto \frac{\color{blue}{-\left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      6. remove-double-neg45.2%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    6. Simplified45.2%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    7. Taylor expanded in eps around 0 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x}} \]
    8. Step-by-step derivation
      1. +-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
      2. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
      3. fma-define0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
      4. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
      5. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot \varepsilon\right)}}{x}\right)} \]
      6. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot \varepsilon\right)}{x}\right)} \]
      7. rem-square-sqrt99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{-1} \cdot \varepsilon\right)}{x}\right)} \]
      8. neg-mul-199.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left(-\varepsilon\right)}}{x}\right)} \]
      9. distribute-rgt-neg-in99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5 \cdot \varepsilon}}{x}\right)} \]
      10. distribute-frac-neg99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-\frac{0.5 \cdot \varepsilon}{x}}\right)} \]
      11. distribute-frac-neg299.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \varepsilon}{-x}}\right)} \]
      12. neg-mul-199.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \varepsilon}{\color{blue}{-1 \cdot x}}\right)} \]
      13. times-frac99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5}{-1} \cdot \frac{\varepsilon}{x}}\right)} \]
      14. metadata-eval99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5} \cdot \frac{\varepsilon}{x}\right)} \]
    9. Simplified99.8%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, -0.5 \cdot \frac{\varepsilon}{x}\right)}} \]
    10. Step-by-step derivation
      1. fma-undefine99.8%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
    11. Applied egg-rr99.8%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 2: 98.1% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x - \sqrt{x \cdot x - \varepsilon}\\ \mathbf{if}\;t_0 \leq -2 \cdot 10^{-151}:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (- x (sqrt (- (* x x) eps)))))
   (if (<= t_0 -2e-151) t_0 (/ eps (+ (* x 2.0) (* -0.5 (/ eps x)))))))
double code(double x, double eps) {
	double t_0 = x - sqrt(((x * x) - eps));
	double tmp;
	if (t_0 <= -2e-151) {
		tmp = t_0;
	} else {
		tmp = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	}
	return tmp;
}
real(8) function code(x, eps)
    ! Keep the naive value when it is safely negative; otherwise use the
    ! series expansion in eps.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: naive
    naive = x - sqrt(((x * x) - eps))
    if (naive <= (-2d-151)) then
        code = naive
    else
        code = eps / ((x * 2.0d0) + ((-0.5d0) * (eps / x)))
    end if
end function
// Alternative 2: keep the naive value when it is safely negative; otherwise
// use the series expansion in eps.
public static double code(double x, double eps) {
	double naive = x - Math.sqrt((x * x) - eps);
	if (naive <= -2e-151) {
		return naive;
	}
	return eps / ((x * 2.0) + (-0.5 * (eps / x)));
}
def code(x, eps):
	# Keep the naive value when it is safely negative; otherwise use the
	# series expansion in eps.
	naive = x - math.sqrt(x * x - eps)
	if naive <= -2e-151:
		return naive
	return eps / (x * 2.0 + -0.5 * (eps / x))
function code(x, eps)
	# Keep the naive value when it is safely negative; otherwise use the
	# series expansion in eps.
	naive = Float64(x - sqrt(Float64(Float64(x * x) - eps)))
	if naive <= -2e-151
		return naive
	end
	return Float64(eps / Float64(Float64(x * 2.0) + Float64(-0.5 * Float64(eps / x))))
end
function tmp_2 = code(x, eps)
	% Keep the naive value when it is safely negative; otherwise use the
	% series expansion in eps.
	naive = x - sqrt((x * x) - eps);
	if (naive <= -2e-151)
		tmp_2 = naive;
	else
		tmp_2 = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	end
end
(* Alternative 2: binds the naive value to t$95$0 and keeps it when safely negative; otherwise series form in eps; report gives 98.1% accuracy. *)
code[x_, eps_] := Block[{t$95$0 = N[(x - N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2e-151], t$95$0, N[(eps / N[(N[(x * 2.0), $MachinePrecision] + N[(-0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x - \sqrt{x \cdot x - \varepsilon}\\
\mathbf{if}\;t_0 \leq -2 \cdot 10^{-151}:\\
\;\;\;\;t_0\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps))) < -1.9999999999999999e-151

    1. Initial program 99.3%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Add Preprocessing

    if -1.9999999999999999e-151 < (-.f64 x (sqrt.f64 (-.f64 (*.f64 x x) eps)))

    1. Initial program 6.7%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip--6.8%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. add-sqr-sqrt6.9%

        \[\leadsto \frac{x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}}{x + \sqrt{x \cdot x - \varepsilon}} \]
      3. div-sub6.8%

        \[\leadsto \color{blue}{\frac{x \cdot x}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      4. pow26.8%

        \[\leadsto \frac{\color{blue}{{x}^{2}}}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      5. sub-neg6.8%

        \[\leadsto \frac{{x}^{2}}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      6. add-sqr-sqrt2.1%

        \[\leadsto \frac{{x}^{2}}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      7. hypot-define2.1%

        \[\leadsto \frac{{x}^{2}}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      8. pow22.1%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{\color{blue}{{x}^{2}} - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      9. sub-neg2.1%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      10. add-sqr-sqrt2.1%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      11. hypot-define2.1%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Applied egg-rr2.1%

      \[\leadsto \color{blue}{\frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    5. Step-by-step derivation
      1. div-sub2.1%

        \[\leadsto \color{blue}{\frac{{x}^{2} - \left({x}^{2} - \varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. sub-neg2.1%

        \[\leadsto \frac{{x}^{2} - \color{blue}{\left({x}^{2} + \left(-\varepsilon\right)\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r+45.2%

        \[\leadsto \frac{\color{blue}{\left({x}^{2} - {x}^{2}\right) - \left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses45.2%

        \[\leadsto \frac{\color{blue}{0} - \left(-\varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. neg-sub045.2%

        \[\leadsto \frac{\color{blue}{-\left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      6. remove-double-neg45.2%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    6. Simplified45.2%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    7. Taylor expanded in eps around 0 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x}} \]
    8. Step-by-step derivation
      1. +-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
      2. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
      3. fma-define0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
      4. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
      5. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot \varepsilon\right)}}{x}\right)} \]
      6. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot \varepsilon\right)}{x}\right)} \]
      7. rem-square-sqrt99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{-1} \cdot \varepsilon\right)}{x}\right)} \]
      8. neg-mul-199.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left(-\varepsilon\right)}}{x}\right)} \]
      9. distribute-rgt-neg-in99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5 \cdot \varepsilon}}{x}\right)} \]
      10. distribute-frac-neg99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-\frac{0.5 \cdot \varepsilon}{x}}\right)} \]
      11. distribute-frac-neg299.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \varepsilon}{-x}}\right)} \]
      12. neg-mul-199.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \varepsilon}{\color{blue}{-1 \cdot x}}\right)} \]
      13. times-frac99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5}{-1} \cdot \frac{\varepsilon}{x}}\right)} \]
      14. metadata-eval99.8%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5} \cdot \frac{\varepsilon}{x}\right)} \]
    9. Simplified99.8%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, -0.5 \cdot \frac{\varepsilon}{x}\right)}} \]
    10. Step-by-step derivation
      1. fma-undefine99.8%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
    11. Applied egg-rr99.8%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 3: 87.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 3.3 \cdot 10^{-91}:\\ \;\;\;\;x - \sqrt{-\varepsilon}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (if (<= x 3.3e-91)
   (- x (sqrt (- eps)))
   (/ eps (+ (* x 2.0) (* -0.5 (/ eps x))))))
double code(double x, double eps) {
	double tmp;
	if (x <= 3.3e-91) {
		tmp = x - sqrt(-eps);
	} else {
		tmp = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	}
	return tmp;
}
real(8) function code(x, eps)
    ! Taylor form x - sqrt(-eps) for tiny x; series expansion in eps otherwise.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    if (x <= 3.3d-91) then
        code = x - sqrt(-eps)
    else
        code = eps / ((x * 2.0d0) + ((-0.5d0) * (eps / x)))
    end if
end function
// Alternative 3: Taylor form x - sqrt(-eps) for tiny x; series expansion in
// eps otherwise.
public static double code(double x, double eps) {
	if (x <= 3.3e-91) {
		return x - Math.sqrt(-eps);
	}
	return eps / ((x * 2.0) + (-0.5 * (eps / x)));
}
def code(x, eps):
	# Taylor form x - sqrt(-eps) for tiny x; series expansion in eps otherwise.
	if x <= 3.3e-91:
		return x - math.sqrt(-eps)
	return eps / (x * 2.0 + -0.5 * (eps / x))
function code(x, eps)
	# Taylor form x - sqrt(-eps) for tiny x; series expansion in eps otherwise.
	if x <= 3.3e-91
		return Float64(x - sqrt(Float64(-eps)))
	end
	return Float64(eps / Float64(Float64(x * 2.0) + Float64(-0.5 * Float64(eps / x))))
end
function tmp_2 = code(x, eps)
	% Taylor form x - sqrt(-eps) for tiny x; series expansion in eps otherwise.
	if (x <= 3.3e-91)
		tmp_2 = x - sqrt(-eps);
	else
		tmp_2 = eps / ((x * 2.0) + (-0.5 * (eps / x)));
	end
end
(* Alternative 3: regime split on x — Taylor form x - Sqrt[-eps] for tiny x, series form in eps otherwise; report gives 87.0% accuracy. *)
code[x_, eps_] := If[LessEqual[x, 3.3e-91], N[(x - N[Sqrt[(-eps)], $MachinePrecision]), $MachinePrecision], N[(eps / N[(N[(x * 2.0), $MachinePrecision] + N[(-0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 3.3 \cdot 10^{-91}:\\
\;\;\;\;x - \sqrt{-\varepsilon}\\

\mathbf{else}:\\
\;\;\;\;\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 3.30000000000000011e-91

    1. Initial program 93.8%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 92.4%

      \[\leadsto x - \sqrt{\color{blue}{-1 \cdot \varepsilon}} \]
    4. Step-by-step derivation
      1. neg-mul-192.4%

        \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]
    5. Simplified92.4%

      \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]

    if 3.30000000000000011e-91 < x

    1. Initial program 21.0%

      \[x - \sqrt{x \cdot x - \varepsilon} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip--21.0%

        \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      2. add-sqr-sqrt21.0%

        \[\leadsto \frac{x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}}{x + \sqrt{x \cdot x - \varepsilon}} \]
      3. div-sub21.1%

        \[\leadsto \color{blue}{\frac{x \cdot x}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}}} \]
      4. pow221.1%

        \[\leadsto \frac{\color{blue}{{x}^{2}}}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      5. sub-neg21.1%

        \[\leadsto \frac{{x}^{2}}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      6. add-sqr-sqrt17.2%

        \[\leadsto \frac{{x}^{2}}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      7. hypot-define17.2%

        \[\leadsto \frac{{x}^{2}}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      8. pow217.2%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{\color{blue}{{x}^{2}} - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
      9. sub-neg17.2%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
      10. add-sqr-sqrt17.2%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
      11. hypot-define17.2%

        \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    4. Applied egg-rr17.2%

      \[\leadsto \color{blue}{\frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    5. Step-by-step derivation
      1. div-sub17.2%

        \[\leadsto \color{blue}{\frac{{x}^{2} - \left({x}^{2} - \varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
      2. sub-neg17.2%

        \[\leadsto \frac{{x}^{2} - \color{blue}{\left({x}^{2} + \left(-\varepsilon\right)\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      3. associate--r+53.5%

        \[\leadsto \frac{\color{blue}{\left({x}^{2} - {x}^{2}\right) - \left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      4. +-inverses53.5%

        \[\leadsto \frac{\color{blue}{0} - \left(-\varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      5. neg-sub053.5%

        \[\leadsto \frac{\color{blue}{-\left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
      6. remove-double-neg53.5%

        \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    6. Simplified53.5%

      \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    7. Taylor expanded in eps around 0 0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x}} \]
    8. Step-by-step derivation
      1. +-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
      2. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
      3. fma-define0.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
      4. associate-*r/0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
      5. *-commutative0.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot \varepsilon\right)}}{x}\right)} \]
      6. unpow20.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot \varepsilon\right)}{x}\right)} \]
      7. rem-square-sqrt86.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{-1} \cdot \varepsilon\right)}{x}\right)} \]
      8. neg-mul-186.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left(-\varepsilon\right)}}{x}\right)} \]
      9. distribute-rgt-neg-in86.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5 \cdot \varepsilon}}{x}\right)} \]
      10. distribute-frac-neg86.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-\frac{0.5 \cdot \varepsilon}{x}}\right)} \]
      11. distribute-frac-neg286.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \varepsilon}{-x}}\right)} \]
      12. neg-mul-186.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \varepsilon}{\color{blue}{-1 \cdot x}}\right)} \]
      13. times-frac86.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5}{-1} \cdot \frac{\varepsilon}{x}}\right)} \]
      14. metadata-eval86.0%

        \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5} \cdot \frac{\varepsilon}{x}\right)} \]
    9. Simplified86.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, -0.5 \cdot \frac{\varepsilon}{x}\right)}} \]
    10. Step-by-step derivation
      1. fma-undefine86.0%

        \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
    11. Applied egg-rr86.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 4: 44.8% accurate, 9.7× speedup?

\[\begin{array}{l} \\ \frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}} \end{array} \]
(FPCore (x eps) :precision binary64 (/ eps (+ (* x 2.0) (* -0.5 (/ eps x)))))
/* Herbie alternative 4 for x - sqrt(x*x - eps): truncated-series form
   eps / (2*x - 0.5*eps/x). Report: 44.8% accurate, 9.7x speedup.
   Operation order is deliberate for the stated FP accuracy; do not reassociate. */
double code(double x, double eps) {
	return eps / ((x * 2.0) + (-0.5 * (eps / x)));
}
! Herbie alternative 4 for x - sqrt(x*x - eps): truncated-series form
! eps / (2*x - 0.5*eps/x). Report: 44.8% accurate, 9.7x speedup.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps / ((x * 2.0d0) + ((-0.5d0) * (eps / x)))
end function
// Herbie alternative 4 for x - sqrt(x*x - eps): truncated-series form
// eps / (2*x - 0.5*eps/x). Report: 44.8% accurate, 9.7x speedup.
public static double code(double x, double eps) {
	return eps / ((x * 2.0) + (-0.5 * (eps / x)));
}
def code(x, eps):
	"""Herbie alternative 4 for x - sqrt(x*x - eps): eps / (2*x - 0.5*eps/x); 44.8% accurate, 9.7x speedup per report."""
	return eps / ((x * 2.0) + (-0.5 * (eps / x)))
# Herbie alternative 4 for x - sqrt(x*x - eps): truncated-series form
# eps / (2*x - 0.5*eps/x). Report: 44.8% accurate, 9.7x speedup.
function code(x, eps)
	return Float64(eps / Float64(Float64(x * 2.0) + Float64(-0.5 * Float64(eps / x))))
end
% Herbie alternative 4 for x - sqrt(x*x - eps): truncated-series form
% eps / (2*x - 0.5*eps/x). Report: 44.8% accurate, 9.7x speedup.
function tmp = code(x, eps)
	tmp = eps / ((x * 2.0) + (-0.5 * (eps / x)));
end
(* Herbie alternative 4 for x - sqrt(x*x - eps): eps / (2*x - 0.5*eps/x); 44.8% accurate per report. *)
code[x_, eps_] := N[(eps / N[(N[(x * 2.0), $MachinePrecision] + N[(-0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\varepsilon}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}
\end{array}
Derivation
  1. Initial program 65.6%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. flip--65.6%

      \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    2. add-sqr-sqrt65.3%

      \[\leadsto \frac{x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}}{x + \sqrt{x \cdot x - \varepsilon}} \]
    3. div-sub65.3%

      \[\leadsto \color{blue}{\frac{x \cdot x}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    4. pow265.3%

      \[\leadsto \frac{\color{blue}{{x}^{2}}}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    5. sub-neg65.3%

      \[\leadsto \frac{{x}^{2}}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    6. add-sqr-sqrt63.6%

      \[\leadsto \frac{{x}^{2}}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    7. hypot-define63.6%

      \[\leadsto \frac{{x}^{2}}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    8. pow263.6%

      \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{\color{blue}{{x}^{2}} - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    9. sub-neg63.6%

      \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
    10. add-sqr-sqrt63.6%

      \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
    11. hypot-define63.6%

      \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  4. Applied egg-rr63.6%

    \[\leadsto \color{blue}{\frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  5. Step-by-step derivation
    1. div-sub63.6%

      \[\leadsto \color{blue}{\frac{{x}^{2} - \left({x}^{2} - \varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    2. sub-neg63.6%

      \[\leadsto \frac{{x}^{2} - \color{blue}{\left({x}^{2} + \left(-\varepsilon\right)\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    3. associate--r+79.6%

      \[\leadsto \frac{\color{blue}{\left({x}^{2} - {x}^{2}\right) - \left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    4. +-inverses79.6%

      \[\leadsto \frac{\color{blue}{0} - \left(-\varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. neg-sub079.6%

      \[\leadsto \frac{\color{blue}{-\left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    6. remove-double-neg79.6%

      \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
  6. Simplified79.6%

    \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  7. Taylor expanded in eps around 0 — 0.0%

    \[\leadsto \frac{\varepsilon}{\color{blue}{0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x}} \]
  8. Step-by-step derivation
    1. +-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
    2. *-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
    3. fma-define0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
    4. associate-*r/0.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
    5. *-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot \varepsilon\right)}}{x}\right)} \]
    6. unpow20.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot \varepsilon\right)}{x}\right)} \]
    7. rem-square-sqrt40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{-1} \cdot \varepsilon\right)}{x}\right)} \]
    8. neg-mul-140.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left(-\varepsilon\right)}}{x}\right)} \]
    9. distribute-rgt-neg-in40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5 \cdot \varepsilon}}{x}\right)} \]
    10. distribute-frac-neg40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-\frac{0.5 \cdot \varepsilon}{x}}\right)} \]
    11. distribute-frac-neg240.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \varepsilon}{-x}}\right)} \]
    12. neg-mul-140.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \varepsilon}{\color{blue}{-1 \cdot x}}\right)} \]
    13. times-frac40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5}{-1} \cdot \frac{\varepsilon}{x}}\right)} \]
    14. metadata-eval40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5} \cdot \frac{\varepsilon}{x}\right)} \]
  9. Simplified40.7%

    \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, -0.5 \cdot \frac{\varepsilon}{x}\right)}} \]
  10. Step-by-step derivation
    1. fma-undefine40.7%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
  11. Applied egg-rr40.7%

    \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2 + -0.5 \cdot \frac{\varepsilon}{x}}} \]
  12. Add Preprocessing

Alternative 5: 44.0% accurate, 21.4× speedup?

\[\begin{array}{l} \\ \frac{\varepsilon}{x} \cdot 0.5 \end{array} \]
(FPCore (x eps) :precision binary64 (* (/ eps x) 0.5))
/* Herbie alternative 5: leading series term eps / (2*x) of x - sqrt(x*x - eps)
   for large x. Report: 44.0% accurate, 21.4x speedup. */
double code(double x, double eps) {
	return (eps / x) * 0.5;
}
! Herbie alternative 5: leading series term eps / (2*x) of x - sqrt(x*x - eps)
! for large x. Report: 44.0% accurate, 21.4x speedup.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (eps / x) * 0.5d0
end function
// Herbie alternative 5: leading series term eps / (2*x) of x - sqrt(x*x - eps)
// for large x. Report: 44.0% accurate, 21.4x speedup.
public static double code(double x, double eps) {
	return (eps / x) * 0.5;
}
def code(x, eps):
	"""Herbie alternative 5: leading series term eps/(2*x) of x - sqrt(x*x - eps); 44.0% accurate, 21.4x speedup per report."""
	return (eps / x) * 0.5
# Herbie alternative 5: leading series term eps / (2*x) of x - sqrt(x*x - eps)
# for large x. Report: 44.0% accurate, 21.4x speedup.
function code(x, eps)
	return Float64(Float64(eps / x) * 0.5)
end
% Herbie alternative 5: leading series term eps / (2*x) of x - sqrt(x*x - eps)
% for large x. Report: 44.0% accurate, 21.4x speedup.
function tmp = code(x, eps)
	tmp = (eps / x) * 0.5;
end
(* Herbie alternative 5: leading series term eps/(2*x) of x - sqrt(x*x - eps); 44.0% accurate per report. *)
code[x_, eps_] := N[(N[(eps / x), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}

\\
\frac{\varepsilon}{x} \cdot 0.5
\end{array}
Derivation
  1. Initial program 65.6%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf — 40.2%

    \[\leadsto \color{blue}{0.5 \cdot \frac{\varepsilon}{x}} \]
  4. Final simplification40.2%

    \[\leadsto \frac{\varepsilon}{x} \cdot 0.5 \]
  5. Add Preprocessing

Alternative 6: 5.3% accurate, 35.7× speedup?

\[\begin{array}{l} \\ x \cdot -2 \end{array} \]
(FPCore (x eps) :precision binary64 (* x -2.0))
/* Herbie alternative 6: crude approximation -2*x of x - sqrt(x*x - eps);
   eps is intentionally unused. Report: 5.3% accurate, 35.7x speedup. */
double code(double x, double eps) {
	return x * -2.0;
}
! Herbie alternative 6: crude approximation -2*x of x - sqrt(x*x - eps);
! eps is intentionally unused. Report: 5.3% accurate, 35.7x speedup.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = x * (-2.0d0)
end function
// Herbie alternative 6: crude approximation -2*x of x - sqrt(x*x - eps);
// eps is intentionally unused. Report: 5.3% accurate, 35.7x speedup.
public static double code(double x, double eps) {
	return x * -2.0;
}
def code(x, eps):
	"""Herbie alternative 6: crude approximation -2*x of x - sqrt(x*x - eps); eps unused; 5.3% accurate per report."""
	return x * -2.0
# Herbie alternative 6: crude approximation -2*x of x - sqrt(x*x - eps);
# eps is intentionally unused. Report: 5.3% accurate, 35.7x speedup.
function code(x, eps)
	return Float64(x * -2.0)
end
% Herbie alternative 6: crude approximation -2*x of x - sqrt(x*x - eps);
% eps is intentionally unused. Report: 5.3% accurate, 35.7x speedup.
function tmp = code(x, eps)
	tmp = x * -2.0;
end
(* Herbie alternative 6: crude approximation -2*x of x - sqrt(x*x - eps); eps unused; 5.3% accurate per report. *)
code[x_, eps_] := N[(x * -2.0), $MachinePrecision]
\begin{array}{l}

\\
x \cdot -2
\end{array}
Derivation
  1. Initial program 65.6%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. flip--65.6%

      \[\leadsto \color{blue}{\frac{x \cdot x - \sqrt{x \cdot x - \varepsilon} \cdot \sqrt{x \cdot x - \varepsilon}}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    2. add-sqr-sqrt65.3%

      \[\leadsto \frac{x \cdot x - \color{blue}{\left(x \cdot x - \varepsilon\right)}}{x + \sqrt{x \cdot x - \varepsilon}} \]
    3. div-sub65.3%

      \[\leadsto \color{blue}{\frac{x \cdot x}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}}} \]
    4. pow265.3%

      \[\leadsto \frac{\color{blue}{{x}^{2}}}{x + \sqrt{x \cdot x - \varepsilon}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    5. sub-neg65.3%

      \[\leadsto \frac{{x}^{2}}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    6. add-sqr-sqrt63.6%

      \[\leadsto \frac{{x}^{2}}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    7. hypot-define63.6%

      \[\leadsto \frac{{x}^{2}}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} - \frac{x \cdot x - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    8. pow263.6%

      \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{\color{blue}{{x}^{2}} - \varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \]
    9. sub-neg63.6%

      \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{\color{blue}{x \cdot x + \left(-\varepsilon\right)}}} \]
    10. add-sqr-sqrt63.6%

      \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \sqrt{x \cdot x + \color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \]
    11. hypot-define63.6%

      \[\leadsto \frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \color{blue}{\mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  4. Applied egg-rr63.6%

    \[\leadsto \color{blue}{\frac{{x}^{2}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} - \frac{{x}^{2} - \varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  5. Step-by-step derivation
    1. div-sub63.6%

      \[\leadsto \color{blue}{\frac{{x}^{2} - \left({x}^{2} - \varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
    2. sub-neg63.6%

      \[\leadsto \frac{{x}^{2} - \color{blue}{\left({x}^{2} + \left(-\varepsilon\right)\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    3. associate--r+79.6%

      \[\leadsto \frac{\color{blue}{\left({x}^{2} - {x}^{2}\right) - \left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    4. +-inverses79.6%

      \[\leadsto \frac{\color{blue}{0} - \left(-\varepsilon\right)}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    5. neg-sub079.6%

      \[\leadsto \frac{\color{blue}{-\left(-\varepsilon\right)}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
    6. remove-double-neg79.6%

      \[\leadsto \frac{\color{blue}{\varepsilon}}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)} \]
  6. Simplified79.6%

    \[\leadsto \color{blue}{\frac{\varepsilon}{x + \mathsf{hypot}\left(x, \sqrt{-\varepsilon}\right)}} \]
  7. Taylor expanded in eps around 0 — 0.0%

    \[\leadsto \frac{\varepsilon}{\color{blue}{0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x} + 2 \cdot x}} \]
  8. Step-by-step derivation
    1. +-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{2 \cdot x + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}}} \]
    2. *-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{x \cdot 2} + 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}} \]
    3. fma-define0.0%

      \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, 0.5 \cdot \frac{\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}}{x}\right)}} \]
    4. associate-*r/0.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \left(\varepsilon \cdot {\left(\sqrt{-1}\right)}^{2}\right)}{x}}\right)} \]
    5. *-commutative0.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left({\left(\sqrt{-1}\right)}^{2} \cdot \varepsilon\right)}}{x}\right)} \]
    6. unpow20.0%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{\left(\sqrt{-1} \cdot \sqrt{-1}\right)} \cdot \varepsilon\right)}{x}\right)} \]
    7. rem-square-sqrt40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \left(\color{blue}{-1} \cdot \varepsilon\right)}{x}\right)} \]
    8. neg-mul-140.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \color{blue}{\left(-\varepsilon\right)}}{x}\right)} \]
    9. distribute-rgt-neg-in40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{\color{blue}{-0.5 \cdot \varepsilon}}{x}\right)} \]
    10. distribute-frac-neg40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-\frac{0.5 \cdot \varepsilon}{x}}\right)} \]
    11. distribute-frac-neg240.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5 \cdot \varepsilon}{-x}}\right)} \]
    12. neg-mul-140.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \frac{0.5 \cdot \varepsilon}{\color{blue}{-1 \cdot x}}\right)} \]
    13. times-frac40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{\frac{0.5}{-1} \cdot \frac{\varepsilon}{x}}\right)} \]
    14. metadata-eval40.7%

      \[\leadsto \frac{\varepsilon}{\mathsf{fma}\left(x, 2, \color{blue}{-0.5} \cdot \frac{\varepsilon}{x}\right)} \]
  9. Simplified40.7%

    \[\leadsto \frac{\varepsilon}{\color{blue}{\mathsf{fma}\left(x, 2, -0.5 \cdot \frac{\varepsilon}{x}\right)}} \]
  10. Taylor expanded in eps around inf — 5.3%

    \[\leadsto \color{blue}{-2 \cdot x} \]
  11. Step-by-step derivation
    1. *-commutative5.3%

      \[\leadsto \color{blue}{x \cdot -2} \]
  12. Simplified5.3%

    \[\leadsto \color{blue}{x \cdot -2} \]
  13. Add Preprocessing

Alternative 7: 3.5% accurate, 107.0× speedup?

\[\begin{array}{l} \\ x \end{array} \]
(FPCore (x eps) :precision binary64 x)
/* Herbie alternative 7: returns x unchanged as an approximation of
   x - sqrt(x*x - eps); eps is intentionally unused.
   Report: 3.5% accurate, 107.0x speedup. */
double code(double x, double eps) {
	return x;
}
! Herbie alternative 7: returns x unchanged as an approximation of
! x - sqrt(x*x - eps); eps is intentionally unused.
! Report: 3.5% accurate, 107.0x speedup.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = x
end function
// Herbie alternative 7: returns x unchanged as an approximation of
// x - sqrt(x*x - eps); eps is intentionally unused.
// Report: 3.5% accurate, 107.0x speedup.
public static double code(double x, double eps) {
	return x;
}
def code(x, eps):
	"""Herbie alternative 7: returns x unchanged as an approximation of x - sqrt(x*x - eps); eps unused; 3.5% accurate per report."""
	return x
# Herbie alternative 7: returns x unchanged as an approximation of
# x - sqrt(x*x - eps); eps is intentionally unused.
# Report: 3.5% accurate, 107.0x speedup.
function code(x, eps)
	return x
end
% Herbie alternative 7: returns x unchanged as an approximation of
% x - sqrt(x*x - eps); eps is intentionally unused.
% Report: 3.5% accurate, 107.0x speedup.
function tmp = code(x, eps)
	tmp = x;
end
(* Herbie alternative 7: returns x unchanged; eps unused; 3.5% accurate per report. *)
code[x_, eps_] := x
\begin{array}{l}

\\
x
\end{array}
Derivation
  1. Initial program 65.6%

    \[x - \sqrt{x \cdot x - \varepsilon} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0 — 62.7%

    \[\leadsto x - \sqrt{\color{blue}{-1 \cdot \varepsilon}} \]
  4. Step-by-step derivation
    1. neg-mul-162.7%

      \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]
  5. Simplified62.7%

    \[\leadsto x - \sqrt{\color{blue}{-\varepsilon}} \]
  6. Taylor expanded in x around inf — 3.5%

    \[\leadsto \color{blue}{x} \]
  7. Add Preprocessing

Developer target: 99.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\varepsilon}{x + \sqrt{x \cdot x - \varepsilon}} \end{array} \]
(FPCore (x eps) :precision binary64 (/ eps (+ x (sqrt (- (* x x) eps)))))
/* Developer target: conjugate (rationalized) form eps / (x + sqrt(x*x - eps)),
   equal to x - sqrt(x*x - eps) in exact arithmetic but without the subtractive
   cancellation of the naive form. Report: 99.5% accurate, 1.0x speedup. */
double code(double x, double eps) {
	return eps / (x + sqrt(((x * x) - eps)));
}
! Developer target: conjugate (rationalized) form eps / (x + sqrt(x*x - eps)),
! equal to x - sqrt(x*x - eps) in exact arithmetic but without the subtractive
! cancellation of the naive form. Report: 99.5% accurate, 1.0x speedup.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps / (x + sqrt(((x * x) - eps)))
end function
// Developer target: conjugate (rationalized) form eps / (x + sqrt(x*x - eps)),
// equal to x - sqrt(x*x - eps) in exact arithmetic but without the subtractive
// cancellation of the naive form. Report: 99.5% accurate, 1.0x speedup.
public static double code(double x, double eps) {
	return eps / (x + Math.sqrt(((x * x) - eps)));
}
def code(x, eps):
	"""Developer target: conjugate form eps / (x + sqrt(x*x - eps)) of x - sqrt(x*x - eps); 99.5% accurate per report."""
	return eps / (x + math.sqrt(((x * x) - eps)))
# Developer target: conjugate (rationalized) form eps / (x + sqrt(x*x - eps)),
# equal to x - sqrt(x*x - eps) in exact arithmetic but without the subtractive
# cancellation of the naive form. Report: 99.5% accurate, 1.0x speedup.
function code(x, eps)
	return Float64(eps / Float64(x + sqrt(Float64(Float64(x * x) - eps))))
end
% Developer target: conjugate (rationalized) form eps / (x + sqrt(x*x - eps)),
% equal to x - sqrt(x*x - eps) in exact arithmetic but without the subtractive
% cancellation of the naive form. Report: 99.5% accurate, 1.0x speedup.
function tmp = code(x, eps)
	tmp = eps / (x + sqrt(((x * x) - eps)));
end
(* Developer target: conjugate form eps / (x + sqrt(x*x - eps)); 99.5% accurate per report. *)
code[x_, eps_] := N[(eps / N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] - eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\varepsilon}{x + \sqrt{x \cdot x - \varepsilon}}
\end{array}

Reproduce

?
herbie shell --seed 2024096 
(FPCore (x eps)
  :name "ENA, Section 1.4, Exercise 4d"
  :precision binary64
  :pre (and (and (<= 0.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))

  :alt
  (/ eps (+ x (sqrt (- (* x x) eps))))

  (- x (sqrt (- (* x x) eps))))