Average Error: 16.0 → 0.0 bits
Time: 2.3s
Precision: binary64
Precondition
\[\left(-1000000000 \leq x \land x \leq 1000000000\right) \land \left(-1 \leq \varepsilon \land \varepsilon \leq 1\right)\]

Initial program
\[{\left(x + \varepsilon\right)}^{2} - {x}^{2} \]

Improved program
\[\mathsf{fma}\left(\varepsilon, 2 \cdot x, \varepsilon \cdot \varepsilon\right) \]
FPCore

; initial program
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 2.0) (pow x 2.0)))
; improved program
(FPCore (x eps) :precision binary64 (fma eps (* 2.0 x) (* eps eps)))
C

#include <math.h>

/* initial program */
double code(double x, double eps) {
	return pow((x + eps), 2.0) - pow(x, 2.0);
}

/* improved program (standalone translation; emitted with the same name) */
double code(double x, double eps) {
	return fma(eps, (2.0 * x), (eps * eps));
}
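As a quick sanity check (not part of the Herbie report), the sketch below evaluates both forms at a point allowed by the precondition. The names naive and accurate are illustrative, since both generated functions above share the name code.

#include <math.h>
#include <stdio.h>

/* Illustrative names; the bodies mirror the two translations above. */
static double naive(double x, double eps) {
	return pow(x + eps, 2.0) - pow(x, 2.0);
}

static double accurate(double x, double eps) {
	return fma(eps, 2.0 * x, eps * eps);
}

int main(void) {
	double x = 1.0e9;    /* large x, inside the precondition */
	double eps = 1.0e-3; /* small perturbation */
	/* Mathematically the answer is 2*x*eps + eps^2 = 2000000.000001. */
	printf("naive:    %.17g\n", naive(x, eps));    /* low digits lost: the subtraction
	                                                  happens near 1e18, where one ulp is 128 */
	printf("accurate: %.17g\n", accurate(x, eps)); /* matches 2000000.000001 to ~1 ulp */
	return 0;
}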
Julia

# initial program
function code(x, eps)
	return Float64((Float64(x + eps) ^ 2.0) - (x ^ 2.0))
end

# improved program
function code(x, eps)
	return fma(eps, Float64(2.0 * x), Float64(eps * eps))
end
Mathematica

(* initial program *)
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]
(* improved program *)
code[x_, eps_] := N[(eps * N[(2.0 * x), $MachinePrecision] + N[(eps * eps), $MachinePrecision]), $MachinePrecision]
TeX

% initial program
{\left(x + \varepsilon\right)}^{2} - {x}^{2}
% improved program
\mathsf{fma}\left(\varepsilon, 2 \cdot x, \varepsilon \cdot \varepsilon\right)

Error

[Plots omitted: bits of error versus x, and bits of error versus eps, for the initial and improved programs.]
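The plot labels refer to Herbie's error metric, which counts error in bits: roughly the base-2 log of how many binary64 values lie between the computed answer and the true one. A minimal sketch of that idea (my own helper, not Herbie's code), using the standard trick of mapping doubles to ordered integers:

#include <math.h>
#include <stdint.h>
#include <string.h>

/* Map a double onto the integer line so that adjacent doubles map to
   adjacent integers (turns sign-magnitude bits into a monotone order). */
static int64_t ordered(double x) {
	int64_t bits;
	memcpy(&bits, &x, sizeof bits);
	return bits < 0 ? INT64_MIN - bits : bits;
}

/* Approximate bits of error: log2(1 + ULP distance). A sketch only:
   it ignores NaNs and assumes the two values are close enough that
   the signed difference does not overflow. */
static double bits_error(double approx, double exact) {
	double ulps = fabs((double)(ordered(approx) - ordered(exact)));
	return log2(1.0 + ulps);
}

Under this metric, an average of 16 bits of error means the computed answer agrees with the true one in only about the top 37 of binary64's 53 significand bits.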

Derivation

  1. Initial program (error: 16.0 bits)

    \[{\left(x + \varepsilon\right)}^{2} - {x}^{2} \]
  2. Simplified (error: 0.0 bits)

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(2, x, \varepsilon\right)} \]
  3. Applied egg-rr (error: 0.0 bits)

    \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, 2 \cdot x, \varepsilon \cdot \varepsilon\right)} \]
  4. Final simplification (error: 0.0 bits)

    \[\leadsto \mathsf{fma}\left(\varepsilon, 2 \cdot x, \varepsilon \cdot \varepsilon\right) \]
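The rewrite is exact algebra rather than an approximation: expanding the square cancels the huge \(x^2\) terms symbolically, so the subtraction of nearly equal values, the source of the original 16 bits of error, never happens in floating point.

\[{\left(x + \varepsilon\right)}^{2} - {x}^{2} = x^{2} + 2 x \varepsilon + \varepsilon^{2} - x^{2} = \varepsilon \cdot 2 x + \varepsilon \cdot \varepsilon = \mathsf{fma}\left(\varepsilon, 2 \cdot x, \varepsilon \cdot \varepsilon\right)\]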

Reproduce

herbie shell --seed 2022150 
(FPCore (x eps)
  :name "ENA, Section 1.4, Exercise 4b, n=2"
  :precision binary64
  :pre (and (and (<= -1000000000.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))
  (- (pow (+ x eps) 2.0) (pow x 2.0)))
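Starting herbie shell with the seed shown and pasting in the FPCore block above should reproduce this result; the seed pins Herbie's random sampling of input points, so the error figures are deterministic across runs.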