ENA, Section 1.4, Exercise 4b, n=2

Average Accuracy: 74.6% → 100.0%
Time: 6.2s
Precision: binary64
Cost: 576

?

\[\left(-1000000000 \leq x \land x \leq 1000000000\right) \land \left(-1 \leq \varepsilon \land \varepsilon \leq 1\right)\]
\[{\left(x + \varepsilon\right)}^{2} - {x}^{2} \]
\[\varepsilon \cdot \varepsilon + \left(2 \cdot x\right) \cdot \varepsilon \]
; Naive form: (x + eps)^2 - x^2 in binary64; cancels catastrophically when |eps| << |x|.
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 2.0) (pow x 2.0)))
; Rewritten form: eps*eps + (2*x)*eps in binary64; algebraically equal to the naive form, avoids the x^2 cancellation.
(FPCore (x eps) :precision binary64 (+ (* eps eps) (* (* 2.0 x) eps)))
double code(double x, double eps) {
	return pow((x + eps), 2.0) - pow(x, 2.0);
}
/* Expanded form of (x + eps)^2 - x^2: eps*eps + (2*x)*eps.
 * Algebraically identical, but free of the cancelling x^2 terms. */
double code(double x, double eps) {
	double quadratic = eps * eps;
	double linear = (2.0 * x) * eps;
	return quadratic + linear;
}
! Naive evaluation of (x + eps)**2 - x**2 in double precision.
! Loses accuracy to cancellation when |eps| is much smaller than |x|.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: shifted, base
    shifted = (x + eps) ** 2.0d0
    base = x ** 2.0d0
    code = shifted - base
end function
! Expanded form of (x + eps)**2 - x**2: eps*eps + (2*x)*eps.
! Algebraically identical, but avoids subtracting nearly equal squares.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: quadratic, linear
    quadratic = eps * eps
    linear = (2.0d0 * x) * eps
    code = quadratic + linear
end function
/**
 * Naive evaluation of (x + eps)^2 - x^2 in binary64.
 * Subtracting two nearly equal squares cancels when |eps| &lt;&lt; |x|.
 */
public static double code(double x, double eps) {
	double shifted = Math.pow(x + eps, 2.0);
	double base = Math.pow(x, 2.0);
	return shifted - base;
}
/**
 * Expanded form of (x + eps)^2 - x^2: eps*eps + (2*x)*eps.
 * Algebraically identical but free of the cancelling x^2 terms.
 */
public static double code(double x, double eps) {
	double quadratic = eps * eps;
	double linear = (2.0 * x) * eps;
	return quadratic + linear;
}
def code(x, eps):
	"""Naive evaluation of (x + eps)**2 - x**2 in binary64.

	Loses accuracy to cancellation when abs(eps) is much smaller than abs(x).
	"""
	shifted = math.pow(x + eps, 2.0)
	base = math.pow(x, 2.0)
	return shifted - base
def code(x, eps):
	"""Expanded form of (x + eps)**2 - x**2: eps*eps + (2*x)*eps.

	Algebraically identical but avoids subtracting nearly equal squares.
	"""
	quadratic = eps * eps
	linear = (2.0 * x) * eps
	return quadratic + linear
# Naive evaluation of (x + eps)^2 - x^2 with explicit binary64 rounding.
# Cancels catastrophically when abs(eps) << abs(x).
function code(x, eps)
	shifted = Float64(x + eps) ^ 2.0
	base = x ^ 2.0
	return Float64(shifted - base)
end
# Expanded form of (x + eps)^2 - x^2: eps*eps + (2*x)*eps,
# with explicit binary64 rounding at each step.
function code(x, eps)
	quadratic = Float64(eps * eps)
	linear = Float64(Float64(2.0 * x) * eps)
	return Float64(quadratic + linear)
end
% Naive evaluation of (x + eps)^2 - x^2.
% Loses accuracy to cancellation when abs(eps) << abs(x).
function tmp = code(x, eps)
	shifted = (x + eps) ^ 2.0;
	base = x ^ 2.0;
	tmp = shifted - base;
end
% Expanded form of (x + eps)^2 - x^2: eps*eps + (2*x)*eps.
% Algebraically identical but avoids subtracting nearly equal squares.
function tmp = code(x, eps)
	quadratic = eps * eps;
	linear = (2.0 * x) * eps;
	tmp = quadratic + linear;
end
(* Naive evaluation of (x + eps)^2 - x^2 at machine precision; cancels when |eps| << |x|. *)
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]
(* Expanded form eps*eps + (2*x)*eps at machine precision; algebraically identical, no cancellation. *)
code[x_, eps_] := N[(N[(eps * eps), $MachinePrecision] + N[(N[(2.0 * x), $MachinePrecision] * eps), $MachinePrecision]), $MachinePrecision]
{\left(x + \varepsilon\right)}^{2} - {x}^{2}
\varepsilon \cdot \varepsilon + \left(2 \cdot x\right) \cdot \varepsilon

Error?

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation?

  1. Initial program 74.6%

    \[{\left(x + \varepsilon\right)}^{2} - {x}^{2} \]
  2. Simplified 100.0%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(2, x, \varepsilon\right)} \]
    Proof

    [Start]74.6

    \[ {\left(x + \varepsilon\right)}^{2} - {x}^{2} \]

    unpow2 [=>]74.5

    \[ \color{blue}{\left(x + \varepsilon\right) \cdot \left(x + \varepsilon\right)} - {x}^{2} \]

    unpow2 [=>]74.6

    \[ \left(x + \varepsilon\right) \cdot \left(x + \varepsilon\right) - \color{blue}{x \cdot x} \]

    difference-of-squares [=>]74.6

    \[ \color{blue}{\left(\left(x + \varepsilon\right) + x\right) \cdot \left(\left(x + \varepsilon\right) - x\right)} \]

    *-commutative [=>]74.6

    \[ \color{blue}{\left(\left(x + \varepsilon\right) - x\right) \cdot \left(\left(x + \varepsilon\right) + x\right)} \]

    +-commutative [=>]74.6

    \[ \left(\color{blue}{\left(\varepsilon + x\right)} - x\right) \cdot \left(\left(x + \varepsilon\right) + x\right) \]

    associate--l+ [=>]100.0

    \[ \color{blue}{\left(\varepsilon + \left(x - x\right)\right)} \cdot \left(\left(x + \varepsilon\right) + x\right) \]

    +-inverses [=>]100.0

    \[ \left(\varepsilon + \color{blue}{0}\right) \cdot \left(\left(x + \varepsilon\right) + x\right) \]

    +-rgt-identity [=>]100.0

    \[ \color{blue}{\varepsilon} \cdot \left(\left(x + \varepsilon\right) + x\right) \]

    +-commutative [=>]100.0

    \[ \varepsilon \cdot \color{blue}{\left(x + \left(x + \varepsilon\right)\right)} \]

    associate-+r+ [=>]100.0

    \[ \varepsilon \cdot \color{blue}{\left(\left(x + x\right) + \varepsilon\right)} \]

    count-2 [=>]100.0

    \[ \varepsilon \cdot \left(\color{blue}{2 \cdot x} + \varepsilon\right) \]

    fma-def [=>]100.0

    \[ \varepsilon \cdot \color{blue}{\mathsf{fma}\left(2, x, \varepsilon\right)} \]
  3. Applied egg-rr 100.0%

    \[\leadsto \color{blue}{\left(2 \cdot x\right) \cdot \varepsilon + \varepsilon \cdot \varepsilon} \]
    Proof

    [Start]100.0

    \[ \varepsilon \cdot \mathsf{fma}\left(2, x, \varepsilon\right) \]

    fma-udef [=>]100.0

    \[ \varepsilon \cdot \color{blue}{\left(2 \cdot x + \varepsilon\right)} \]

    distribute-rgt-in [=>]100.0

    \[ \color{blue}{\left(2 \cdot x\right) \cdot \varepsilon + \varepsilon \cdot \varepsilon} \]
  4. Final simplification 100.0%

    \[\leadsto \varepsilon \cdot \varepsilon + \left(2 \cdot x\right) \cdot \varepsilon \]

Alternatives

Alternative 1
Accuracy 91.1%
Cost 585
\[\begin{array}{l} \mathbf{if}\;x \leq -9.6 \cdot 10^{-111} \lor \neg \left(x \leq 4.75 \cdot 10^{-88}\right):\\ \;\;\;\;\left(2 \cdot x\right) \cdot \varepsilon\\ \mathbf{else}:\\ \;\;\;\;\varepsilon \cdot \varepsilon\\ \end{array} \]
Alternative 2
Accuracy 100.0%
Cost 448
\[\varepsilon \cdot \left(\varepsilon + \left(x + x\right)\right) \]
Alternative 3
Accuracy 72.2%
Cost 192
\[\varepsilon \cdot \varepsilon \]

Error

Reproduce?

herbie shell --seed 2023126 
; Input FPCore for reproducing this report: the naive expression
; (x + eps)^2 - x^2 with the precondition |x| <= 1e9 and |eps| <= 1.
(FPCore (x eps)
  :name "ENA, Section 1.4, Exercise 4b, n=2"
  :precision binary64
  :pre (and (and (<= -1000000000.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))
  (- (pow (+ x eps) 2.0) (pow x 2.0)))