Average Error: 0.0 → 0.0
Time: 4.3s
Precision: binary64
Cost: 6976
\[ \begin{array}{c}[x, y] = \mathsf{sort}([x, y])\\ \end{array} \]
\[\left(x + y\right) \cdot \left(x + y\right) \]
\[\mathsf{fma}\left(y, x + \left(y + x\right), x \cdot x\right) \]
; Original program: (x + y)^2 evaluated naively in binary64.
(FPCore (x y) :precision binary64 (* (+ x y) (+ x y)))
; Herbie's rewrite: x*x + y*(x + (y + x)), with the multiply-add fused via fma.
(FPCore (x y) :precision binary64 (fma y (+ x (+ y x)) (* x x)))
/* Original form: squares the rounded sum (x + y).
 * Hoisting the sum into a local performs the identical two
 * IEEE-754 operations (one add, one multiply), so the result
 * is bit-identical to writing (x + y) * (x + y) twice. */
double code(double x, double y) {
	const double sum = x + y;
	return sum * sum;
}
double code(double x, double y) {
	return fma(y, (x + (y + x)), (x * x));
}
# Original form: square the Float64-rounded sum (x + y).
# Binding the sum once performs the same add and multiply,
# so the result is bit-identical to the inline version.
function code(x, y)
	s = Float64(x + y)
	return Float64(s * s)
end
# Herbie-rewritten form: fma(y, x + (y + x), x * x), the fused
# multiply-add variant of x*x + y*(2x + y). Same operations in
# the same order as the generated one-liner.
function code(x, y)
	inner = Float64(y + x)
	coeff = Float64(x + inner)
	return fma(y, coeff, Float64(x * x))
end
(* Original form: (x + y)*(x + y), with each operation rounded to $MachinePrecision. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] * N[(x + y), $MachinePrecision]), $MachinePrecision]
(* Herbie-rewritten form: x*x + y*(x + (y + x)), each operation rounded to $MachinePrecision (the fma of the binary64 version spelled out as multiply-then-add). *)
code[x_, y_] := N[(y * N[(x + N[(y + x), $MachinePrecision]), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision]
\left(x + y\right) \cdot \left(x + y\right)
\mathsf{fma}\left(y, x + \left(y + x\right), x \cdot x\right)

Error

Target

Original: 0.0
Target: 0.0
Herbie: 0.0
\[x \cdot x + \left(y \cdot y + 2 \cdot \left(y \cdot x\right)\right) \]

Derivation

  1. Initial program 0.0

    \[\left(x + y\right) \cdot \left(x + y\right) \]
  2. Applied egg-rr 18.4

    \[\leadsto \color{blue}{\frac{\left(x \cdot x - y \cdot y\right) \cdot \left(x + y\right)}{x - y}} \]
  3. Taylor expanded in x around 0 0.0

    \[\leadsto \color{blue}{\left(y - -1 \cdot y\right) \cdot x + \left({y}^{2} + -1 \cdot \left({x}^{2} \cdot \left(1 + -1 \cdot \frac{y - -1 \cdot y}{y}\right)\right)\right)} \]
  4. Simplified 0.0

    \[\leadsto \color{blue}{\mathsf{fma}\left(y, x + \left(x + y\right), x \cdot x\right)} \]
    Proof
    (fma.f64 y (+.f64 x (+.f64 x y)) (*.f64 x x)): 0 points increase in error, 0 points decrease in error
    (fma.f64 y (Rewrite<= associate-+l+_binary64 (+.f64 (+.f64 x x) y)) (*.f64 x x)): 1 points increase in error, 0 points decrease in error
    (fma.f64 y (+.f64 (+.f64 x (Rewrite<= *-lft-identity_binary64 (*.f64 1 x))) y) (*.f64 x x)): 0 points increase in error, 0 points decrease in error
    (fma.f64 y (+.f64 (+.f64 x (*.f64 (Rewrite<= metadata-eval (neg.f64 -1)) x)) y) (*.f64 x x)): 0 points increase in error, 0 points decrease in error
    (fma.f64 y (+.f64 (Rewrite<= cancel-sign-sub-inv_binary64 (-.f64 x (*.f64 -1 x))) y) (*.f64 x x)): 0 points increase in error, 0 points decrease in error
    (fma.f64 y (+.f64 (-.f64 x (*.f64 -1 x)) y) (Rewrite<= unpow2_binary64 (pow.f64 x 2))): 1 points increase in error, 0 points decrease in error
    (fma.f64 y (+.f64 (-.f64 x (*.f64 -1 x)) y) (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 (pow.f64 x 2))))): 0 points increase in error, 0 points decrease in error
    (fma.f64 y (+.f64 (-.f64 x (*.f64 -1 x)) y) (neg.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (pow.f64 x 2))))): 0 points increase in error, 0 points decrease in error
    (Rewrite<= fma-neg_binary64 (-.f64 (*.f64 y (+.f64 (-.f64 x (*.f64 -1 x)) y)) (*.f64 -1 (pow.f64 x 2)))): 1 points increase in error, 1 points decrease in error
    (-.f64 (Rewrite<= distribute-lft-out_binary64 (+.f64 (*.f64 y (-.f64 x (*.f64 -1 x))) (*.f64 y y))) (*.f64 -1 (pow.f64 x 2))): 2 points increase in error, 2 points decrease in error
    (-.f64 (+.f64 (*.f64 y (-.f64 x (*.f64 -1 x))) (Rewrite<= unpow2_binary64 (pow.f64 y 2))) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (*.f64 (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 y))) (-.f64 x (*.f64 -1 x))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (*.f64 (neg.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 y))) (-.f64 x (*.f64 -1 x))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (Rewrite=> distribute-lft-neg-out_binary64 (neg.f64 (*.f64 (*.f64 -1 y) (-.f64 x (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (Rewrite<= distribute-rgt-neg-out_binary64 (*.f64 (*.f64 -1 y) (neg.f64 (-.f64 x (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (*.f64 (*.f64 -1 y) (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (-.f64 x (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (*.f64 (*.f64 -1 y) (Rewrite<= distribute-lft-out--_binary64 (-.f64 (*.f64 -1 x) (*.f64 -1 (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (*.f64 (*.f64 -1 y) (-.f64 (*.f64 -1 x) (Rewrite<= neg-mul-1_binary64 (neg.f64 (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (*.f64 (*.f64 -1 y) (-.f64 (*.f64 -1 x) (neg.f64 (Rewrite=> mul-1-neg_binary64 (neg.f64 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (*.f64 (*.f64 -1 y) (-.f64 (*.f64 -1 x) (Rewrite=> remove-double-neg_binary64 x))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (Rewrite<= associate-*r*_binary64 (*.f64 -1 (*.f64 y (-.f64 (*.f64 -1 x) x)))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
    (-.f64 (+.f64 (*.f64 -1 (*.f64 y (-.f64 (*.f64 -1 x) x))) (pow.f64 y 2)) (Rewrite=> *-commutative_binary64 (*.f64 (pow.f64 x 2) -1))): 0 points increase in error, 0 points decrease in error
    (Rewrite=> cancel-sign-sub-inv_binary64 (+.f64 (+.f64 (*.f64 -1 (*.f64 y (-.f64 (*.f64 -1 x) x))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (Rewrite=> mul-1-neg_binary64 (neg.f64 (*.f64 y (-.f64 (*.f64 -1 x) x)))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (neg.f64 (*.f64 y (Rewrite=> sub-neg_binary64 (+.f64 (*.f64 -1 x) (neg.f64 x))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (neg.f64 (*.f64 y (+.f64 (*.f64 -1 x) (Rewrite<= mul-1-neg_binary64 (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (neg.f64 (Rewrite=> distribute-lft-in_binary64 (+.f64 (*.f64 y (*.f64 -1 x)) (*.f64 y (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (neg.f64 (Rewrite=> distribute-rgt-out_binary64 (*.f64 (*.f64 -1 x) (+.f64 y y)))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (neg.f64 (*.f64 (*.f64 -1 x) (+.f64 y (Rewrite<= *-lft-identity_binary64 (*.f64 1 y))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (neg.f64 (*.f64 (*.f64 -1 x) (+.f64 y (*.f64 (Rewrite<= metadata-eval (neg.f64 -1)) y)))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (neg.f64 (*.f64 (*.f64 -1 x) (Rewrite<= cancel-sign-sub-inv_binary64 (-.f64 y (*.f64 -1 y))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (Rewrite<= distribute-lft-neg-out_binary64 (*.f64 (neg.f64 (*.f64 -1 x)) (-.f64 y (*.f64 -1 y)))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (neg.f64 (Rewrite=> mul-1-neg_binary64 (neg.f64 x))) (-.f64 y (*.f64 -1 y))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (Rewrite=> remove-double-neg_binary64 x) (-.f64 y (*.f64 -1 y))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (Rewrite<= *-commutative_binary64 (*.f64 (-.f64 y (*.f64 -1 y)) x)) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (pow.f64 x 2))) -1)): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (Rewrite<= metadata-eval (-.f64 1 2)))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (Rewrite<= metadata-eval (+.f64 1 1))))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (Rewrite<= *-inverses_binary64 (/.f64 y y)) 1)))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (/.f64 y y) (Rewrite<= metadata-eval (neg.f64 -1)))))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (/.f64 y y) (neg.f64 (Rewrite<= metadata-eval (/.f64 -1 1))))))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (/.f64 y y) (neg.f64 (/.f64 -1 (Rewrite<= *-inverses_binary64 (/.f64 y y)))))))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (/.f64 y y) (neg.f64 (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 -1 y) y))))))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (Rewrite<= sub-neg_binary64 (-.f64 (/.f64 y y) (/.f64 (*.f64 -1 y) y)))))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (Rewrite<= div-sub_binary64 (/.f64 (-.f64 y (*.f64 -1 y)) y))))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (Rewrite<= unsub-neg_binary64 (+.f64 1 (neg.f64 (/.f64 (-.f64 y (*.f64 -1 y)) y)))))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (+.f64 1 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (/.f64 (-.f64 y (*.f64 -1 y)) y)))))): 0 points increase in error, 0 points decrease in error
    (+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (Rewrite<= associate-*r*_binary64 (*.f64 -1 (*.f64 (pow.f64 x 2) (+.f64 1 (*.f64 -1 (/.f64 (-.f64 y (*.f64 -1 y)) y))))))): 0 points increase in error, 0 points decrease in error
    (Rewrite<= associate-+r+_binary64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (+.f64 (pow.f64 y 2) (*.f64 -1 (*.f64 (pow.f64 x 2) (+.f64 1 (*.f64 -1 (/.f64 (-.f64 y (*.f64 -1 y)) y)))))))): 0 points increase in error, 0 points decrease in error
  5. Final simplification 0.0

    \[\leadsto \mathsf{fma}\left(y, x + \left(y + x\right), x \cdot x\right) \]

Alternatives

Alternative 1
Error: 0.0
Cost: 704
\[x \cdot x + y \cdot \left(x + \left(y + x\right)\right) \]
Alternative 2
Error: 0.0
Cost: 448
\[\left(y + x\right) \cdot \left(y + x\right) \]
Alternative 3
Error: 8.8
Cost: 324
\[\begin{array}{l} \mathbf{if}\;y \leq 7.800775820743477 \cdot 10^{-79}:\\ \;\;\;\;x \cdot x\\ \mathbf{else}:\\ \;\;\;\;y \cdot y\\ \end{array} \]
Alternative 4
Error: 28.0
Cost: 192
\[x \cdot x \]

Error

Reproduce

herbie shell --seed 2022291 
; FPCore input for reproducing this Herbie run (pair with the shell command above).
(FPCore (x y)
  :name "Examples.Basics.BasicTests:f3 from sbv-4.4"
  :precision binary64

  ; Known-good target expression: the expanded square x^2 + (y^2 + 2yx).
  :herbie-target
  (+ (* x x) (+ (* y y) (* 2.0 (* y x))))

  (* (+ x y) (+ x y)))