Simplified — 0.0
\[\leadsto \color{blue}{\mathsf{fma}\left(y, x + \left(x + y\right), x \cdot x\right)}
\]
Proof
(fma.f64 y (+.f64 x (+.f64 x y)) (*.f64 x x)): 0 points increase in error, 0 points decrease in error
(fma.f64 y (Rewrite<= associate-+l+_binary64 (+.f64 (+.f64 x x) y)) (*.f64 x x)): 1 points increase in error, 0 points decrease in error
(fma.f64 y (+.f64 (+.f64 x (Rewrite<= *-lft-identity_binary64 (*.f64 1 x))) y) (*.f64 x x)): 0 points increase in error, 0 points decrease in error
(fma.f64 y (+.f64 (+.f64 x (*.f64 (Rewrite<= metadata-eval (neg.f64 -1)) x)) y) (*.f64 x x)): 0 points increase in error, 0 points decrease in error
(fma.f64 y (+.f64 (Rewrite<= cancel-sign-sub-inv_binary64 (-.f64 x (*.f64 -1 x))) y) (*.f64 x x)): 0 points increase in error, 0 points decrease in error
(fma.f64 y (+.f64 (-.f64 x (*.f64 -1 x)) y) (Rewrite<= unpow2_binary64 (pow.f64 x 2))): 1 points increase in error, 0 points decrease in error
(fma.f64 y (+.f64 (-.f64 x (*.f64 -1 x)) y) (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 (pow.f64 x 2))))): 0 points increase in error, 0 points decrease in error
(fma.f64 y (+.f64 (-.f64 x (*.f64 -1 x)) y) (neg.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (pow.f64 x 2))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= fma-neg_binary64 (-.f64 (*.f64 y (+.f64 (-.f64 x (*.f64 -1 x)) y)) (*.f64 -1 (pow.f64 x 2)))): 1 points increase in error, 1 points decrease in error
(-.f64 (Rewrite<= distribute-lft-out_binary64 (+.f64 (*.f64 y (-.f64 x (*.f64 -1 x))) (*.f64 y y))) (*.f64 -1 (pow.f64 x 2))): 2 points increase in error, 2 points decrease in error
(-.f64 (+.f64 (*.f64 y (-.f64 x (*.f64 -1 x))) (Rewrite<= unpow2_binary64 (pow.f64 y 2))) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 y))) (-.f64 x (*.f64 -1 x))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 (neg.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 y))) (-.f64 x (*.f64 -1 x))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (Rewrite=> distribute-lft-neg-out_binary64 (neg.f64 (*.f64 (*.f64 -1 y) (-.f64 x (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (Rewrite<= distribute-rgt-neg-out_binary64 (*.f64 (*.f64 -1 y) (neg.f64 (-.f64 x (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 (*.f64 -1 y) (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (-.f64 x (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 (*.f64 -1 y) (Rewrite<= distribute-lft-out--_binary64 (-.f64 (*.f64 -1 x) (*.f64 -1 (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 (*.f64 -1 y) (-.f64 (*.f64 -1 x) (Rewrite<= neg-mul-1_binary64 (neg.f64 (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 (*.f64 -1 y) (-.f64 (*.f64 -1 x) (neg.f64 (Rewrite=> mul-1-neg_binary64 (neg.f64 x))))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 (*.f64 -1 y) (-.f64 (*.f64 -1 x) (Rewrite=> remove-double-neg_binary64 x))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (Rewrite<= associate-*r*_binary64 (*.f64 -1 (*.f64 y (-.f64 (*.f64 -1 x) x)))) (pow.f64 y 2)) (*.f64 -1 (pow.f64 x 2))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 -1 (*.f64 y (-.f64 (*.f64 -1 x) x))) (pow.f64 y 2)) (Rewrite=> *-commutative_binary64 (*.f64 (pow.f64 x 2) -1))): 0 points increase in error, 0 points decrease in error
(Rewrite=> cancel-sign-sub-inv_binary64 (+.f64 (+.f64 (*.f64 -1 (*.f64 y (-.f64 (*.f64 -1 x) x))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (Rewrite=> mul-1-neg_binary64 (neg.f64 (*.f64 y (-.f64 (*.f64 -1 x) x)))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (neg.f64 (*.f64 y (Rewrite=> sub-neg_binary64 (+.f64 (*.f64 -1 x) (neg.f64 x))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (neg.f64 (*.f64 y (+.f64 (*.f64 -1 x) (Rewrite<= mul-1-neg_binary64 (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (neg.f64 (Rewrite=> distribute-lft-in_binary64 (+.f64 (*.f64 y (*.f64 -1 x)) (*.f64 y (*.f64 -1 x))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (neg.f64 (Rewrite=> distribute-rgt-out_binary64 (*.f64 (*.f64 -1 x) (+.f64 y y)))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (neg.f64 (*.f64 (*.f64 -1 x) (+.f64 y (Rewrite<= *-lft-identity_binary64 (*.f64 1 y))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (neg.f64 (*.f64 (*.f64 -1 x) (+.f64 y (*.f64 (Rewrite<= metadata-eval (neg.f64 -1)) y)))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (neg.f64 (*.f64 (*.f64 -1 x) (Rewrite<= cancel-sign-sub-inv_binary64 (-.f64 y (*.f64 -1 y))))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (Rewrite<= distribute-lft-neg-out_binary64 (*.f64 (neg.f64 (*.f64 -1 x)) (-.f64 y (*.f64 -1 y)))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (neg.f64 (Rewrite=> mul-1-neg_binary64 (neg.f64 x))) (-.f64 y (*.f64 -1 y))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (Rewrite=> remove-double-neg_binary64 x) (-.f64 y (*.f64 -1 y))) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (Rewrite<= *-commutative_binary64 (*.f64 (-.f64 y (*.f64 -1 y)) x)) (pow.f64 y 2)) (*.f64 (neg.f64 (pow.f64 x 2)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (pow.f64 x 2))) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (Rewrite<= metadata-eval (-.f64 1 2)))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (Rewrite<= metadata-eval (+.f64 1 1))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (Rewrite<= *-inverses_binary64 (/.f64 y y)) 1)))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (/.f64 y y) (Rewrite<= metadata-eval (neg.f64 -1)))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (/.f64 y y) (neg.f64 (Rewrite<= metadata-eval (/.f64 -1 1))))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (/.f64 y y) (neg.f64 (/.f64 -1 (Rewrite<= *-inverses_binary64 (/.f64 y y)))))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (+.f64 (/.f64 y y) (neg.f64 (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 -1 y) y))))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (Rewrite<= sub-neg_binary64 (-.f64 (/.f64 y y) (/.f64 (*.f64 -1 y) y)))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (-.f64 1 (Rewrite<= div-sub_binary64 (/.f64 (-.f64 y (*.f64 -1 y)) y))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (Rewrite<= unsub-neg_binary64 (+.f64 1 (neg.f64 (/.f64 (-.f64 y (*.f64 -1 y)) y)))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (*.f64 (*.f64 -1 (pow.f64 x 2)) (+.f64 1 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (/.f64 (-.f64 y (*.f64 -1 y)) y)))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (pow.f64 y 2)) (Rewrite<= associate-*r*_binary64 (*.f64 -1 (*.f64 (pow.f64 x 2) (+.f64 1 (*.f64 -1 (/.f64 (-.f64 y (*.f64 -1 y)) y))))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-+r+_binary64 (+.f64 (*.f64 (-.f64 y (*.f64 -1 y)) x) (+.f64 (pow.f64 y 2) (*.f64 -1 (*.f64 (pow.f64 x 2) (+.f64 1 (*.f64 -1 (/.f64 (-.f64 y (*.f64 -1 y)) y)))))))): 0 points increase in error, 0 points decrease in error