Simplified 0.0
\[\leadsto \color{blue}{\frac{-0.5}{x} - \frac{-0.5}{y}}
\]
Proof
(-.f64 (/.f64 -1/2 x) (/.f64 -1/2 y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 (Rewrite<= metadata-eval (/.f64 -1 2)) x) (/.f64 -1/2 y)): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= associate-/r*_binary64 (/.f64 -1 (*.f64 2 x))) (/.f64 -1/2 y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 -1 (Rewrite<= *-commutative_binary64 (*.f64 x 2))) (/.f64 -1/2 y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 -1 (Rewrite<= /-rgt-identity_binary64 (/.f64 (*.f64 x 2) 1))) (/.f64 -1/2 y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 -1 (/.f64 (*.f64 x 2) (Rewrite<= *-inverses_binary64 (/.f64 y y)))) (/.f64 -1/2 y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 -1 (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 (*.f64 x 2) y) y))) (/.f64 -1/2 y)): 47 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 -1 y) (*.f64 (*.f64 x 2) y))) (/.f64 -1/2 y)): 19 points increase in error, 3 points decrease in error
(-.f64 (/.f64 (Rewrite<= neg-mul-1_binary64 (neg.f64 y)) (*.f64 (*.f64 x 2) y)) (/.f64 -1/2 y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 (neg.f64 y) (*.f64 (*.f64 x 2) y)) (/.f64 (Rewrite<= metadata-eval (neg.f64 1/2)) y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 (neg.f64 y) (*.f64 (*.f64 x 2) y)) (/.f64 (neg.f64 (Rewrite<= metadata-eval (/.f64 1 2))) y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 (neg.f64 y) (*.f64 (*.f64 x 2) y)) (/.f64 (neg.f64 (/.f64 (Rewrite<= *-inverses_binary64 (/.f64 x x)) 2)) y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 (neg.f64 y) (*.f64 (*.f64 x 2) y)) (/.f64 (neg.f64 (Rewrite<= associate-/r*_binary64 (/.f64 x (*.f64 x 2)))) y)): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 (neg.f64 y) (*.f64 (*.f64 x 2) y)) (Rewrite<= distribute-neg-frac_binary64 (neg.f64 (/.f64 (/.f64 x (*.f64 x 2)) y)))): 0 points increase in error, 0 points decrease in error
(-.f64 (/.f64 (neg.f64 y) (*.f64 (*.f64 x 2) y)) (neg.f64 (Rewrite<= associate-/r*_binary64 (/.f64 x (*.f64 (*.f64 x 2) y))))): 44 points increase in error, 4 points decrease in error
(-.f64 (/.f64 (neg.f64 y) (*.f64 (*.f64 x 2) y)) (Rewrite=> distribute-neg-frac_binary64 (/.f64 (neg.f64 x) (*.f64 (*.f64 x 2) y)))): 0 points increase in error, 0 points decrease in error
(Rewrite<= div-sub_binary64 (/.f64 (-.f64 (neg.f64 y) (neg.f64 x)) (*.f64 (*.f64 x 2) y))): 1 point increase in error, 2 points decrease in error
(/.f64 (Rewrite<= unsub-neg_binary64 (+.f64 (neg.f64 y) (neg.f64 (neg.f64 x)))) (*.f64 (*.f64 x 2) y)): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (neg.f64 y) (Rewrite=> remove-double-neg_binary64 x)) (*.f64 (*.f64 x 2) y)): 0 points increase in error, 0 points decrease in error
(/.f64 (Rewrite<= +-commutative_binary64 (+.f64 x (neg.f64 y))) (*.f64 (*.f64 x 2) y)): 0 points increase in error, 0 points decrease in error
(/.f64 (Rewrite<= sub-neg_binary64 (-.f64 x y)) (*.f64 (*.f64 x 2) y)): 0 points increase in error, 0 points decrease in error