Simplified 11.8
\[\leadsto \color{blue}{\frac{x.im}{y.im} + \frac{y.re}{y.im \cdot y.im} \cdot \left(x.re - \frac{x.im \cdot y.re}{y.im}\right)}
\]
Proof
(+.f64 (/.f64 x.im y.im) (*.f64 (/.f64 y.re (*.f64 y.im y.im)) (-.f64 x.re (/.f64 (*.f64 x.im y.re) y.im)))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 x.im y.im) (*.f64 (/.f64 y.re (Rewrite<= unpow2_binary64 (pow.f64 y.im 2))) (-.f64 x.re (/.f64 (*.f64 x.im y.re) y.im)))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 x.im y.im) (Rewrite<= distribute-lft-out--_binary64 (-.f64 (*.f64 (/.f64 y.re (pow.f64 y.im 2)) x.re) (*.f64 (/.f64 y.re (pow.f64 y.im 2)) (/.f64 (*.f64 x.im y.re) y.im))))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 x.im y.im) (-.f64 (Rewrite<= associate-/r/_binary64 (/.f64 y.re (/.f64 (pow.f64 y.im 2) x.re))) (*.f64 (/.f64 y.re (pow.f64 y.im 2)) (/.f64 (*.f64 x.im y.re) y.im)))): 7 points increase in error, 6 points decrease in error
(+.f64 (/.f64 x.im y.im) (-.f64 (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 y.re x.re) (pow.f64 y.im 2))) (*.f64 (/.f64 y.re (pow.f64 y.im 2)) (/.f64 (*.f64 x.im y.re) y.im)))): 10 points increase in error, 9 points decrease in error
(+.f64 (/.f64 x.im y.im) (-.f64 (/.f64 (Rewrite<= *-commutative_binary64 (*.f64 x.re y.re)) (pow.f64 y.im 2)) (*.f64 (/.f64 y.re (pow.f64 y.im 2)) (/.f64 (*.f64 x.im y.re) y.im)))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 x.im y.im) (-.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (*.f64 (/.f64 y.re (pow.f64 y.im 2)) (/.f64 (Rewrite=> *-commutative_binary64 (*.f64 y.re x.im)) y.im)))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 x.im y.im) (-.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (Rewrite<= times-frac_binary64 (/.f64 (*.f64 y.re (*.f64 y.re x.im)) (*.f64 (pow.f64 y.im 2) y.im))))): 21 points increase in error, 4 points decrease in error
(+.f64 (/.f64 x.im y.im) (-.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (/.f64 (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 y.re y.re) x.im)) (*.f64 (pow.f64 y.im 2) y.im)))): 12 points increase in error, 1 point decrease in error
(+.f64 (/.f64 x.im y.im) (-.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (/.f64 (*.f64 (Rewrite<= unpow2_binary64 (pow.f64 y.re 2)) x.im) (*.f64 (pow.f64 y.im 2) y.im)))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 x.im y.im) (-.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (/.f64 (*.f64 (pow.f64 y.re 2) x.im) (*.f64 (Rewrite=> unpow2_binary64 (*.f64 y.im y.im)) y.im)))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 x.im y.im) (-.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (/.f64 (*.f64 (pow.f64 y.re 2) x.im) (Rewrite<= unpow3_binary64 (pow.f64 y.im 3))))): 3 points increase in error, 0 points decrease in error
(+.f64 (/.f64 x.im y.im) (Rewrite<= unsub-neg_binary64 (+.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (neg.f64 (/.f64 (*.f64 (pow.f64 y.re 2) x.im) (pow.f64 y.im 3)))))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 x.im y.im) (+.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (/.f64 (*.f64 (pow.f64 y.re 2) x.im) (pow.f64 y.im 3)))))): 0 points increase in error, 0 points decrease in error
(Rewrite=> +-commutative_binary64 (+.f64 (+.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (*.f64 -1 (/.f64 (*.f64 (pow.f64 y.re 2) x.im) (pow.f64 y.im 3)))) (/.f64 x.im y.im))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-+r+_binary64 (+.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (+.f64 (*.f64 -1 (/.f64 (*.f64 (pow.f64 y.re 2) x.im) (pow.f64 y.im 3))) (/.f64 x.im y.im)))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 (*.f64 x.re y.re) (pow.f64 y.im 2)) (Rewrite<= +-commutative_binary64 (+.f64 (/.f64 x.im y.im) (*.f64 -1 (/.f64 (*.f64 (pow.f64 y.re 2) x.im) (pow.f64 y.im 3)))))): 0 points increase in error, 0 points decrease in error