Simplified 0.0
\[\leadsto \color{blue}{\mathsf{fma}\left(1 - x, \frac{y}{-1 - y}, 1\right)}
\]
Proof
(fma.f64 (-.f64 1 x) (/.f64 y (-.f64 -1 y)) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (Rewrite=> sub-neg_binary64 (+.f64 -1 (neg.f64 y)))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (+.f64 (Rewrite<= metadata-eval (*.f64 -1 1)) (neg.f64 y))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (+.f64 (*.f64 -1 1) (Rewrite=> neg-mul-1_binary64 (*.f64 -1 y)))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (Rewrite<= distribute-lft-in_binary64 (*.f64 -1 (+.f64 1 y)))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (*.f64 -1 (Rewrite<= +-commutative_binary64 (+.f64 y 1)))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (*.f64 (Rewrite<= metadata-eval (/.f64 1 -1)) (+.f64 y 1))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (Rewrite<= associate-/r/_binary64 (/.f64 1 (/.f64 -1 (+.f64 y 1))))) 1): 23 points increase in error, 7 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 1 (+.f64 y 1)) -1))) 1): 7 points increase in error, 23 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (/.f64 (Rewrite<= *-commutative_binary64 (*.f64 (+.f64 y 1) 1)) -1)) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (Rewrite=> associate-/l*_binary64 (/.f64 (+.f64 y 1) (/.f64 -1 1)))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (/.f64 y (/.f64 (+.f64 y 1) (Rewrite=> metadata-eval -1))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (Rewrite=> associate-/r/_binary64 (*.f64 (/.f64 y (+.f64 y 1)) -1)) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (Rewrite=> *-commutative_binary64 (*.f64 -1 (/.f64 y (+.f64 y 1)))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (Rewrite<= neg-mul-1_binary64 (neg.f64 (/.f64 y (+.f64 y 1)))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (neg.f64 (/.f64 (Rewrite<= *-lft-identity_binary64 (*.f64 1 y)) (+.f64 y 1))) 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (-.f64 1 x) (neg.f64 (Rewrite=> associate-/l*_binary64 (/.f64 1 (/.f64 (+.f64 y 1) y)))) 1): 7 points increase in error, 0 points decrease in error
(Rewrite<= fma-def_binary64 (+.f64 (*.f64 (-.f64 1 x) (neg.f64 (/.f64 1 (/.f64 (+.f64 y 1) y)))) 1)): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 (-.f64 1 x) (neg.f64 (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 1 y) (+.f64 y 1))))) 1): 0 points increase in error, 7 points decrease in error
(+.f64 (*.f64 (-.f64 1 x) (neg.f64 (/.f64 (Rewrite=> *-lft-identity_binary64 y) (+.f64 y 1)))) 1): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 (-.f64 1 x) (Rewrite=> distribute-neg-frac_binary64 (/.f64 (neg.f64 y) (+.f64 y 1)))) 1): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite=> associate-*r/_binary64 (/.f64 (*.f64 (-.f64 1 x) (neg.f64 y)) (+.f64 y 1))) 1): 31 points increase in error, 0 points decrease in error
(+.f64 (/.f64 (Rewrite<= distribute-rgt-neg-in_binary64 (neg.f64 (*.f64 (-.f64 1 x) y))) (+.f64 y 1)) 1): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= distribute-neg-frac_binary64 (neg.f64 (/.f64 (*.f64 (-.f64 1 x) y) (+.f64 y 1)))) 1): 0 points increase in error, 0 points decrease in error
(Rewrite<= +-commutative_binary64 (+.f64 1 (neg.f64 (/.f64 (*.f64 (-.f64 1 x) y) (+.f64 y 1))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= sub-neg_binary64 (-.f64 1 (/.f64 (*.f64 (-.f64 1 x) y) (+.f64 y 1)))): 0 points increase in error, 0 points decrease in error