Simplified — 27.8
\[\leadsto \color{blue}{\mathsf{fma}\left(\frac{x - 1}{1 + y}, y, 1\right)}
\]
Proof
(fma.f64 (/.f64 (-.f64 x 1) (+.f64 1 y)) y 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (/.f64 (-.f64 x 1) (Rewrite<= +-commutative_binary64 (+.f64 y 1))) y 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (Rewrite=> div-sub_binary64 (-.f64 (/.f64 x (+.f64 y 1)) (/.f64 1 (+.f64 y 1)))) y 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (Rewrite=> sub-neg_binary64 (+.f64 (/.f64 x (+.f64 y 1)) (neg.f64 (/.f64 1 (+.f64 y 1))))) y 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 (/.f64 x (+.f64 y 1))))) (neg.f64 (/.f64 1 (+.f64 y 1)))) y 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (Rewrite<= distribute-neg-in_binary64 (neg.f64 (+.f64 (neg.f64 (/.f64 x (+.f64 y 1))) (/.f64 1 (+.f64 y 1))))) y 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (neg.f64 (Rewrite<= +-commutative_binary64 (+.f64 (/.f64 1 (+.f64 y 1)) (neg.f64 (/.f64 x (+.f64 y 1)))))) y 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (neg.f64 (Rewrite<= sub-neg_binary64 (-.f64 (/.f64 1 (+.f64 y 1)) (/.f64 x (+.f64 y 1))))) y 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (neg.f64 (Rewrite<= div-sub_binary64 (/.f64 (-.f64 1 x) (+.f64 y 1)))) y 1): 0 points increase in error, 0 points decrease in error
(fma.f64 (Rewrite=> distribute-neg-frac_binary64 (/.f64 (neg.f64 (-.f64 1 x)) (+.f64 y 1))) y 1): 0 points increase in error, 0 points decrease in error
(Rewrite<= fma-def_binary64 (+.f64 (*.f64 (/.f64 (neg.f64 (-.f64 1 x)) (+.f64 y 1)) y) 1)): 30 points increase in error, 37 points decrease in error
(+.f64 (Rewrite<= associate-/r/_binary64 (/.f64 (neg.f64 (-.f64 1 x)) (/.f64 (+.f64 y 1) y))) 1): 17 points increase in error, 17 points decrease in error
(+.f64 (Rewrite<= distribute-neg-frac_binary64 (neg.f64 (/.f64 (-.f64 1 x) (/.f64 (+.f64 y 1) y)))) 1): 0 points increase in error, 0 points decrease in error
(+.f64 (neg.f64 (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 (-.f64 1 x) y) (+.f64 y 1)))) 1): 35 points increase in error, 13 points decrease in error
(Rewrite<= +-commutative_binary64 (+.f64 1 (neg.f64 (/.f64 (*.f64 (-.f64 1 x) y) (+.f64 y 1))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= sub-neg_binary64 (-.f64 1 (/.f64 (*.f64 (-.f64 1 x) y) (+.f64 y 1)))): 0 points increase in error, 0 points decrease in error
Simplified — 0.0
\[\leadsto \color{blue}{x + \left(\frac{-1}{y} + 1\right) \cdot \frac{1 - x}{y}}
\]
Proof
(+.f64 x (*.f64 (+.f64 (/.f64 -1 y) 1) (/.f64 (-.f64 1 x) y))): 0 points increase in error, 0 points decrease in error
(+.f64 x (Rewrite<= distribute-lft1-in_binary64 (+.f64 (*.f64 (/.f64 -1 y) (/.f64 (-.f64 1 x) y)) (/.f64 (-.f64 1 x) y)))): 1 points increase in error, 1 points decrease in error
(+.f64 x (+.f64 (Rewrite<= times-frac_binary64 (/.f64 (*.f64 -1 (-.f64 1 x)) (*.f64 y y))) (/.f64 (-.f64 1 x) y))): 8 points increase in error, 12 points decrease in error
(+.f64 x (+.f64 (/.f64 (*.f64 -1 (-.f64 1 x)) (Rewrite<= unpow2_binary64 (pow.f64 y 2))) (/.f64 (-.f64 1 x) y))): 0 points increase in error, 0 points decrease in error
(+.f64 x (+.f64 (Rewrite<= associate-*r/_binary64 (*.f64 -1 (/.f64 (-.f64 1 x) (pow.f64 y 2)))) (/.f64 (-.f64 1 x) y))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-+l+_binary64 (+.f64 (+.f64 x (*.f64 -1 (/.f64 (-.f64 1 x) (pow.f64 y 2)))) (/.f64 (-.f64 1 x) y))): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 -1 (/.f64 (-.f64 1 x) (pow.f64 y 2))) x)) (/.f64 (-.f64 1 x) y)): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 -1 (/.f64 (-.f64 1 x) (pow.f64 y 2))) x) (Rewrite=> div-sub_binary64 (-.f64 (/.f64 1 y) (/.f64 x y)))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (+.f64 (*.f64 -1 (/.f64 (-.f64 1 x) (pow.f64 y 2))) x) (/.f64 1 y)) (/.f64 x y))): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= +-commutative_binary64 (+.f64 (/.f64 1 y) (+.f64 (*.f64 -1 (/.f64 (-.f64 1 x) (pow.f64 y 2))) x))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (Rewrite=> +-commutative_binary64 (+.f64 x (*.f64 -1 (/.f64 (-.f64 1 x) (pow.f64 y 2)))))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (+.f64 x (Rewrite=> mul-1-neg_binary64 (neg.f64 (/.f64 (-.f64 1 x) (pow.f64 y 2)))))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (+.f64 x (neg.f64 (Rewrite=> div-sub_binary64 (-.f64 (/.f64 1 (pow.f64 y 2)) (/.f64 x (pow.f64 y 2))))))) (/.f64 x y)): 2 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (+.f64 x (neg.f64 (Rewrite=> sub-neg_binary64 (+.f64 (/.f64 1 (pow.f64 y 2)) (neg.f64 (/.f64 x (pow.f64 y 2)))))))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (+.f64 x (neg.f64 (Rewrite=> +-commutative_binary64 (+.f64 (neg.f64 (/.f64 x (pow.f64 y 2))) (/.f64 1 (pow.f64 y 2))))))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (+.f64 x (Rewrite=> distribute-neg-in_binary64 (+.f64 (neg.f64 (neg.f64 (/.f64 x (pow.f64 y 2)))) (neg.f64 (/.f64 1 (pow.f64 y 2))))))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (+.f64 x (+.f64 (Rewrite=> remove-double-neg_binary64 (/.f64 x (pow.f64 y 2))) (neg.f64 (/.f64 1 (pow.f64 y 2)))))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (+.f64 x (Rewrite=> unsub-neg_binary64 (-.f64 (/.f64 x (pow.f64 y 2)) (/.f64 1 (pow.f64 y 2)))))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (Rewrite<= associate--l+_binary64 (-.f64 (+.f64 x (/.f64 x (pow.f64 y 2))) (/.f64 1 (pow.f64 y 2))))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (/.f64 1 y) (-.f64 (Rewrite<= +-commutative_binary64 (+.f64 (/.f64 x (pow.f64 y 2)) x)) (/.f64 1 (pow.f64 y 2)))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (/.f64 1 y) (+.f64 (/.f64 x (pow.f64 y 2)) x)) (/.f64 1 (pow.f64 y 2)))) (/.f64 x y)): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate--r+_binary64 (-.f64 (+.f64 (/.f64 1 y) (+.f64 (/.f64 x (pow.f64 y 2)) x)) (+.f64 (/.f64 1 (pow.f64 y 2)) (/.f64 x y)))): 0 points increase in error, 0 points decrease in error