Simplified 0.1
\[\leadsto \color{blue}{x + \frac{-1 + x}{y} \cdot \left(\frac{1}{y} - 1\right)}
\]
Proof
(+.f64 x (*.f64 (/.f64 (+.f64 -1 x) y) (+.f64 (/.f64 1 y) -1))): 0 points increase in error, 0 points decrease in error
(+.f64 x (*.f64 (/.f64 (Rewrite<= +-commutative_binary64 (+.f64 x -1)) y) (+.f64 (/.f64 1 y) -1))): 0 points increase in error, 0 points decrease in error
(+.f64 x (*.f64 (/.f64 (+.f64 x (Rewrite<= metadata-eval (neg.f64 1))) y) (+.f64 (/.f64 1 y) -1))): 0 points increase in error, 0 points decrease in error
(+.f64 x (*.f64 (/.f64 (Rewrite<= sub-neg_binary64 (-.f64 x 1)) y) (+.f64 (/.f64 1 y) -1))): 0 points increase in error, 0 points decrease in error
(+.f64 x (Rewrite<= distribute-rgt-out_binary64 (+.f64 (*.f64 (/.f64 1 y) (/.f64 (-.f64 x 1) y)) (*.f64 -1 (/.f64 (-.f64 x 1) y))))): 2 points increase in error, 2 points decrease in error
(+.f64 x (+.f64 (Rewrite<= times-frac_binary64 (/.f64 (*.f64 1 (-.f64 x 1)) (*.f64 y y))) (*.f64 -1 (/.f64 (-.f64 x 1) y)))): 14 points increase in error, 13 points decrease in error
(+.f64 x (+.f64 (/.f64 (*.f64 1 (-.f64 x 1)) (Rewrite<= unpow2_binary64 (pow.f64 y 2))) (*.f64 -1 (/.f64 (-.f64 x 1) y)))): 0 points increase in error, 0 points decrease in error
(+.f64 x (+.f64 (/.f64 (*.f64 1 (-.f64 x 1)) (Rewrite<= *-lft-identity_binary64 (*.f64 1 (pow.f64 y 2)))) (*.f64 -1 (/.f64 (-.f64 x 1) y)))): 0 points increase in error, 0 points decrease in error
(+.f64 x (+.f64 (Rewrite=> times-frac_binary64 (*.f64 (/.f64 1 1) (/.f64 (-.f64 x 1) (pow.f64 y 2)))) (*.f64 -1 (/.f64 (-.f64 x 1) y)))): 0 points increase in error, 0 points decrease in error
(+.f64 x (+.f64 (*.f64 (Rewrite=> metadata-eval 1) (/.f64 (-.f64 x 1) (pow.f64 y 2))) (*.f64 -1 (/.f64 (-.f64 x 1) y)))): 0 points increase in error, 0 points decrease in error
(+.f64 x (+.f64 (Rewrite=> *-lft-identity_binary64 (/.f64 (-.f64 x 1) (pow.f64 y 2))) (*.f64 -1 (/.f64 (-.f64 x 1) y)))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-+l+_binary64 (+.f64 (+.f64 x (/.f64 (-.f64 x 1) (pow.f64 y 2))) (*.f64 -1 (/.f64 (-.f64 x 1) y)))): 0 points increase in error, 2 points decrease in error
(+.f64 (Rewrite=> +-commutative_binary64 (+.f64 (/.f64 (-.f64 x 1) (pow.f64 y 2)) x)) (*.f64 -1 (/.f64 (-.f64 x 1) y))): 0 points increase in error, 0 points decrease in error
(Rewrite=> associate-+l+_binary64 (+.f64 (/.f64 (-.f64 x 1) (pow.f64 y 2)) (+.f64 x (*.f64 -1 (/.f64 (-.f64 x 1) y))))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 (-.f64 x 1) (pow.f64 y 2)) (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) x))): 0 points increase in error, 0 points decrease in error
(Rewrite<= +-commutative_binary64 (+.f64 (+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) x) (/.f64 (-.f64 x 1) (pow.f64 y 2)))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) x) (Rewrite=> div-sub_binary64 (-.f64 (/.f64 x (pow.f64 y 2)) (/.f64 1 (pow.f64 y 2))))): 2 points increase in error, 0 points decrease in error
(Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) x) (/.f64 x (pow.f64 y 2))) (/.f64 1 (pow.f64 y 2)))): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= +-commutative_binary64 (+.f64 (/.f64 x (pow.f64 y 2)) (+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) x))) (/.f64 1 (pow.f64 y 2))): 0 points increase in error, 0 points decrease in error
(Rewrite=> associate--l+_binary64 (+.f64 (/.f64 x (pow.f64 y 2)) (-.f64 (+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) x) (/.f64 1 (pow.f64 y 2))))): 0 points increase in error, 0 points decrease in error
(Rewrite=> +-commutative_binary64 (+.f64 (-.f64 (+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) x) (/.f64 1 (pow.f64 y 2))) (/.f64 x (pow.f64 y 2)))): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite=> associate--l+_binary64 (+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) (-.f64 x (/.f64 1 (pow.f64 y 2))))) (/.f64 x (pow.f64 y 2))): 0 points increase in error, 0 points decrease in error
(Rewrite=> associate-+l+_binary64 (+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) (+.f64 (-.f64 x (/.f64 1 (pow.f64 y 2))) (/.f64 x (pow.f64 y 2))))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) (Rewrite<= associate--r-_binary64 (-.f64 x (-.f64 (/.f64 1 (pow.f64 y 2)) (/.f64 x (pow.f64 y 2)))))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) (-.f64 x (Rewrite<= div-sub_binary64 (/.f64 (-.f64 1 x) (pow.f64 y 2))))): 0 points increase in error, 2 points decrease in error
(+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) (-.f64 x (/.f64 (Rewrite=> sub-neg_binary64 (+.f64 1 (neg.f64 x))) (pow.f64 y 2)))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) (-.f64 x (/.f64 (+.f64 1 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 x))) (pow.f64 y 2)))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) (Rewrite<= unsub-neg_binary64 (+.f64 x (neg.f64 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2)))))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) (+.f64 x (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2)))))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 -1 (/.f64 (-.f64 x 1) y)) (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 -1 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2))) x))): 0 points increase in error, 0 points decrease in error
(Rewrite<= +-commutative_binary64 (+.f64 (+.f64 (*.f64 -1 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2))) x) (*.f64 -1 (/.f64 (-.f64 x 1) y)))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 -1 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2))) x) (Rewrite=> mul-1-neg_binary64 (neg.f64 (/.f64 (-.f64 x 1) y)))): 0 points increase in error, 0 points decrease in error
(Rewrite=> unsub-neg_binary64 (-.f64 (+.f64 (*.f64 -1 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2))) x) (/.f64 (-.f64 x 1) y))): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 -1 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2))) x) (Rewrite=> div-sub_binary64 (-.f64 (/.f64 x y) (/.f64 1 y)))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-+l-_binary64 (+.f64 (-.f64 (+.f64 (*.f64 -1 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2))) x) (/.f64 x y)) (/.f64 1 y))): 0 points increase in error, 0 points decrease in error
(Rewrite<= +-commutative_binary64 (+.f64 (/.f64 1 y) (-.f64 (+.f64 (*.f64 -1 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2))) x) (/.f64 x y)))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (/.f64 1 y) (+.f64 (*.f64 -1 (/.f64 (+.f64 1 (*.f64 -1 x)) (pow.f64 y 2))) x)) (/.f64 x y))): 0 points increase in error, 0 points decrease in error
Simplified 0.4
\[\leadsto x + \color{blue}{\frac{1 + \frac{-1}{y}}{y}}
\]
Proof
(/.f64 (+.f64 1 (/.f64 -1 y)) y): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 1 (/.f64 (Rewrite<= metadata-eval (neg.f64 1)) y)) y): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 1 (Rewrite<= distribute-neg-frac_binary64 (neg.f64 (/.f64 1 y)))) y): 0 points increase in error, 0 points decrease in error
(/.f64 (Rewrite=> +-commutative_binary64 (+.f64 (neg.f64 (/.f64 1 y)) 1)) y): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (Rewrite=> neg-sub0_binary64 (-.f64 0 (/.f64 1 y))) 1) y): 0 points increase in error, 0 points decrease in error
(/.f64 (Rewrite=> associate-+l-_binary64 (-.f64 0 (-.f64 (/.f64 1 y) 1))) y): 0 points increase in error, 0 points decrease in error
(/.f64 (Rewrite<= neg-sub0_binary64 (neg.f64 (-.f64 (/.f64 1 y) 1))) y): 0 points increase in error, 0 points decrease in error
(/.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (-.f64 (/.f64 1 y) 1))) y): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-*r/_binary64 (*.f64 -1 (/.f64 (-.f64 (/.f64 1 y) 1) y))): 0 points increase in error, 0 points decrease in error