Simplified 9.8
\[\leadsto \color{blue}{\frac{1}{1 + x} + \left(\frac{1}{x + -1} + \frac{-2}{x}\right)}
\]
Proof
(+.f64 (/.f64 1 (+.f64 1 x)) (+.f64 (/.f64 1 (+.f64 x -1)) (/.f64 -2 x))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 1 (Rewrite<= +-commutative_binary64 (+.f64 x 1))) (+.f64 (/.f64 1 (+.f64 x -1)) (/.f64 -2 x))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 1 (+.f64 x 1)) (+.f64 (/.f64 1 (+.f64 x (Rewrite<= metadata-eval (neg.f64 1)))) (/.f64 -2 x))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 1 (+.f64 x 1)) (+.f64 (/.f64 1 (Rewrite<= sub-neg_binary64 (-.f64 x 1))) (/.f64 -2 x))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 1 (+.f64 x 1)) (+.f64 (/.f64 1 (-.f64 x 1)) (/.f64 (Rewrite<= metadata-eval (neg.f64 2)) x))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 1 (+.f64 x 1)) (+.f64 (/.f64 1 (-.f64 x 1)) (Rewrite<= distribute-neg-frac_binary64 (neg.f64 (/.f64 2 x))))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 1 (+.f64 x 1)) (Rewrite<= +-commutative_binary64 (+.f64 (neg.f64 (/.f64 2 x)) (/.f64 1 (-.f64 x 1))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-+l+_binary64 (+.f64 (+.f64 (/.f64 1 (+.f64 x 1)) (neg.f64 (/.f64 2 x))) (/.f64 1 (-.f64 x 1)))): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= sub-neg_binary64 (-.f64 (/.f64 1 (+.f64 x 1)) (/.f64 2 x))) (/.f64 1 (-.f64 x 1))): 0 points increase in error, 0 points decrease in error
Simplified 26.0
\[\leadsto \frac{1}{1 + x} + \color{blue}{\frac{\left(-2 + x \cdot 2\right) - x}{x - x \cdot x}}
\]
Proof
(/.f64 (-.f64 (+.f64 -2 (*.f64 x 2)) x) (-.f64 x (*.f64 x x))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (+.f64 (Rewrite<= metadata-eval (*.f64 -1 2)) (*.f64 x 2)) x) (-.f64 x (*.f64 x x))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (Rewrite<= distribute-rgt-in_binary64 (*.f64 2 (+.f64 -1 x))) x) (-.f64 x (*.f64 x x))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (*.f64 2 (Rewrite<= +-commutative_binary64 (+.f64 x -1))) x) (-.f64 x (*.f64 x x))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (Rewrite<= *-commutative_binary64 (*.f64 (+.f64 x -1) 2)) x) (-.f64 x (*.f64 x x))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (Rewrite<= +-rgt-identity_binary64 (+.f64 (*.f64 (+.f64 x -1) 2) 0)) x) (-.f64 x (*.f64 x x))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (+.f64 (*.f64 (+.f64 x -1) 2) 0) x) (-.f64 x (Rewrite<= unpow2_binary64 (pow.f64 x 2)))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (+.f64 (*.f64 (+.f64 x -1) 2) 0) x) (Rewrite<= unsub-neg_binary64 (+.f64 x (neg.f64 (pow.f64 x 2))))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (+.f64 (*.f64 (+.f64 x -1) 2) 0) x) (+.f64 (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 x))) (neg.f64 (pow.f64 x 2)))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (+.f64 (*.f64 (+.f64 x -1) 2) 0) x) (Rewrite<= distribute-neg-in_binary64 (neg.f64 (+.f64 (neg.f64 x) (pow.f64 x 2))))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (+.f64 (*.f64 (+.f64 x -1) 2) 0) x) (neg.f64 (Rewrite=> +-commutative_binary64 (+.f64 (pow.f64 x 2) (neg.f64 x))))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (+.f64 (*.f64 (+.f64 x -1) 2) 0) x) (neg.f64 (+.f64 (Rewrite=> unpow2_binary64 (*.f64 x x)) (neg.f64 x)))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (+.f64 (*.f64 (+.f64 x -1) 2) 0) x) (neg.f64 (+.f64 (*.f64 x x) (Rewrite=> neg-mul-1_binary64 (*.f64 -1 x))))): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (+.f64 (*.f64 (+.f64 x -1) 2) 0) x) (neg.f64 (Rewrite<= distribute-rgt-in_binary64 (*.f64 x (+.f64 x -1))))): 0 points increase in error, 0 points decrease in error
(Rewrite=> div-sub_binary64 (-.f64 (/.f64 (+.f64 (*.f64 (+.f64 x -1) 2) 0) (neg.f64 (*.f64 x (+.f64 x -1)))) (/.f64 x (neg.f64 (*.f64 x (+.f64 x -1)))))): 1 points increase in error, 1 points decrease in error
(-.f64 (/.f64 (Rewrite=> +-rgt-identity_binary64 (*.f64 (+.f64 x -1) 2)) (neg.f64 (*.f64 x (+.f64 x -1)))) (/.f64 x (neg.f64 (*.f64 x (+.f64 x -1))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= div-sub_binary64 (/.f64 (-.f64 (*.f64 (+.f64 x -1) 2) x) (neg.f64 (*.f64 x (+.f64 x -1))))): 1 points increase in error, 1 points decrease in error
(/.f64 (Rewrite<= unsub-neg_binary64 (+.f64 (*.f64 (+.f64 x -1) 2) (neg.f64 x))) (neg.f64 (*.f64 x (+.f64 x -1)))): 0 points increase in error, 0 points decrease in error
(/.f64 (Rewrite=> +-commutative_binary64 (+.f64 (neg.f64 x) (*.f64 (+.f64 x -1) 2))) (neg.f64 (*.f64 x (+.f64 x -1)))): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (Rewrite=> neg-mul-1_binary64 (*.f64 -1 x)) (*.f64 (+.f64 x -1) 2)) (neg.f64 (*.f64 x (+.f64 x -1)))): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (Rewrite=> *-commutative_binary64 (*.f64 x -1)) (*.f64 (+.f64 x -1) 2)) (neg.f64 (*.f64 x (+.f64 x -1)))): 0 points increase in error, 0 points decrease in error
(/.f64 (Rewrite<= fma-udef_binary64 (fma.f64 x -1 (*.f64 (+.f64 x -1) 2))) (neg.f64 (*.f64 x (+.f64 x -1)))): 0 points increase in error, 0 points decrease in error
(/.f64 (fma.f64 x -1 (*.f64 (+.f64 x -1) 2)) (Rewrite<= distribute-lft-neg-out_binary64 (*.f64 (neg.f64 x) (+.f64 x -1)))): 0 points increase in error, 0 points decrease in error
(/.f64 (fma.f64 x -1 (*.f64 (+.f64 x -1) 2)) (Rewrite=> *-commutative_binary64 (*.f64 (+.f64 x -1) (neg.f64 x)))): 0 points increase in error, 0 points decrease in error