Simplified 1.5
\[\leadsto \color{blue}{\frac{\frac{v}{t_1 + u}}{-1 - \frac{u}{t_1}}}
\]
Proof
(/.f64 (/.f64 v (+.f64 t1 u)) (-.f64 -1 (/.f64 u t1))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (-.f64 (Rewrite<= metadata-eval (neg.f64 1)) (/.f64 u t1))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (-.f64 (neg.f64 (Rewrite<= *-inverses_binary64 (/.f64 t1 t1))) (/.f64 u t1))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (-.f64 (Rewrite<= distribute-frac-neg_binary64 (/.f64 (neg.f64 t1) t1)) (/.f64 u t1))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (Rewrite<= div-sub_binary64 (/.f64 (-.f64 (neg.f64 t1) u) t1))): 3 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (/.f64 (Rewrite<= unsub-neg_binary64 (+.f64 (neg.f64 t1) (neg.f64 u))) t1)): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (/.f64 (Rewrite=> distribute-neg-out_binary64 (neg.f64 (+.f64 t1 u))) t1)): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (/.f64 (Rewrite=> neg-mul-1_binary64 (*.f64 -1 (+.f64 t1 u))) t1)): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (/.f64 (*.f64 -1 (+.f64 t1 u)) (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 t1))))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (/.f64 (*.f64 -1 (+.f64 t1 u)) (Rewrite=> neg-mul-1_binary64 (*.f64 -1 (neg.f64 t1))))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (Rewrite=> times-frac_binary64 (*.f64 (/.f64 -1 -1) (/.f64 (+.f64 t1 u) (neg.f64 t1))))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (*.f64 (Rewrite=> metadata-eval 1) (/.f64 (+.f64 t1 u) (neg.f64 t1)))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (Rewrite=> *-lft-identity_binary64 (/.f64 (+.f64 t1 u) (neg.f64 t1)))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (/.f64 (+.f64 t1 u) (Rewrite=> neg-mul-1_binary64 (*.f64 -1 t1)))): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 v (+.f64 t1 u)) (Rewrite<= associate-/l/_binary64 (/.f64 (/.f64 (+.f64 t1 u) t1) -1))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 (/.f64 v (+.f64 t1 u)) -1) (/.f64 (+.f64 t1 u) t1))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-*r/_binary64 (*.f64 (/.f64 v (+.f64 t1 u)) (/.f64 -1 (/.f64 (+.f64 t1 u) t1)))): 12 points increase in error, 6 points decrease in error
(*.f64 (/.f64 v (+.f64 t1 u)) (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 -1 t1) (+.f64 t1 u)))): 4 points increase in error, 14 points decrease in error
(*.f64 (/.f64 v (+.f64 t1 u)) (/.f64 (Rewrite<= neg-mul-1_binary64 (neg.f64 t1)) (+.f64 t1 u))): 0 points increase in error, 0 points decrease in error
(Rewrite<= times-frac_binary64 (/.f64 (*.f64 v (neg.f64 t1)) (*.f64 (+.f64 t1 u) (+.f64 t1 u)))): 95 points increase in error, 11 points decrease in error
(/.f64 (Rewrite<= *-commutative_binary64 (*.f64 (neg.f64 t1) v)) (*.f64 (+.f64 t1 u) (+.f64 t1 u))): 0 points increase in error, 0 points decrease in error