Simplified 9.0
\[\leadsto \color{blue}{y - \left(\frac{a}{t} + 1\right) \cdot \left(\frac{y - x}{t} \cdot \left(z - a\right)\right)}
\]
Proof
(-.f64 y (*.f64 (+.f64 (/.f64 a t) 1) (*.f64 (/.f64 (-.f64 y x) t) (-.f64 z a)))): 0 points increase in error, 0 points decrease in error
(-.f64 y (*.f64 (+.f64 (/.f64 a t) 1) (Rewrite<= associate-/r/_binary64 (/.f64 (-.f64 y x) (/.f64 t (-.f64 z a)))))): 14 points increase in error, 18 points decrease in error
(-.f64 y (*.f64 (+.f64 (/.f64 a t) 1) (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 (-.f64 y x) (-.f64 z a)) t)))): 35 points increase in error, 12 points decrease in error
(-.f64 y (*.f64 (+.f64 (/.f64 a t) 1) (/.f64 (Rewrite<= distribute-rgt-out--_binary64 (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x)))) t))): 0 points increase in error, 0 points decrease in error
(-.f64 y (*.f64 (+.f64 (/.f64 a t) 1) (/.f64 (-.f64 (Rewrite=> *-commutative_binary64 (*.f64 (-.f64 y x) z)) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(-.f64 y (Rewrite<= distribute-rgt1-in_binary64 (+.f64 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t) (*.f64 (/.f64 a t) (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))))): 1 point increase in error, 3 points decrease in error
(-.f64 y (+.f64 (/.f64 (-.f64 (Rewrite<= *-commutative_binary64 (*.f64 z (-.f64 y x))) (*.f64 a (-.f64 y x))) t) (*.f64 (/.f64 a t) (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t)))): 0 points increase in error, 0 points decrease in error
(-.f64 y (+.f64 (/.f64 (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x))) t) (Rewrite<= times-frac_binary64 (/.f64 (*.f64 a (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x)))) (*.f64 t t))))): 18 points increase in error, 2 points decrease in error
(-.f64 y (+.f64 (/.f64 (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x))) t) (/.f64 (*.f64 a (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x)))) (Rewrite<= unpow2_binary64 (pow.f64 t 2))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= unsub-neg_binary64 (+.f64 y (neg.f64 (+.f64 (/.f64 (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x))) t) (/.f64 (*.f64 a (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x)))) (pow.f64 t 2)))))): 0 points increase in error, 0 points decrease in error
(+.f64 y (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (+.f64 (/.f64 (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x))) t) (/.f64 (*.f64 a (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x)))) (pow.f64 t 2)))))): 0 points increase in error, 0 points decrease in error
(+.f64 y (*.f64 -1 (+.f64 (/.f64 (-.f64 (Rewrite=> *-commutative_binary64 (*.f64 (-.f64 y x) z)) (*.f64 a (-.f64 y x))) t) (/.f64 (*.f64 a (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x)))) (pow.f64 t 2))))): 0 points increase in error, 0 points decrease in error
(+.f64 y (*.f64 -1 (+.f64 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t) (/.f64 (*.f64 a (-.f64 (Rewrite<= *-commutative_binary64 (*.f64 z (-.f64 y x))) (*.f64 a (-.f64 y x)))) (pow.f64 t 2))))): 0 points increase in error, 0 points decrease in error
(+.f64 y (*.f64 -1 (Rewrite=> +-commutative_binary64 (+.f64 (/.f64 (*.f64 a (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x)))) (pow.f64 t 2)) (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))))): 0 points increase in error, 0 points decrease in error
(+.f64 y (Rewrite<= distribute-lft-out_binary64 (+.f64 (*.f64 -1 (/.f64 (*.f64 a (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x)))) (pow.f64 t 2))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))))): 0 points increase in error, 0 points decrease in error
(Rewrite=> associate-+r+_binary64 (+.f64 (+.f64 y (*.f64 -1 (/.f64 (*.f64 a (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x)))) (pow.f64 t 2)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t)))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 y (Rewrite=> mul-1-neg_binary64 (neg.f64 (/.f64 (*.f64 a (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x)))) (pow.f64 t 2))))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite=> unsub-neg_binary64 (-.f64 y (/.f64 (*.f64 a (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x)))) (pow.f64 t 2)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (-.f64 y (/.f64 (Rewrite=> *-commutative_binary64 (*.f64 (-.f64 (*.f64 z (-.f64 y x)) (*.f64 a (-.f64 y x))) a)) (pow.f64 t 2))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (-.f64 y (/.f64 (*.f64 (-.f64 (Rewrite=> *-commutative_binary64 (*.f64 (-.f64 y x) z)) (*.f64 a (-.f64 y x))) a) (pow.f64 t 2))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (-.f64 y (/.f64 (*.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) a) (Rewrite=> unpow2_binary64 (*.f64 t t)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (-.f64 y (Rewrite=> times-frac_binary64 (*.f64 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t) (/.f64 a t)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 2 points increase in error, 18 points decrease in error
(+.f64 (Rewrite=> cancel-sign-sub-inv_binary64 (+.f64 y (*.f64 (neg.f64 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t)) (/.f64 a t)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 y (*.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))) (/.f64 a t))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 y (*.f64 (Rewrite=> associate-*r/_binary64 (/.f64 (*.f64 -1 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x)))) t)) (/.f64 a t))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 y (*.f64 (/.f64 (Rewrite<= distribute-lft-out--_binary64 (-.f64 (*.f64 -1 (*.f64 (-.f64 y x) z)) (*.f64 -1 (*.f64 a (-.f64 y x))))) t) (/.f64 a t))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 y (Rewrite<= times-frac_binary64 (/.f64 (*.f64 (-.f64 (*.f64 -1 (*.f64 (-.f64 y x) z)) (*.f64 -1 (*.f64 a (-.f64 y x)))) a) (*.f64 t t)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 18 points increase in error, 2 points decrease in error
(+.f64 (+.f64 y (/.f64 (*.f64 (-.f64 (*.f64 -1 (*.f64 (-.f64 y x) z)) (*.f64 -1 (*.f64 a (-.f64 y x)))) a) (Rewrite<= unpow2_binary64 (pow.f64 t 2)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= +-commutative_binary64 (+.f64 (/.f64 (*.f64 (-.f64 (*.f64 -1 (*.f64 (-.f64 y x) z)) (*.f64 -1 (*.f64 a (-.f64 y x)))) a) (pow.f64 t 2)) y)) (*.f64 -1 (/.f64 (-.f64 (*.f64 (-.f64 y x) z) (*.f64 a (-.f64 y x))) t))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (/.f64 (*.f64 (-.f64 (*.f64 -1 (*.f64 (-.f64 y x) z)) (*.f64 -1 (*.f64 a (-.f64 y x)))) a) (pow.f64 t 2)) y) (*.f64 -1 (Rewrite=> div-sub_binary64 (-.f64 (/.f64 (*.f64 (-.f64 y x) z) t) (/.f64 (*.f64 a (-.f64 y x)) t))))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (/.f64 (*.f64 (-.f64 (*.f64 -1 (*.f64 (-.f64 y x) z)) (*.f64 -1 (*.f64 a (-.f64 y x)))) a) (pow.f64 t 2)) y) (*.f64 -1 (-.f64 (/.f64 (Rewrite<= *-commutative_binary64 (*.f64 z (-.f64 y x))) t) (/.f64 (*.f64 a (-.f64 y x)) t)))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (/.f64 (*.f64 (-.f64 (*.f64 -1 (*.f64 (-.f64 y x) z)) (*.f64 -1 (*.f64 a (-.f64 y x)))) a) (pow.f64 t 2)) y) (Rewrite<= distribute-lft-out--_binary64 (-.f64 (*.f64 -1 (/.f64 (*.f64 z (-.f64 y x)) t)) (*.f64 -1 (/.f64 (*.f64 a (-.f64 y x)) t))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (+.f64 (/.f64 (*.f64 (-.f64 (*.f64 -1 (*.f64 (-.f64 y x) z)) (*.f64 -1 (*.f64 a (-.f64 y x)))) a) (pow.f64 t 2)) y) (*.f64 -1 (/.f64 (*.f64 z (-.f64 y x)) t))) (*.f64 -1 (/.f64 (*.f64 a (-.f64 y x)) t)))): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 -1 (/.f64 (*.f64 z (-.f64 y x)) t)) (+.f64 (/.f64 (*.f64 (-.f64 (*.f64 -1 (*.f64 (-.f64 y x) z)) (*.f64 -1 (*.f64 a (-.f64 y x)))) a) (pow.f64 t 2)) y))) (*.f64 -1 (/.f64 (*.f64 a (-.f64 y x)) t))): 0 points increase in error, 0 points decrease in error