Simplified 15.8
\[\leadsto \color{blue}{\frac{y}{a} + \frac{\frac{y}{a} \cdot \frac{t}{a} - \frac{x}{a}}{z}}
\]
Proof
(+.f64 (/.f64 y a) (/.f64 (-.f64 (*.f64 (/.f64 y a) (/.f64 t a)) (/.f64 x a)) z)): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (/.f64 (-.f64 (Rewrite<= times-frac_binary64 (/.f64 (*.f64 y t) (*.f64 a a))) (/.f64 x a)) z)): 25 points increase in error, 8 points decrease in error
(+.f64 (/.f64 y a) (/.f64 (-.f64 (/.f64 (*.f64 y t) (Rewrite<= unpow2_binary64 (pow.f64 a 2))) (/.f64 x a)) z)): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (/.f64 (Rewrite<= unsub-neg_binary64 (+.f64 (/.f64 (*.f64 y t) (pow.f64 a 2)) (neg.f64 (/.f64 x a)))) z)): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (/.f64 (+.f64 (/.f64 (*.f64 y t) (pow.f64 a 2)) (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (/.f64 x a)))) z)): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (/.f64 (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 -1 (/.f64 x a)) (/.f64 (*.f64 y t) (pow.f64 a 2)))) z)): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (/.f64 (+.f64 (*.f64 -1 (/.f64 x a)) (Rewrite<= *-lft-identity_binary64 (*.f64 1 (/.f64 (*.f64 y t) (pow.f64 a 2))))) z)): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (/.f64 (+.f64 (*.f64 -1 (/.f64 x a)) (*.f64 (Rewrite<= metadata-eval (neg.f64 -1)) (/.f64 (*.f64 y t) (pow.f64 a 2)))) z)): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (/.f64 (Rewrite<= cancel-sign-sub-inv_binary64 (-.f64 (*.f64 -1 (/.f64 x a)) (*.f64 -1 (/.f64 (*.f64 y t) (pow.f64 a 2))))) z)): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (/.f64 (Rewrite=> distribute-lft-out--_binary64 (*.f64 -1 (-.f64 (/.f64 x a) (/.f64 (*.f64 y t) (pow.f64 a 2))))) z)): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (Rewrite<= associate-*r/_binary64 (*.f64 -1 (/.f64 (-.f64 (/.f64 x a) (/.f64 (*.f64 y t) (pow.f64 a 2))) z)))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (Rewrite=> mul-1-neg_binary64 (neg.f64 (/.f64 (-.f64 (/.f64 x a) (/.f64 (*.f64 y t) (pow.f64 a 2))) z)))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (neg.f64 (Rewrite=> div-sub_binary64 (-.f64 (/.f64 (/.f64 x a) z) (/.f64 (/.f64 (*.f64 y t) (pow.f64 a 2)) z))))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (neg.f64 (-.f64 (Rewrite<= associate-/r*_binary64 (/.f64 x (*.f64 a z))) (/.f64 (/.f64 (*.f64 y t) (pow.f64 a 2)) z)))): 20 points increase in error, 17 points decrease in error
(+.f64 (/.f64 y a) (neg.f64 (-.f64 (/.f64 x (*.f64 a z)) (Rewrite<= associate-/r*_binary64 (/.f64 (*.f64 y t) (*.f64 (pow.f64 a 2) z)))))): 3 points increase in error, 4 points decrease in error
(+.f64 (/.f64 y a) (neg.f64 (Rewrite=> sub-neg_binary64 (+.f64 (/.f64 x (*.f64 a z)) (neg.f64 (/.f64 (*.f64 y t) (*.f64 (pow.f64 a 2) z))))))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (neg.f64 (+.f64 (/.f64 x (*.f64 a z)) (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (/.f64 (*.f64 y t) (*.f64 (pow.f64 a 2) z))))))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (Rewrite<= distribute-neg-out_binary64 (+.f64 (neg.f64 (/.f64 x (*.f64 a z))) (neg.f64 (*.f64 -1 (/.f64 (*.f64 y t) (*.f64 (pow.f64 a 2) z))))))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (+.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (/.f64 x (*.f64 a z)))) (neg.f64 (*.f64 -1 (/.f64 (*.f64 y t) (*.f64 (pow.f64 a 2) z)))))): 0 points increase in error, 0 points decrease in error
(+.f64 (/.f64 y a) (Rewrite=> unsub-neg_binary64 (-.f64 (*.f64 -1 (/.f64 x (*.f64 a z))) (*.f64 -1 (/.f64 (*.f64 y t) (*.f64 (pow.f64 a 2) z)))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (/.f64 y a) (*.f64 -1 (/.f64 x (*.f64 a z)))) (*.f64 -1 (/.f64 (*.f64 y t) (*.f64 (pow.f64 a 2) z))))): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 -1 (/.f64 x (*.f64 a z))) (/.f64 y a))) (*.f64 -1 (/.f64 (*.f64 y t) (*.f64 (pow.f64 a 2) z)))): 0 points increase in error, 0 points decrease in error