Simplified 10.1
\[\leadsto \color{blue}{t + \left(\frac{a}{z} + 1\right) \cdot \frac{t - x}{\frac{z}{\mathsf{fma}\left(-1, y, a\right)}}}
\]
Proof
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (/.f64 (-.f64 t x) (/.f64 z (fma.f64 -1 y a))))): 0 points increase in error, 0 points decrease in error
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (/.f64 (-.f64 t x) (/.f64 z (fma.f64 -1 y (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 a)))))))): 0 points increase in error, 0 points decrease in error
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (/.f64 (-.f64 t x) (/.f64 z (Rewrite<= fma-neg_binary64 (-.f64 (*.f64 -1 y) (neg.f64 a))))))): 0 points increase in error, 0 points decrease in error
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 (-.f64 t x) (-.f64 (*.f64 -1 y) (neg.f64 a))) z)))): 40 points increase in error, 17 points decrease in error
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (/.f64 (Rewrite<= distribute-rgt-out--_binary64 (-.f64 (*.f64 (*.f64 -1 y) (-.f64 t x)) (*.f64 (neg.f64 a) (-.f64 t x)))) z))): 2 points increase in error, 0 points decrease in error
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (/.f64 (-.f64 (Rewrite<= associate-*r*_binary64 (*.f64 -1 (*.f64 y (-.f64 t x)))) (*.f64 (neg.f64 a) (-.f64 t x))) z))): 0 points increase in error, 0 points decrease in error
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (/.f64 (-.f64 (*.f64 -1 (*.f64 y (-.f64 t x))) (Rewrite=> distribute-lft-neg-out_binary64 (neg.f64 (*.f64 a (-.f64 t x))))) z))): 0 points increase in error, 0 points decrease in error
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (/.f64 (-.f64 (*.f64 -1 (*.f64 y (-.f64 t x))) (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (*.f64 a (-.f64 t x))))) z))): 0 points increase in error, 0 points decrease in error
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (/.f64 (Rewrite=> distribute-lft-out--_binary64 (*.f64 -1 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))))) z))): 0 points increase in error, 0 points decrease in error
(+.f64 t (*.f64 (+.f64 (/.f64 a z) 1) (Rewrite<= associate-*r/_binary64 (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z))))): 0 points increase in error, 0 points decrease in error
(+.f64 t (Rewrite<= distribute-lft1-in_binary64 (+.f64 (*.f64 (/.f64 a z) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z))))): 2 points increase in error, 3 points decrease in error
(+.f64 t (+.f64 (*.f64 (/.f64 a z) (Rewrite=> mul-1-neg_binary64 (neg.f64 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z)))): 0 points increase in error, 0 points decrease in error
(+.f64 t (+.f64 (Rewrite=> distribute-rgt-neg-out_binary64 (neg.f64 (*.f64 (/.f64 a z) (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z)))): 0 points increase in error, 0 points decrease in error
(+.f64 t (+.f64 (neg.f64 (Rewrite<= times-frac_binary64 (/.f64 (*.f64 a (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x)))) (*.f64 z z)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z)))): 39 points increase in error, 3 points decrease in error
(+.f64 t (+.f64 (neg.f64 (/.f64 (*.f64 a (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x)))) (Rewrite<= unpow2_binary64 (pow.f64 z 2)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z)))): 0 points increase in error, 0 points decrease in error
(+.f64 t (+.f64 (Rewrite<= mul-1-neg_binary64 (*.f64 -1 (/.f64 (*.f64 a (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x)))) (pow.f64 z 2)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z)))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-+l+_binary64 (+.f64 (+.f64 t (*.f64 -1 (/.f64 (*.f64 a (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x)))) (pow.f64 z 2)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z)))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 t (Rewrite=> associate-*r/_binary64 (/.f64 (*.f64 -1 (*.f64 a (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))))) (pow.f64 z 2)))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 t (/.f64 (*.f64 -1 (Rewrite=> *-commutative_binary64 (*.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) a))) (pow.f64 z 2))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 t (/.f64 (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 -1 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x)))) a)) (pow.f64 z 2))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 t (/.f64 (*.f64 (Rewrite<= distribute-lft-out--_binary64 (-.f64 (*.f64 -1 (*.f64 y (-.f64 t x))) (*.f64 -1 (*.f64 a (-.f64 t x))))) a) (pow.f64 z 2))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 t (/.f64 (Rewrite<= *-commutative_binary64 (*.f64 a (-.f64 (*.f64 -1 (*.f64 y (-.f64 t x))) (*.f64 -1 (*.f64 a (-.f64 t x)))))) (pow.f64 z 2))) (*.f64 -1 (/.f64 (-.f64 (*.f64 y (-.f64 t x)) (*.f64 a (-.f64 t x))) z))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 t (/.f64 (*.f64 a (-.f64 (*.f64 -1 (*.f64 y (-.f64 t x))) (*.f64 -1 (*.f64 a (-.f64 t x))))) (pow.f64 z 2))) (*.f64 -1 (Rewrite=> div-sub_binary64 (-.f64 (/.f64 (*.f64 y (-.f64 t x)) z) (/.f64 (*.f64 a (-.f64 t x)) z))))): 1 point increase in error, 0 points decrease in error
(+.f64 (+.f64 t (/.f64 (*.f64 a (-.f64 (*.f64 -1 (*.f64 y (-.f64 t x))) (*.f64 -1 (*.f64 a (-.f64 t x))))) (pow.f64 z 2))) (Rewrite<= distribute-lft-out--_binary64 (-.f64 (*.f64 -1 (/.f64 (*.f64 y (-.f64 t x)) z)) (*.f64 -1 (/.f64 (*.f64 a (-.f64 t x)) z))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (+.f64 t (/.f64 (*.f64 a (-.f64 (*.f64 -1 (*.f64 y (-.f64 t x))) (*.f64 -1 (*.f64 a (-.f64 t x))))) (pow.f64 z 2))) (*.f64 -1 (/.f64 (*.f64 y (-.f64 t x)) z))) (*.f64 -1 (/.f64 (*.f64 a (-.f64 t x)) z)))): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 -1 (/.f64 (*.f64 y (-.f64 t x)) z)) (+.f64 t (/.f64 (*.f64 a (-.f64 (*.f64 -1 (*.f64 y (-.f64 t x))) (*.f64 -1 (*.f64 a (-.f64 t x))))) (pow.f64 z 2))))) (*.f64 -1 (/.f64 (*.f64 a (-.f64 t x)) z))): 0 points increase in error, 0 points decrease in error