Simplified 3.7
\[\leadsto \color{blue}{\frac{\frac{\mathsf{fma}\left(x \cdot 9, y, b\right)}{z} + t \cdot \left(a \cdot -4\right)}{c}}
\]
Proof
(/.f64 (+.f64 (/.f64 (fma.f64 (*.f64 x 9) y b) z) (*.f64 t (*.f64 a -4))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (/.f64 (Rewrite<= fma-def_binary64 (+.f64 (*.f64 (*.f64 x 9) y) b)) z) (*.f64 t (*.f64 a -4))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (/.f64 (Rewrite<= +-commutative_binary64 (+.f64 b (*.f64 (*.f64 x 9) y))) z) (*.f64 t (*.f64 a -4))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (*.f64 t (*.f64 a (Rewrite<= metadata-eval (neg.f64 4))))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (*.f64 t (Rewrite<= distribute-rgt-neg-in_binary64 (neg.f64 (*.f64 a 4))))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (Rewrite=> distribute-rgt-neg-out_binary64 (neg.f64 (*.f64 t (*.f64 a 4))))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (+.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (Rewrite<= distribute-lft-neg-out_binary64 (*.f64 (neg.f64 t) (*.f64 a 4)))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (Rewrite<= cancel-sign-sub-inv_binary64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (*.f64 t (*.f64 a 4)))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 t a) 4))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (Rewrite<= *-commutative_binary64 (*.f64 4 (*.f64 t a)))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (*.f64 (Rewrite<= metadata-eval (/.f64 4 1)) (*.f64 t a))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (*.f64 (/.f64 4 (Rewrite<= *-inverses_binary64 (/.f64 z z))) (*.f64 t a))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (*.f64 (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 4 z) z)) (*.f64 t a))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (*.f64 (/.f64 (Rewrite<= *-commutative_binary64 (*.f64 z 4)) z) (*.f64 t a))) c): 0 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (Rewrite<= associate-/r/_binary64 (/.f64 (*.f64 z 4) (/.f64 z (*.f64 t a))))) c): 18 points increase in error, 0 points decrease in error
(/.f64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 (*.f64 z 4) (*.f64 t a)) z))) c): 18 points increase in error, 17 points decrease in error
(/.f64 (-.f64 (/.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) z) (/.f64 (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 (*.f64 z 4) t) a)) z)) c): 26 points increase in error, 5 points decrease in error
(/.f64 (Rewrite<= div-sub_binary64 (/.f64 (-.f64 (+.f64 b (*.f64 (*.f64 x 9) y)) (*.f64 (*.f64 (*.f64 z 4) t) a)) z)) c): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 (Rewrite<= associate-+r-_binary64 (+.f64 b (-.f64 (*.f64 (*.f64 x 9) y) (*.f64 (*.f64 (*.f64 z 4) t) a)))) z) c): 0 points increase in error, 0 points decrease in error
(/.f64 (/.f64 (Rewrite<= +-commutative_binary64 (+.f64 (-.f64 (*.f64 (*.f64 x 9) y) (*.f64 (*.f64 (*.f64 z 4) t) a)) b)) z) c): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-/r*_binary64 (/.f64 (+.f64 (-.f64 (*.f64 (*.f64 x 9) y) (*.f64 (*.f64 (*.f64 z 4) t) a)) b) (*.f64 z c))): 51 points increase in error, 38 points decrease in error