Simplified 0.0
\[\leadsto \color{blue}{\mathsf{fma}\left(4, \frac{x - z}{y}, 2\right)}
\]
Proof
(fma.f64 4 (/.f64 (-.f64 x z) y) 2): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (/.f64 (Rewrite<= unsub-neg_binary64 (+.f64 x (neg.f64 z))) y) 2): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (/.f64 (Rewrite=> +-commutative_binary64 (+.f64 (neg.f64 z) x)) y) 2): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (/.f64 (+.f64 (Rewrite=> neg-sub0_binary64 (-.f64 0 z)) x) y) 2): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (/.f64 (Rewrite=> associate-+l-_binary64 (-.f64 0 (-.f64 z x))) y) 2): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (/.f64 (Rewrite<= neg-sub0_binary64 (neg.f64 (-.f64 z x))) y) 2): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (Rewrite<= distribute-neg-frac_binary64 (neg.f64 (/.f64 (-.f64 z x) y))) 2): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (neg.f64 (/.f64 (-.f64 z x) y)) (Rewrite<= metadata-eval (+.f64 1 1))): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (neg.f64 (/.f64 (-.f64 z x) y)) (+.f64 1 (Rewrite<= lft-mult-inverse_binary64 (*.f64 (/.f64 1 y) y)))): 2 points increase in error, 0 points decrease in error
(fma.f64 4 (neg.f64 (/.f64 (-.f64 z x) y)) (+.f64 1 (*.f64 (/.f64 (Rewrite<= metadata-eval (*.f64 4 1/4)) y) y))): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (neg.f64 (/.f64 (-.f64 z x) y)) (+.f64 1 (*.f64 (Rewrite<= associate-*l/_binary64 (*.f64 (/.f64 4 y) 1/4)) y))): 0 points increase in error, 0 points decrease in error
(fma.f64 4 (neg.f64 (/.f64 (-.f64 z x) y)) (+.f64 1 (Rewrite=> associate-*l*_binary64 (*.f64 (/.f64 4 y) (*.f64 1/4 y))))): 0 points increase in error, 2 points decrease in error
(fma.f64 4 (neg.f64 (/.f64 (-.f64 z x) y)) (+.f64 1 (*.f64 (/.f64 4 y) (Rewrite<= *-commutative_binary64 (*.f64 y 1/4))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= fma-def_binary64 (+.f64 (*.f64 4 (neg.f64 (/.f64 (-.f64 z x) y))) (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4))))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 4 (Rewrite=> distribute-neg-frac_binary64 (/.f64 (neg.f64 (-.f64 z x)) y))) (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4)))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 4 (/.f64 (Rewrite=> neg-sub0_binary64 (-.f64 0 (-.f64 z x))) y)) (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4)))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 4 (/.f64 (Rewrite<= associate-+l-_binary64 (+.f64 (-.f64 0 z) x)) y)) (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4)))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 4 (/.f64 (+.f64 (Rewrite<= neg-sub0_binary64 (neg.f64 z)) x) y)) (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4)))): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 4 (/.f64 (Rewrite<= +-commutative_binary64 (+.f64 x (neg.f64 z))) y)) (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4)))): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite=> associate-*r/_binary64 (/.f64 (*.f64 4 (+.f64 x (neg.f64 z))) y)) (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4)))): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= associate-*l/_binary64 (*.f64 (/.f64 4 y) (+.f64 x (neg.f64 z)))) (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4)))): 36 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= *-commutative_binary64 (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))) (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4)))): 0 points increase in error, 0 points decrease in error
(Rewrite<= +-commutative_binary64 (+.f64 (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4))) (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y)))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 1/4))) (Rewrite=> *-commutative_binary64 (*.f64 (/.f64 4 y) (+.f64 x (neg.f64 z))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-+r+_binary64 (+.f64 1 (+.f64 (*.f64 (/.f64 4 y) (*.f64 y 1/4)) (*.f64 (/.f64 4 y) (+.f64 x (neg.f64 z)))))): 1 points increase in error, 1 points decrease in error
(+.f64 1 (Rewrite<= distribute-lft-in_binary64 (*.f64 (/.f64 4 y) (+.f64 (*.f64 y 1/4) (+.f64 x (neg.f64 z)))))): 5 points increase in error, 1 points decrease in error
(+.f64 1 (*.f64 (/.f64 4 y) (Rewrite<= associate-+l+_binary64 (+.f64 (+.f64 (*.f64 y 1/4) x) (neg.f64 z))))): 0 points increase in error, 0 points decrease in error
(+.f64 1 (*.f64 (/.f64 4 y) (+.f64 (Rewrite<= +-commutative_binary64 (+.f64 x (*.f64 y 1/4))) (neg.f64 z)))): 0 points increase in error, 0 points decrease in error
(+.f64 1 (*.f64 (/.f64 4 y) (Rewrite<= sub-neg_binary64 (-.f64 (+.f64 x (*.f64 y 1/4)) z)))): 0 points increase in error, 0 points decrease in error
(+.f64 1 (Rewrite=> associate-*l/_binary64 (/.f64 (*.f64 4 (-.f64 (+.f64 x (*.f64 y 1/4)) z)) y))): 4 points increase in error, 38 points decrease in error