Simplified 0.2
\[\leadsto \color{blue}{\mathsf{fma}\left(\frac{4}{y}, x - z, 4\right)}
\]
Proof
(fma.f64 (/.f64 4 y) (-.f64 x z) 4): 0 points increase in error, 0 points decrease in error
(fma.f64 (/.f64 4 y) (Rewrite<= unsub-neg_binary64 (+.f64 x (neg.f64 z))) 4): 0 points increase in error, 0 points decrease in error
(Rewrite=> fma-udef_binary64 (+.f64 (*.f64 (/.f64 4 y) (+.f64 x (neg.f64 z))) 4)): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= *-commutative_binary64 (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))) 4): 0 points increase in error, 0 points decrease in error
(Rewrite<= +-commutative_binary64 (+.f64 4 (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y)))): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= metadata-eval (+.f64 3 1)) (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (Rewrite<= metadata-eval (*.f64 4 3/4)) 1) (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (Rewrite<= metadata-eval (/.f64 4 1)) 3/4) 1) (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (/.f64 4 (Rewrite<= *-inverses_binary64 (/.f64 y y))) 3/4) 1) (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (Rewrite<= associate-/l*_binary64 (/.f64 (*.f64 4 y) y)) 3/4) 1) (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))): 2 points increase in error, 0 points decrease in error
(+.f64 (+.f64 (*.f64 (Rewrite<= associate-*l/_binary64 (*.f64 (/.f64 4 y) y)) 3/4) 1) (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))): 12 points increase in error, 2 points decrease in error
(+.f64 (+.f64 (Rewrite<= associate-*r*_binary64 (*.f64 (/.f64 4 y) (*.f64 y 3/4))) 1) (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))): 13 points increase in error, 5 points decrease in error
(+.f64 (Rewrite<= +-commutative_binary64 (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 3/4)))) (*.f64 (+.f64 x (neg.f64 z)) (/.f64 4 y))): 0 points increase in error, 0 points decrease in error
(+.f64 (+.f64 1 (*.f64 (/.f64 4 y) (*.f64 y 3/4))) (Rewrite=> *-commutative_binary64 (*.f64 (/.f64 4 y) (+.f64 x (neg.f64 z))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate-+r+_binary64 (+.f64 1 (+.f64 (*.f64 (/.f64 4 y) (*.f64 y 3/4)) (*.f64 (/.f64 4 y) (+.f64 x (neg.f64 z)))))): 0 points increase in error, 1 point decrease in error
(+.f64 1 (Rewrite<= distribute-lft-in_binary64 (*.f64 (/.f64 4 y) (+.f64 (*.f64 y 3/4) (+.f64 x (neg.f64 z)))))): 3 points increase in error, 1 points decrease in error
(+.f64 1 (*.f64 (/.f64 4 y) (Rewrite<= associate-+l+_binary64 (+.f64 (+.f64 (*.f64 y 3/4) x) (neg.f64 z))))): 0 points increase in error, 0 points decrease in error
(+.f64 1 (*.f64 (/.f64 4 y) (+.f64 (Rewrite<= +-commutative_binary64 (+.f64 x (*.f64 y 3/4))) (neg.f64 z)))): 0 points increase in error, 0 points decrease in error
(+.f64 1 (*.f64 (/.f64 4 y) (Rewrite<= sub-neg_binary64 (-.f64 (+.f64 x (*.f64 y 3/4)) z)))): 0 points increase in error, 0 points decrease in error
(+.f64 1 (Rewrite=> associate-*l/_binary64 (/.f64 (*.f64 4 (-.f64 (+.f64 x (*.f64 y 3/4)) z)) y))): 10 points increase in error, 49 points decrease in error