Simplified 4.9
\[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon \cdot 5, {x}^{4}, \left(\varepsilon \cdot \varepsilon\right) \cdot \left(\left(x \cdot x\right) \cdot \left(\varepsilon \cdot 10\right) + {x}^{3} \cdot 10\right)\right)}
\]
Proof
(fma.f64 (*.f64 eps 5) (pow.f64 x 4) (*.f64 (*.f64 eps eps) (+.f64 (*.f64 (*.f64 x x) (*.f64 eps 10)) (*.f64 (pow.f64 x 3) 10)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (Rewrite<= *-commutative_binary64 (*.f64 5 eps)) (pow.f64 x 4) (*.f64 (*.f64 eps eps) (+.f64 (*.f64 (*.f64 x x) (*.f64 eps 10)) (*.f64 (pow.f64 x 3) 10)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (*.f64 (Rewrite<= metadata-eval (+.f64 4 1)) eps) (pow.f64 x 4) (*.f64 (*.f64 eps eps) (+.f64 (*.f64 (*.f64 x x) (*.f64 eps 10)) (*.f64 (pow.f64 x 3) 10)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (Rewrite<= distribute-lft1-in_binary64 (+.f64 (*.f64 4 eps) eps)) (pow.f64 x 4) (*.f64 (*.f64 eps eps) (+.f64 (*.f64 (*.f64 x x) (*.f64 eps 10)) (*.f64 (pow.f64 x 3) 10)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (Rewrite<= unpow2_binary64 (pow.f64 eps 2)) (+.f64 (*.f64 (*.f64 x x) (*.f64 eps 10)) (*.f64 (pow.f64 x 3) 10)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (Rewrite<= unpow2_binary64 (pow.f64 x 2)) (*.f64 eps 10)) (*.f64 (pow.f64 x 3) 10)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (pow.f64 x 2) (*.f64 eps (Rewrite<= metadata-eval (+.f64 2 8)))) (*.f64 (pow.f64 x 3) 10)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (pow.f64 x 2) (Rewrite=> *-commutative_binary64 (*.f64 (+.f64 2 8) eps))) (*.f64 (pow.f64 x 3) 10)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 (pow.f64 x 2) (+.f64 2 8)) eps)) (*.f64 (pow.f64 x 3) 10)))): 3 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (Rewrite<= distribute-rgt-out_binary64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2)))) eps) (*.f64 (pow.f64 x 3) 10)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (*.f64 (pow.f64 x 3) (Rewrite<= metadata-eval (+.f64 4 6)))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (*.f64 (pow.f64 x 3) (+.f64 4 (Rewrite<= metadata-eval (+.f64 2 4))))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (Rewrite<= distribute-lft-out_binary64 (+.f64 (*.f64 (pow.f64 x 3) 4) (*.f64 (pow.f64 x 3) (+.f64 2 4))))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (+.f64 (Rewrite<= *-commutative_binary64 (*.f64 4 (pow.f64 x 3))) (*.f64 (pow.f64 x 3) (+.f64 2 4)))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (Rewrite=> cube-mult_binary64 (*.f64 x (*.f64 x x))) (+.f64 2 4)))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (*.f64 x (Rewrite<= unpow2_binary64 (pow.f64 x 2))) (+.f64 2 4)))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (+.f64 (*.f64 4 (pow.f64 x 3)) (Rewrite<= associate-*r*_binary64 (*.f64 x (*.f64 (pow.f64 x 2) (+.f64 2 4)))))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 x (Rewrite<= distribute-rgt-out_binary64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))))))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (*.f64 (pow.f64 eps 2) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (+.f64 (*.f64 4 (pow.f64 x 3)) (Rewrite<= *-commutative_binary64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (Rewrite<= distribute-rgt-out_binary64 (+.f64 (*.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) eps) (pow.f64 eps 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (Rewrite<= associate-*r*_binary64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) (*.f64 eps (pow.f64 eps 2)))) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 1 points increase in error, 1 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) (*.f64 eps (Rewrite=> unpow2_binary64 (*.f64 eps eps)))) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))) (Rewrite<= cube-mult_binary64 (pow.f64 eps 3))) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 1 points increase in error, 1 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (Rewrite<= *-commutative_binary64 (*.f64 (pow.f64 eps 3) (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 8 (pow.f64 x 2))))) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (pow.f64 eps 3) (Rewrite=> distribute-rgt-out_binary64 (*.f64 (pow.f64 x 2) (+.f64 2 8)))) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (pow.f64 eps 3) (Rewrite=> *-commutative_binary64 (*.f64 (+.f64 2 8) (pow.f64 x 2)))) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (Rewrite=> associate-*r*_binary64 (*.f64 (*.f64 (pow.f64 eps 3) (+.f64 2 8)) (pow.f64 x 2))) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 1 points increase in error, 2 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (*.f64 (pow.f64 eps 3) (Rewrite=> metadata-eval 10)) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (*.f64 (pow.f64 eps 3) (Rewrite<= metadata-eval (+.f64 6 4))) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (*.f64 (pow.f64 eps 3) (+.f64 (Rewrite<= metadata-eval (+.f64 2 4)) 4)) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (Rewrite<= distribute-lft-out_binary64 (+.f64 (*.f64 (pow.f64 eps 3) (+.f64 2 4)) (*.f64 (pow.f64 eps 3) 4))) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 1 points increase in error, 1 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (Rewrite=> cube-mult_binary64 (*.f64 eps (*.f64 eps eps))) (+.f64 2 4)) (*.f64 (pow.f64 eps 3) 4)) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 1 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (*.f64 eps (Rewrite<= unpow2_binary64 (pow.f64 eps 2))) (+.f64 2 4)) (*.f64 (pow.f64 eps 3) 4)) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (Rewrite<= associate-*r*_binary64 (*.f64 eps (*.f64 (pow.f64 eps 2) (+.f64 2 4)))) (*.f64 (pow.f64 eps 3) 4)) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 1 points increase in error, 2 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 eps (Rewrite<= distribute-rgt-out_binary64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))))) (*.f64 (pow.f64 eps 3) 4)) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (Rewrite<= *-commutative_binary64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps)) (*.f64 (pow.f64 eps 3) 4)) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (Rewrite<= *-commutative_binary64 (*.f64 4 (pow.f64 eps 3)))) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 4 (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (+.f64 (Rewrite=> *-commutative_binary64 (*.f64 (pow.f64 x 3) 4)) (*.f64 (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2))) x)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 (pow.f64 x 3) 4) (Rewrite=> *-commutative_binary64 (*.f64 x (+.f64 (*.f64 2 (pow.f64 x 2)) (*.f64 4 (pow.f64 x 2)))))) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 (pow.f64 x 3) 4) (*.f64 x (Rewrite=> distribute-rgt-out_binary64 (*.f64 (pow.f64 x 2) (+.f64 2 4))))) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 (pow.f64 x 3) 4) (Rewrite=> associate-*r*_binary64 (*.f64 (*.f64 x (pow.f64 x 2)) (+.f64 2 4)))) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 (pow.f64 x 3) 4) (*.f64 (*.f64 x (Rewrite=> unpow2_binary64 (*.f64 x x))) (+.f64 2 4))) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (+.f64 (*.f64 (pow.f64 x 3) 4) (*.f64 (Rewrite<= cube-mult_binary64 (pow.f64 x 3)) (+.f64 2 4))) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (Rewrite=> distribute-lft-out_binary64 (*.f64 (pow.f64 x 3) (+.f64 4 (+.f64 2 4)))) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (*.f64 (pow.f64 x 3) (+.f64 4 (Rewrite=> metadata-eval 6))) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (*.f64 (pow.f64 x 3) (Rewrite=> metadata-eval 10)) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (*.f64 (pow.f64 x 3) (Rewrite<= metadata-eval (+.f64 2 8))) (pow.f64 eps 2)))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (Rewrite<= associate-*r*_binary64 (*.f64 (pow.f64 x 3) (*.f64 (+.f64 2 8) (pow.f64 eps 2)))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (pow.f64 x 3) (Rewrite<= *-commutative_binary64 (*.f64 (pow.f64 eps 2) (+.f64 2 8)))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (*.f64 (pow.f64 x 3) (Rewrite<= distribute-rgt-out_binary64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 8 (pow.f64 eps 2))))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (+.f64 (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2)) (Rewrite<= *-commutative_binary64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 8 (pow.f64 eps 2))) (pow.f64 x 3))))): 0 points increase in error, 0 points decrease in error
(fma.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4) (Rewrite=> +-commutative_binary64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 8 (pow.f64 eps 2))) (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2))))): 0 points increase in error, 0 points decrease in error
(Rewrite<= fma-def_binary64 (+.f64 (*.f64 (+.f64 (*.f64 4 eps) eps) (pow.f64 x 4)) (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 8 (pow.f64 eps 2))) (pow.f64 x 3)) (*.f64 (+.f64 (*.f64 (+.f64 (*.f64 2 (pow.f64 eps 2)) (*.f64 4 (pow.f64 eps 2))) eps) (*.f64 4 (pow.f64 eps 3))) (pow.f64 x 2))))): 0 points increase in error, 1 points decrease in error