Simplified 0.0
\[\leadsto \color{blue}{{\left(\mathsf{hypot}\left(a, b\right)\right)}^{4} + \mathsf{fma}\left(4, \mathsf{fma}\left(b, b \cdot \left(a + 3\right), a \cdot a\right) - {a}^{3}, -1\right)}
\]
Proof
(+.f64 (pow.f64 (hypot.f64 a b) 4) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (Rewrite<= hypot-def_binary64 (sqrt.f64 (+.f64 (*.f64 a a) (*.f64 b b)))) 4) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (Rewrite<= unpow1/2_binary64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 1/2)) 4) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (Rewrite<= metadata-eval (/.f64 1 2))) 4) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)) (Rewrite<= metadata-eval (+.f64 3 1))) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= pow-plus_binary64 (*.f64 (pow.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)) 3) (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)))) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 20 points increase in error, 3 points decrease in error
(+.f64 (*.f64 (Rewrite<= cube-unmult_binary64 (*.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)) (*.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)) (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2))))) (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2))) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 14 points increase in error, 4 points decrease in error
(+.f64 (*.f64 (*.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)) (Rewrite<= sqr-pow_binary64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 1))) (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2))) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (*.f64 (*.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)) (Rewrite=> unpow1_binary64 (+.f64 (*.f64 a a) (*.f64 b b)))) (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2))) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= *-commutative_binary64 (*.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)) (*.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)) (+.f64 (*.f64 a a) (*.f64 b b))))) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2)) (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (/.f64 1 2))) (+.f64 (*.f64 a a) (*.f64 b b)))) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 17 points increase in error, 8 points decrease in error
(+.f64 (*.f64 (Rewrite<= sqr-pow_binary64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 1)) (+.f64 (*.f64 a a) (*.f64 b b))) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite=> pow-plus_binary64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (+.f64 1 1))) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) (Rewrite=> metadata-eval 2)) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (+.f64 a 3)) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (-.f64 (fma.f64 b (*.f64 b (Rewrite<= +-commutative_binary64 (+.f64 3 a))) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (-.f64 (Rewrite<= fma-def_binary64 (+.f64 (*.f64 b (*.f64 b (+.f64 3 a))) (*.f64 a a))) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (-.f64 (+.f64 (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 b b) (+.f64 3 a))) (*.f64 a a)) (pow.f64 a 3)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (-.f64 (+.f64 (*.f64 (*.f64 b b) (+.f64 3 a)) (*.f64 a a)) (Rewrite<= cube-unmult_binary64 (*.f64 a (*.f64 a a)))) -1)): 0 points increase in error, 1 point decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (Rewrite=> cancel-sign-sub-inv_binary64 (+.f64 (+.f64 (*.f64 (*.f64 b b) (+.f64 3 a)) (*.f64 a a)) (*.f64 (neg.f64 a) (*.f64 a a)))) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (Rewrite<= associate-+r+_binary64 (+.f64 (*.f64 (*.f64 b b) (+.f64 3 a)) (+.f64 (*.f64 a a) (*.f64 (neg.f64 a) (*.f64 a a))))) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (+.f64 (*.f64 (*.f64 b b) (+.f64 3 a)) (+.f64 (Rewrite<= *-lft-identity_binary64 (*.f64 1 (*.f64 a a))) (*.f64 (neg.f64 a) (*.f64 a a)))) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (+.f64 (*.f64 (*.f64 b b) (+.f64 3 a)) (Rewrite=> distribute-rgt-out_binary64 (*.f64 (*.f64 a a) (+.f64 1 (neg.f64 a))))) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (+.f64 (*.f64 (*.f64 b b) (+.f64 3 a)) (*.f64 (*.f64 a a) (Rewrite<= sub-neg_binary64 (-.f64 1 a)))) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 (*.f64 a a) (-.f64 1 a)) (*.f64 (*.f64 b b) (+.f64 3 a)))) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (fma.f64 4 (+.f64 (*.f64 (*.f64 a a) (-.f64 1 a)) (*.f64 (*.f64 b b) (+.f64 3 a))) (Rewrite<= metadata-eval (neg.f64 1)))): 0 points increase in error, 0 points decrease in error
(+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (Rewrite<= fma-neg_binary64 (-.f64 (*.f64 4 (+.f64 (*.f64 (*.f64 a a) (-.f64 1 a)) (*.f64 (*.f64 b b) (+.f64 3 a)))) 1))): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (pow.f64 (+.f64 (*.f64 a a) (*.f64 b b)) 2) (*.f64 4 (+.f64 (*.f64 (*.f64 a a) (-.f64 1 a)) (*.f64 (*.f64 b b) (+.f64 3 a))))) 1)): 0 points increase in error, 1 point decrease in error