Simplified 56.2
\[\leadsto \color{blue}{\sqrt{\left(2 \cdot \left(n \cdot U\right)\right) \cdot \left(t + \frac{\ell}{Om} \cdot \mathsf{fma}\left(\ell, -2, n \cdot \frac{\ell \cdot \left(U* - U\right)}{Om}\right)\right)}}
\]
Proof
(sqrt.f64 (*.f64 (*.f64 2 (*.f64 n U)) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l -2 (*.f64 n (/.f64 (*.f64 l (-.f64 U* U)) Om))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 2 n) U)) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l -2 (*.f64 n (/.f64 (*.f64 l (-.f64 U* U)) Om))))))): 0 points increase in error, 1 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (Rewrite<= metadata-eval (neg.f64 2)) (*.f64 n (/.f64 (*.f64 l (-.f64 U* U)) Om))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (neg.f64 2) (*.f64 n (/.f64 (*.f64 l (Rewrite=> sub-neg_binary64 (+.f64 U* (neg.f64 U)))) Om))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (neg.f64 2) (*.f64 n (/.f64 (*.f64 l (+.f64 (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 U*))) (neg.f64 U))) Om))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (neg.f64 2) (*.f64 n (/.f64 (*.f64 l (Rewrite<= distribute-neg-in_binary64 (neg.f64 (+.f64 (neg.f64 U*) U)))) Om))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (neg.f64 2) (*.f64 n (/.f64 (*.f64 l (neg.f64 (Rewrite<= +-commutative_binary64 (+.f64 U (neg.f64 U*))))) Om))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (neg.f64 2) (*.f64 n (/.f64 (*.f64 l (neg.f64 (Rewrite<= sub-neg_binary64 (-.f64 U U*)))) Om))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (neg.f64 2) (*.f64 n (Rewrite<= associate-*l/_binary64 (*.f64 (/.f64 l Om) (neg.f64 (-.f64 U U*)))))))))): 2 points increase in error, 14 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (neg.f64 2) (*.f64 n (Rewrite=> distribute-rgt-neg-out_binary64 (neg.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)))))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (neg.f64 2) (Rewrite<= distribute-rgt-neg-in_binary64 (neg.f64 (*.f64 n (*.f64 (/.f64 l Om) (-.f64 U U*)))))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (fma.f64 l (neg.f64 2) (neg.f64 (Rewrite<= *-commutative_binary64 (*.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)) n)))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (Rewrite<= fma-neg_binary64 (-.f64 (*.f64 l (neg.f64 2)) (*.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)) n))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (*.f64 (/.f64 l Om) (-.f64 (Rewrite<= *-commutative_binary64 (*.f64 (neg.f64 2) l)) (*.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)) n)))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (Rewrite<= distribute-lft-out--_binary64 (-.f64 (*.f64 (/.f64 l Om) (*.f64 (neg.f64 2) l)) (*.f64 (/.f64 l Om) (*.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)) n))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (Rewrite=> associate-*l/_binary64 (/.f64 (*.f64 l (*.f64 (neg.f64 2) l)) Om)) (*.f64 (/.f64 l Om) (*.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)) n)))))): 20 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (/.f64 (Rewrite<= *-commutative_binary64 (*.f64 (*.f64 (neg.f64 2) l) l)) Om) (*.f64 (/.f64 l Om) (*.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)) n)))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (/.f64 (Rewrite<= associate-*r*_binary64 (*.f64 (neg.f64 2) (*.f64 l l))) Om) (*.f64 (/.f64 l Om) (*.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)) n)))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (Rewrite<= associate-*r/_binary64 (*.f64 (neg.f64 2) (/.f64 (*.f64 l l) Om))) (*.f64 (/.f64 l Om) (*.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)) n)))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (Rewrite<= distribute-lft-neg-in_binary64 (neg.f64 (*.f64 2 (/.f64 (*.f64 l l) Om)))) (*.f64 (/.f64 l Om) (*.f64 (*.f64 (/.f64 l Om) (-.f64 U U*)) n)))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (neg.f64 (*.f64 2 (/.f64 (*.f64 l l) Om))) (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 (/.f64 l Om) (*.f64 (/.f64 l Om) (-.f64 U U*))) n)))))): 4 points increase in error, 3 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (neg.f64 (*.f64 2 (/.f64 (*.f64 l l) Om))) (*.f64 (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 (/.f64 l Om) (/.f64 l Om)) (-.f64 U U*))) n))))): 3 points increase in error, 1 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (neg.f64 (*.f64 2 (/.f64 (*.f64 l l) Om))) (*.f64 (*.f64 (Rewrite<= unpow2_binary64 (pow.f64 (/.f64 l Om) 2)) (-.f64 U U*)) n))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (neg.f64 (*.f64 2 (/.f64 (*.f64 l l) Om))) (Rewrite<= *-commutative_binary64 (*.f64 n (*.f64 (pow.f64 (/.f64 l Om) 2) (-.f64 U U*)))))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (+.f64 t (-.f64 (neg.f64 (*.f64 2 (/.f64 (*.f64 l l) Om))) (Rewrite<= associate-*l*_binary64 (*.f64 (*.f64 n (pow.f64 (/.f64 l Om) 2)) (-.f64 U U*))))))): 2 points increase in error, 4 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (Rewrite<= associate--l+_binary64 (-.f64 (+.f64 t (neg.f64 (*.f64 2 (/.f64 (*.f64 l l) Om)))) (*.f64 (*.f64 n (pow.f64 (/.f64 l Om) 2)) (-.f64 U U*)))))): 0 points increase in error, 0 points decrease in error
(sqrt.f64 (*.f64 (*.f64 (*.f64 2 n) U) (-.f64 (Rewrite<= sub-neg_binary64 (-.f64 t (*.f64 2 (/.f64 (*.f64 l l) Om)))) (*.f64 (*.f64 n (pow.f64 (/.f64 l Om) 2)) (-.f64 U U*))))): 0 points increase in error, 0 points decrease in error