Simplified (0.7)
\[\leadsto \color{blue}{e^{\log \left(\mathsf{hypot}\left(x.re, x.im\right)\right) \cdot y.re - \tan^{-1}_* \frac{x.im}{x.re} \cdot y.im} \cdot \sin \left(\mathsf{fma}\left(\log \left(\mathsf{hypot}\left(x.re, x.im\right)\right), y.im, y.re \cdot \tan^{-1}_* \frac{x.im}{x.re}\right)\right)}
\]
Proof
(*.f64 (exp.f64 (-.f64 (*.f64 (log.f64 (hypot.f64 x.re x.im)) y.re) (*.f64 (atan2.f64 x.im x.re) y.im))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (-.f64 (*.f64 (log.f64 (Rewrite<= hypot-def_binary64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im))))) y.re) (*.f64 (atan2.f64 x.im x.re) y.im))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 88 points increase in error, 2 points decrease in error
(*.f64 (exp.f64 (-.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 y.re)))) (*.f64 (atan2.f64 x.im x.re) y.im))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (Rewrite=> fma-neg_binary64 (fma.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) (neg.f64 (neg.f64 y.re)) (neg.f64 (*.f64 (atan2.f64 x.im x.re) y.im))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (fma.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) (Rewrite=> remove-double-neg_binary64 y.re) (neg.f64 (*.f64 (atan2.f64 x.im x.re) y.im)))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (Rewrite=> fma-udef_binary64 (+.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re) (neg.f64 (*.f64 (atan2.f64 x.im x.re) y.im))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (+.f64 (Rewrite<= remove-double-neg_binary64 (neg.f64 (neg.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re)))) (neg.f64 (*.f64 (atan2.f64 x.im x.re) y.im)))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (Rewrite<= distribute-neg-in_binary64 (neg.f64 (+.f64 (neg.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re)) (*.f64 (atan2.f64 x.im x.re) y.im))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (neg.f64 (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 (atan2.f64 x.im x.re) y.im) (neg.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re)))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (neg.f64 (Rewrite<= sub-neg_binary64 (-.f64 (*.f64 (atan2.f64 x.im x.re) y.im) (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (Rewrite=> neg-mul-1_binary64 (*.f64 -1 (-.f64 (*.f64 (atan2.f64 x.im x.re) y.im) (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (Rewrite<= neg-mul-1_binary64 (neg.f64 (-.f64 (*.f64 (atan2.f64 x.im x.re) y.im) (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (neg.f64 (Rewrite=> sub-neg_binary64 (+.f64 (*.f64 (atan2.f64 x.im x.re) y.im) (neg.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re)))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (neg.f64 (Rewrite=> +-commutative_binary64 (+.f64 (neg.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re)) (*.f64 (atan2.f64 x.im x.re) y.im))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (Rewrite=> distribute-neg-in_binary64 (+.f64 (neg.f64 (neg.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re))) (neg.f64 (*.f64 (atan2.f64 x.im x.re) y.im))))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (+.f64 (Rewrite=> remove-double-neg_binary64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re)) (neg.f64 (*.f64 (atan2.f64 x.im x.re) y.im)))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (Rewrite<= sub-neg_binary64 (-.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re) (*.f64 (atan2.f64 x.im x.re) y.im)))) (sin.f64 (fma.f64 (log.f64 (hypot.f64 x.re x.im)) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (-.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re) (*.f64 (atan2.f64 x.im x.re) y.im))) (sin.f64 (fma.f64 (log.f64 (Rewrite<= hypot-def_binary64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im))))) y.im (*.f64 y.re (atan2.f64 x.im x.re))))): 85 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (-.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re) (*.f64 (atan2.f64 x.im x.re) y.im))) (sin.f64 (fma.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.im (Rewrite<= *-commutative_binary64 (*.f64 (atan2.f64 x.im x.re) y.re))))): 0 points increase in error, 0 points decrease in error
(*.f64 (exp.f64 (-.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.re) (*.f64 (atan2.f64 x.im x.re) y.im))) (sin.f64 (Rewrite<= fma-def_binary64 (+.f64 (*.f64 (log.f64 (sqrt.f64 (+.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)))) y.im) (*.f64 (atan2.f64 x.im x.re) y.re))))): 0 points increase in error, 0 points decrease in error