Simplified (44.2)
\[\leadsto \color{blue}{\mathsf{log1p}\left(n\right) + \mathsf{fma}\left(n, \mathsf{log1p}\left(n\right) - \log n, -1\right)}
\]
Proof
(+.f64 (log1p.f64 n) (fma.f64 n (-.f64 (log1p.f64 n) (log.f64 n)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite<= log1p-def_binary64 (log.f64 (+.f64 1 n))) (fma.f64 n (-.f64 (log1p.f64 n) (log.f64 n)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (log.f64 (Rewrite<= +-commutative_binary64 (+.f64 n 1))) (fma.f64 n (-.f64 (log1p.f64 n) (log.f64 n)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (log.f64 (+.f64 n 1)) (fma.f64 n (-.f64 (Rewrite<= log1p-def_binary64 (log.f64 (+.f64 1 n))) (log.f64 n)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (log.f64 (+.f64 n 1)) (fma.f64 n (-.f64 (log.f64 (Rewrite<= +-commutative_binary64 (+.f64 n 1))) (log.f64 n)) -1)): 0 points increase in error, 0 points decrease in error
(+.f64 (log.f64 (+.f64 n 1)) (fma.f64 n (-.f64 (log.f64 (+.f64 n 1)) (log.f64 n)) (Rewrite<= metadata-eval (neg.f64 1)))): 0 points increase in error, 0 points decrease in error
(+.f64 (log.f64 (+.f64 n 1)) (Rewrite<= fma-neg_binary64 (-.f64 (*.f64 n (-.f64 (log.f64 (+.f64 n 1)) (log.f64 n))) 1))): 0 points increase in error, 0 points decrease in error
(+.f64 (log.f64 (+.f64 n 1)) (-.f64 (Rewrite<= distribute-rgt-out--_binary64 (-.f64 (*.f64 (log.f64 (+.f64 n 1)) n) (*.f64 (log.f64 n) n))) 1)): 2 points increase in error, 0 points decrease in error
(+.f64 (log.f64 (+.f64 n 1)) (-.f64 (-.f64 (*.f64 (log.f64 (+.f64 n 1)) n) (Rewrite<= *-commutative_binary64 (*.f64 n (log.f64 n)))) 1)): 0 points increase in error, 0 points decrease in error
(+.f64 (log.f64 (+.f64 n 1)) (-.f64 (Rewrite=> cancel-sign-sub-inv_binary64 (+.f64 (*.f64 (log.f64 (+.f64 n 1)) n) (*.f64 (neg.f64 n) (log.f64 n)))) 1)): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (log.f64 (+.f64 n 1)) (+.f64 (*.f64 (log.f64 (+.f64 n 1)) n) (*.f64 (neg.f64 n) (log.f64 n)))) 1)): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite=> +-commutative_binary64 (+.f64 (+.f64 (*.f64 (log.f64 (+.f64 n 1)) n) (*.f64 (neg.f64 n) (log.f64 n))) (log.f64 (+.f64 n 1)))) 1): 0 points increase in error, 0 points decrease in error
(Rewrite=> associate--l+_binary64 (+.f64 (+.f64 (*.f64 (log.f64 (+.f64 n 1)) n) (*.f64 (neg.f64 n) (log.f64 n))) (-.f64 (log.f64 (+.f64 n 1)) 1))): 0 points increase in error, 0 points decrease in error
(+.f64 (Rewrite=> +-commutative_binary64 (+.f64 (*.f64 (neg.f64 n) (log.f64 n)) (*.f64 (log.f64 (+.f64 n 1)) n))) (-.f64 (log.f64 (+.f64 n 1)) 1)): 0 points increase in error, 0 points decrease in error
(Rewrite<= associate--l+_binary64 (-.f64 (+.f64 (+.f64 (*.f64 (neg.f64 n) (log.f64 n)) (*.f64 (log.f64 (+.f64 n 1)) n)) (log.f64 (+.f64 n 1))) 1)): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= associate-+r+_binary64 (+.f64 (*.f64 (neg.f64 n) (log.f64 n)) (+.f64 (*.f64 (log.f64 (+.f64 n 1)) n) (log.f64 (+.f64 n 1))))) 1): 254 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 (neg.f64 n) (log.f64 n)) (+.f64 (Rewrite=> *-commutative_binary64 (*.f64 n (log.f64 (+.f64 n 1)))) (log.f64 (+.f64 n 1)))) 1): 0 points increase in error, 0 points decrease in error
(-.f64 (+.f64 (*.f64 (neg.f64 n) (log.f64 n)) (Rewrite=> distribute-lft1-in_binary64 (*.f64 (+.f64 n 1) (log.f64 (+.f64 n 1))))) 1): 1 point increase in error, 0 points decrease in error
(-.f64 (Rewrite<= +-commutative_binary64 (+.f64 (*.f64 (+.f64 n 1) (log.f64 (+.f64 n 1))) (*.f64 (neg.f64 n) (log.f64 n)))) 1): 0 points increase in error, 0 points decrease in error
(-.f64 (Rewrite<= cancel-sign-sub-inv_binary64 (-.f64 (*.f64 (+.f64 n 1) (log.f64 (+.f64 n 1))) (*.f64 n (log.f64 n)))) 1): 0 points increase in error, 0 points decrease in error