2log (problem 3.3.6)

Percentage Accurate: 23.7% → 99.5%
Time: 9.8s
Alternatives: 8
Speedup: 17.3×

Specification

?
\[N > 1 \land N < 10^{40}\]
\[\begin{array}{l} \\ \log \left(N + 1\right) - \log N \end{array} \]
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
	return log((N + 1.0)) - log(N);
}
! Direct evaluation of log(n + 1) - log(n) in double precision.
real(8) function code(n)
    real(8), intent (in) :: n
    real(8) :: hi, lo
    hi = log(n + 1.0d0)
    lo = log(n)
    code = hi - lo
end function
/** Direct evaluation of Math.log(N + 1) - Math.log(N) in binary64. */
public static double code(double N) {
	final double hi = Math.log(N + 1.0);
	final double lo = Math.log(N);
	return hi - lo;
}
def code(N):
	# Direct evaluation of log(N + 1) - log(N) in binary64.
	hi = math.log(N + 1.0)
	lo = math.log(N)
	return hi - lo
function code(N)
	# Direct evaluation of log(N + 1) - log(N) in Float64.
	hi = log(Float64(N + 1.0))
	return Float64(hi - log(N))
end
function tmp = code(N)
	% Direct evaluation of log(N + 1) - log(N) in binary64.
	hi = log(N + 1.0);
	lo = log(N);
	tmp = hi - lo;
end
(* Direct evaluation of log(N + 1) - log(N), each step rounded to $MachinePrecision. *)
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\log \left(N + 1\right) - \log N
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 23.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \log \left(N + 1\right) - \log N \end{array} \]
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
/* Initial program: direct evaluation of log(N + 1) - log(N) in binary64. */
double code(double N) {
	return log((N + 1.0)) - log(N);
}
! Initial program: direct evaluation of log(n + 1) - log(n) in double precision.
real(8) function code(n)
    real(8), intent (in) :: n
    code = log((n + 1.0d0)) - log(n)
end function
/** Initial program: direct evaluation of Math.log(N + 1) - Math.log(N) in binary64. */
public static double code(double N) {
	return Math.log((N + 1.0)) - Math.log(N);
}
def code(N):
	# Initial program: direct evaluation of log(N + 1) - log(N) in binary64.
	return math.log((N + 1.0)) - math.log(N)
function code(N)
	# Initial program: direct evaluation of log(N + 1) - log(N) in Float64.
	return Float64(log(Float64(N + 1.0)) - log(N))
end
function tmp = code(N)
	% Initial program: direct evaluation of log(N + 1) - log(N) in binary64.
	tmp = log((N + 1.0)) - log(N);
end
(* Initial program: log(N + 1) - log(N), each step rounded to $MachinePrecision. *)
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\log \left(N + 1\right) - \log N
\end{array}

Alternative 1: 99.5% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.001:\\ \;\;\;\;\frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}}\\ \mathbf{else}:\\ \;\;\;\;-\log \left(\frac{N}{N + 1}\right)\\ \end{array} \end{array} \]
(FPCore (N)
 :precision binary64
 (if (<= (- (log (+ N 1.0)) (log N)) 0.001)
   (/
    1.0
    (+
     N
     (/
      (fma N (fma N 0.5 -0.08333333333333333) 0.041666666666666664)
      (* N N))))
   (- (log (/ N (+ N 1.0))))))
double code(double N) {
	double tmp;
	if ((log((N + 1.0)) - log(N)) <= 0.001) {
		tmp = 1.0 / (N + (fma(N, fma(N, 0.5, -0.08333333333333333), 0.041666666666666664) / (N * N)));
	} else {
		tmp = -log((N / (N + 1.0)));
	}
	return tmp;
}
function code(N)
	# Large-N regime: fma-based series; otherwise -log(N / (N + 1)).
	if (Float64(log(Float64(N + 1.0)) - log(N)) <= 0.001)
		series = fma(N, fma(N, 0.5, -0.08333333333333333), 0.041666666666666664)
		return Float64(1.0 / Float64(N + Float64(series / Float64(N * N))))
	else
		return Float64(-log(Float64(N / Float64(N + 1.0))))
	end
end
(* Regime split: fma-shaped series when log(N + 1) - log(N) <= 0.001; otherwise -Log[N / (N + 1)]. *)
code[N_] := If[LessEqual[N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision], 0.001], N[(1.0 / N[(N + N[(N[(N * N[(N * 0.5 + -0.08333333333333333), $MachinePrecision] + 0.041666666666666664), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-N[Log[N[(N / N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision])]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.001:\\
\;\;\;\;\frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}}\\

\mathbf{else}:\\
\;\;\;\;-\log \left(\frac{N}{N + 1}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) < 1e-3

    1. Initial program 16.3%

      \[\log \left(N + 1\right) - \log N \]
    2. Add Preprocessing
    3. Taylor expanded in N around inf

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{\frac{1}{3}}{{N}^{2}}\right) - \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{4} \cdot \frac{1}{{N}^{3}}\right)}{N}} \]
    4. Applied rewrites99.7%

      \[\leadsto \color{blue}{\frac{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}{N}} \]
    5. Step-by-step derivation
      1. lift-/.f64N/A

        \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N}} + \frac{-1}{3}}{N}}{N}}{N} \]
      2. lift-+.f64N/A

        \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}}{N}}{N}}{N} \]
      3. lift-/.f64N/A

        \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \color{blue}{\frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
      4. lift--.f64N/A

        \[\leadsto \frac{1 + \frac{\color{blue}{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
      5. lift-/.f64N/A

        \[\leadsto \frac{1 + \color{blue}{\frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
      6. lift-+.f64N/A

        \[\leadsto \frac{\color{blue}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
      7. clear-numN/A

        \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
      8. lower-/.f64N/A

        \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
      9. lower-/.f6499.7

        \[\leadsto \frac{1}{\color{blue}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
    6. Applied rewrites99.7%

      \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
    7. Taylor expanded in N around inf

      \[\leadsto \frac{1}{\color{blue}{N \cdot \left(\left(1 + \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right)\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    8. Step-by-step derivation
      1. associate--l+N/A

        \[\leadsto \frac{1}{N \cdot \color{blue}{\left(1 + \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)\right)}} \]
      2. distribute-lft-inN/A

        \[\leadsto \frac{1}{\color{blue}{N \cdot 1 + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
      3. *-rgt-identityN/A

        \[\leadsto \frac{1}{\color{blue}{N} + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)} \]
      4. lower-+.f64N/A

        \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
      5. lower-*.f64N/A

        \[\leadsto \frac{1}{N + \color{blue}{N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
      6. sub-negN/A

        \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
      7. lower-+.f64N/A

        \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
    9. Applied rewrites99.8%

      \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{0.5}{N} + \frac{0.041666666666666664}{N \cdot \left(N \cdot N\right)}\right) + \frac{-0.08333333333333333}{N \cdot N}\right)}} \]
    10. Taylor expanded in N around 0

      \[\leadsto \frac{1}{N + \color{blue}{\frac{\frac{1}{24} + N \cdot \left(\frac{1}{2} \cdot N - \frac{1}{12}\right)}{{N}^{2}}}} \]
    11. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \frac{1}{N + \color{blue}{\frac{\frac{1}{24} + N \cdot \left(\frac{1}{2} \cdot N - \frac{1}{12}\right)}{{N}^{2}}}} \]
      2. +-commutativeN/A

        \[\leadsto \frac{1}{N + \frac{\color{blue}{N \cdot \left(\frac{1}{2} \cdot N - \frac{1}{12}\right) + \frac{1}{24}}}{{N}^{2}}} \]
      3. lower-fma.f64N/A

        \[\leadsto \frac{1}{N + \frac{\color{blue}{\mathsf{fma}\left(N, \frac{1}{2} \cdot N - \frac{1}{12}, \frac{1}{24}\right)}}{{N}^{2}}} \]
      4. sub-negN/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \color{blue}{\frac{1}{2} \cdot N + \left(\mathsf{neg}\left(\frac{1}{12}\right)\right)}, \frac{1}{24}\right)}{{N}^{2}}} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \color{blue}{N \cdot \frac{1}{2}} + \left(\mathsf{neg}\left(\frac{1}{12}\right)\right), \frac{1}{24}\right)}{{N}^{2}}} \]
      6. metadata-evalN/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, N \cdot \frac{1}{2} + \color{blue}{\frac{-1}{12}}, \frac{1}{24}\right)}{{N}^{2}}} \]
      7. lower-fma.f64N/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \color{blue}{\mathsf{fma}\left(N, \frac{1}{2}, \frac{-1}{12}\right)}, \frac{1}{24}\right)}{{N}^{2}}} \]
      8. unpow2N/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, \frac{1}{2}, \frac{-1}{12}\right), \frac{1}{24}\right)}{\color{blue}{N \cdot N}}} \]
      9. lower-*.f6499.8

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{\color{blue}{N \cdot N}}} \]
    12. Applied rewrites99.8%

      \[\leadsto \frac{1}{N + \color{blue}{\frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}}} \]

    if 1e-3 < (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N))

    1. Initial program 93.1%

      \[\log \left(N + 1\right) - \log N \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-+.f64N/A

        \[\leadsto \log \color{blue}{\left(N + 1\right)} - \log N \]
      2. diff-logN/A

        \[\leadsto \color{blue}{\log \left(\frac{N + 1}{N}\right)} \]
      3. clear-numN/A

        \[\leadsto \log \color{blue}{\left(\frac{1}{\frac{N}{N + 1}}\right)} \]
      4. log-recN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\log \left(\frac{N}{N + 1}\right)\right)} \]
      5. diff-logN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\left(\log N - \log \left(N + 1\right)\right)}\right) \]
      6. lift-log.f64N/A

        \[\leadsto \mathsf{neg}\left(\left(\color{blue}{\log N} - \log \left(N + 1\right)\right)\right) \]
      7. lift-log.f64N/A

        \[\leadsto \mathsf{neg}\left(\left(\log N - \color{blue}{\log \left(N + 1\right)}\right)\right) \]
      8. lower-neg.f64N/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\left(\log N - \log \left(N + 1\right)\right)\right)} \]
      9. lift-log.f64N/A

        \[\leadsto \mathsf{neg}\left(\left(\color{blue}{\log N} - \log \left(N + 1\right)\right)\right) \]
      10. lift-log.f64N/A

        \[\leadsto \mathsf{neg}\left(\left(\log N - \color{blue}{\log \left(N + 1\right)}\right)\right) \]
      11. diff-logN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\log \left(\frac{N}{N + 1}\right)}\right) \]
      12. lower-log.f64N/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\log \left(\frac{N}{N + 1}\right)}\right) \]
      13. lower-/.f6496.2

        \[\leadsto -\log \color{blue}{\left(\frac{N}{N + 1}\right)} \]
    4. Applied rewrites96.2%

      \[\leadsto \color{blue}{-\log \left(\frac{N}{N + 1}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 2: 99.4% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.001:\\ \;\;\;\;\frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}}\\ \mathbf{else}:\\ \;\;\;\;\log \left(\frac{N + 1}{N}\right)\\ \end{array} \end{array} \]
(FPCore (N)
 :precision binary64
 (if (<= (- (log (+ N 1.0)) (log N)) 0.001)
   (/
    1.0
    (+
     N
     (/
      (fma N (fma N 0.5 -0.08333333333333333) 0.041666666666666664)
      (* N N))))
   (log (/ (+ N 1.0) N))))
double code(double N) {
	double tmp;
	if ((log((N + 1.0)) - log(N)) <= 0.001) {
		tmp = 1.0 / (N + (fma(N, fma(N, 0.5, -0.08333333333333333), 0.041666666666666664) / (N * N)));
	} else {
		tmp = log(((N + 1.0) / N));
	}
	return tmp;
}
function code(N)
	# Large-N regime: fma-based series; otherwise log((N + 1) / N).
	if (Float64(log(Float64(N + 1.0)) - log(N)) <= 0.001)
		series = fma(N, fma(N, 0.5, -0.08333333333333333), 0.041666666666666664)
		return Float64(1.0 / Float64(N + Float64(series / Float64(N * N))))
	else
		return log(Float64(Float64(N + 1.0) / N))
	end
end
(* Regime split: fma-shaped series when log(N + 1) - log(N) <= 0.001; otherwise Log[(N + 1) / N]. *)
code[N_] := If[LessEqual[N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision], 0.001], N[(1.0 / N[(N + N[(N[(N * N[(N * 0.5 + -0.08333333333333333), $MachinePrecision] + 0.041666666666666664), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(N[(N + 1.0), $MachinePrecision] / N), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.001:\\
\;\;\;\;\frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}}\\

\mathbf{else}:\\
\;\;\;\;\log \left(\frac{N + 1}{N}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) < 1e-3

    1. Initial program 16.3%

      \[\log \left(N + 1\right) - \log N \]
    2. Add Preprocessing
    3. Taylor expanded in N around inf

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{\frac{1}{3}}{{N}^{2}}\right) - \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{4} \cdot \frac{1}{{N}^{3}}\right)}{N}} \]
    4. Applied rewrites99.7%

      \[\leadsto \color{blue}{\frac{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}{N}} \]
    5. Step-by-step derivation
      1. lift-/.f64N/A

        \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N}} + \frac{-1}{3}}{N}}{N}}{N} \]
      2. lift-+.f64N/A

        \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}}{N}}{N}}{N} \]
      3. lift-/.f64N/A

        \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \color{blue}{\frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
      4. lift--.f64N/A

        \[\leadsto \frac{1 + \frac{\color{blue}{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
      5. lift-/.f64N/A

        \[\leadsto \frac{1 + \color{blue}{\frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
      6. lift-+.f64N/A

        \[\leadsto \frac{\color{blue}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
      7. clear-numN/A

        \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
      8. lower-/.f64N/A

        \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
      9. lower-/.f6499.7

        \[\leadsto \frac{1}{\color{blue}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
    6. Applied rewrites99.7%

      \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
    7. Taylor expanded in N around inf

      \[\leadsto \frac{1}{\color{blue}{N \cdot \left(\left(1 + \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right)\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    8. Step-by-step derivation
      1. associate--l+N/A

        \[\leadsto \frac{1}{N \cdot \color{blue}{\left(1 + \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)\right)}} \]
      2. distribute-lft-inN/A

        \[\leadsto \frac{1}{\color{blue}{N \cdot 1 + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
      3. *-rgt-identityN/A

        \[\leadsto \frac{1}{\color{blue}{N} + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)} \]
      4. lower-+.f64N/A

        \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
      5. lower-*.f64N/A

        \[\leadsto \frac{1}{N + \color{blue}{N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
      6. sub-negN/A

        \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
      7. lower-+.f64N/A

        \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
    9. Applied rewrites99.8%

      \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{0.5}{N} + \frac{0.041666666666666664}{N \cdot \left(N \cdot N\right)}\right) + \frac{-0.08333333333333333}{N \cdot N}\right)}} \]
    10. Taylor expanded in N around 0

      \[\leadsto \frac{1}{N + \color{blue}{\frac{\frac{1}{24} + N \cdot \left(\frac{1}{2} \cdot N - \frac{1}{12}\right)}{{N}^{2}}}} \]
    11. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \frac{1}{N + \color{blue}{\frac{\frac{1}{24} + N \cdot \left(\frac{1}{2} \cdot N - \frac{1}{12}\right)}{{N}^{2}}}} \]
      2. +-commutativeN/A

        \[\leadsto \frac{1}{N + \frac{\color{blue}{N \cdot \left(\frac{1}{2} \cdot N - \frac{1}{12}\right) + \frac{1}{24}}}{{N}^{2}}} \]
      3. lower-fma.f64N/A

        \[\leadsto \frac{1}{N + \frac{\color{blue}{\mathsf{fma}\left(N, \frac{1}{2} \cdot N - \frac{1}{12}, \frac{1}{24}\right)}}{{N}^{2}}} \]
      4. sub-negN/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \color{blue}{\frac{1}{2} \cdot N + \left(\mathsf{neg}\left(\frac{1}{12}\right)\right)}, \frac{1}{24}\right)}{{N}^{2}}} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \color{blue}{N \cdot \frac{1}{2}} + \left(\mathsf{neg}\left(\frac{1}{12}\right)\right), \frac{1}{24}\right)}{{N}^{2}}} \]
      6. metadata-evalN/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, N \cdot \frac{1}{2} + \color{blue}{\frac{-1}{12}}, \frac{1}{24}\right)}{{N}^{2}}} \]
      7. lower-fma.f64N/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \color{blue}{\mathsf{fma}\left(N, \frac{1}{2}, \frac{-1}{12}\right)}, \frac{1}{24}\right)}{{N}^{2}}} \]
      8. unpow2N/A

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, \frac{1}{2}, \frac{-1}{12}\right), \frac{1}{24}\right)}{\color{blue}{N \cdot N}}} \]
      9. lower-*.f6499.8

        \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{\color{blue}{N \cdot N}}} \]
    12. Applied rewrites99.8%

      \[\leadsto \frac{1}{N + \color{blue}{\frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}}} \]

    if 1e-3 < (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N))

    1. Initial program 93.1%

      \[\log \left(N + 1\right) - \log N \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-+.f64N/A

        \[\leadsto \log \color{blue}{\left(N + 1\right)} - \log N \]
      2. diff-logN/A

        \[\leadsto \color{blue}{\log \left(\frac{N + 1}{N}\right)} \]
      3. lower-log.f64N/A

        \[\leadsto \color{blue}{\log \left(\frac{N + 1}{N}\right)} \]
      4. lower-/.f6495.2

        \[\leadsto \log \color{blue}{\left(\frac{N + 1}{N}\right)} \]
    4. Applied rewrites95.2%

      \[\leadsto \color{blue}{\log \left(\frac{N + 1}{N}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 3: 96.7% accurate, 4.8× speedup?

\[\begin{array}{l} \\ \frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}} \end{array} \]
(FPCore (N)
 :precision binary64
 (/
  1.0
  (+
   N
   (/ (fma N (fma N 0.5 -0.08333333333333333) 0.041666666666666664) (* N N)))))
double code(double N) {
	return 1.0 / (N + (fma(N, fma(N, 0.5, -0.08333333333333333), 0.041666666666666664) / (N * N)));
}
function code(N)
	# fma-based series approximation of log(N + 1) - log(N).
	series = fma(N, fma(N, 0.5, -0.08333333333333333), 0.041666666666666664)
	return Float64(1.0 / Float64(N + Float64(series / Float64(N * N))))
end
(* fma-shaped series approximation of log(N + 1) - log(N), rounded stepwise. *)
code[N_] := N[(1.0 / N[(N + N[(N[(N * N[(N * 0.5 + -0.08333333333333333), $MachinePrecision] + 0.041666666666666664), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}}
\end{array}
Derivation
  1. Initial program 23.5%

    \[\log \left(N + 1\right) - \log N \]
  2. Add Preprocessing
  3. Taylor expanded in N around inf

    \[\leadsto \color{blue}{\frac{\left(1 + \frac{\frac{1}{3}}{{N}^{2}}\right) - \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{4} \cdot \frac{1}{{N}^{3}}\right)}{N}} \]
  4. Applied rewrites94.9%

    \[\leadsto \color{blue}{\frac{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}{N}} \]
  5. Step-by-step derivation
    1. lift-/.f64N/A

      \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N}} + \frac{-1}{3}}{N}}{N}}{N} \]
    2. lift-+.f64N/A

      \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}}{N}}{N}}{N} \]
    3. lift-/.f64N/A

      \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \color{blue}{\frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
    4. lift--.f64N/A

      \[\leadsto \frac{1 + \frac{\color{blue}{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
    5. lift-/.f64N/A

      \[\leadsto \frac{1 + \color{blue}{\frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
    6. lift-+.f64N/A

      \[\leadsto \frac{\color{blue}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
    7. clear-numN/A

      \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
    8. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
    9. lower-/.f6494.9

      \[\leadsto \frac{1}{\color{blue}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
  6. Applied rewrites94.9%

    \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
  7. Taylor expanded in N around inf

    \[\leadsto \frac{1}{\color{blue}{N \cdot \left(\left(1 + \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right)\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
  8. Step-by-step derivation
    1. associate--l+N/A

      \[\leadsto \frac{1}{N \cdot \color{blue}{\left(1 + \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)\right)}} \]
    2. distribute-lft-inN/A

      \[\leadsto \frac{1}{\color{blue}{N \cdot 1 + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    3. *-rgt-identityN/A

      \[\leadsto \frac{1}{\color{blue}{N} + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)} \]
    4. lower-+.f64N/A

      \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    5. lower-*.f64N/A

      \[\leadsto \frac{1}{N + \color{blue}{N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    6. sub-negN/A

      \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
    7. lower-+.f64N/A

      \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
  9. Applied rewrites95.4%

    \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{0.5}{N} + \frac{0.041666666666666664}{N \cdot \left(N \cdot N\right)}\right) + \frac{-0.08333333333333333}{N \cdot N}\right)}} \]
  10. Taylor expanded in N around 0

    \[\leadsto \frac{1}{N + \color{blue}{\frac{\frac{1}{24} + N \cdot \left(\frac{1}{2} \cdot N - \frac{1}{12}\right)}{{N}^{2}}}} \]
  11. Step-by-step derivation
    1. lower-/.f64N/A

      \[\leadsto \frac{1}{N + \color{blue}{\frac{\frac{1}{24} + N \cdot \left(\frac{1}{2} \cdot N - \frac{1}{12}\right)}{{N}^{2}}}} \]
    2. +-commutativeN/A

      \[\leadsto \frac{1}{N + \frac{\color{blue}{N \cdot \left(\frac{1}{2} \cdot N - \frac{1}{12}\right) + \frac{1}{24}}}{{N}^{2}}} \]
    3. lower-fma.f64N/A

      \[\leadsto \frac{1}{N + \frac{\color{blue}{\mathsf{fma}\left(N, \frac{1}{2} \cdot N - \frac{1}{12}, \frac{1}{24}\right)}}{{N}^{2}}} \]
    4. sub-negN/A

      \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \color{blue}{\frac{1}{2} \cdot N + \left(\mathsf{neg}\left(\frac{1}{12}\right)\right)}, \frac{1}{24}\right)}{{N}^{2}}} \]
    5. *-commutativeN/A

      \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \color{blue}{N \cdot \frac{1}{2}} + \left(\mathsf{neg}\left(\frac{1}{12}\right)\right), \frac{1}{24}\right)}{{N}^{2}}} \]
    6. metadata-evalN/A

      \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, N \cdot \frac{1}{2} + \color{blue}{\frac{-1}{12}}, \frac{1}{24}\right)}{{N}^{2}}} \]
    7. lower-fma.f64N/A

      \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \color{blue}{\mathsf{fma}\left(N, \frac{1}{2}, \frac{-1}{12}\right)}, \frac{1}{24}\right)}{{N}^{2}}} \]
    8. unpow2N/A

      \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, \frac{1}{2}, \frac{-1}{12}\right), \frac{1}{24}\right)}{\color{blue}{N \cdot N}}} \]
    9. lower-*.f6495.4

      \[\leadsto \frac{1}{N + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{\color{blue}{N \cdot N}}} \]
  12. Applied rewrites95.4%

    \[\leadsto \frac{1}{N + \color{blue}{\frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}}} \]
  13. Add Preprocessing

Alternative 4: 95.5% accurate, 7.1× speedup?

\[\begin{array}{l} \\ \frac{1}{N + \left(0.5 + \frac{-0.08333333333333333}{N}\right)} \end{array} \]
(FPCore (N)
 :precision binary64
 (/ 1.0 (+ N (+ 0.5 (/ -0.08333333333333333 N)))))
/* Alternative 4: two-term asymptotic approximation 1 / (N + 1/2 - 1/(12 N)). */
double code(double N) {
	double correction = 0.5 + -0.08333333333333333 / N;
	return 1.0 / (N + correction);
}
! Two-term asymptotic approximation: 1 / (n + 1/2 - 1/(12 n)).
real(8) function code(n)
    real(8), intent (in) :: n
    code = 1.0d0 / (n + (0.5d0 + ((-0.08333333333333333d0) / n)))
end function
/** Two-term asymptotic approximation: 1 / (N + 1/2 - 1/(12 N)). */
public static double code(double N) {
	final double correction = 0.5 + -0.08333333333333333 / N;
	return 1.0 / (N + correction);
}
def code(N):
	# Two-term asymptotic approximation: 1 / (N + 1/2 - 1/(12 N)).
	correction = 0.5 + -0.08333333333333333 / N
	return 1.0 / (N + correction)
function code(N)
	# Two-term asymptotic approximation: 1 / (N + 1/2 - 1/(12 N)).
	correction = Float64(0.5 + Float64(-0.08333333333333333 / N))
	return Float64(1.0 / Float64(N + correction))
end
function tmp = code(N)
	% Two-term asymptotic approximation: 1 / (N + 1/2 - 1/(12 N)).
	tmp = 1.0 / (N + (0.5 + (-0.08333333333333333 / N)));
end
(* Two-term asymptotic approximation: 1 / (N + 1/2 - 1/(12 N)). *)
code[N_] := N[(1.0 / N[(N + N[(0.5 + N[(-0.08333333333333333 / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1}{N + \left(0.5 + \frac{-0.08333333333333333}{N}\right)}
\end{array}
Derivation
  1. Initial program 23.5%

    \[\log \left(N + 1\right) - \log N \]
  2. Add Preprocessing
  3. Taylor expanded in N around inf

    \[\leadsto \color{blue}{\frac{\left(1 + \frac{\frac{1}{3}}{{N}^{2}}\right) - \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{4} \cdot \frac{1}{{N}^{3}}\right)}{N}} \]
  4. Applied rewrites94.9%

    \[\leadsto \color{blue}{\frac{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}{N}} \]
  5. Step-by-step derivation
    1. lift-/.f64N/A

      \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N}} + \frac{-1}{3}}{N}}{N}}{N} \]
    2. lift-+.f64N/A

      \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}}{N}}{N}}{N} \]
    3. lift-/.f64N/A

      \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \color{blue}{\frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
    4. lift--.f64N/A

      \[\leadsto \frac{1 + \frac{\color{blue}{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
    5. lift-/.f64N/A

      \[\leadsto \frac{1 + \color{blue}{\frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
    6. lift-+.f64N/A

      \[\leadsto \frac{\color{blue}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
    7. clear-numN/A

      \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
    8. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
    9. lower-/.f6494.9

      \[\leadsto \frac{1}{\color{blue}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
  6. Applied rewrites94.9%

    \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
  7. Taylor expanded in N around inf

    \[\leadsto \frac{1}{\color{blue}{N \cdot \left(\left(1 + \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right)\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
  8. Step-by-step derivation
    1. associate--l+N/A

      \[\leadsto \frac{1}{N \cdot \color{blue}{\left(1 + \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)\right)}} \]
    2. distribute-lft-inN/A

      \[\leadsto \frac{1}{\color{blue}{N \cdot 1 + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    3. *-rgt-identityN/A

      \[\leadsto \frac{1}{\color{blue}{N} + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)} \]
    4. lower-+.f64N/A

      \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    5. lower-*.f64N/A

      \[\leadsto \frac{1}{N + \color{blue}{N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    6. sub-negN/A

      \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
    7. lower-+.f64N/A

      \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
  9. Applied rewrites95.4%

    \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{0.5}{N} + \frac{0.041666666666666664}{N \cdot \left(N \cdot N\right)}\right) + \frac{-0.08333333333333333}{N \cdot N}\right)}} \]
  10. Taylor expanded in N around inf

    \[\leadsto \frac{1}{N + \color{blue}{\left(\frac{1}{2} - \frac{1}{12} \cdot \frac{1}{N}\right)}} \]
  11. Step-by-step derivation
    1. sub-negN/A

      \[\leadsto \frac{1}{N + \color{blue}{\left(\frac{1}{2} + \left(\mathsf{neg}\left(\frac{1}{12} \cdot \frac{1}{N}\right)\right)\right)}} \]
    2. lower-+.f64N/A

      \[\leadsto \frac{1}{N + \color{blue}{\left(\frac{1}{2} + \left(\mathsf{neg}\left(\frac{1}{12} \cdot \frac{1}{N}\right)\right)\right)}} \]
    3. associate-*r/N/A

      \[\leadsto \frac{1}{N + \left(\frac{1}{2} + \left(\mathsf{neg}\left(\color{blue}{\frac{\frac{1}{12} \cdot 1}{N}}\right)\right)\right)} \]
    4. metadata-evalN/A

      \[\leadsto \frac{1}{N + \left(\frac{1}{2} + \left(\mathsf{neg}\left(\frac{\color{blue}{\frac{1}{12}}}{N}\right)\right)\right)} \]
    5. distribute-neg-fracN/A

      \[\leadsto \frac{1}{N + \left(\frac{1}{2} + \color{blue}{\frac{\mathsf{neg}\left(\frac{1}{12}\right)}{N}}\right)} \]
    6. metadata-evalN/A

      \[\leadsto \frac{1}{N + \left(\frac{1}{2} + \frac{\color{blue}{\frac{-1}{12}}}{N}\right)} \]
    7. lower-/.f6494.2

      \[\leadsto \frac{1}{N + \left(0.5 + \color{blue}{\frac{-0.08333333333333333}{N}}\right)} \]
  12. Applied rewrites94.2%

    \[\leadsto \frac{1}{N + \color{blue}{\left(0.5 + \frac{-0.08333333333333333}{N}\right)}} \]
  13. Add Preprocessing

Alternative 5: 93.0% accurate, 13.8× speedup?

\[\begin{array}{l} \\ \frac{1}{N + 0.5} \end{array} \]
(FPCore (N) :precision binary64 (/ 1.0 (+ N 0.5)))
double code(double N) {
	return 1.0 / (N + 0.5);
}
real(8) function code(n)
    real(8), intent (in) :: n
    code = 1.0d0 / (n + 0.5d0)
end function
public static double code(double N) {
	return 1.0 / (N + 0.5);
}
def code(N):
	return 1.0 / (N + 0.5)
function code(N)
	return Float64(1.0 / Float64(N + 0.5))
end
function tmp = code(N)
	tmp = 1.0 / (N + 0.5);
end
code[N_] := N[(1.0 / N[(N + 0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1}{N + 0.5}
\end{array}
Derivation
  1. Initial program 23.5%

    \[\log \left(N + 1\right) - \log N \]
  2. Add Preprocessing
  3. Taylor expanded in N around inf

    \[\leadsto \color{blue}{\frac{\left(1 + \frac{\frac{1}{3}}{{N}^{2}}\right) - \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{4} \cdot \frac{1}{{N}^{3}}\right)}{N}} \]
  4. Applied rewrites 94.9%

    \[\leadsto \color{blue}{\frac{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}{N}} \]
  5. Step-by-step derivation
    1. lift-/.f64 N/A

      \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N}} + \frac{-1}{3}}{N}}{N}}{N} \]
    2. lift-+.f64 N/A

      \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}}{N}}{N}}{N} \]
    3. lift-/.f64 N/A

      \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \color{blue}{\frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
    4. lift--.f64 N/A

      \[\leadsto \frac{1 + \frac{\color{blue}{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
    5. lift-/.f64 N/A

      \[\leadsto \frac{1 + \color{blue}{\frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
    6. lift-+.f64 N/A

      \[\leadsto \frac{\color{blue}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
    7. clear-num N/A

      \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
    8. lower-/.f64 N/A

      \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
    9. lower-/.f64 94.9

      \[\leadsto \frac{1}{\color{blue}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
  6. Applied rewrites 94.9%

    \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
  7. Taylor expanded in N around inf

    \[\leadsto \frac{1}{\color{blue}{N \cdot \left(\left(1 + \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right)\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
  8. Step-by-step derivation
    1. associate--l+ N/A

      \[\leadsto \frac{1}{N \cdot \color{blue}{\left(1 + \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)\right)}} \]
    2. distribute-lft-in N/A

      \[\leadsto \frac{1}{\color{blue}{N \cdot 1 + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    3. *-rgt-identity N/A

      \[\leadsto \frac{1}{\color{blue}{N} + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)} \]
    4. lower-+.f64 N/A

      \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    5. lower-*.f64 N/A

      \[\leadsto \frac{1}{N + \color{blue}{N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    6. sub-neg N/A

      \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
    7. lower-+.f64 N/A

      \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
  9. Applied rewrites 95.4%

    \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{0.5}{N} + \frac{0.041666666666666664}{N \cdot \left(N \cdot N\right)}\right) + \frac{-0.08333333333333333}{N \cdot N}\right)}} \]
  10. Taylor expanded in N around inf

    \[\leadsto \frac{1}{N + \color{blue}{\frac{1}{2}}} \]
  11. Step-by-step derivation
    1. Applied rewrites 92.1%

      \[\leadsto \frac{1}{N + \color{blue}{0.5}} \]
    2. Add Preprocessing

    Alternative 6: 84.5% accurate, 17.3× speedup?

    \[\begin{array}{l} \\ \frac{1}{N} \end{array} \]
    (FPCore (N) :precision binary64 (/ 1.0 N))
    double code(double N) {
    	return 1.0 / N;
    }
    
    real(8) function code(n)
        real(8), intent (in) :: n
        code = 1.0d0 / n
    end function
    
    public static double code(double N) {
    	return 1.0 / N;
    }
    
    def code(N):
    	return 1.0 / N
    
    function code(N)
    	return Float64(1.0 / N)
    end
    
    function tmp = code(N)
    	tmp = 1.0 / N;
    end
    
    code[N_] := N[(1.0 / N), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \frac{1}{N}
    \end{array}
    
    Derivation
    1. Initial program 23.5%

      \[\log \left(N + 1\right) - \log N \]
    2. Add Preprocessing
    3. Taylor expanded in N around inf

      \[\leadsto \color{blue}{\frac{1}{N}} \]
    4. Step-by-step derivation
      1. lower-/.f64 84.6

        \[\leadsto \color{blue}{\frac{1}{N}} \]
    5. Applied rewrites 84.6%

      \[\leadsto \color{blue}{\frac{1}{N}} \]
    6. Add Preprocessing

    Alternative 7: 7.3% accurate, 18.8× speedup?

    \[\begin{array}{l} \\ \left(N \cdot N\right) \cdot 24 \end{array} \]
    (FPCore (N) :precision binary64 (* (* N N) 24.0))
    double code(double N) {
    	return (N * N) * 24.0;
    }
    
    real(8) function code(n)
        real(8), intent (in) :: n
        code = (n * n) * 24.0d0
    end function
    
    public static double code(double N) {
    	return (N * N) * 24.0;
    }
    
    def code(N):
    	return (N * N) * 24.0
    
    function code(N)
    	return Float64(Float64(N * N) * 24.0)
    end
    
    function tmp = code(N)
    	tmp = (N * N) * 24.0;
    end
    
    code[N_] := N[(N[(N * N), $MachinePrecision] * 24.0), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \left(N \cdot N\right) \cdot 24
    \end{array}
    
    Derivation
    1. Initial program 23.5%

      \[\log \left(N + 1\right) - \log N \]
    2. Add Preprocessing
    3. Taylor expanded in N around inf

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{\frac{1}{3}}{{N}^{2}}\right) - \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{4} \cdot \frac{1}{{N}^{3}}\right)}{N}} \]
    4. Applied rewrites 94.9%

      \[\leadsto \color{blue}{\frac{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}{N}} \]
    5. Step-by-step derivation
      1. lift-/.f64 N/A

        \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N}} + \frac{-1}{3}}{N}}{N}}{N} \]
      2. lift-+.f64 N/A

        \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \frac{\color{blue}{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}}{N}}{N}}{N} \]
      3. lift-/.f64 N/A

        \[\leadsto \frac{1 + \frac{\frac{-1}{2} - \color{blue}{\frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
      4. lift--.f64 N/A

        \[\leadsto \frac{1 + \frac{\color{blue}{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}}{N}}{N} \]
      5. lift-/.f64 N/A

        \[\leadsto \frac{1 + \color{blue}{\frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
      6. lift-+.f64 N/A

        \[\leadsto \frac{\color{blue}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}{N} \]
      7. clear-num N/A

        \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
      8. lower-/.f64 N/A

        \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{\frac{-1}{2} - \frac{\frac{\frac{1}{4}}{N} + \frac{-1}{3}}{N}}{N}}}} \]
      9. lower-/.f64 94.9

        \[\leadsto \frac{1}{\color{blue}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
    6. Applied rewrites 94.9%

      \[\leadsto \color{blue}{\frac{1}{\frac{N}{1 + \frac{-0.5 - \frac{\frac{0.25}{N} + -0.3333333333333333}{N}}{N}}}} \]
    7. Taylor expanded in N around inf

      \[\leadsto \frac{1}{\color{blue}{N \cdot \left(\left(1 + \left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right)\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
    8. Step-by-step derivation
      1. associate--l+ N/A

        \[\leadsto \frac{1}{N \cdot \color{blue}{\left(1 + \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)\right)}} \]
      2. distribute-lft-in N/A

        \[\leadsto \frac{1}{\color{blue}{N \cdot 1 + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
      3. *-rgt-identity N/A

        \[\leadsto \frac{1}{\color{blue}{N} + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)} \]
      4. lower-+.f64 N/A

        \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
      5. lower-*.f64 N/A

        \[\leadsto \frac{1}{N + \color{blue}{N \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) - \frac{\frac{1}{12}}{{N}^{2}}\right)}} \]
      6. sub-neg N/A

        \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
      7. lower-+.f64 N/A

        \[\leadsto \frac{1}{N + N \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{N} + \frac{1}{24} \cdot \frac{1}{{N}^{3}}\right) + \left(\mathsf{neg}\left(\frac{\frac{1}{12}}{{N}^{2}}\right)\right)\right)}} \]
    9. Applied rewrites 95.4%

      \[\leadsto \frac{1}{\color{blue}{N + N \cdot \left(\left(\frac{0.5}{N} + \frac{0.041666666666666664}{N \cdot \left(N \cdot N\right)}\right) + \frac{-0.08333333333333333}{N \cdot N}\right)}} \]
    10. Taylor expanded in N around 0

      \[\leadsto \color{blue}{24 \cdot {N}^{2}} \]
    11. Step-by-step derivation
      1. *-commutative N/A

        \[\leadsto \color{blue}{{N}^{2} \cdot 24} \]
      2. lower-*.f64 N/A

        \[\leadsto \color{blue}{{N}^{2} \cdot 24} \]
      3. unpow2 N/A

        \[\leadsto \color{blue}{\left(N \cdot N\right)} \cdot 24 \]
      4. lower-*.f64 7.4

        \[\leadsto \color{blue}{\left(N \cdot N\right)} \cdot 24 \]
    12. Applied rewrites 7.4%

      \[\leadsto \color{blue}{\left(N \cdot N\right) \cdot 24} \]
    13. Add Preprocessing

    Alternative 8: 3.3% accurate, 207.0× speedup?

    \[\begin{array}{l} \\ 0 \end{array} \]
    (FPCore (N) :precision binary64 0.0)
    double code(double N) {
    	return 0.0;
    }
    
    real(8) function code(n)
        real(8), intent (in) :: n
        code = 0.0d0
    end function
    
    public static double code(double N) {
    	return 0.0;
    }
    
    def code(N):
    	return 0.0
    
    function code(N)
    	return 0.0
    end
    
    function tmp = code(N)
    	tmp = 0.0;
    end
    
    code[N_] := 0.0
    
    \begin{array}{l}
    
    \\
    0
    \end{array}
    
    Derivation
    1. Initial program 23.5%

      \[\log \left(N + 1\right) - \log N \]
    2. Add Preprocessing
    3. Applied rewrites 25.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left({\left(\mathsf{log1p}\left(N\right)\right)}^{1.5}, \frac{{\left(\mathsf{log1p}\left(N\right)\right)}^{1.5}}{\mathsf{fma}\left(\log N, \log \left(\mathsf{fma}\left(N, N, N\right)\right), {\left(\mathsf{log1p}\left(N\right)\right)}^{2}\right)}, -\frac{{\log N}^{3}}{\mathsf{fma}\left(\log N, \log \left(\mathsf{fma}\left(N, N, N\right)\right), {\left(\mathsf{log1p}\left(N\right)\right)}^{2}\right)}\right)} \]
    4. Step-by-step derivation
      1. Applied rewrites 25.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left({\log N}^{3}, \frac{1}{-\mathsf{fma}\left(\log N, \log \left(\mathsf{fma}\left(N, N, N\right)\right), {\left(\mathsf{log1p}\left(N\right)\right)}^{2}\right)}, \frac{{\left(\mathsf{log1p}\left(N\right)\right)}^{3}}{\mathsf{fma}\left(\log N, \log \left(\mathsf{fma}\left(N, N, N\right)\right), {\left(\mathsf{log1p}\left(N\right)\right)}^{2}\right)}\right)} \]
      2. Taylor expanded in N around inf

        \[\leadsto \color{blue}{-1 \cdot \frac{{\log \left(\frac{1}{N}\right)}^{3}}{2 \cdot {\log \left(\frac{1}{N}\right)}^{2} + {\log \left(\frac{1}{N}\right)}^{2}} + \frac{{\log \left(\frac{1}{N}\right)}^{3}}{2 \cdot {\log \left(\frac{1}{N}\right)}^{2} + {\log \left(\frac{1}{N}\right)}^{2}}} \]
      3. Step-by-step derivation
        1. distribute-lft1-in N/A

          \[\leadsto \color{blue}{\left(-1 + 1\right) \cdot \frac{{\log \left(\frac{1}{N}\right)}^{3}}{2 \cdot {\log \left(\frac{1}{N}\right)}^{2} + {\log \left(\frac{1}{N}\right)}^{2}}} \]
        2. metadata-eval N/A

          \[\leadsto \color{blue}{0} \cdot \frac{{\log \left(\frac{1}{N}\right)}^{3}}{2 \cdot {\log \left(\frac{1}{N}\right)}^{2} + {\log \left(\frac{1}{N}\right)}^{2}} \]
        3. mul0-lft 3.3

          \[\leadsto \color{blue}{0} \]
      4. Applied rewrites 3.3%

        \[\leadsto \color{blue}{0} \]
      5. Add Preprocessing

      Developer Target 1: 99.8% accurate, 1.8× speedup?

      \[\begin{array}{l} \\ \mathsf{log1p}\left(\frac{1}{N}\right) \end{array} \]
      (FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
      double code(double N) {
      	return log1p((1.0 / N));
      }
      
      public static double code(double N) {
      	return Math.log1p((1.0 / N));
      }
      
      def code(N):
      	return math.log1p((1.0 / N))
      
      function code(N)
      	return log1p(Float64(1.0 / N))
      end
      
      code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \mathsf{log1p}\left(\frac{1}{N}\right)
      \end{array}
      

      Developer Target 2: 26.3% accurate, 1.8× speedup?

      \[\begin{array}{l} \\ \log \left(1 + \frac{1}{N}\right) \end{array} \]
      (FPCore (N) :precision binary64 (log (+ 1.0 (/ 1.0 N))))
      double code(double N) {
      	return log((1.0 + (1.0 / N)));
      }
      
      real(8) function code(n)
          real(8), intent (in) :: n
          code = log((1.0d0 + (1.0d0 / n)))
      end function
      
      public static double code(double N) {
      	return Math.log((1.0 + (1.0 / N)));
      }
      
      def code(N):
      	return math.log((1.0 + (1.0 / N)))
      
      function code(N)
      	return log(Float64(1.0 + Float64(1.0 / N)))
      end
      
      function tmp = code(N)
      	tmp = log((1.0 + (1.0 / N)));
      end
      
      code[N_] := N[Log[N[(1.0 + N[(1.0 / N), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \log \left(1 + \frac{1}{N}\right)
      \end{array}
      

      Developer Target 3: 96.2% accurate, 0.6× speedup?

      \[\begin{array}{l} \\ \left(\left(\frac{1}{N} + \frac{-1}{2 \cdot {N}^{2}}\right) + \frac{1}{3 \cdot {N}^{3}}\right) + \frac{-1}{4 \cdot {N}^{4}} \end{array} \]
      (FPCore (N)
       :precision binary64
       (+
        (+ (+ (/ 1.0 N) (/ -1.0 (* 2.0 (pow N 2.0)))) (/ 1.0 (* 3.0 (pow N 3.0))))
        (/ -1.0 (* 4.0 (pow N 4.0)))))
      double code(double N) {
      	return (((1.0 / N) + (-1.0 / (2.0 * pow(N, 2.0)))) + (1.0 / (3.0 * pow(N, 3.0)))) + (-1.0 / (4.0 * pow(N, 4.0)));
      }
      
      real(8) function code(n)
          real(8), intent (in) :: n
          code = (((1.0d0 / n) + ((-1.0d0) / (2.0d0 * (n ** 2.0d0)))) + (1.0d0 / (3.0d0 * (n ** 3.0d0)))) + ((-1.0d0) / (4.0d0 * (n ** 4.0d0)))
      end function
      
      public static double code(double N) {
      	return (((1.0 / N) + (-1.0 / (2.0 * Math.pow(N, 2.0)))) + (1.0 / (3.0 * Math.pow(N, 3.0)))) + (-1.0 / (4.0 * Math.pow(N, 4.0)));
      }
      
      def code(N):
      	return (((1.0 / N) + (-1.0 / (2.0 * math.pow(N, 2.0)))) + (1.0 / (3.0 * math.pow(N, 3.0)))) + (-1.0 / (4.0 * math.pow(N, 4.0)))
      
      function code(N)
      	return Float64(Float64(Float64(Float64(1.0 / N) + Float64(-1.0 / Float64(2.0 * (N ^ 2.0)))) + Float64(1.0 / Float64(3.0 * (N ^ 3.0)))) + Float64(-1.0 / Float64(4.0 * (N ^ 4.0))))
      end
      
      function tmp = code(N)
      	tmp = (((1.0 / N) + (-1.0 / (2.0 * (N ^ 2.0)))) + (1.0 / (3.0 * (N ^ 3.0)))) + (-1.0 / (4.0 * (N ^ 4.0)));
      end
      
      code[N_] := N[(N[(N[(N[(1.0 / N), $MachinePrecision] + N[(-1.0 / N[(2.0 * N[Power[N, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(3.0 * N[Power[N, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[(4.0 * N[Power[N, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \left(\left(\frac{1}{N} + \frac{-1}{2 \cdot {N}^{2}}\right) + \frac{1}{3 \cdot {N}^{3}}\right) + \frac{-1}{4 \cdot {N}^{4}}
      \end{array}
      

      Reproduce

      ?
      herbie shell --seed 2024214 
      (FPCore (N)
        :name "2log (problem 3.3.6)"
        :precision binary64
        :pre (and (> N 1.0) (< N 1e+40))
      
        :alt
        (! :herbie-platform default (log1p (/ 1 N)))
      
        :alt
        (! :herbie-platform default (log (+ 1 (/ 1 N))))
      
        :alt
        (! :herbie-platform default (+ (/ 1 N) (/ -1 (* 2 (pow N 2))) (/ 1 (* 3 (pow N 3))) (/ -1 (* 4 (pow N 4)))))
      
        (- (log (+ N 1.0)) (log N)))