
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
! Computes log(n + 1) - log(n).
! NOTE(review): the difference of two nearly equal logs cancels
! catastrophically for large n; standard Fortran has no log1p, so a
! more accurate form would be log((n + 1.0d0) / n) evaluated carefully,
! or an external log1p(1/n) — confirm target compiler support.
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
/**
 * Computes log(N + 1) - log(N), i.e. log1p(1/N).
 *
 * <p>The naive difference of logarithms suffers catastrophic cancellation
 * for large N; {@code Math.log1p(1.0 / N)} keeps full relative accuracy
 * for all N &gt; 0.
 *
 * @param N positive input value
 * @return log(N + 1) - log(N)
 */
public static double code(double N) {
return Math.log1p(1.0 / N);
}
def code(N):
    """Return log(N + 1) - log(N), computed as log1p(1/N).

    The direct difference of logarithms cancels catastrophically for
    large N (both terms agree in nearly all bits); math.log1p(1/N)
    preserves full relative accuracy for all N > 0.
    """
    return math.log1p(1.0 / N)
# log(N + 1) - log(N); loses relative accuracy for large N (see the
# log1p-based alternatives later in this report).
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
% log(N + 1) - log(N); the subtraction cancels for large N.
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
(* log(N + 1) - log(N) at $MachinePrecision; cancels for large N. *)
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
/* Alternatives listing: the original expression log(N + 1) - log(N),
   kept verbatim as Herbie's input. Cancels catastrophically for large N. */
double code(double N) {
return log((N + 1.0)) - log(N);
}
! Original expression log(n + 1) - log(n), kept verbatim as Herbie's input.
! Cancels catastrophically for large n.
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
/** Original expression log(N + 1) - log(N), kept verbatim; cancels for large N. */
public static double code(double N) {
return Math.log((N + 1.0)) - Math.log(N);
}
# Original expression log(N + 1) - log(N), kept verbatim; cancels for large N.
def code(N): return math.log((N + 1.0)) - math.log(N)
# Original expression log(N + 1) - log(N), kept verbatim; cancels for large N.
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
% Original expression log(N + 1) - log(N), kept verbatim; cancels for large N.
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
(* Original expression log(N + 1) - log(N), kept verbatim; cancels for large N. *)
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
(FPCore (N)
:precision binary64
(let* ((t_0 (log (fma N N N))) (t_1 (log (/ N (+ N 1.0)))))
(if (<= (- (log (+ N 1.0)) (log N)) 0.0015)
(/
1.0
(-
(fma
N
(/
(fma N (fma N -0.5 0.08333333333333333) -0.041666666666666664)
(* N (* N N)))
(- N))))
(*
(/
(* t_0 (* (* t_0 t_1) (/ -1.0 t_0)))
(+ (pow (log1p N) 3.0) (pow (log N) 3.0)))
(fma (log N) t_1 (pow (log1p N) 2.0))))))
/* Piecewise evaluation of log(N + 1) - log(N), machine-generated by Herbie.
 * When the difference itself is tiny (<= 0.0015, i.e. large N), the logs
 * cancel, so the reciprocal of a cubic correction polynomial in N is used
 * instead. Otherwise an algebraic recombination of log terms is evaluated.
 * NOTE(review): generated code — statement order and fma grouping are
 * significant for the reported accuracy; do not simplify by hand. */
double code(double N) {
double t_0 = log(fma(N, N, N)); /* log(N*N + N) = log(N) + log(N + 1) */
double t_1 = log((N / (N + 1.0))); /* log(N) - log(N + 1) */
double tmp;
if ((log((N + 1.0)) - log(N)) <= 0.0015) {
tmp = 1.0 / -fma(N, (fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / (N * (N * N))), -N);
} else {
tmp = ((t_0 * ((t_0 * t_1) * (-1.0 / t_0))) / (pow(log1p(N), 3.0) + pow(log(N), 3.0))) * fma(log(N), t_1, pow(log1p(N), 2.0));
}
return tmp;
}
function code(N) t_0 = log(fma(N, N, N)) t_1 = log(Float64(N / Float64(N + 1.0))) tmp = 0.0 if (Float64(log(Float64(N + 1.0)) - log(N)) <= 0.0015) tmp = Float64(1.0 / Float64(-fma(N, Float64(fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / Float64(N * Float64(N * N))), Float64(-N)))); else tmp = Float64(Float64(Float64(t_0 * Float64(Float64(t_0 * t_1) * Float64(-1.0 / t_0))) / Float64((log1p(N) ^ 3.0) + (log(N) ^ 3.0))) * fma(log(N), t_1, (log1p(N) ^ 2.0))); end return tmp end
code[N_] := Block[{t$95$0 = N[Log[N[(N * N + N), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[Log[N[(N / N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision], 0.0015], N[(1.0 / (-N[(N * N[(N[(N * N[(N * -0.5 + 0.08333333333333333), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + (-N)), $MachinePrecision])), $MachinePrecision], N[(N[(N[(t$95$0 * N[(N[(t$95$0 * t$95$1), $MachinePrecision] * N[(-1.0 / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Power[N[Log[1 + N], $MachinePrecision], 3.0], $MachinePrecision] + N[Power[N[Log[N], $MachinePrecision], 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[Log[N], $MachinePrecision] * t$95$1 + N[Power[N[Log[1 + N], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \log \left(\mathsf{fma}\left(N, N, N\right)\right)\\
t_1 := \log \left(\frac{N}{N + 1}\right)\\
\mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.0015:\\
\;\;\;\;\frac{1}{-\mathsf{fma}\left(N, \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, -0.5, 0.08333333333333333\right), -0.041666666666666664\right)}{N \cdot \left(N \cdot N\right)}, -N\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{t\_0 \cdot \left(\left(t\_0 \cdot t\_1\right) \cdot \frac{-1}{t\_0}\right)}{{\left(\mathsf{log1p}\left(N\right)\right)}^{3} + {\log N}^{3}} \cdot \mathsf{fma}\left(\log N, t\_1, {\left(\mathsf{log1p}\left(N\right)\right)}^{2}\right)\\
\end{array}
\end{array}
if (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) < 0.0015: Initial program 19.5%
Taylor expanded in N around inf
Applied rewrites 99.8%
Applied rewrites 99.8%
Taylor expanded in N around -inf
Applied rewrites 99.9%
Taylor expanded in N around 0
Applied rewrites 99.9%
if 0.0015 < (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)): Initial program 93.4%
Applied rewrites 94.7%
Applied rewrites 96.1%
Final simplification 99.6%
(FPCore (N)
:precision binary64
(if (<= (- (log (+ N 1.0)) (log N)) 0.0015)
(/
1.0
(-
(fma
N
(/
(fma N (fma N -0.5 0.08333333333333333) -0.041666666666666664)
(* N (* N N)))
(- N))))
(- (log (/ (- (* N N) N) (fma N N -1.0))))))
/* Piecewise evaluation of log(N + 1) - log(N), machine-generated by Herbie.
 * Large-N branch (difference <= 0.0015): reciprocal of a cubic correction
 * polynomial built with fma. Otherwise: -log((N^2 - N) / (N^2 - 1)), an
 * algebraically equivalent single-log form that avoids the cancellation.
 * NOTE(review): generated code — fma grouping is deliberate. */
double code(double N) {
double tmp;
if ((log((N + 1.0)) - log(N)) <= 0.0015) {
tmp = 1.0 / -fma(N, (fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / (N * (N * N))), -N);
} else {
tmp = -log((((N * N) - N) / fma(N, N, -1.0)));
}
return tmp;
}
function code(N) tmp = 0.0 if (Float64(log(Float64(N + 1.0)) - log(N)) <= 0.0015) tmp = Float64(1.0 / Float64(-fma(N, Float64(fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / Float64(N * Float64(N * N))), Float64(-N)))); else tmp = Float64(-log(Float64(Float64(Float64(N * N) - N) / fma(N, N, -1.0)))); end return tmp end
code[N_] := If[LessEqual[N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision], 0.0015], N[(1.0 / (-N[(N * N[(N[(N * N[(N * -0.5 + 0.08333333333333333), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + (-N)), $MachinePrecision])), $MachinePrecision], (-N[Log[N[(N[(N[(N * N), $MachinePrecision] - N), $MachinePrecision] / N[(N * N + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision])]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.0015:\\
\;\;\;\;\frac{1}{-\mathsf{fma}\left(N, \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, -0.5, 0.08333333333333333\right), -0.041666666666666664\right)}{N \cdot \left(N \cdot N\right)}, -N\right)}\\
\mathbf{else}:\\
\;\;\;\;-\log \left(\frac{N \cdot N - N}{\mathsf{fma}\left(N, N, -1\right)}\right)\\
\end{array}
\end{array}
if (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) < 0.0015Initial program 19.5%
Taylor expanded in N around inf
Applied rewrites99.8%
Applied rewrites99.8%
Taylor expanded in N around -inf
Applied rewrites99.9%
Taylor expanded in N around 0
Applied rewrites99.9%
if 0.0015 < (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) Initial program 93.4%
lift--.f64N/A
lift-log.f64N/A
lift-log.f64N/A
diff-logN/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
clear-numN/A
log-recN/A
lower-neg.f64N/A
lower-log.f64N/A
distribute-rgt-out--N/A
lower-/.f64N/A
*-lft-identityN/A
lower--.f64N/A
lower-*.f64N/A
metadata-evalN/A
sub-negN/A
lower-fma.f64N/A
metadata-eval96.1
Applied rewrites96.1%
(FPCore (N)
:precision binary64
(if (<= N 940.0)
(- (log (/ N (+ N 1.0))))
(/
1.0
(-
(fma
N
(/
(fma N (fma N -0.5 0.08333333333333333) -0.041666666666666664)
(* N (* N N)))
(- N))))))
double code(double N) {
double tmp;
if (N <= 940.0) {
tmp = -log((N / (N + 1.0)));
} else {
tmp = 1.0 / -fma(N, (fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / (N * (N * N))), -N);
}
return tmp;
}
function code(N) tmp = 0.0 if (N <= 940.0) tmp = Float64(-log(Float64(N / Float64(N + 1.0)))); else tmp = Float64(1.0 / Float64(-fma(N, Float64(fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / Float64(N * Float64(N * N))), Float64(-N)))); end return tmp end
code[N_] := If[LessEqual[N, 940.0], (-N[Log[N[(N / N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), N[(1.0 / (-N[(N * N[(N[(N * N[(N * -0.5 + 0.08333333333333333), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + (-N)), $MachinePrecision])), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq 940:\\
\;\;\;\;-\log \left(\frac{N}{N + 1}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{-\mathsf{fma}\left(N, \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, -0.5, 0.08333333333333333\right), -0.041666666666666664\right)}{N \cdot \left(N \cdot N\right)}, -N\right)}\\
\end{array}
\end{array}
if N < 940Initial program 93.4%
lift--.f64N/A
lift-log.f64N/A
lift-log.f64N/A
diff-logN/A
clear-numN/A
neg-logN/A
diff-logN/A
lift-log.f64N/A
lift-log.f64N/A
lower-neg.f64N/A
lift-log.f64N/A
lift-log.f64N/A
diff-logN/A
lower-log.f64N/A
lower-/.f6495.8
Applied rewrites95.8%
if 940 < N Initial program 19.5%
Taylor expanded in N around inf
Applied rewrites99.8%
Applied rewrites99.8%
Taylor expanded in N around -inf
Applied rewrites99.9%
Taylor expanded in N around 0
Applied rewrites99.9%
(FPCore (N)
:precision binary64
(if (<= N 900.0)
(log (/ (+ N 1.0) N))
(/
1.0
(-
(fma
N
(/
(fma N (fma N -0.5 0.08333333333333333) -0.041666666666666664)
(* N (* N N)))
(- N))))))
/* Approximates log(N + 1) - log(N).
 * Moderate N (<= 900): single log of the ratio (N + 1) / N, which avoids
 * the subtractive cancellation of the original form.
 * Large N: reciprocal of a cubic correction polynomial built with fma. */
double code(double N) {
double tmp;
if (N <= 900.0) {
tmp = log(((N + 1.0) / N));
} else {
tmp = 1.0 / -fma(N, (fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / (N * (N * N))), -N);
}
return tmp;
}
function code(N) tmp = 0.0 if (N <= 900.0) tmp = log(Float64(Float64(N + 1.0) / N)); else tmp = Float64(1.0 / Float64(-fma(N, Float64(fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / Float64(N * Float64(N * N))), Float64(-N)))); end return tmp end
code[N_] := If[LessEqual[N, 900.0], N[Log[N[(N[(N + 1.0), $MachinePrecision] / N), $MachinePrecision]], $MachinePrecision], N[(1.0 / (-N[(N * N[(N[(N * N[(N * -0.5 + 0.08333333333333333), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + (-N)), $MachinePrecision])), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq 900:\\
\;\;\;\;\log \left(\frac{N + 1}{N}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{-\mathsf{fma}\left(N, \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, -0.5, 0.08333333333333333\right), -0.041666666666666664\right)}{N \cdot \left(N \cdot N\right)}, -N\right)}\\
\end{array}
\end{array}
if N < 900Initial program 93.4%
lift--.f64N/A
lift-log.f64N/A
lift-log.f64N/A
diff-logN/A
lower-log.f64N/A
lower-/.f6494.6
Applied rewrites94.6%
if 900 < N Initial program 19.5%
Taylor expanded in N around inf
Applied rewrites99.8%
Applied rewrites99.8%
Taylor expanded in N around -inf
Applied rewrites99.9%
Taylor expanded in N around 0
Applied rewrites99.9%
(FPCore (N)
:precision binary64
(/
1.0
(-
(fma
N
(/
(fma N (fma N -0.5 0.08333333333333333) -0.041666666666666664)
(* N (* N N)))
(- N)))))
double code(double N) {
return 1.0 / -fma(N, (fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / (N * (N * N))), -N);
}
function code(N) return Float64(1.0 / Float64(-fma(N, Float64(fma(N, fma(N, -0.5, 0.08333333333333333), -0.041666666666666664) / Float64(N * Float64(N * N))), Float64(-N)))) end
code[N_] := N[(1.0 / (-N[(N * N[(N[(N * N[(N * -0.5 + 0.08333333333333333), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + (-N)), $MachinePrecision])), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{-\mathsf{fma}\left(N, \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, -0.5, 0.08333333333333333\right), -0.041666666666666664\right)}{N \cdot \left(N \cdot N\right)}, -N\right)}
\end{array}
Initial program 25.0%
Taylor expanded in N around inf
Applied rewrites96.0%
Applied rewrites96.0%
Taylor expanded in N around -inf
Applied rewrites96.4%
Taylor expanded in N around 0
Applied rewrites96.4%
(FPCore (N)
:precision binary64
(/
1.0
(*
N
(+
1.0
(/
(fma N (fma N 0.5 -0.08333333333333333) 0.041666666666666664)
(* N (* N N)))))))
/* Large-N asymptotic form of log(N + 1) - log(N):
 * 1 / (N * (1 + correction)), correction = (N^2/2 - N/12 + 1/24) / N^3. */
double code(double N) {
return 1.0 / (N * (1.0 + (fma(N, fma(N, 0.5, -0.08333333333333333), 0.041666666666666664) / (N * (N * N)))));
}
function code(N) return Float64(1.0 / Float64(N * Float64(1.0 + Float64(fma(N, fma(N, 0.5, -0.08333333333333333), 0.041666666666666664) / Float64(N * Float64(N * N)))))) end
code[N_] := N[(1.0 / N[(N * N[(1.0 + N[(N[(N * N[(N * 0.5 + -0.08333333333333333), $MachinePrecision] + 0.041666666666666664), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N \cdot \left(1 + \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot \left(N \cdot N\right)}\right)}
\end{array}
Initial program 25.0%
Taylor expanded in N around inf
Applied rewrites96.0%
Applied rewrites96.0%
Taylor expanded in N around inf
Applied rewrites96.3%
Taylor expanded in N around 0
Applied rewrites96.3%
(FPCore (N) :precision binary64 (/ 1.0 (/ (fma N (fma N (+ N 0.5) -0.08333333333333333) 0.041666666666666664) (* N N))))
/* Large-N asymptotic form of log(N + 1) - log(N):
 * reciprocal of a cubic fma polynomial divided by N^2. */
double code(double N) {
return 1.0 / (fma(N, fma(N, (N + 0.5), -0.08333333333333333), 0.041666666666666664) / (N * N));
}
function code(N) return Float64(1.0 / Float64(fma(N, fma(N, Float64(N + 0.5), -0.08333333333333333), 0.041666666666666664) / Float64(N * N))) end
code[N_] := N[(1.0 / N[(N[(N * N[(N * N[(N + 0.5), $MachinePrecision] + -0.08333333333333333), $MachinePrecision] + 0.041666666666666664), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, N + 0.5, -0.08333333333333333\right), 0.041666666666666664\right)}{N \cdot N}}
\end{array}
Initial program 25.0%
Taylor expanded in N around inf
Applied rewrites96.0%
Applied rewrites96.0%
Taylor expanded in N around inf
Applied rewrites96.3%
Taylor expanded in N around 0
Applied rewrites96.2%
(FPCore (N) :precision binary64 (/ 1.0 (+ (+ N 0.5) (* N (/ -0.08333333333333333 (* N N))))))
/* Three-term approximation of log(N + 1) - log(N):
 * 1 / (N + 1/2 - 1/(12N)); accurate only for large N. */
double code(double N) {
return 1.0 / ((N + 0.5) + (N * (-0.08333333333333333 / (N * N))));
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / ((n + 0.5d0) + (n * ((-0.08333333333333333d0) / (n * n))))
end function
public static double code(double N) {
return 1.0 / ((N + 0.5) + (N * (-0.08333333333333333 / (N * N))));
}
def code(N):
    # 1 / (N + 1/2 - 1/(12N)): rational approximation of log((N + 1) / N),
    # accurate for large N.
    correction = N * (-0.08333333333333333 / (N * N))
    return 1.0 / ((N + 0.5) + correction)
function code(N) return Float64(1.0 / Float64(Float64(N + 0.5) + Float64(N * Float64(-0.08333333333333333 / Float64(N * N))))) end
function tmp = code(N) tmp = 1.0 / ((N + 0.5) + (N * (-0.08333333333333333 / (N * N)))); end
code[N_] := N[(1.0 / N[(N[(N + 0.5), $MachinePrecision] + N[(N * N[(-0.08333333333333333 / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\left(N + 0.5\right) + N \cdot \frac{-0.08333333333333333}{N \cdot N}}
\end{array}
Initial program 25.0%
Taylor expanded in N around inf
Applied rewrites96.0%
Applied rewrites96.0%
Taylor expanded in N around inf
Applied rewrites95.4%
(FPCore (N) :precision binary64 (/ 1.0 (+ N 0.5)))
/* Crude two-term approximation: log(N + 1) - log(N) ~= 1 / (N + 1/2). */
double code(double N) {
double midpoint = N + 0.5;
return 1.0 / midpoint;
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / (n + 0.5d0)
end function
public static double code(double N) {
return 1.0 / (N + 0.5);
}
# Crude midpoint approximation 1/(N + 0.5) of log((N + 1) / N).
def code(N): return 1.0 / (N + 0.5)
function code(N) return Float64(1.0 / Float64(N + 0.5)) end
function tmp = code(N) tmp = 1.0 / (N + 0.5); end
code[N_] := N[(1.0 / N[(N + 0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N + 0.5}
\end{array}
Initial program 25.0%
Taylor expanded in N around inf
Applied rewrites96.0%
Applied rewrites96.0%
Taylor expanded in N around inf
Applied rewrites92.7%
(FPCore (N) :precision binary64 (/ 1.0 N))
/* Leading-order approximation: log(N + 1) - log(N) ~= 1/N for large N. */
double code(double N) {
return 1.0 / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / n
end function
public static double code(double N) {
return 1.0 / N;
}
# Leading-order approximation 1/N of log((N + 1) / N).
def code(N): return 1.0 / N
function code(N) return Float64(1.0 / N) end
function tmp = code(N) tmp = 1.0 / N; end
code[N_] := N[(1.0 / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N}
\end{array}
Initial program 25.0%
Taylor expanded in N around inf
lower-/.f6483.4
Applied rewrites83.4%
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
/* Exact rewrite: log(N + 1) - log(N) = log1p(1/N); accurate for all N > 0. */
double code(double N) {
return log1p((1.0 / N));
}
public static double code(double N) {
return Math.log1p((1.0 / N));
}
# Exact rewrite log1p(1/N) of log(N + 1) - log(N); accurate for all N > 0.
def code(N): return math.log1p((1.0 / N))
function code(N) return log1p(Float64(1.0 / N)) end
code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
(FPCore (N) :precision binary64 (log (+ 1.0 (/ 1.0 N))))
/* Rewrite as log(1 + 1/N); avoids the two-log subtraction, though the
 * 1 + 1/N addition still rounds for very large N (log1p form is better). */
double code(double N) {
return log((1.0 + (1.0 / N)));
}
real(8) function code(n)
real(8), intent (in) :: n
code = log((1.0d0 + (1.0d0 / n)))
end function
public static double code(double N) {
return Math.log((1.0 + (1.0 / N)));
}
# Rewrite as log(1 + 1/N); avoids the two-log subtraction (log1p is better still).
def code(N): return math.log((1.0 + (1.0 / N)))
function code(N) return log(Float64(1.0 + Float64(1.0 / N))) end
function tmp = code(N) tmp = log((1.0 + (1.0 / N))); end
code[N_] := N[Log[N[(1.0 + N[(1.0 / N), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(1 + \frac{1}{N}\right)
\end{array}
(FPCore (N) :precision binary64 (+ (+ (+ (/ 1.0 N) (/ -1.0 (* 2.0 (pow N 2.0)))) (/ 1.0 (* 3.0 (pow N 3.0)))) (/ -1.0 (* 4.0 (pow N 4.0)))))
double code(double N) {
return (((1.0 / N) + (-1.0 / (2.0 * pow(N, 2.0)))) + (1.0 / (3.0 * pow(N, 3.0)))) + (-1.0 / (4.0 * pow(N, 4.0)));
}
real(8) function code(n)
real(8), intent (in) :: n
code = (((1.0d0 / n) + ((-1.0d0) / (2.0d0 * (n ** 2.0d0)))) + (1.0d0 / (3.0d0 * (n ** 3.0d0)))) + ((-1.0d0) / (4.0d0 * (n ** 4.0d0)))
end function
public static double code(double N) {
return (((1.0 / N) + (-1.0 / (2.0 * Math.pow(N, 2.0)))) + (1.0 / (3.0 * Math.pow(N, 3.0)))) + (-1.0 / (4.0 * Math.pow(N, 4.0)));
}
# Four-term Mercator series 1/N - 1/(2N^2) + 1/(3N^3) - 1/(4N^4) for log(1 + 1/N).
def code(N): return (((1.0 / N) + (-1.0 / (2.0 * math.pow(N, 2.0)))) + (1.0 / (3.0 * math.pow(N, 3.0)))) + (-1.0 / (4.0 * math.pow(N, 4.0)))
function code(N) return Float64(Float64(Float64(Float64(1.0 / N) + Float64(-1.0 / Float64(2.0 * (N ^ 2.0)))) + Float64(1.0 / Float64(3.0 * (N ^ 3.0)))) + Float64(-1.0 / Float64(4.0 * (N ^ 4.0)))) end
function tmp = code(N) tmp = (((1.0 / N) + (-1.0 / (2.0 * (N ^ 2.0)))) + (1.0 / (3.0 * (N ^ 3.0)))) + (-1.0 / (4.0 * (N ^ 4.0))); end
code[N_] := N[(N[(N[(N[(1.0 / N), $MachinePrecision] + N[(-1.0 / N[(2.0 * N[Power[N, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(3.0 * N[Power[N, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[(4.0 * N[Power[N, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\frac{1}{N} + \frac{-1}{2 \cdot {N}^{2}}\right) + \frac{1}{3 \cdot {N}^{3}}\right) + \frac{-1}{4 \cdot {N}^{4}}
\end{array}
herbie shell --seed 2024222
(FPCore (N)
:name "2log (problem 3.3.6)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+40))
:alt
(! :herbie-platform default (log1p (/ 1 N)))
:alt
(! :herbie-platform default (log (+ 1 (/ 1 N))))
:alt
(! :herbie-platform default (+ (/ 1 N) (/ -1 (* 2 (pow N 2))) (/ 1 (* 3 (pow N 3))) (/ -1 (* 4 (pow N 4)))))
(- (log (+ N 1.0)) (log N)))