
2log (problem 3.3.6)

(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
return Math.log((N + 1.0)) - Math.log(N);
}
def code(N): return math.log((N + 1.0)) - math.log(N)
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
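For large N in the sampled range (1 < N < 10^40), N + 1.0 is nearly (and beyond 2^53, exactly) equal to N, so log(N + 1.0) and log(N) agree in most of their leading digits and their difference cancels, losing precision; past 2^53 the computed result is exactly 0. This is why the initial program is only 25.1% accurate. A minimal C check of the effect (the driver below is my own illustration, not part of the Herbie output; it compares the original expression against the log1p rewrite that appears as Alternative 1):

#include <math.h>
#include <stdio.h>

int main(void) {
    double N = 1.0e12;                       /* large input from the sampled range */
    double naive  = log(N + 1.0) - log(N);   /* original expression: severe cancellation */
    double stable = log1p(1.0 / N);          /* rewritten expression: no cancellation */
    printf("naive  = %.17g\n", naive);
    printf("stable = %.17g\n", stable);
    printf("relative difference = %.3g\n", fabs(naive - stable) / stable);
    return 0;
}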
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| Alternative 1: log1p(1.0 / N) | 99.8% | |
| Alternative 2: 4-term series in 1/N | 96.4% | |
| Alternative 3: 3-term series in 1/N | 95.0% | |
| Alternative 4: 2-term series in 1/N | 92.6% | |
| Alternative 5: 1.0 / N | 83.6% | |
Initial program (25.1% accurate):
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
return Math.log((N + 1.0)) - Math.log(N);
}
def code(N): return math.log((N + 1.0)) - math.log(N)
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
Alternative 1 (99.8% accurate):
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
double code(double N) {
return log1p((1.0 / N));
}
public static double code(double N) {
return Math.log1p((1.0 / N));
}
def code(N): return math.log1p((1.0 / N))
function code(N) return log1p(Float64(1.0 / N)) end
code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
Derivation:
Initial program 25.1%
diff-log 28.1%
Applied egg-rr 28.1%
*-lft-identity 28.1%
associate-*l/ 27.8%
distribute-lft-in 27.8%
lft-mult-inverse 28.1%
*-rgt-identity 28.1%
log1p-define 99.8%
Simplified 99.8%
Final simplification 99.8%
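The decisive step in this derivation is log1p-define: combining the two logarithms and switching to log1p removes the cancellation, since log1p evaluates log(1 + x) accurately even for tiny x. As a worked identity (my annotation, not part of the report):

\log \left(N + 1\right) - \log N = \log \frac{N + 1}{N} = \log \left(1 + \frac{1}{N}\right) = \mathsf{log1p}\left(\frac{1}{N}\right)

The quotient 1/N is computed to full relative accuracy, so the rewritten program stays accurate over the whole range, matching the jump from 28.1% to 99.8% in the steps above.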
Alternative 2 (96.4% accurate):
(FPCore (N) :precision binary64 (/ (+ 1.0 (/ (- -0.5 (/ (+ -0.3333333333333333 (/ 0.25 N)) N)) N)) N))
double code(double N) {
return (1.0 + ((-0.5 - ((-0.3333333333333333 + (0.25 / N)) / N)) / N)) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 + (((-0.5d0) - (((-0.3333333333333333d0) + (0.25d0 / n)) / n)) / n)) / n
end function
public static double code(double N) {
return (1.0 + ((-0.5 - ((-0.3333333333333333 + (0.25 / N)) / N)) / N)) / N;
}
def code(N): return (1.0 + ((-0.5 - ((-0.3333333333333333 + (0.25 / N)) / N)) / N)) / N
function code(N) return Float64(Float64(1.0 + Float64(Float64(-0.5 - Float64(Float64(-0.3333333333333333 + Float64(0.25 / N)) / N)) / N)) / N) end
function tmp = code(N) tmp = (1.0 + ((-0.5 - ((-0.3333333333333333 + (0.25 / N)) / N)) / N)) / N; end
code[N_] := N[(N[(1.0 + N[(N[(-0.5 - N[(N[(-0.3333333333333333 + N[(0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 + \frac{-0.5 - \frac{-0.3333333333333333 + \frac{0.25}{N}}{N}}{N}}{N}
\end{array}
Derivation:
Initial program 25.1%
diff-log 28.1%
Applied egg-rr 28.1%
*-lft-identity 28.1%
associate-*l/ 27.8%
distribute-lft-in 27.8%
lft-mult-inverse 28.1%
*-rgt-identity 28.1%
log1p-define 99.8%
Simplified 99.8%
add-sqr-sqrt 99.2%
pow2 99.2%
Applied egg-rr 99.2%
unpow2 99.2%
add-sqr-sqrt 99.8%
log1p-undefine 28.1%
flip-+ 28.1%
log-div 28.6%
metadata-eval 28.6%
inv-pow 28.6%
inv-pow 28.6%
pow-prod-up 28.6%
metadata-eval 28.6%
Applied egg-rr 28.6%
sub-neg 28.6%
log1p-define 28.0%
sub-neg 28.0%
distribute-frac-neg2 28.0%
log1p-define 99.7%
distribute-frac-neg2 99.7%
distribute-neg-frac 99.7%
metadata-eval 99.7%
Simplified 99.7%
Taylor expanded in N around inf 96.3%
Simplified 96.4%
Final simplification 96.4%
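The step "Taylor expanded in N around inf" replaces log1p(1/N) with the first four terms of its series at N → ∞, and the nested divisions in the code are that series written in Horner form; the constants -0.5, -0.3333333333333333, and 0.25 are the series coefficients rounded to binary64. As a worked expansion (my annotation, not from the report):

\log \left(1 + \frac{1}{N}\right) = \frac{1}{N} - \frac{1}{2 N^2} + \frac{1}{3 N^3} - \frac{1}{4 N^4} + O\left(\frac{1}{N^5}\right) = \frac{1 + \frac{-\frac{1}{2} - \frac{-\frac{1}{3} + \frac{1}{4 N}}{N}}{N}}{N} + O\left(\frac{1}{N^5}\right)

Dropping log1p avoids a library call at the cost of a truncation error of order 1/N^5, which is why this alternative is slightly less accurate than Alternative 1 but potentially faster.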
Alternative 3 (95.0% accurate):
(FPCore (N) :precision binary64 (/ (+ 1.0 (/ (+ -0.5 (/ 0.3333333333333333 N)) N)) N))
double code(double N) {
return (1.0 + ((-0.5 + (0.3333333333333333 / N)) / N)) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 + (((-0.5d0) + (0.3333333333333333d0 / n)) / n)) / n
end function
public static double code(double N) {
return (1.0 + ((-0.5 + (0.3333333333333333 / N)) / N)) / N;
}
def code(N): return (1.0 + ((-0.5 + (0.3333333333333333 / N)) / N)) / N
function code(N) return Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(0.3333333333333333 / N)) / N)) / N) end
function tmp = code(N) tmp = (1.0 + ((-0.5 + (0.3333333333333333 / N)) / N)) / N; end
code[N_] := N[(N[(1.0 + N[(N[(-0.5 + N[(0.3333333333333333 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 + \frac{-0.5 + \frac{0.3333333333333333}{N}}{N}}{N}
\end{array}
Derivation:
Initial program 25.1%
Taylor expanded in N around inf 94.9%
associate--l+ 95.0%
unpow2 95.0%
associate-/r* 95.0%
metadata-eval 95.0%
associate-*r/ 95.0%
associate-*r/ 95.0%
metadata-eval 95.0%
div-sub 95.0%
sub-neg 95.0%
metadata-eval 95.0%
+-commutative 95.0%
associate-*r/ 95.0%
metadata-eval 95.0%
Simplified 95.0%
Final simplification 95.0%
Alternative 4 (92.6% accurate):
(FPCore (N) :precision binary64 (/ (- 1.0 (/ 0.5 N)) N))
double code(double N) {
return (1.0 - (0.5 / N)) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 - (0.5d0 / n)) / n
end function
public static double code(double N) {
return (1.0 - (0.5 / N)) / N;
}
def code(N): return (1.0 - (0.5 / N)) / N
function code(N) return Float64(Float64(1.0 - Float64(0.5 / N)) / N) end
function tmp = code(N) tmp = (1.0 - (0.5 / N)) / N; end
code[N_] := N[(N[(1.0 - N[(0.5 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \frac{0.5}{N}}{N}
\end{array}
Derivation:
Initial program 25.1%
Taylor expanded in N around inf 92.6%
associate-*r/ 92.6%
metadata-eval 92.6%
Simplified 92.6%
Final simplification 92.6%
Alternative 5 (83.6% accurate):
(FPCore (N) :precision binary64 (/ 1.0 N))
double code(double N) {
return 1.0 / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / n
end function
public static double code(double N) {
return 1.0 / N;
}
def code(N): return 1.0 / N
function code(N) return Float64(1.0 / N) end
function tmp = code(N) tmp = 1.0 / N; end
code[N_] := N[(1.0 / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N}
\end{array}
Derivation:
Initial program 25.1%
Taylor expanded in N around inf 83.6%
Final simplification 83.6%
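Alternatives 3, 4, and 5 are the same series truncated after three, two, and one terms, trading accuracy (95.0%, 92.6%, 83.6%) for fewer operations and no log1p call. A small C sketch that compares all of them against the log1p form at one sample point (the choice of N is mine, not from the report):

#include <math.h>
#include <stdio.h>

int main(void) {
    double N = 1.0e6;
    double reference = log1p(1.0 / N);                                        /* Alternative 1 */
    double alts[] = {
        (1.0 + ((-0.5 - ((-0.3333333333333333 + (0.25 / N)) / N)) / N)) / N, /* Alternative 2 */
        (1.0 + ((-0.5 + (0.3333333333333333 / N)) / N)) / N,                 /* Alternative 3 */
        (1.0 - (0.5 / N)) / N,                                               /* Alternative 4 */
        1.0 / N                                                              /* Alternative 5 */
    };
    for (int i = 0; i < 4; i++)
        printf("Alternative %d relative error: %.3g\n", i + 2, fabs(alts[i] - reference) / reference);
    return 0;
}

Because the truncation error shrinks like a power of 1/N, the shorter truncations become adequate as N grows, which is the accuracy/speed trade-off reflected in the table above.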
Developer target (from the :alt annotation):
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
double code(double N) {
return log1p((1.0 / N));
}
public static double code(double N) {
return Math.log1p((1.0 / N));
}
def code(N): return math.log1p((1.0 / N))
function code(N) return log1p(Float64(1.0 / N)) end
code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
Reproduce:
herbie shell --seed 2024080
(FPCore (N)
:name "2log (problem 3.3.6)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+40))
:alt
(log1p (/ 1.0 N))
(- (log (+ N 1.0)) (log N)))
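To reproduce this result, start the shell with the seed shown and give it the FPCore above; assuming the FPCore has been saved to a file named problem336.fpcore (a name of my choosing), it could also be supplied on standard input:

herbie shell --seed 2024080 < problem336.fpcore

The shell reads FPCore expressions and prints an improved FPCore for each one.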