
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
! Naive evaluation of log(n + 1) - log(n).
! Accurate only for small n; the two logarithms cancel for large n.
real(8) function code(n)
implicit none
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
    // Naive form: log(N + 1) - log(N); loses accuracy once N is large.
    final double shifted = Math.log(N + 1.0);
    final double base = Math.log(N);
    return shifted - base;
}
def code(N):
    """Naive form: log(N + 1) - log(N); cancels for large N."""
    shifted = math.log(N + 1.0)
    base = math.log(N)
    return shifted - base
function code(N)
    # Naive form: log(N + 1) - log(N), with each step pinned to Float64.
    bumped = Float64(N + 1.0)
    return Float64(log(bumped) - log(N))
end
function tmp = code(N)
% Naive form: log(N + 1) - log(N); cancels for large N.
  tmp = log((N + 1.0)) - log(N);
end
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
return Math.log((N + 1.0)) - Math.log(N);
}
def code(N): return math.log((N + 1.0)) - math.log(N)
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
(FPCore (N)
:precision binary64
(if (<= N 1150.0)
(log (/ (+ N 1.0) N))
(/
(- (+ 1.0 (/ (+ (/ 0.3333333333333333 N) -0.5) N)) (/ 0.25 (pow N 3.0)))
N)))
double code(double N) {
double tmp;
if (N <= 1150.0) {
tmp = log(((N + 1.0) / N));
} else {
tmp = ((1.0 + (((0.3333333333333333 / N) + -0.5) / N)) - (0.25 / pow(N, 3.0))) / N;
}
return tmp;
}
! Piecewise evaluation of log(n + 1) - log(n).
! For n <= 1150 the quotient form log((n+1)/n) is accurate;
! above that, a truncated series of log(1 + 1/n) avoids cancellation.
real(8) function code(n)
implicit none
real(8), intent (in) :: n
real(8) :: tmp
if (n <= 1150.0d0) then
tmp = log(((n + 1.0d0) / n))
else
tmp = ((1.0d0 + (((0.3333333333333333d0 / n) + (-0.5d0)) / n)) - (0.25d0 / (n ** 3.0d0))) / n
end if
code = tmp
end function
public static double code(double N) {
    // Piecewise evaluation of log(N + 1) - log(N).
    if (N <= 1150.0) {
        // Quotient form is stable at moderate N.
        return Math.log((N + 1.0) / N);
    }
    // Large N: truncated series of log(1 + 1/N).
    final double inner = (0.3333333333333333 / N) + -0.5;
    return ((1.0 + (inner / N)) - (0.25 / Math.pow(N, 3.0))) / N;
}
def code(N):
    """Piecewise log(N+1) - log(N): quotient form for N <= 1150, series above.

    The original line was whitespace-mangled onto one line and was not
    valid Python; this restores the intended structure unchanged.
    """
    tmp = 0
    if N <= 1150.0:
        tmp = math.log((N + 1.0) / N)
    else:
        # Truncated series of log(1 + 1/N) for large N.
        tmp = ((1.0 + (((0.3333333333333333 / N) + -0.5) / N)) - (0.25 / math.pow(N, 3.0))) / N
    return tmp
function code(N)
    # Piecewise log(N+1) - log(N): quotient form for N <= 1150, series above.
    # The original was fused onto one line without statement separators and
    # did not parse; this restores the intended structure unchanged.
    tmp = 0.0
    if N <= 1150.0
        tmp = log(Float64(Float64(N + 1.0) / N))
    else
        tmp = Float64(Float64(Float64(1.0 + Float64(Float64(Float64(0.3333333333333333 / N) + -0.5) / N)) - Float64(0.25 / (N ^ 3.0))) / N)
    end
    return tmp
end
function tmp_2 = code(N) tmp = 0.0; if (N <= 1150.0) tmp = log(((N + 1.0) / N)); else tmp = ((1.0 + (((0.3333333333333333 / N) + -0.5) / N)) - (0.25 / (N ^ 3.0))) / N; end tmp_2 = tmp; end
code[N_] := If[LessEqual[N, 1150.0], N[Log[N[(N[(N + 1.0), $MachinePrecision] / N), $MachinePrecision]], $MachinePrecision], N[(N[(N[(1.0 + N[(N[(N[(0.3333333333333333 / N), $MachinePrecision] + -0.5), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] - N[(0.25 / N[Power[N, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq 1150:\\
\;\;\;\;\log \left(\frac{N + 1}{N}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{\frac{0.3333333333333333}{N} + -0.5}{N}\right) - \frac{0.25}{{N}^{3}}}{N}\\
\end{array}
\end{array}
if N < 1150: Initial program 92.3%
+-commutative92.3%
log1p-define92.5%
Simplified92.5%
add-log-exp92.5%
log1p-expm1-u92.5%
log1p-undefine92.4%
diff-log92.7%
log1p-undefine92.5%
rem-exp-log92.8%
+-commutative92.8%
add-exp-log93.0%
log1p-undefine93.0%
log1p-expm1-u93.0%
add-exp-log94.4%
Applied egg-rr94.4%
if 1150 < N Initial program 20.2%
+-commutative20.2%
log1p-define20.2%
Simplified20.2%
Taylor expanded in N around -inf 99.7%
mul-1-neg99.7%
distribute-neg-frac299.7%
Simplified99.7%
Taylor expanded in N around inf 99.7%
associate--r+99.7%
associate--l+99.7%
associate-*r/99.7%
metadata-eval99.7%
associate-*r/99.7%
metadata-eval99.7%
Simplified99.7%
Taylor expanded in N around inf 99.7%
sub-neg99.7%
associate-*r/99.7%
metadata-eval99.7%
metadata-eval99.7%
Simplified99.7%
Final simplification99.3%
(FPCore (N) :precision binary64 (if (<= N 1150.0) (log (/ (+ N 1.0) N)) (/ (+ 1.0 (/ (+ -0.5 (/ (+ 0.3333333333333333 (/ -0.25 N)) N)) N)) N)))
double code(double N) {
double tmp;
if (N <= 1150.0) {
tmp = log(((N + 1.0) / N));
} else {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)) / N;
}
return tmp;
}
real(8) function code(n)
real(8), intent (in) :: n
real(8) :: tmp
if (n <= 1150.0d0) then
tmp = log(((n + 1.0d0) / n))
else
tmp = (1.0d0 + (((-0.5d0) + ((0.3333333333333333d0 + ((-0.25d0) / n)) / n)) / n)) / n
end if
code = tmp
end function
public static double code(double N) {
double tmp;
if (N <= 1150.0) {
tmp = Math.log(((N + 1.0) / N));
} else {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)) / N;
}
return tmp;
}
def code(N):
    """Piecewise log(N+1) - log(N): quotient form for N <= 1150, Horner series above.

    The original line was whitespace-mangled onto one line and was not
    valid Python; this restores the intended structure unchanged.
    """
    tmp = 0
    if N <= 1150.0:
        tmp = math.log((N + 1.0) / N)
    else:
        # Horner-ordered truncated series of log(1 + 1/N) for large N.
        tmp = (1.0 + ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)) / N
    return tmp
function code(N) tmp = 0.0 if (N <= 1150.0) tmp = log(Float64(Float64(N + 1.0) / N)); else tmp = Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 + Float64(-0.25 / N)) / N)) / N)) / N); end return tmp end
function tmp_2 = code(N) tmp = 0.0; if (N <= 1150.0) tmp = log(((N + 1.0) / N)); else tmp = (1.0 + ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)) / N; end tmp_2 = tmp; end
code[N_] := If[LessEqual[N, 1150.0], N[Log[N[(N[(N + 1.0), $MachinePrecision] / N), $MachinePrecision]], $MachinePrecision], N[(N[(1.0 + N[(N[(-0.5 + N[(N[(0.3333333333333333 + N[(-0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq 1150:\\
\;\;\;\;\log \left(\frac{N + 1}{N}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + \frac{-0.5 + \frac{0.3333333333333333 + \frac{-0.25}{N}}{N}}{N}}{N}\\
\end{array}
\end{array}
if N < 1150: Initial program 92.3%
+-commutative92.3%
log1p-define92.5%
Simplified92.5%
add-log-exp92.5%
log1p-expm1-u92.5%
log1p-undefine92.4%
diff-log92.7%
log1p-undefine92.5%
rem-exp-log92.8%
+-commutative92.8%
add-exp-log93.0%
log1p-undefine93.0%
log1p-expm1-u93.0%
add-exp-log94.4%
Applied egg-rr94.4%
if 1150 < N Initial program 20.2%
+-commutative20.2%
log1p-define20.2%
Simplified20.2%
Taylor expanded in N around -inf 99.7%
mul-1-neg99.7%
distribute-neg-frac299.7%
Simplified99.7%
Taylor expanded in N around -inf 99.7%
Simplified99.7%
Final simplification99.3%
(FPCore (N) :precision binary64 (/ (+ 1.0 (/ (+ -0.5 (/ (+ 0.3333333333333333 (/ -0.25 N)) N)) N)) N))
double code(double N) {
return (1.0 + ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 + (((-0.5d0) + ((0.3333333333333333d0 + ((-0.25d0) / n)) / n)) / n)) / n
end function
public static double code(double N) {
return (1.0 + ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)) / N;
}
def code(N):
    """Horner-ordered truncated series of log(1 + 1/N) for large N."""
    inner = 0.3333333333333333 + (-0.25 / N)
    middle = -0.5 + (inner / N)
    return (1.0 + (middle / N)) / N
function code(N) return Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 + Float64(-0.25 / N)) / N)) / N)) / N) end
function tmp = code(N) tmp = (1.0 + ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)) / N; end
code[N_] := N[(N[(1.0 + N[(N[(-0.5 + N[(N[(0.3333333333333333 + N[(-0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 + \frac{-0.5 + \frac{0.3333333333333333 + \frac{-0.25}{N}}{N}}{N}}{N}
\end{array}
Initial program 26.4%
+-commutative26.4%
log1p-define26.5%
Simplified26.5%
Taylor expanded in N around -inf 95.6%
mul-1-neg95.6%
distribute-neg-frac295.6%
Simplified95.6%
Taylor expanded in N around -inf 95.6%
Simplified95.6%
Final simplification95.6%
(FPCore (N) :precision binary64 (/ (+ 1.0 (/ (+ (/ 0.3333333333333333 N) -0.5) N)) N))
double code(double N) {
return (1.0 + (((0.3333333333333333 / N) + -0.5) / N)) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 + (((0.3333333333333333d0 / n) + (-0.5d0)) / n)) / n
end function
public static double code(double N) {
return (1.0 + (((0.3333333333333333 / N) + -0.5) / N)) / N;
}
def code(N): return (1.0 + (((0.3333333333333333 / N) + -0.5) / N)) / N
function code(N) return Float64(Float64(1.0 + Float64(Float64(Float64(0.3333333333333333 / N) + -0.5) / N)) / N) end
function tmp = code(N) tmp = (1.0 + (((0.3333333333333333 / N) + -0.5) / N)) / N; end
code[N_] := N[(N[(1.0 + N[(N[(N[(0.3333333333333333 / N), $MachinePrecision] + -0.5), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 + \frac{\frac{0.3333333333333333}{N} + -0.5}{N}}{N}
\end{array}
Initial program 26.4%
+-commutative26.4%
log1p-define26.5%
Simplified26.5%
Taylor expanded in N around inf 94.2%
associate--l+94.2%
unpow294.2%
associate-/r*94.2%
metadata-eval94.2%
associate-*r/94.2%
associate-*r/94.2%
metadata-eval94.2%
div-sub94.2%
sub-neg94.2%
metadata-eval94.2%
+-commutative94.2%
associate-*r/94.2%
metadata-eval94.2%
Simplified94.2%
Final simplification94.2%
(FPCore (N) :precision binary64 (/ (- 1.0 (/ 0.5 N)) N))
double code(double N) {
return (1.0 - (0.5 / N)) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 - (0.5d0 / n)) / n
end function
public static double code(double N) {
return (1.0 - (0.5 / N)) / N;
}
def code(N): return (1.0 - (0.5 / N)) / N
function code(N) return Float64(Float64(1.0 - Float64(0.5 / N)) / N) end
function tmp = code(N) tmp = (1.0 - (0.5 / N)) / N; end
code[N_] := N[(N[(1.0 - N[(0.5 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \frac{0.5}{N}}{N}
\end{array}
Initial program 26.4%
+-commutative26.4%
log1p-define26.5%
Simplified26.5%
Taylor expanded in N around inf 91.4%
associate-*r/91.4%
metadata-eval91.4%
Simplified91.4%
Final simplification91.4%
(FPCore (N) :precision binary64 (/ 1.0 N))
double code(double N) {
return 1.0 / N;
}
! Leading-order approximation of log(n + 1) - log(n): simply 1/n.
real(8) function code(n)
implicit none
real(8), intent (in) :: n
code = 1.0d0 / n
end function
public static double code(double N) {
return 1.0 / N;
}
def code(N): return 1.0 / N
function code(N) return Float64(1.0 / N) end
function tmp = code(N) tmp = 1.0 / N; end
code[N_] := N[(1.0 / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N}
\end{array}
Initial program 26.4%
+-commutative26.4%
log1p-define26.5%
Simplified26.5%
Taylor expanded in N around inf 82.3%
Final simplification82.3%
(FPCore (N) :precision binary64 0.0)
double code(double N) {
return 0.0;
}
real(8) function code(n)
real(8), intent (in) :: n
code = 0.0d0
end function
public static double code(double N) {
return 0.0;
}
def code(N): return 0.0
function code(N) return 0.0 end
function tmp = code(N) tmp = 0.0; end
code[N_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 26.4%
+-commutative26.4%
log1p-define26.5%
Simplified26.5%
sub-neg26.5%
+-commutative26.5%
add-sqr-sqrt26.5%
distribute-rgt-neg-in26.5%
fma-define27.3%
Applied egg-rr27.3%
Taylor expanded in N around inf 3.3%
distribute-rgt1-in3.3%
metadata-eval3.3%
mul0-lft3.3%
Simplified3.3%
Final simplification3.3%
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
double code(double N) {
return log1p((1.0 / N));
}
public static double code(double N) {
return Math.log1p((1.0 / N));
}
def code(N):
    """Accurate form: log(N + 1) - log(N) == log1p(1/N)."""
    reciprocal = 1.0 / N
    return math.log1p(reciprocal)
function code(N) return log1p(Float64(1.0 / N)) end
code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
herbie shell --seed 2024053
(FPCore (N)
:name "2log (problem 3.3.6)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+40))
:alt
(log1p (/ 1.0 N))
(- (log (+ N 1.0)) (log N)))