
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
real(8) function code(n)
! Baseline: log(n + 1) - log(n) in double precision.
! Numerically poor for large n (catastrophic cancellation); the
! log1p(1/n) rewrite later in this report is the accurate form.
implicit none
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
/** Returns log(N + 1) - log(N), the baseline (cancellation-prone) form. */
public static double code(double N) {
    final double logNext = Math.log(N + 1.0);
    final double logCurr = Math.log(N);
    return logNext - logCurr;
}
def code(N):
    """Return log(N + 1) - log(N), the baseline (cancellation-prone) form."""
    return math.log(N + 1.0) - math.log(N)
# log(N + 1) - log(N), each step evaluated in Float64.
function code(N)
    hi = log(Float64(N + 1.0))
    lo = log(N)
    return Float64(hi - lo)
end
% Baseline: log(N + 1) - log(N) in double precision.
function tmp = code(N)
    tmp = log(N + 1.0) - log(N);
end
(* Baseline: Log[N + 1] - Log[N], with every intermediate rounded to
   $MachinePrecision.
   NOTE(review): the pattern variable N shadows Mathematica's built-in N[]
   used here for rounding — confirm this Herbie-generated definition
   evaluates as intended. *)
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
real(8) function code(n)
! Baseline: log(n + 1) - log(n) in double precision.
! Numerically poor for large n (catastrophic cancellation); the
! log1p(1/n) rewrite later in this report is the accurate form.
implicit none
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
return Math.log((N + 1.0)) - Math.log(N);
}
def code(N): return math.log((N + 1.0)) - math.log(N)
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
double code(double N) {
return log1p((1.0 / N));
}
/** Accurate rewrite: log(N + 1) - log(N) == log1p(1/N). */
public static double code(double N) {
    final double recip = 1.0 / N;
    return Math.log1p(recip);
}
def code(N):
    """Accurate rewrite of log(N + 1) - log(N) as log1p(1/N)."""
    return math.log1p(1.0 / N)
# Accurate rewrite: log(N + 1) - log(N) == log1p(1/N).
function code(N)
    recip = Float64(1.0 / N)
    return log1p(recip)
end
(* Accurate rewrite: Log[1 + 1/N] (log1p form), with the division rounded
   to $MachinePrecision.
   NOTE(review): the pattern variable N shadows the built-in N[] used for
   rounding — confirm intended evaluation. *)
code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
Initial program 24.7%
diff-log 27.7%
Applied egg-rr 27.7%
*-lft-identity 27.7%
associate-*l/ 27.5%
distribute-lft-in 27.6%
*-rgt-identity 27.6%
lft-mult-inverse 27.7%
log1p-define 99.9%
Simplified 99.9%
Final simplification 99.9%
(FPCore (N) :precision binary64 (- (/ 1.0 N) (/ (/ (+ 0.5 (/ (- (/ 0.25 N) 0.3333333333333333) N)) N) N)))
double code(double N) {
return (1.0 / N) - (((0.5 + (((0.25 / N) - 0.3333333333333333) / N)) / N) / N);
}
real(8) function code(n)
! Truncated asymptotic series for log(1 + 1/n) about n = inf:
! 1/n - 1/(2 n^2) + 1/(3 n^3) - 1/(4 n^4), evaluated by nested division.
implicit none
real(8), intent (in) :: n
code = (1.0d0 / n) - (((0.5d0 + (((0.25d0 / n) - 0.3333333333333333d0) / n)) / n) / n)
end function
public static double code(double N) {
return (1.0 / N) - (((0.5 + (((0.25 / N) - 0.3333333333333333) / N)) / N) / N);
}
def code(N): return (1.0 / N) - (((0.5 + (((0.25 / N) - 0.3333333333333333) / N)) / N) / N)
function code(N) return Float64(Float64(1.0 / N) - Float64(Float64(Float64(0.5 + Float64(Float64(Float64(0.25 / N) - 0.3333333333333333) / N)) / N) / N)) end
function tmp = code(N) tmp = (1.0 / N) - (((0.5 + (((0.25 / N) - 0.3333333333333333) / N)) / N) / N); end
code[N_] := N[(N[(1.0 / N), $MachinePrecision] - N[(N[(N[(0.5 + N[(N[(N[(0.25 / N), $MachinePrecision] - 0.3333333333333333), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N} - \frac{\frac{0.5 + \frac{\frac{0.25}{N} - 0.3333333333333333}{N}}{N}}{N}
\end{array}
Initial program 24.7%
Taylor expanded in N around inf 95.8%
Taylor expanded in N around -inf 95.8%
mul-1-neg 95.8%
unsub-neg 95.8%
mul-1-neg 95.8%
unsub-neg 95.8%
associate-*r/ 95.8%
metadata-eval 95.8%
Simplified 95.8%
div-sub 95.8%
Applied egg-rr 95.8%
Final simplification 95.8%
(FPCore (N) :precision binary64 (/ -1.0 (/ N (+ -1.0 (/ (- (/ (- (/ 0.25 N) 0.3333333333333333) N) -0.5) N)))))
double code(double N) {
return -1.0 / (N / (-1.0 + (((((0.25 / N) - 0.3333333333333333) / N) - -0.5) / N)));
}
real(8) function code(n)
! Same four-term truncated series for log(1 + 1/n), algebraically
! restructured as -1 / (n / (series - 1)).
implicit none
real(8), intent (in) :: n
code = (-1.0d0) / (n / ((-1.0d0) + (((((0.25d0 / n) - 0.3333333333333333d0) / n) - (-0.5d0)) / n)))
end function
public static double code(double N) {
return -1.0 / (N / (-1.0 + (((((0.25 / N) - 0.3333333333333333) / N) - -0.5) / N)));
}
def code(N): return -1.0 / (N / (-1.0 + (((((0.25 / N) - 0.3333333333333333) / N) - -0.5) / N)))
function code(N) return Float64(-1.0 / Float64(N / Float64(-1.0 + Float64(Float64(Float64(Float64(Float64(0.25 / N) - 0.3333333333333333) / N) - -0.5) / N)))) end
function tmp = code(N) tmp = -1.0 / (N / (-1.0 + (((((0.25 / N) - 0.3333333333333333) / N) - -0.5) / N))); end
code[N_] := N[(-1.0 / N[(N / N[(-1.0 + N[(N[(N[(N[(N[(0.25 / N), $MachinePrecision] - 0.3333333333333333), $MachinePrecision] / N), $MachinePrecision] - -0.5), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\frac{N}{-1 + \frac{\frac{\frac{0.25}{N} - 0.3333333333333333}{N} - -0.5}{N}}}
\end{array}
Initial program 24.7%
Taylor expanded in N around inf 95.8%
Taylor expanded in N around -inf 95.8%
mul-1-neg 95.8%
unsub-neg 95.8%
mul-1-neg 95.8%
unsub-neg 95.8%
associate-*r/ 95.8%
metadata-eval 95.8%
Simplified 95.8%
clear-num 95.8%
inv-pow 95.8%
sub-neg 95.8%
distribute-neg-frac 95.8%
sub-neg 95.8%
distribute-neg-in 95.8%
metadata-eval 95.8%
distribute-neg-frac2 95.8%
distribute-frac-neg 95.8%
frac-2neg 95.8%
Applied egg-rr 95.8%
unpow-1 95.8%
Simplified 95.8%
Final simplification 95.8%
(FPCore (N) :precision binary64 (/ (+ (/ (- (/ (- 0.3333333333333333 (/ 0.25 N)) N) 0.5) N) 1.0) N))
double code(double N) {
return (((((0.3333333333333333 - (0.25 / N)) / N) - 0.5) / N) + 1.0) / N;
}
real(8) function code(n)
! Same four-term truncated series for log(1 + 1/n), factored with the
! leading 1/n pulled out of the nested-division Horner form.
implicit none
real(8), intent (in) :: n
code = (((((0.3333333333333333d0 - (0.25d0 / n)) / n) - 0.5d0) / n) + 1.0d0) / n
end function
public static double code(double N) {
return (((((0.3333333333333333 - (0.25 / N)) / N) - 0.5) / N) + 1.0) / N;
}
def code(N): return (((((0.3333333333333333 - (0.25 / N)) / N) - 0.5) / N) + 1.0) / N
function code(N) return Float64(Float64(Float64(Float64(Float64(Float64(0.3333333333333333 - Float64(0.25 / N)) / N) - 0.5) / N) + 1.0) / N) end
function tmp = code(N) tmp = (((((0.3333333333333333 - (0.25 / N)) / N) - 0.5) / N) + 1.0) / N; end
code[N_] := N[(N[(N[(N[(N[(N[(0.3333333333333333 - N[(0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] - 0.5), $MachinePrecision] / N), $MachinePrecision] + 1.0), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\frac{0.3333333333333333 - \frac{0.25}{N}}{N} - 0.5}{N} + 1}{N}
\end{array}
Initial program 24.7%
Taylor expanded in N around inf 95.8%
Taylor expanded in N around -inf 95.8%
mul-1-neg 95.8%
unsub-neg 95.8%
mul-1-neg 95.8%
unsub-neg 95.8%
associate-*r/ 95.8%
metadata-eval 95.8%
Simplified 95.8%
Final simplification 95.8%
(FPCore (N) :precision binary64 (/ -1.0 (/ N (+ (/ (+ 0.5 (/ -0.3333333333333333 N)) N) -1.0))))
double code(double N) {
return -1.0 / (N / (((0.5 + (-0.3333333333333333 / N)) / N) + -1.0));
}
real(8) function code(n)
! Three-term truncated series for log(1 + 1/n):
! 1/n - 1/(2 n^2) + 1/(3 n^3), restructured as -1 / (n / (series - 1)).
implicit none
real(8), intent (in) :: n
code = (-1.0d0) / (n / (((0.5d0 + ((-0.3333333333333333d0) / n)) / n) + (-1.0d0)))
end function
public static double code(double N) {
return -1.0 / (N / (((0.5 + (-0.3333333333333333 / N)) / N) + -1.0));
}
def code(N): return -1.0 / (N / (((0.5 + (-0.3333333333333333 / N)) / N) + -1.0))
function code(N) return Float64(-1.0 / Float64(N / Float64(Float64(Float64(0.5 + Float64(-0.3333333333333333 / N)) / N) + -1.0))) end
function tmp = code(N) tmp = -1.0 / (N / (((0.5 + (-0.3333333333333333 / N)) / N) + -1.0)); end
code[N_] := N[(-1.0 / N[(N / N[(N[(N[(0.5 + N[(-0.3333333333333333 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\frac{N}{\frac{0.5 + \frac{-0.3333333333333333}{N}}{N} + -1}}
\end{array}
Initial program 24.7%
Taylor expanded in N around inf 94.4%
associate--l+ 94.4%
unpow2 94.4%
associate-/r* 94.4%
metadata-eval 94.4%
associate-*r/ 94.4%
associate-*r/ 94.4%
metadata-eval 94.4%
div-sub 94.4%
sub-neg 94.4%
metadata-eval 94.4%
+-commutative 94.4%
associate-*r/ 94.4%
metadata-eval 94.4%
Simplified 94.4%
clear-num 94.4%
inv-pow 94.4%
+-commutative 94.4%
Applied egg-rr 94.4%
unpow-1 94.4%
+-commutative 94.4%
metadata-eval 94.4%
remove-double-neg 94.4%
distribute-neg-in 94.4%
distribute-neg-frac 94.4%
unsub-neg 94.4%
distribute-neg-frac 94.4%
metadata-eval 94.4%
Simplified 94.4%
Final simplification 94.4%
(FPCore (N) :precision binary64 (/ (+ (/ (+ -0.5 (/ 0.3333333333333333 N)) N) 1.0) N))
double code(double N) {
return (((-0.5 + (0.3333333333333333 / N)) / N) + 1.0) / N;
}
real(8) function code(n)
! Three-term truncated series for log(1 + 1/n):
! 1/n - 1/(2 n^2) + 1/(3 n^3), in nested-division Horner form.
implicit none
real(8), intent (in) :: n
code = ((((-0.5d0) + (0.3333333333333333d0 / n)) / n) + 1.0d0) / n
end function
public static double code(double N) {
return (((-0.5 + (0.3333333333333333 / N)) / N) + 1.0) / N;
}
def code(N): return (((-0.5 + (0.3333333333333333 / N)) / N) + 1.0) / N
function code(N) return Float64(Float64(Float64(Float64(-0.5 + Float64(0.3333333333333333 / N)) / N) + 1.0) / N) end
function tmp = code(N) tmp = (((-0.5 + (0.3333333333333333 / N)) / N) + 1.0) / N; end
code[N_] := N[(N[(N[(N[(-0.5 + N[(0.3333333333333333 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] + 1.0), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{-0.5 + \frac{0.3333333333333333}{N}}{N} + 1}{N}
\end{array}
Initial program 24.7%
Taylor expanded in N around inf 94.4%
associate--l+ 94.4%
unpow2 94.4%
associate-/r* 94.4%
metadata-eval 94.4%
associate-*r/ 94.4%
associate-*r/ 94.4%
metadata-eval 94.4%
div-sub 94.4%
sub-neg 94.4%
metadata-eval 94.4%
+-commutative 94.4%
associate-*r/ 94.4%
metadata-eval 94.4%
Simplified 94.4%
Final simplification 94.4%
(FPCore (N) :precision binary64 (- (/ 1.0 N) (/ (/ 0.5 N) N)))
double code(double N) {
return (1.0 / N) - ((0.5 / N) / N);
}
real(8) function code(n)
! Two-term truncated series for log(1 + 1/n): 1/n - 1/(2 n^2).
implicit none
real(8), intent (in) :: n
code = (1.0d0 / n) - ((0.5d0 / n) / n)
end function
public static double code(double N) {
return (1.0 / N) - ((0.5 / N) / N);
}
def code(N): return (1.0 / N) - ((0.5 / N) / N)
function code(N) return Float64(Float64(1.0 / N) - Float64(Float64(0.5 / N) / N)) end
function tmp = code(N) tmp = (1.0 / N) - ((0.5 / N) / N); end
code[N_] := N[(N[(1.0 / N), $MachinePrecision] - N[(N[(0.5 / N), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N} - \frac{\frac{0.5}{N}}{N}
\end{array}
Initial program 24.7%
Taylor expanded in N around inf 91.4%
associate-*r/ 91.4%
metadata-eval 91.4%
Simplified 91.4%
div-sub 91.5%
Applied egg-rr 91.5%
Final simplification 91.5%
(FPCore (N) :precision binary64 (/ -1.0 (/ N (+ (/ 0.5 N) -1.0))))
double code(double N) {
return -1.0 / (N / ((0.5 / N) + -1.0));
}
real(8) function code(n)
! Two-term truncated series for log(1 + 1/n): 1/n - 1/(2 n^2),
! restructured as -1 / (n / (1/(2n) - 1)).
implicit none
real(8), intent (in) :: n
code = (-1.0d0) / (n / ((0.5d0 / n) + (-1.0d0)))
end function
public static double code(double N) {
return -1.0 / (N / ((0.5 / N) + -1.0));
}
def code(N): return -1.0 / (N / ((0.5 / N) + -1.0))
function code(N) return Float64(-1.0 / Float64(N / Float64(Float64(0.5 / N) + -1.0))) end
function tmp = code(N) tmp = -1.0 / (N / ((0.5 / N) + -1.0)); end
code[N_] := N[(-1.0 / N[(N / N[(N[(0.5 / N), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\frac{N}{\frac{0.5}{N} + -1}}
\end{array}
Initial program 24.7%
Taylor expanded in N around inf 91.4%
associate-*r/ 91.4%
metadata-eval 91.4%
Simplified 91.4%
Taylor expanded in N around 0 91.4%
clear-num 91.5%
inv-pow 91.5%
div-sub 91.5%
*-inverses 91.5%
Applied egg-rr 91.5%
unpow-1 91.5%
Simplified 91.5%
Final simplification 91.5%
(FPCore (N) :precision binary64 (/ (- 1.0 (/ 0.5 N)) N))
double code(double N) {
return (1.0 - (0.5 / N)) / N;
}
real(8) function code(n)
! Two-term truncated series for log(1 + 1/n): (1 - 1/(2n)) / n.
implicit none
real(8), intent (in) :: n
code = (1.0d0 - (0.5d0 / n)) / n
end function
public static double code(double N) {
return (1.0 - (0.5 / N)) / N;
}
def code(N): return (1.0 - (0.5 / N)) / N
function code(N) return Float64(Float64(1.0 - Float64(0.5 / N)) / N) end
function tmp = code(N) tmp = (1.0 - (0.5 / N)) / N; end
code[N_] := N[(N[(1.0 - N[(0.5 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \frac{0.5}{N}}{N}
\end{array}
Initial program 24.7%
Taylor expanded in N around inf 91.4%
associate-*r/ 91.4%
metadata-eval 91.4%
Simplified 91.4%
Final simplification 91.4%
(FPCore (N) :precision binary64 (/ 1.0 N))
double code(double N) {
return 1.0 / N;
}
real(8) function code(n)
! Leading-order approximation: log(1 + 1/n) ~ 1/n for large n.
implicit none
real(8), intent (in) :: n
code = 1.0d0 / n
end function
public static double code(double N) {
return 1.0 / N;
}
def code(N): return 1.0 / N
function code(N) return Float64(1.0 / N) end
function tmp = code(N) tmp = 1.0 / N; end
code[N_] := N[(1.0 / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N}
\end{array}
Initial program 24.7%
Taylor expanded in N around inf 83.7%
Final simplification 83.7%
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
double code(double N) {
return log1p((1.0 / N));
}
public static double code(double N) {
return Math.log1p((1.0 / N));
}
def code(N): return math.log1p((1.0 / N))
function code(N) return log1p(Float64(1.0 / N)) end
code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
herbie shell --seed 2024072
(FPCore (N)
:name "2log (problem 3.3.6)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+40))
:alt
(log1p (/ 1.0 N))
(- (log (+ N 1.0)) (log N)))