
; Initial program: log(N + 1) - log(N); the two logs cancel for large N.
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
! Computes log(n + 1) - log(n) exactly as written; the two logarithms
! cancel for large n, losing precision.
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
    // Difference of logs; equals log((N + 1)/N) but loses accuracy for large N.
    final double upper = Math.log(N + 1.0);
    final double lower = Math.log(N);
    return upper - lower;
}
def code(N):
    """Return log(N + 1) - log(N), evaluated as written (cancels for large N)."""
    left = math.log(N + 1.0)
    right = math.log(N)
    return left - right
function code(N)
    # log(N + 1) - log(N), same evaluation order and rounding as the original.
    a = log(Float64(N + 1.0))
    b = log(N)
    return Float64(a - b)
end
function tmp = code(N)
% Difference of logarithms; cancels when N is large.
tmp = log((N + 1.0)) - log(N);
end
(* Fixed: the original pattern name N_ shadowed the protected built-in N,
   so every N[expr, $MachinePrecision] call rewrote to value[expr, ...]
   when code was invoked. The pattern is renamed to x_; callers pass
   positionally, so the interface is unchanged. *)
code[x_] := N[(N[Log[N[(x + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 starting point: log(N + 1) - log(N)
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
! log(n + 1) - log(n) computed directly; loses precision for large n.
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
    // Same contract as the FPCore above: log(N + 1) minus log(N).
    final double a = Math.log(N + 1.0);
    final double b = Math.log(N);
    return a - b;
}
def code(N):
    """log(N + 1) - log(N) computed directly; cancels for large N."""
    numerator_log = math.log(N + 1.0)
    denominator_log = math.log(N)
    return numerator_log - denominator_log
function code(N)
    # Direct difference of logs with the original's Float64 roundings.
    top = log(Float64(N + 1.0))
    bot = log(N)
    return Float64(top - bot)
end
function tmp = code(N)
% log(N + 1) - log(N); identical arithmetic to the one-line original.
tmp = log((N + 1.0)) - log(N);
end
(* Fixed: pattern name N_ shadowed the protected built-in N, breaking every
   N[expr, $MachinePrecision] call in the body on invocation. Renamed to x_. *)
code[x_] := N[(N[Log[N[(x + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
; Accurate rewrite: log((N + 1)/N) == log1p(1/N), stable for large N.
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
double code(double N) {
return log1p((1.0 / N));
}
public static double code(double N) {
    // log1p keeps full precision where log(N + 1) - log(N) would cancel.
    final double recip = 1.0 / N;
    return Math.log1p(recip);
}
def code(N):
    """Accurate form of log((N + 1)/N), computed as log1p(1/N)."""
    recip = 1.0 / N
    return math.log1p(recip)
function code(N)
    # Accurate rewrite of log((N + 1)/N) via log1p.
    r = Float64(1.0 / N)
    return log1p(r)
end
(* Fixed: pattern name N_ shadowed the protected built-in N used for
   machine-precision evaluation. Renamed the pattern to x_. *)
code[x_] := N[Log[1 + N[(1.0 / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
Initial program 23.6%
sub-neg 23.6%
+-commutative 23.6%
log1p-udef 23.5%
Applied egg-rr 23.5%
+-commutative 23.5%
log-rec 23.6%
log1p-def 23.6%
+-commutative 23.6%
log-prod 25.9%
distribute-lft-in 25.8%
lft-mult-inverse 26.2%
*-rgt-identity 26.2%
Simplified 26.2%
*-un-lft-identity 26.2%
log-prod 26.2%
metadata-eval 26.2%
log1p-def 99.8%
Applied egg-rr 99.8%
+-lft-identity 99.8%
Simplified 99.8%
Final simplification 99.8%
; Series-derived approximation: (N * (N - 0.5)) / N / N / N
(FPCore (N) :precision binary64 (/ (/ (/ (* N (+ N -0.5)) N) N) N))
double code(double N) {
	// Series-derived form; divisions applied left to right as in the original.
	double prod = N * (N + -0.5);
	double q1 = prod / N;
	double q2 = q1 / N;
	return q2 / N;
}
! Series-derived approximation ((n*(n - 0.5))/n)/n/n of the original program.
real(8) function code(n)
real(8), intent (in) :: n
code = (((n * (n + (-0.5d0))) / n) / n) / n
end function
public static double code(double N) {
    // Series-derived form; left-to-right division order preserved.
    final double prod = N * (N + -0.5);
    return prod / N / N / N;
}
def code(N):
    """Series-derived form ((N*(N - 0.5))/N)/N/N, same op order as original."""
    prod = N * (N + -0.5)
    return prod / N / N / N
function code(N)
    # Series-derived form with the original's Float64 rounding at each step.
    p = Float64(N * Float64(N + -0.5))
    return Float64(Float64(Float64(p / N) / N) / N)
end
function tmp = code(N)
% Series-derived form; left-to-right division order preserved.
tmp = (((N * (N + -0.5)) / N) / N) / N;
end
(* Fixed: pattern name N_ shadowed the protected built-in N, so the nested
   N[expr, $MachinePrecision] calls broke on invocation. Renamed to x_. *)
code[x_] := N[(N[(N[(N[(x * N[(x + -0.5), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] / x), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\frac{N \cdot \left(N + -0.5\right)}{N}}{N}}{N}
\end{array}
Initial program 23.6%
Taylor expanded in N around inf 92.4%
associate-*r/ 92.4%
metadata-eval 92.4%
Simplified 92.4%
frac-2neg 92.4%
metadata-eval 92.4%
frac-sub 92.2%
*-un-lft-identity 92.2%
Applied egg-rr 92.2%
div-inv 92.1%
distribute-rgt-neg-out 92.1%
unpow2 92.1%
cube-unmult 92.0%
Applied egg-rr 92.0%
Applied egg-rr 92.4%
Final simplification 92.4%
; Leading-order approximation: log(1 + 1/N) ~ 1/N for large N.
(FPCore (N) :precision binary64 (/ 1.0 N))
double code(double N) {
	// Leading Taylor term of log(1 + 1/N).
	double recip = 1.0 / N;
	return recip;
}
! Leading-order approximation 1/n of the original program.
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / n
end function
public static double code(double N) {
    // Leading Taylor term of log(1 + 1/N).
    final double recip = 1.0 / N;
    return recip;
}
def code(N):
    """Leading-order approximation 1/N of log(N + 1) - log(N)."""
    return 1.0 / N
function code(N)
    # Leading-order approximation 1/N.
    return Float64(1.0 / N)
end
function tmp = code(N)
% Leading-order approximation 1/N.
tmp = 1.0 / N;
end
(* Fixed: pattern name N_ shadowed the protected built-in N. Renamed to x_. *)
code[x_] := N[(1.0 / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N}
\end{array}
Initial program 23.6%
Taylor expanded in N around inf 84.5%
Final simplification 84.5%
; Target (most accurate) rewrite: log1p(1/N)
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
double code(double N) {
return log1p((1.0 / N));
}
public static double code(double N) {
    // Target rewrite: stable evaluation of log((N + 1)/N).
    final double r = 1.0 / N;
    return Math.log1p(r);
}
def code(N):
    """Target rewrite: log((N + 1)/N) evaluated stably as log1p(1/N)."""
    reciprocal = 1.0 / N
    return math.log1p(reciprocal)
function code(N)
    # Target rewrite: stable evaluation of log((N + 1)/N).
    rec = Float64(1.0 / N)
    return log1p(rec)
end
(* Fixed: pattern name N_ shadowed the protected built-in N. Renamed to x_. *)
code[x_] := N[Log[1 + N[(1.0 / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
Reproduce with: `herbie shell --seed 2024031`
; Full problem specification: sampling precondition 1 < N < 1e40,
; known-good target log1p(1/N), initial program log(N + 1) - log(N).
(FPCore (N)
:name "2log (problem 3.3.6)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+40))
:herbie-target
(log1p (/ 1.0 N))
(- (log (+ N 1.0)) (log N)))