
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
! log(n + 1) - log(n): forward difference of the natural log at n.
real(8) function code(n)
real(8), intent (in) :: n
real(8) :: upper, lower
upper = log(n + 1.0d0)
lower = log(n)
code = upper - lower
end function
// log(N + 1) - log(N): forward difference of the natural log at N.
public static double code(double N) {
    double upper = Math.log(N + 1.0);
    double lower = Math.log(N);
    return upper - lower;
}
def code(N):
    """Return log(N + 1) - log(N), the forward difference of log at N."""
    upper = math.log(N + 1.0)
    lower = math.log(N)
    return upper - lower
# log(N + 1) - log(N): forward difference of the natural log at N.
function code(N)
    upper = log(Float64(N + 1.0))
    lower = log(N)
    return Float64(upper - lower)
end
% log(N + 1) - log(N): forward difference of the natural log at N.
function tmp = code(N)
    upper = log(N + 1.0);
    lower = log(N);
    tmp = upper - lower;
end
(* log(N + 1) - log(N); each intermediate is rounded to $MachinePrecision via N[...]. *)
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
/* Alternative 1 (identical to the initial program): log(N + 1) - log(N). */
double code(double N) {
return log((N + 1.0)) - log(N);
}
! Alternative 1 (identical to the initial program): log(n + 1) - log(n).
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
// Alternative 1 (identical to the initial program): log(N + 1) - log(N).
public static double code(double N) {
return Math.log((N + 1.0)) - Math.log(N);
}
# Alternative 1 (identical to the initial program): log(N + 1) - log(N).
def code(N): return math.log((N + 1.0)) - math.log(N)
# Alternative 1 (identical to the initial program): log(N + 1) - log(N).
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
% Alternative 1 (identical to the initial program): log(N + 1) - log(N).
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
(* Alternative 1 (identical to the initial program): log(N + 1) - log(N). *)
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
double code(double N) {
return log1p((1.0 / N));
}
// Math.log1p(1/N): cancellation-free form of log(N + 1) - log(N).
public static double code(double N) {
    double recip = 1.0 / N;
    return Math.log1p(recip);
}
def code(N):
    """Return log1p(1/N), a cancellation-free form of log(N + 1) - log(N)."""
    recip = 1.0 / N
    return math.log1p(recip)
# log1p(1/N): cancellation-free rewrite of log(N + 1) - log(N).
function code(N) return log1p(Float64(1.0 / N)) end
(* Log[1 + 1/N]: cancellation-free rewrite of log(N + 1) - log(N). *)
code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
Initial program 61.1%
+-commutative 61.1%
log1p-def 61.1%
Simplified 61.1%
add-log-exp 61.1%
log1p-expm1-u 7.7%
log1p-udef 7.7%
diff-log 7.8%
log1p-udef 7.8%
rem-exp-log 7.1%
+-commutative 7.1%
add-exp-log 7.1%
log1p-udef 7.1%
log1p-expm1-u 60.4%
add-exp-log 61.4%
Applied egg-rr 61.4%
add-cube-cbrt 61.2%
unpow2 61.2%
log-prod 60.9%
pow-to-exp 60.9%
rem-log-exp 60.9%
pow1/3 61.0%
log-pow 61.2%
log-div 61.0%
+-commutative 61.0%
log1p-udef 61.0%
pow1/3 60.9%
log-pow 60.9%
log-div 60.9%
+-commutative 60.9%
log1p-udef 60.8%
Applied egg-rr 60.8%
*-commutative 60.8%
associate-*r* 60.8%
metadata-eval 60.8%
distribute-rgt-out 61.1%
metadata-eval 61.1%
*-rgt-identity 61.1%
log1p-def 61.1%
+-commutative 61.1%
log-div 61.4%
*-lft-identity 61.4%
associate-*r/ 61.4%
associate-*l/ 61.2%
distribute-lft-in 61.2%
lft-mult-inverse 61.4%
*-rgt-identity 61.4%
log1p-def 100.0%
Simplified 100.0%
Final simplification 100.0%
(FPCore (N) :precision binary64 (if (<= N 0.54) (- (log N)) (/ 1.0 N)))
double code(double N) {
double tmp;
if (N <= 0.54) {
tmp = -log(N);
} else {
tmp = 1.0 / N;
}
return tmp;
}
! Piecewise approximation of log(n + 1) - log(n):
! -log(n) for small n, the asymptotic 1/n otherwise.
real(8) function code(n)
real(8), intent (in) :: n
if (n <= 0.54d0) then
    code = -log(n)
else
    code = 1.0d0 / n
end if
end function
// Piecewise approximation of log(N + 1) - log(N):
// -Math.log(N) for small N, the asymptotic 1/N otherwise.
public static double code(double N) {
    return (N <= 0.54) ? -Math.log(N) : 1.0 / N;
}
def code(N):
    """Piecewise approximation of log(N + 1) - log(N).

    For N <= 0.54 the -log(N) term dominates; for larger N the
    difference approaches its asymptotic value 1/N.
    """
    # NOTE(review): the original was flattened onto a single line and was
    # not valid Python syntax; this restores the intended control flow.
    if N <= 0.54:
        tmp = -math.log(N)
    else:
        tmp = 1.0 / N
    return tmp
# Piecewise approximation of log(N + 1) - log(N):
# -log(N) for small N, the asymptotic 1/N otherwise.
# NOTE(review): the original was flattened onto one line without statement
# separators and did not parse; this restores the intended control flow.
function code(N)
    if N <= 0.54
        tmp = Float64(-log(N))
    else
        tmp = Float64(1.0 / N)
    end
    return tmp
end
% Piecewise approximation of log(N + 1) - log(N):
% -log(N) for small N, the asymptotic 1/N otherwise.
% NOTE(review): the original function definition was flattened onto a single
% line, which MATLAB rejects; this restores the intended layout.
function tmp_2 = code(N)
    if (N <= 0.54)
        tmp = -log(N);
    else
        tmp = 1.0 / N;
    end
    tmp_2 = tmp;
end
(* Piecewise: -Log[N] for N <= 0.54, else the large-N limit 1/N. *)
code[N_] := If[LessEqual[N, 0.54], (-N[Log[N], $MachinePrecision]), N[(1.0 / N), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq 0.54:\\
\;\;\;\;-\log N\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{N}\\
\end{array}
\end{array}
if N < 0.54000000000000004:
Initial program 99.9%
+-commutative 99.9%
log1p-def 99.9%
Simplified 99.9%
Taylor expanded in N around 0 98.4%
neg-mul-1 98.4%
Simplified 98.4%
if 0.54000000000000004 < N:
Initial program 9.5%
+-commutative 9.5%
log1p-def 9.5%
Simplified 9.5%
Taylor expanded in N around inf 97.0%
Final simplification 97.8%
(FPCore (N) :precision binary64 (/ 1.0 N))
/* Large-N asymptotic: log(N + 1) - log(N) ~ 1/N (inaccurate for small N; see trace). */
double code(double N) {
return 1.0 / N;
}
! Large-n asymptotic: log(n + 1) - log(n) ~ 1/n (inaccurate for small n; see trace).
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / n
end function
// Large-N asymptotic: log(N + 1) - log(N) ~ 1/N (inaccurate for small N; see trace).
public static double code(double N) {
return 1.0 / N;
}
# Large-N asymptotic: log(N + 1) - log(N) ~ 1/N (inaccurate for small N; see trace).
def code(N): return 1.0 / N
# Large-N asymptotic: log(N + 1) - log(N) ~ 1/N (inaccurate for small N; see trace).
function code(N) return Float64(1.0 / N) end
% Large-N asymptotic: log(N + 1) - log(N) ~ 1/N (inaccurate for small N; see trace).
function tmp = code(N) tmp = 1.0 / N; end
(* Large-N asymptotic: log(N + 1) - log(N) ~ 1/N (inaccurate for small N; see trace). *)
code[N_] := N[(1.0 / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N}
\end{array}
Initial program 61.1%
+-commutative 61.1%
log1p-def 61.1%
Simplified 61.1%
Taylor expanded in N around inf 44.8%
Final simplification 44.8%
(FPCore (N) :precision binary64 N)
/* Degenerate Taylor-expansion alternative: returns N itself; low accuracy per the trace. */
double code(double N) {
return N;
}
! Degenerate Taylor-expansion alternative: returns n itself; low accuracy per the trace.
real(8) function code(n)
real(8), intent (in) :: n
code = n
end function
// Degenerate Taylor-expansion alternative: returns N itself; low accuracy per the trace.
public static double code(double N) {
return N;
}
# Degenerate Taylor-expansion alternative: returns N itself; low accuracy per the trace.
def code(N): return N
# Degenerate Taylor-expansion alternative: returns N itself; low accuracy per the trace.
function code(N) return N end
% Degenerate Taylor-expansion alternative: returns N itself; low accuracy per the trace.
function tmp = code(N) tmp = N; end
(* Degenerate Taylor-expansion alternative: returns N itself; low accuracy per the trace. *)
code[N_] := N
\begin{array}{l}
\\
N
\end{array}
Initial program 61.1%
+-commutative 61.1%
log1p-def 61.1%
Simplified 61.1%
Taylor expanded in N around 0 58.2%
neg-mul-1 58.2%
unsub-neg 58.2%
Simplified 58.2%
Taylor expanded in N around inf 4.7%
Final simplification 4.7%
herbie shell --seed 2024017
(FPCore (N)
:name "2log (problem 3.3.6)"
:precision binary64
(- (log (+ N 1.0)) (log N)))