
; Initial program: ((n + 1) * ln(n + 1) - n * ln(n)) - 1, evaluated in binary64.
(FPCore (n) :precision binary64 (- (- (* (+ n 1.0) (log (+ n 1.0))) (* n (log n))) 1.0))
double code(double n) {
return (((n + 1.0) * log((n + 1.0))) - (n * log(n))) - 1.0;
}
! Computes ((n + 1)*log(n + 1) - n*log(n)) - 1 in double precision (binary64).
real(8) function code(n)
real(8), intent (in) :: n
code = (((n + 1.0d0) * log((n + 1.0d0))) - (n * log(n))) - 1.0d0
end function
public static double code(double n) {
    // ((n + 1)*log(n + 1) - n*log(n)) - 1, same evaluation order as the source FPCore.
    final double grown = (n + 1.0) * Math.log(n + 1.0);
    final double base = n * Math.log(n);
    return (grown - base) - 1.0;
}
def code(n):
    """((n + 1)*ln(n + 1) - n*ln(n)) - 1, in the same evaluation order as the source FPCore."""
    grown = (n + 1.0) * math.log(n + 1.0)
    base = n * math.log(n)
    return (grown - base) - 1.0
# ((n + 1)*log(n + 1) - n*log(n)) - 1, with every intermediate explicitly
# rounded to Float64, in the same order as the source FPCore.
function code(n)
    grown = Float64(Float64(n + 1.0) * log(Float64(n + 1.0)))
    base = Float64(n * log(n))
    return Float64(Float64(grown - base) - 1.0)
end
function tmp = code(n)
% ((n + 1)*log(n + 1) - n*log(n)) - 1, evaluated left to right as in the source FPCore.
grown = (n + 1.0) * log(n + 1.0);
base = n * log(n);
tmp = (grown - base) - 1.0;
end
(* ((n + 1)*Log[n + 1] - n*Log[n]) - 1, with each intermediate rounded to $MachinePrecision. *)
code[n_] := N[(N[(N[(N[(n + 1.0), $MachinePrecision] * N[Log[N[(n + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(n * N[Log[n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(n + 1\right) \cdot \log \left(n + 1\right) - n \cdot \log n\right) - 1
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 2 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative listing: identical to the initial program.
(FPCore (n) :precision binary64 (- (- (* (+ n 1.0) (log (+ n 1.0))) (* n (log n))) 1.0))
// Same expression as the initial program: ((n + 1)*log(n + 1) - n*log(n)) - 1.
double code(double n) {
return (((n + 1.0) * log((n + 1.0))) - (n * log(n))) - 1.0;
}
! Same expression as the initial program: ((n + 1)*log(n + 1) - n*log(n)) - 1.
real(8) function code(n)
real(8), intent (in) :: n
code = (((n + 1.0d0) * log((n + 1.0d0))) - (n * log(n))) - 1.0d0
end function
// Same expression as the initial program: ((n + 1)*log(n + 1) - n*log(n)) - 1.
public static double code(double n) {
return (((n + 1.0) * Math.log((n + 1.0))) - (n * Math.log(n))) - 1.0;
}
# Same expression as the initial program: ((n + 1)*log(n + 1) - n*log(n)) - 1.
def code(n): return (((n + 1.0) * math.log((n + 1.0))) - (n * math.log(n))) - 1.0
# Same expression as the initial program; every intermediate is explicitly rounded to Float64.
function code(n) return Float64(Float64(Float64(Float64(n + 1.0) * log(Float64(n + 1.0))) - Float64(n * log(n))) - 1.0) end
% Same expression as the initial program: ((n + 1)*log(n + 1) - n*log(n)) - 1.
function tmp = code(n) tmp = (((n + 1.0) * log((n + 1.0))) - (n * log(n))) - 1.0; end
(* Same expression as the initial program, with each intermediate rounded to $MachinePrecision. *)
code[n_] := N[(N[(N[(N[(n + 1.0), $MachinePrecision] * N[Log[N[(n + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(n * N[Log[n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(n + 1\right) \cdot \log \left(n + 1\right) - n \cdot \log n\right) - 1
\end{array}
; Herbie alternative: log(n).
(FPCore (n) :precision binary64 (log n))
// Herbie alternative: log(n).
double code(double n) {
return log(n);
}
! Herbie alternative: log(n).
real(8) function code(n)
real(8), intent (in) :: n
code = log(n)
end function
// Herbie alternative: log(n).
public static double code(double n) {
return Math.log(n);
}
# Herbie alternative: log(n).
def code(n): return math.log(n)
# Herbie alternative: log(n).
function code(n) return log(n) end
% Herbie alternative: log(n).
function tmp = code(n) tmp = log(n); end
(* Herbie alternative: log(n), rounded to $MachinePrecision. *)
code[n_] := N[Log[n], $MachinePrecision]
\begin{array}{l}
\\
\log n
\end{array}
Initial program: 1.6%
sub-neg: N/A
associate--l+: N/A
+-commutative: N/A
cancel-sign-sub: N/A
distribute-lft-neg-in: N/A
--lowering--.f64: N/A
sub-neg: N/A
+-commutative: N/A
unsub-neg: N/A
--lowering--.f64: N/A
metadata-eval: N/A
*-lowering-*.f64: N/A
log-lowering-log.f64: N/A
*-commutative: N/A
distribute-rgt-neg-in: N/A
*-lowering-*.f64: N/A
+-commutative: N/A
accelerator-lowering-log1p.f64: N/A
+-commutative: N/A
distribute-neg-in: N/A
Simplified: 3.1%
Taylor expanded in n around inf
distribute-rgt1-in: N/A
metadata-eval: N/A
mul0-lft: N/A
--rgt-identity: N/A
+-lft-identity: N/A
+-lft-identity: N/A
mul-1-neg: N/A
distribute-neg-frac2: N/A
mul-1-neg: N/A
unsub-neg: N/A
associate--r+: N/A
metadata-eval: N/A
mul-1-neg: N/A
Simplified: 100.0%
mul-1-neg: N/A
distribute-neg-frac2: N/A
metadata-eval: N/A
/-rgt-identity: N/A
log-lowering-log.f64: 100.0%
Applied egg-rr: 100.0%
; Herbie alternative: the constant 0.
(FPCore (n) :precision binary64 0.0)
// Herbie alternative: the constant 0.
double code(double n) {
return 0.0;
}
! Herbie alternative: the constant 0.
real(8) function code(n)
real(8), intent (in) :: n
code = 0.0d0
end function
// Herbie alternative: the constant 0.
public static double code(double n) {
return 0.0;
}
# Herbie alternative: the constant 0.
def code(n): return 0.0
# Herbie alternative: the constant 0.
function code(n) return 0.0 end
% Herbie alternative: the constant 0.
function tmp = code(n) tmp = 0.0; end
(* Herbie alternative: the constant 0. *)
code[n_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program: 1.6%
sub-neg: N/A
associate--l+: N/A
+-commutative: N/A
cancel-sign-sub: N/A
distribute-lft-neg-in: N/A
--lowering--.f64: N/A
sub-neg: N/A
+-commutative: N/A
unsub-neg: N/A
--lowering--.f64: N/A
metadata-eval: N/A
*-lowering-*.f64: N/A
log-lowering-log.f64: N/A
*-commutative: N/A
distribute-rgt-neg-in: N/A
*-lowering-*.f64: N/A
+-commutative: N/A
accelerator-lowering-log1p.f64: N/A
+-commutative: N/A
distribute-neg-in: N/A
Simplified: 3.1%
Taylor expanded in n around inf
associate-*r*: N/A
distribute-rgt1-in: N/A
metadata-eval: N/A
mul0-lft: N/A
mul0-rgt: 3.1%
Simplified: 3.1%
; Herbie alternative: log(n + 1) - (1/(2n) - (1/(3n^2) - 4/n^3)).
; NOTE(review): the analytic Taylor tail of the initial expression is 1/(4 n^3),
; not 4/n^3 -- reproduced verbatim from the report; confirm against Herbie's output.
(FPCore (n) :precision binary64 (- (log (+ n 1.0)) (- (/ 1.0 (* 2.0 n)) (- (/ 1.0 (* 3.0 (* n n))) (/ 4.0 (pow n 3.0))))))
// Herbie alternative: log(n + 1) - (1/(2n) - (1/(3n^2) - 4/n^3)).
// NOTE(review): the analytic Taylor tail of the initial expression is 1/(4 n^3),
// not 4/n^3 -- kept verbatim as reported; confirm against Herbie's output.
double code(double n) {
return log((n + 1.0)) - ((1.0 / (2.0 * n)) - ((1.0 / (3.0 * (n * n))) - (4.0 / pow(n, 3.0))));
}
! Herbie alternative: log(n + 1) - (1/(2n) - (1/(3n^2) - 4/n^3)).
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - ((1.0d0 / (2.0d0 * n)) - ((1.0d0 / (3.0d0 * (n * n))) - (4.0d0 / (n ** 3.0d0))))
end function
// Herbie alternative: log(n + 1) - (1/(2n) - (1/(3n^2) - 4/n^3)).
public static double code(double n) {
return Math.log((n + 1.0)) - ((1.0 / (2.0 * n)) - ((1.0 / (3.0 * (n * n))) - (4.0 / Math.pow(n, 3.0))));
}
# Herbie alternative: log(n + 1) - (1/(2n) - (1/(3n^2) - 4/n^3)).
def code(n): return math.log((n + 1.0)) - ((1.0 / (2.0 * n)) - ((1.0 / (3.0 * (n * n))) - (4.0 / math.pow(n, 3.0))))
# Herbie alternative: log(n + 1) - (1/(2n) - (1/(3n^2) - 4/n^3)), rounded to Float64 at each step.
function code(n) return Float64(log(Float64(n + 1.0)) - Float64(Float64(1.0 / Float64(2.0 * n)) - Float64(Float64(1.0 / Float64(3.0 * Float64(n * n))) - Float64(4.0 / (n ^ 3.0))))) end
% Herbie alternative: log(n + 1) - (1/(2n) - (1/(3n^2) - 4/n^3)).
function tmp = code(n) tmp = log((n + 1.0)) - ((1.0 / (2.0 * n)) - ((1.0 / (3.0 * (n * n))) - (4.0 / (n ^ 3.0)))); end
(* Herbie alternative: log(n + 1) - (1/(2n) - (1/(3n^2) - 4/n^3)), rounded to $MachinePrecision at each step. *)
code[n_] := N[(N[Log[N[(n + 1.0), $MachinePrecision]], $MachinePrecision] - N[(N[(1.0 / N[(2.0 * n), $MachinePrecision]), $MachinePrecision] - N[(N[(1.0 / N[(3.0 * N[(n * n), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(4.0 / N[Power[n, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(n + 1\right) - \left(\frac{1}{2 \cdot n} - \left(\frac{1}{3 \cdot \left(n \cdot n\right)} - \frac{4}{{n}^{3}}\right)\right)
\end{array}
herbie shell --seed 2024192
;; Reproduction input for: herbie shell --seed 2024192
;; Precondition restricts sampling to large n; :alt records Herbie's suggested rewrite.
(FPCore (n)
:name "logs (example 3.8)"
:precision binary64
:pre (> n 6.8e+15)
:alt
(! :herbie-platform default (- (log (+ n 1)) (- (/ 1 (* 2 n)) (- (/ 1 (* 3 (* n n))) (/ 4 (pow n 3))))))
(- (- (* (+ n 1.0) (log (+ n 1.0))) (* n (log n))) 1.0))