
Initial program:
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
return Math.log((N + 1.0)) - Math.log(N);
}
def code(N): return math.log((N + 1.0)) - math.log(N)
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
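
For large N, log(N + 1) and log N agree in almost all of their leading digits, so the subtraction cancels most of the significand and few accurate bits survive; this is why the sampled accuracy is poor toward the top of the input range. A minimal Python sketch (not part of the Herbie report; math.log1p serves as the accurate reference) makes the digit loss visible:

```python
import math

def naive(N):
    # log(N + 1) - log(N): the two logarithms agree in most leading
    # digits once N is large, so the subtraction cancels almost all of them.
    return math.log(N + 1.0) - math.log(N)

def accurate(N):
    # log(N + 1) - log(N) = log(1 + 1/N); log1p evaluates log(1 + x)
    # without forming the nearly-1 argument, avoiding the cancellation.
    return math.log1p(1.0 / N)

for N in (10.0, 1e8, 1e15):
    print(f"N={N:g}  naive={naive(N):.17e}  log1p={accurate(N):.17e}")
```

At N = 10^15 the naive form has essentially no correct digits: the true difference (about 10^-15) is smaller than one ulp of log N.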
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
Alternative 1:
(FPCore (N)
:precision binary64
(if (<= N 940.0)
(- (log (/ N (+ 1.0 N))))
(pow
(fma
(/ (- 0.5 (/ (- 0.08333333333333333 (/ 0.041666666666666664 N)) N)) N)
N
N)
-1.0)))
double code(double N) {
    double tmp;
    if (N <= 940.0) {
        tmp = -log((N / (1.0 + N)));
    } else {
        tmp = pow(fma(((0.5 - ((0.08333333333333333 - (0.041666666666666664 / N)) / N)) / N), N, N), -1.0);
    }
    return tmp;
}
function code(N)
    tmp = 0.0
    if (N <= 940.0)
        tmp = Float64(-log(Float64(N / Float64(1.0 + N))));
    else
        tmp = fma(Float64(Float64(0.5 - Float64(Float64(0.08333333333333333 - Float64(0.041666666666666664 / N)) / N)) / N), N, N) ^ -1.0;
    end
    return tmp
end
code[N_] := If[LessEqual[N, 940.0], (-N[Log[N[(N / N[(1.0 + N), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), N[Power[N[(N[(N[(0.5 - N[(N[(0.08333333333333333 - N[(0.041666666666666664 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] * N + N), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq 940:\\
\;\;\;\;-\log \left(\frac{N}{1 + N}\right)\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\frac{0.5 - \frac{0.08333333333333333 - \frac{0.041666666666666664}{N}}{N}}{N}, N, N\right)\right)}^{-1}\\
\end{array}
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| **if N < 940** | |
| Initial program | 90.8% |
| lift--.f64 | N/A |
| lift-log.f64 | N/A |
| lift-log.f64 | N/A |
| diff-log | N/A |
| clear-num | N/A |
| clear-num | N/A |
| log-rec | N/A |
| lower-neg.f64 | N/A |
| lower-log.f64 | N/A |
| lower-/.f64 | N/A |
| lower-/.f64 | 94.7% |
| lift-+.f64 | N/A |
| +-commutative | N/A |
| lower-+.f64 | 94.7% |
| Applied rewrites | 94.7% |
| lift-/.f64 | N/A |
| /-rgt-identity | 94.7% |
| Applied rewrites | 94.7% |
| **if 940 < N** | |
| Initial program | 16.4% |
| Taylor expanded in N around inf | |
| Applied rewrites | 99.8% |
| Applied rewrites | 99.9% |
| Taylor expanded in N around -inf | |
| Applied rewrites | 99.8% |
| Applied rewrites | 99.9% |
| Final simplification | 99.5% |
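
In the else branch, 0.08333333333333333 is 1/12 and 0.041666666666666664 is 1/24; fma(c, N, N) computes c·N + N = N + 1/2 − 1/(12N) + 1/(24N²), and the pow(…, −1.0) takes its reciprocal. A worked check of what the Taylor steps above produced (not part of the Herbie output): inverting the series for log(1 + 1/N) gives

\[
\log(N+1) - \log N = \log\left(1 + \frac{1}{N}\right)
= \frac{1}{N + \frac{1}{2} - \frac{1}{12N} + \frac{1}{24N^{2}} + O\left(N^{-3}\right)},
\]

so this branch is accurate precisely in the large-N region where the naive subtraction is worst.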
Alternative 2 (the same split as Alternative 1, with the negation folded into the logarithm: -log(N/(1+N)) = log((1+N)/N)):
(FPCore (N)
:precision binary64
(if (<= N 780.0)
(log (/ (+ 1.0 N) N))
(pow
(fma
(/ (- 0.5 (/ (- 0.08333333333333333 (/ 0.041666666666666664 N)) N)) N)
N
N)
-1.0)))
double code(double N) {
    double tmp;
    if (N <= 780.0) {
        tmp = log(((1.0 + N) / N));
    } else {
        tmp = pow(fma(((0.5 - ((0.08333333333333333 - (0.041666666666666664 / N)) / N)) / N), N, N), -1.0);
    }
    return tmp;
}
function code(N)
    tmp = 0.0
    if (N <= 780.0)
        tmp = log(Float64(Float64(1.0 + N) / N));
    else
        tmp = fma(Float64(Float64(0.5 - Float64(Float64(0.08333333333333333 - Float64(0.041666666666666664 / N)) / N)) / N), N, N) ^ -1.0;
    end
    return tmp
end
code[N_] := If[LessEqual[N, 780.0], N[Log[N[(N[(1.0 + N), $MachinePrecision] / N), $MachinePrecision]], $MachinePrecision], N[Power[N[(N[(N[(0.5 - N[(N[(0.08333333333333333 - N[(0.041666666666666664 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] * N + N), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq 780:\\
\;\;\;\;\log \left(\frac{1 + N}{N}\right)\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(\frac{0.5 - \frac{0.08333333333333333 - \frac{0.041666666666666664}{N}}{N}}{N}, N, N\right)\right)}^{-1}\\
\end{array}
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| **if N < 780** | |
| Initial program | 90.8% |
| lift--.f64 | N/A |
| lift-log.f64 | N/A |
| lift-log.f64 | N/A |
| diff-log | N/A |
| lower-log.f64 | N/A |
| lower-/.f64 | 93.3% |
| lift-+.f64 | N/A |
| +-commutative | N/A |
| lower-+.f64 | 93.3% |
| Applied rewrites | 93.3% |
| **if 780 < N** | |
| Initial program | 16.4% |
| Taylor expanded in N around inf | |
| Applied rewrites | 99.8% |
| Applied rewrites | 99.9% |
| Taylor expanded in N around -inf | |
| Applied rewrites | 99.8% |
| Applied rewrites | 99.9% |
| Final simplification | 99.4% |
Alternative 3 (the unconditional else branch of Alternatives 1 and 2):
(FPCore (N) :precision binary64 (pow (fma (/ (- 0.5 (/ (- 0.08333333333333333 (/ 0.041666666666666664 N)) N)) N) N N) -1.0))
double code(double N) {
return pow(fma(((0.5 - ((0.08333333333333333 - (0.041666666666666664 / N)) / N)) / N), N, N), -1.0);
}
function code(N) return fma(Float64(Float64(0.5 - Float64(Float64(0.08333333333333333 - Float64(0.041666666666666664 / N)) / N)) / N), N, N) ^ -1.0 end
code[N_] := N[Power[N[(N[(N[(0.5 - N[(N[(0.08333333333333333 - N[(0.041666666666666664 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] * N + N), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(\frac{0.5 - \frac{0.08333333333333333 - \frac{0.041666666666666664}{N}}{N}}{N}, N, N\right)\right)}^{-1}
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 22.2% |
| Taylor expanded in N around inf | |
| Applied rewrites | 96.7% |
| Applied rewrites | 96.7% |
| Taylor expanded in N around -inf | |
| Applied rewrites | 97.0% |
| Applied rewrites | 97.1% |
| Final simplification | 97.1% |
Alternative 4:
(FPCore (N) :precision binary64 (pow (/ (fma (fma (+ 0.5 N) N -0.08333333333333333) N 0.041666666666666664) (* N N)) -1.0))
double code(double N) {
return pow((fma(fma((0.5 + N), N, -0.08333333333333333), N, 0.041666666666666664) / (N * N)), -1.0);
}
function code(N) return Float64(fma(fma(Float64(0.5 + N), N, -0.08333333333333333), N, 0.041666666666666664) / Float64(N * N)) ^ -1.0 end
code[N_] := N[Power[N[(N[(N[(N[(0.5 + N), $MachinePrecision] * N + -0.08333333333333333), $MachinePrecision] * N + 0.041666666666666664), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\frac{\mathsf{fma}\left(\mathsf{fma}\left(0.5 + N, N, -0.08333333333333333\right), N, 0.041666666666666664\right)}{N \cdot N}\right)}^{-1}
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 22.2% |
| Taylor expanded in N around inf | |
| Applied rewrites | 96.7% |
| Applied rewrites | 96.7% |
| Taylor expanded in N around -inf | |
| Applied rewrites | 97.0% |
| Taylor expanded in N around 0 | |
| Applied rewrites | 96.9% |
| Final simplification | 96.9% |
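
Alternative 4 packs the same correction polynomial into two nested fma calls over a common denominator N²; expanding it (a check, not part of the report) recovers the denominator used by Alternatives 1 through 3:

\[
\frac{\left((0.5 + N)\,N - \frac{1}{12}\right)N + \frac{1}{24}}{N \cdot N}
= N + \frac{1}{2} - \frac{1}{12N} + \frac{1}{24N^{2}}.
\]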
Alternative 5:
(FPCore (N) :precision binary64 (pow (+ 0.5 N) -1.0))
double code(double N) {
return pow((0.5 + N), -1.0);
}
real(8) function code(n)
real(8), intent (in) :: n
code = (0.5d0 + n) ** (-1.0d0)
end function
public static double code(double N) {
return Math.pow((0.5 + N), -1.0);
}
def code(N): return math.pow((0.5 + N), -1.0)
function code(N) return Float64(0.5 + N) ^ -1.0 end
function tmp = code(N) tmp = (0.5 + N) ^ -1.0; end
code[N_] := N[Power[N[(0.5 + N), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(0.5 + N\right)}^{-1}
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 22.2% |
| Taylor expanded in N around inf | |
| Applied rewrites | 96.7% |
| Applied rewrites | 96.7% |
| Taylor expanded in N around inf | |
| Applied rewrites | 94.0% |
| Taylor expanded in N around 0 | |
| Applied rewrites | 94.1% |
| Final simplification | 94.1% |
Alternative 6:
(FPCore (N) :precision binary64 (pow N -1.0))
double code(double N) {
return pow(N, -1.0);
}
real(8) function code(n)
real(8), intent (in) :: n
code = n ** (-1.0d0)
end function
public static double code(double N) {
return Math.pow(N, -1.0);
}
def code(N): return math.pow(N, -1.0)
function code(N) return N ^ -1.0 end
function tmp = code(N) tmp = N ^ -1.0; end
code[N_] := N[Power[N, -1.0], $MachinePrecision]
\begin{array}{l}
\\
{N}^{-1}
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 22.2% |
| Taylor expanded in N around inf | |
| lower-/.f64 | 85.9% |
| Applied rewrites | 85.9% |
| Final simplification | 85.9% |
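
Alternatives 5 and 6 truncate that expansion to its leading terms, 1/(N + 1/2) and 1/N, trading accuracy for cheaper evaluation. A quick sketch of their relative error (again using math.log1p as the reference; not part of the report):

```python
import math

for N in (10.0, 1e4, 1e8):
    ref = math.log1p(1.0 / N)    # accurate value of log(N + 1) - log(N)
    alt5 = 1.0 / (N + 0.5)       # Alternative 5: (0.5 + N)^-1
    alt6 = 1.0 / N               # Alternative 6: N^-1
    print(f"N={N:g}  alt5 rel.err={abs(alt5 - ref) / ref:.1e}  "
          f"alt6 rel.err={abs(alt6 - ref) / ref:.1e}")
```

Keeping the 1/2 term buys roughly two extra digits at N = 10, consistent with the 94.1% versus 85.9% accuracies reported above.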
Alternative 7:
(FPCore (N) :precision binary64 (/ (- (/ (- (/ 0.3333333333333333 N) 0.5) N) -1.0) N))
double code(double N) {
return ((((0.3333333333333333 / N) - 0.5) / N) - -1.0) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = ((((0.3333333333333333d0 / n) - 0.5d0) / n) - (-1.0d0)) / n
end function
public static double code(double N) {
return ((((0.3333333333333333 / N) - 0.5) / N) - -1.0) / N;
}
def code(N): return ((((0.3333333333333333 / N) - 0.5) / N) - -1.0) / N
function code(N) return Float64(Float64(Float64(Float64(Float64(0.3333333333333333 / N) - 0.5) / N) - -1.0) / N) end
function tmp = code(N) tmp = ((((0.3333333333333333 / N) - 0.5) / N) - -1.0) / N; end
code[N_] := N[(N[(N[(N[(N[(0.3333333333333333 / N), $MachinePrecision] - 0.5), $MachinePrecision] / N), $MachinePrecision] - -1.0), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\frac{0.3333333333333333}{N} - 0.5}{N} - -1}{N}
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 22.2% |
| Taylor expanded in N around inf | |
| lower-/.f64 | N/A |
| Applied rewrites | 95.4% |
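
Alternative 7 keeps the first three terms of the series for log(1 + 1/N), evaluated in Horner form (the `- -1.0` is `+ 1`); expanding the nested quotients:

\[
\frac{\dfrac{\dfrac{1/3}{N} - \dfrac{1}{2}}{N} + 1}{N}
= \frac{1}{N} - \frac{1}{2N^{2}} + \frac{1}{3N^{3}}.
\]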
Developer target (the :alt annotation from the input):
(FPCore (N) :precision binary64 (+ (+ (+ (/ 1.0 N) (/ -1.0 (* 2.0 (pow N 2.0)))) (/ 1.0 (* 3.0 (pow N 3.0)))) (/ -1.0 (* 4.0 (pow N 4.0)))))
double code(double N) {
return (((1.0 / N) + (-1.0 / (2.0 * pow(N, 2.0)))) + (1.0 / (3.0 * pow(N, 3.0)))) + (-1.0 / (4.0 * pow(N, 4.0)));
}
real(8) function code(n)
real(8), intent (in) :: n
code = (((1.0d0 / n) + ((-1.0d0) / (2.0d0 * (n ** 2.0d0)))) + (1.0d0 / (3.0d0 * (n ** 3.0d0)))) + ((-1.0d0) / (4.0d0 * (n ** 4.0d0)))
end function
public static double code(double N) {
return (((1.0 / N) + (-1.0 / (2.0 * Math.pow(N, 2.0)))) + (1.0 / (3.0 * Math.pow(N, 3.0)))) + (-1.0 / (4.0 * Math.pow(N, 4.0)));
}
def code(N): return (((1.0 / N) + (-1.0 / (2.0 * math.pow(N, 2.0)))) + (1.0 / (3.0 * math.pow(N, 3.0)))) + (-1.0 / (4.0 * math.pow(N, 4.0)))
function code(N) return Float64(Float64(Float64(Float64(1.0 / N) + Float64(-1.0 / Float64(2.0 * (N ^ 2.0)))) + Float64(1.0 / Float64(3.0 * (N ^ 3.0)))) + Float64(-1.0 / Float64(4.0 * (N ^ 4.0)))) end
function tmp = code(N) tmp = (((1.0 / N) + (-1.0 / (2.0 * (N ^ 2.0)))) + (1.0 / (3.0 * (N ^ 3.0)))) + (-1.0 / (4.0 * (N ^ 4.0))); end
code[N_] := N[(N[(N[(N[(1.0 / N), $MachinePrecision] + N[(-1.0 / N[(2.0 * N[Power[N, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(3.0 * N[Power[N, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[(4.0 * N[Power[N, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\frac{1}{N} + \frac{-1}{2 \cdot {N}^{2}}\right) + \frac{1}{3 \cdot {N}^{3}}\right) + \frac{-1}{4 \cdot {N}^{4}}
\end{array}
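
This last listing is the four-term truncation of the Mercator series with x = 1/N:

\[
\log(1 + x) = x - \frac{x^{2}}{2} + \frac{x^{3}}{3} - \frac{x^{4}}{4} + \cdots
\quad\Longrightarrow\quad
\log(N+1) - \log N = \frac{1}{N} - \frac{1}{2N^{2}} + \frac{1}{3N^{3}} - \frac{1}{4N^{4}} + \cdots
\]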
Reproduce:
herbie shell --seed 2024327
(FPCore (N)
:name "2log (problem 3.3.6)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+40))
:alt
(! :herbie-platform default (+ (/ 1 N) (/ -1 (* 2 (pow N 2))) (/ 1 (* 3 (pow N 3))) (/ -1 (* 4 (pow N 4)))))
(- (log (+ N 1.0)) (log N)))