
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
! Computes log(n + 1) - log(n) in double precision.
! NOTE(review): for large n the two logarithms agree in nearly all bits,
! so this subtraction cancels catastrophically; log(1 + 1/n) would be the
! accurate form (standard Fortran has no log1p intrinsic).
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
/**
 * Computes log(N + 1) - log(N), equal to log1p(1.0 / N) for N &gt; 0.
 *
 * The original subtraction of two nearly-equal logarithms loses almost
 * all significant bits once N is large; Math.log1p evaluates the small
 * difference directly with full relative accuracy.  For N &lt;= 0 the
 * original NaN-producing form is kept so out-of-domain behavior is
 * unchanged.
 *
 * @param N input value (meaningful for N &gt; 0)
 * @return log(N + 1) - log(N)
 */
public static double code(double N) {
    return (N > 0.0) ? Math.log1p(1.0 / N) : Math.log(N + 1.0) - Math.log(N);
}
def code(N):
    """Return log(N + 1) - log(N), computed as log1p(1/N) for N > 0.

    The direct two-log subtraction cancels catastrophically for large N
    (both logs agree in nearly all bits); math.log1p evaluates the tiny
    difference with full relative accuracy.  Non-positive N falls back to
    the original form so domain errors (ValueError) are unchanged.
    """
    if N > 0.0:
        return math.log1p(1.0 / N)
    return math.log(N + 1.0) - math.log(N)
# Computes log(N + 1) - log(N) via log1p(1/N) for N > 0, avoiding the
# catastrophic cancellation of the direct two-log subtraction at large N.
# Non-positive N keeps the original (DomainError-raising) form.
function code(N)
    if N > 0.0
        return Float64(log1p(Float64(1.0 / N)))
    end
    return Float64(log(Float64(N + 1.0)) - log(N))
end
% Computes log(N + 1) - log(N) using log1p(1/N) for N > 0, which avoids
% the catastrophic cancellation of the direct two-log subtraction at
% large N.  Non-positive N keeps the original form unchanged.
function tmp = code(N)
    if N > 0.0
        tmp = log1p(1.0 / N);
    else
        tmp = log((N + 1.0)) - log(N);
    end
end
(* Computes log(N + 1) - log(N); every intermediate is rounded to machine
   precision via N[..., $MachinePrecision] to model binary64 evaluation.
   NOTE(review): the two nearly-equal logs cancel badly for large N. *)
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 20 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
return Math.log((N + 1.0)) - Math.log(N);
}
def code(N): return math.log((N + 1.0)) - math.log(N)
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
(FPCore (N)
:precision binary64
(let* ((t_0 (log (* N (+ N 1.0)))))
(if (<= (- (log (+ N 1.0)) (log N)) 0.0005)
(/ (+ 1.0 (/ (+ -0.5 (/ (- 0.3333333333333333 (/ 0.25 N)) N)) N)) N)
(exp (- 0.0 (log (/ t_0 (* t_0 (- 0.0 (log (/ N (+ N 1.0))))))))))))
/* Herbie alternative: piecewise evaluation of log(N + 1) - log(N).
 * t_0 = log(N * (N + 1)) is shared by the rescaled else-branch below. */
double code(double N) {
double t_0 = log((N * (N + 1.0)));
double tmp;
/* Tiny-difference regime (large N): the direct subtraction cancels, so
 * use the series 1/N - 1/(2N^2) + 1/(3N^3) - 1/(4N^4) in Horner form. */
if ((log((N + 1.0)) - log(N)) <= 0.0005) {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
} else {
/* Algebraically -log(N / (N + 1)) == log(1 + 1/N), re-expressed through
 * t_0 by the generator; exp(-log(x)) cancels back to 1/x. */
tmp = exp((0.0 - log((t_0 / (t_0 * (0.0 - log((N / (N + 1.0)))))))));
}
return tmp;
}
real(8) function code(n)
real(8), intent (in) :: n
real(8) :: t_0
real(8) :: tmp
t_0 = log((n * (n + 1.0d0)))
if ((log((n + 1.0d0)) - log(n)) <= 0.0005d0) then
tmp = (1.0d0 + (((-0.5d0) + ((0.3333333333333333d0 - (0.25d0 / n)) / n)) / n)) / n
else
tmp = exp((0.0d0 - log((t_0 / (t_0 * (0.0d0 - log((n / (n + 1.0d0)))))))))
end if
code = tmp
end function
public static double code(double N) {
double t_0 = Math.log((N * (N + 1.0)));
double tmp;
if ((Math.log((N + 1.0)) - Math.log(N)) <= 0.0005) {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
} else {
tmp = Math.exp((0.0 - Math.log((t_0 / (t_0 * (0.0 - Math.log((N / (N + 1.0)))))))));
}
return tmp;
}
def code(N):
    """Piecewise log(N + 1) - log(N): a 1/N Taylor series in the
    cancellation regime (tiny difference, large N), otherwise an exp/log
    rewrite through t_0 = log(N * (N + 1)).

    Reflowed onto multiple lines: the generated one-line form collapsed
    several statements together, which is a Python syntax error.
    """
    t_0 = math.log(N * (N + 1.0))
    tmp = 0
    if (math.log(N + 1.0) - math.log(N)) <= 0.0005:
        tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N
    else:
        tmp = math.exp(0.0 - math.log(t_0 / (t_0 * (0.0 - math.log(N / (N + 1.0))))))
    return tmp
function code(N) t_0 = log(Float64(N * Float64(N + 1.0))) tmp = 0.0 if (Float64(log(Float64(N + 1.0)) - log(N)) <= 0.0005) tmp = Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 - Float64(0.25 / N)) / N)) / N)) / N); else tmp = exp(Float64(0.0 - log(Float64(t_0 / Float64(t_0 * Float64(0.0 - log(Float64(N / Float64(N + 1.0))))))))); end return tmp end
function tmp_2 = code(N) t_0 = log((N * (N + 1.0))); tmp = 0.0; if ((log((N + 1.0)) - log(N)) <= 0.0005) tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N; else tmp = exp((0.0 - log((t_0 / (t_0 * (0.0 - log((N / (N + 1.0))))))))); end tmp_2 = tmp; end
code[N_] := Block[{t$95$0 = N[Log[N[(N * N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision], 0.0005], N[(N[(1.0 + N[(N[(-0.5 + N[(N[(0.3333333333333333 - N[(0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision], N[Exp[N[(0.0 - N[Log[N[(t$95$0 / N[(t$95$0 * N[(0.0 - N[Log[N[(N / N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \log \left(N \cdot \left(N + 1\right)\right)\\
\mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.0005:\\
\;\;\;\;\frac{1 + \frac{-0.5 + \frac{0.3333333333333333 - \frac{0.25}{N}}{N}}{N}}{N}\\
\mathbf{else}:\\
\;\;\;\;e^{0 - \log \left(\frac{t\_0}{t\_0 \cdot \left(0 - \log \left(\frac{N}{N + 1}\right)\right)}\right)}\\
\end{array}
\end{array}
if (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) < 5.0000000000000001e-4Initial program 17.7%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6417.7%
Simplified17.7%
Taylor expanded in N around inf
Simplified99.9%
if 5.0000000000000001e-4 < (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) Initial program 92.2%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6492.4%
Simplified92.4%
sub-negN/A
+-commutativeN/A
flip-+N/A
neg-mul-1N/A
fmm-defN/A
fma-defineN/A
neg-mul-1N/A
distribute-neg-inN/A
+-commutativeN/A
/-lowering-/.f64N/A
Applied egg-rr91.8%
clear-numN/A
inv-powN/A
pow-to-expN/A
exp-lowering-exp.f64N/A
*-lowering-*.f64N/A
Applied egg-rr95.3%
Final simplification99.5%
(FPCore (N)
:precision binary64
(let* ((t_0 (log (* N (+ N 1.0)))))
(if (<= (- (log (+ N 1.0)) (log N)) 0.0005)
(/ (+ 1.0 (/ (+ -0.5 (/ (- 0.3333333333333333 (/ 0.25 N)) N)) N)) N)
(/ 1.0 (* t_0 (/ -1.0 (* t_0 (log (/ N (+ N 1.0))))))))))
double code(double N) {
double t_0 = log((N * (N + 1.0)));
double tmp;
if ((log((N + 1.0)) - log(N)) <= 0.0005) {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
} else {
tmp = 1.0 / (t_0 * (-1.0 / (t_0 * log((N / (N + 1.0))))));
}
return tmp;
}
real(8) function code(n)
real(8), intent (in) :: n
real(8) :: t_0
real(8) :: tmp
t_0 = log((n * (n + 1.0d0)))
if ((log((n + 1.0d0)) - log(n)) <= 0.0005d0) then
tmp = (1.0d0 + (((-0.5d0) + ((0.3333333333333333d0 - (0.25d0 / n)) / n)) / n)) / n
else
tmp = 1.0d0 / (t_0 * ((-1.0d0) / (t_0 * log((n / (n + 1.0d0))))))
end if
code = tmp
end function
public static double code(double N) {
double t_0 = Math.log((N * (N + 1.0)));
double tmp;
if ((Math.log((N + 1.0)) - Math.log(N)) <= 0.0005) {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
} else {
tmp = 1.0 / (t_0 * (-1.0 / (t_0 * Math.log((N / (N + 1.0))))));
}
return tmp;
}
def code(N): t_0 = math.log((N * (N + 1.0))) tmp = 0 if (math.log((N + 1.0)) - math.log(N)) <= 0.0005: tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N else: tmp = 1.0 / (t_0 * (-1.0 / (t_0 * math.log((N / (N + 1.0)))))) return tmp
function code(N) t_0 = log(Float64(N * Float64(N + 1.0))) tmp = 0.0 if (Float64(log(Float64(N + 1.0)) - log(N)) <= 0.0005) tmp = Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 - Float64(0.25 / N)) / N)) / N)) / N); else tmp = Float64(1.0 / Float64(t_0 * Float64(-1.0 / Float64(t_0 * log(Float64(N / Float64(N + 1.0))))))); end return tmp end
function tmp_2 = code(N) t_0 = log((N * (N + 1.0))); tmp = 0.0; if ((log((N + 1.0)) - log(N)) <= 0.0005) tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N; else tmp = 1.0 / (t_0 * (-1.0 / (t_0 * log((N / (N + 1.0)))))); end tmp_2 = tmp; end
code[N_] := Block[{t$95$0 = N[Log[N[(N * N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision], 0.0005], N[(N[(1.0 + N[(N[(-0.5 + N[(N[(0.3333333333333333 - N[(0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision], N[(1.0 / N[(t$95$0 * N[(-1.0 / N[(t$95$0 * N[Log[N[(N / N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \log \left(N \cdot \left(N + 1\right)\right)\\
\mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.0005:\\
\;\;\;\;\frac{1 + \frac{-0.5 + \frac{0.3333333333333333 - \frac{0.25}{N}}{N}}{N}}{N}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{t\_0 \cdot \frac{-1}{t\_0 \cdot \log \left(\frac{N}{N + 1}\right)}}\\
\end{array}
\end{array}
if (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) < 5.0000000000000001e-4Initial program 17.7%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6417.7%
Simplified17.7%
Taylor expanded in N around inf
Simplified99.9%
if 5.0000000000000001e-4 < (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) Initial program 92.2%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6492.4%
Simplified92.4%
flip--N/A
frac-2negN/A
distribute-frac-neg2N/A
neg-lowering-neg.f64N/A
/-lowering-/.f64N/A
Applied egg-rr95.1%
clear-numN/A
inv-powN/A
div-invN/A
unpow-prod-downN/A
inv-powN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
log-lowering-log.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
pow-lowering-pow.f64N/A
Applied egg-rr95.0%
*-commutativeN/A
unpow-1N/A
frac-timesN/A
metadata-evalN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
Applied egg-rr95.1%
Final simplification99.5%
(FPCore (N)
:precision binary64
(let* ((t_0 (log (* N (+ N 1.0)))))
(if (<= (- (log (+ N 1.0)) (log N)) 0.0005)
(/ (+ 1.0 (/ (+ -0.5 (/ (- 0.3333333333333333 (/ 0.25 N)) N)) N)) N)
(/ (* t_0 (- 0.0 (log (/ N (+ N 1.0))))) t_0))))
double code(double N) {
double t_0 = log((N * (N + 1.0)));
double tmp;
if ((log((N + 1.0)) - log(N)) <= 0.0005) {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
} else {
tmp = (t_0 * (0.0 - log((N / (N + 1.0))))) / t_0;
}
return tmp;
}
real(8) function code(n)
real(8), intent (in) :: n
real(8) :: t_0
real(8) :: tmp
t_0 = log((n * (n + 1.0d0)))
if ((log((n + 1.0d0)) - log(n)) <= 0.0005d0) then
tmp = (1.0d0 + (((-0.5d0) + ((0.3333333333333333d0 - (0.25d0 / n)) / n)) / n)) / n
else
tmp = (t_0 * (0.0d0 - log((n / (n + 1.0d0))))) / t_0
end if
code = tmp
end function
public static double code(double N) {
double t_0 = Math.log((N * (N + 1.0)));
double tmp;
if ((Math.log((N + 1.0)) - Math.log(N)) <= 0.0005) {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
} else {
tmp = (t_0 * (0.0 - Math.log((N / (N + 1.0))))) / t_0;
}
return tmp;
}
def code(N): t_0 = math.log((N * (N + 1.0))) tmp = 0 if (math.log((N + 1.0)) - math.log(N)) <= 0.0005: tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N else: tmp = (t_0 * (0.0 - math.log((N / (N + 1.0))))) / t_0 return tmp
function code(N) t_0 = log(Float64(N * Float64(N + 1.0))) tmp = 0.0 if (Float64(log(Float64(N + 1.0)) - log(N)) <= 0.0005) tmp = Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 - Float64(0.25 / N)) / N)) / N)) / N); else tmp = Float64(Float64(t_0 * Float64(0.0 - log(Float64(N / Float64(N + 1.0))))) / t_0); end return tmp end
function tmp_2 = code(N) t_0 = log((N * (N + 1.0))); tmp = 0.0; if ((log((N + 1.0)) - log(N)) <= 0.0005) tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N; else tmp = (t_0 * (0.0 - log((N / (N + 1.0))))) / t_0; end tmp_2 = tmp; end
code[N_] := Block[{t$95$0 = N[Log[N[(N * N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision], 0.0005], N[(N[(1.0 + N[(N[(-0.5 + N[(N[(0.3333333333333333 - N[(0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision], N[(N[(t$95$0 * N[(0.0 - N[Log[N[(N / N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \log \left(N \cdot \left(N + 1\right)\right)\\
\mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.0005:\\
\;\;\;\;\frac{1 + \frac{-0.5 + \frac{0.3333333333333333 - \frac{0.25}{N}}{N}}{N}}{N}\\
\mathbf{else}:\\
\;\;\;\;\frac{t\_0 \cdot \left(0 - \log \left(\frac{N}{N + 1}\right)\right)}{t\_0}\\
\end{array}
\end{array}
if (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) < 5.0000000000000001e-4Initial program 17.7%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6417.7%
Simplified17.7%
Taylor expanded in N around inf
Simplified99.9%
if 5.0000000000000001e-4 < (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) Initial program 92.2%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6492.4%
Simplified92.4%
flip--N/A
frac-2negN/A
distribute-frac-neg2N/A
neg-lowering-neg.f64N/A
/-lowering-/.f64N/A
Applied egg-rr95.1%
Final simplification99.5%
(FPCore (N) :precision binary64 (if (<= (- (log (+ N 1.0)) (log N)) 0.0005) (/ (+ 1.0 (/ (+ -0.5 (/ (- 0.3333333333333333 (/ 0.25 N)) N)) N)) N) (- 0.0 (log (/ N (+ N 1.0))))))
/* Herbie alternative: log(N + 1) - log(N) as a two-regime branch.
 * Tiny-difference regime (<= 5e-4, i.e. large N): 4-term Taylor series
 * in 1/N, Horner form, avoiding the cancellation of the subtraction.
 * Otherwise: the algebraic rewrite -log(N / (N + 1)). */
double code(double N) {
double tmp;
if ((log((N + 1.0)) - log(N)) <= 0.0005) {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
} else {
tmp = 0.0 - log((N / (N + 1.0)));
}
return tmp;
}
real(8) function code(n)
real(8), intent (in) :: n
real(8) :: tmp
if ((log((n + 1.0d0)) - log(n)) <= 0.0005d0) then
tmp = (1.0d0 + (((-0.5d0) + ((0.3333333333333333d0 - (0.25d0 / n)) / n)) / n)) / n
else
tmp = 0.0d0 - log((n / (n + 1.0d0)))
end if
code = tmp
end function
public static double code(double N) {
double tmp;
if ((Math.log((N + 1.0)) - Math.log(N)) <= 0.0005) {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
} else {
tmp = 0.0 - Math.log((N / (N + 1.0)));
}
return tmp;
}
def code(N):
    """Piecewise log(N + 1) - log(N): 1/N Taylor series in the
    cancellation regime, otherwise -log(N / (N + 1)).

    Reflowed onto multiple lines: the generated one-line form collapsed
    several statements together, which is a Python syntax error.
    """
    tmp = 0
    if (math.log(N + 1.0) - math.log(N)) <= 0.0005:
        tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N
    else:
        tmp = 0.0 - math.log(N / (N + 1.0))
    return tmp
function code(N) tmp = 0.0 if (Float64(log(Float64(N + 1.0)) - log(N)) <= 0.0005) tmp = Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 - Float64(0.25 / N)) / N)) / N)) / N); else tmp = Float64(0.0 - log(Float64(N / Float64(N + 1.0)))); end return tmp end
function tmp_2 = code(N) tmp = 0.0; if ((log((N + 1.0)) - log(N)) <= 0.0005) tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N; else tmp = 0.0 - log((N / (N + 1.0))); end tmp_2 = tmp; end
code[N_] := If[LessEqual[N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision], 0.0005], N[(N[(1.0 + N[(N[(-0.5 + N[(N[(0.3333333333333333 - N[(0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision], N[(0.0 - N[Log[N[(N / N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.0005:\\
\;\;\;\;\frac{1 + \frac{-0.5 + \frac{0.3333333333333333 - \frac{0.25}{N}}{N}}{N}}{N}\\
\mathbf{else}:\\
\;\;\;\;0 - \log \left(\frac{N}{N + 1}\right)\\
\end{array}
\end{array}
if (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) < 5.0000000000000001e-4Initial program 17.7%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6417.7%
Simplified17.7%
Taylor expanded in N around inf
Simplified99.9%
if 5.0000000000000001e-4 < (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) Initial program 92.2%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6492.4%
Simplified92.4%
diff-logN/A
clear-numN/A
log-recN/A
diff-logN/A
neg-lowering-neg.f64N/A
diff-logN/A
log-lowering-log.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f6495.1%
Applied egg-rr95.1%
Final simplification99.5%
(FPCore (N) :precision binary64 (if (<= N 1750.0) (log (+ 1.0 (/ 1.0 N))) (/ (+ 1.0 (/ (+ -0.5 (/ (- 0.3333333333333333 (/ 0.25 N)) N)) N)) N)))
/* Herbie alternative: log(N + 1) - log(N).
 * For N <= 1750 use the exact rewrite log(1 + 1/N); beyond that, a
 * 4-term Taylor series in 1/N (Horner form), which sidesteps the
 * cancellation of the direct subtraction. */
double code(double N) {
double tmp;
if (N <= 1750.0) {
tmp = log((1.0 + (1.0 / N)));
} else {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
}
return tmp;
}
real(8) function code(n)
real(8), intent (in) :: n
real(8) :: tmp
if (n <= 1750.0d0) then
tmp = log((1.0d0 + (1.0d0 / n)))
else
tmp = (1.0d0 + (((-0.5d0) + ((0.3333333333333333d0 - (0.25d0 / n)) / n)) / n)) / n
end if
code = tmp
end function
public static double code(double N) {
double tmp;
if (N <= 1750.0) {
tmp = Math.log((1.0 + (1.0 / N)));
} else {
tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
}
return tmp;
}
def code(N):
    """log(N + 1) - log(N): log(1 + 1/N) for N <= 1750, otherwise a
    4-term 1/N Taylor series in Horner form.

    Reflowed onto multiple lines: the generated one-line form collapsed
    several statements together, which is a Python syntax error.
    """
    tmp = 0
    if N <= 1750.0:
        tmp = math.log(1.0 + (1.0 / N))
    else:
        tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N
    return tmp
function code(N) tmp = 0.0 if (N <= 1750.0) tmp = log(Float64(1.0 + Float64(1.0 / N))); else tmp = Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 - Float64(0.25 / N)) / N)) / N)) / N); end return tmp end
function tmp_2 = code(N) tmp = 0.0; if (N <= 1750.0) tmp = log((1.0 + (1.0 / N))); else tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N; end tmp_2 = tmp; end
code[N_] := If[LessEqual[N, 1750.0], N[Log[N[(1.0 + N[(1.0 / N), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[(N[(1.0 + N[(N[(-0.5 + N[(N[(0.3333333333333333 - N[(0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq 1750:\\
\;\;\;\;\log \left(1 + \frac{1}{N}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + \frac{-0.5 + \frac{0.3333333333333333 - \frac{0.25}{N}}{N}}{N}}{N}\\
\end{array}
\end{array}
if N < 1750Initial program 92.2%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6492.4%
Simplified92.4%
diff-logN/A
log-lowering-log.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f6493.9%
Applied egg-rr93.9%
Taylor expanded in N around inf
+-lowering-+.f64N/A
/-lowering-/.f6494.0%
Simplified94.0%
if 1750 < N Initial program 17.7%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6417.7%
Simplified17.7%
Taylor expanded in N around inf
Simplified99.9%
(FPCore (N)
:precision binary64
(let* ((t_0 (/ (+ 0.08333333333333333 (/ -0.041666666666666664 N)) N))
(t_1 (- 0.5 t_0))
(t_2 (+ -0.5 t_0)))
(/
1.0
(/
(* N (- (* (/ t_2 N) (/ (* t_1 (- t_0 0.5)) (* N N))) -1.0))
(- (+ 1.0 (/ (* t_1 t_1) (* N N))) (/ -1.0 (/ N t_2)))))))
double code(double N) {
double t_0 = (0.08333333333333333 + (-0.041666666666666664 / N)) / N;
double t_1 = 0.5 - t_0;
double t_2 = -0.5 + t_0;
return 1.0 / ((N * (((t_2 / N) * ((t_1 * (t_0 - 0.5)) / (N * N))) - -1.0)) / ((1.0 + ((t_1 * t_1) / (N * N))) - (-1.0 / (N / t_2))));
}
real(8) function code(n)
real(8), intent (in) :: n
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
t_0 = (0.08333333333333333d0 + ((-0.041666666666666664d0) / n)) / n
t_1 = 0.5d0 - t_0
t_2 = (-0.5d0) + t_0
code = 1.0d0 / ((n * (((t_2 / n) * ((t_1 * (t_0 - 0.5d0)) / (n * n))) - (-1.0d0))) / ((1.0d0 + ((t_1 * t_1) / (n * n))) - ((-1.0d0) / (n / t_2))))
end function
public static double code(double N) {
double t_0 = (0.08333333333333333 + (-0.041666666666666664 / N)) / N;
double t_1 = 0.5 - t_0;
double t_2 = -0.5 + t_0;
return 1.0 / ((N * (((t_2 / N) * ((t_1 * (t_0 - 0.5)) / (N * N))) - -1.0)) / ((1.0 + ((t_1 * t_1) / (N * N))) - (-1.0 / (N / t_2))));
}
def code(N): t_0 = (0.08333333333333333 + (-0.041666666666666664 / N)) / N t_1 = 0.5 - t_0 t_2 = -0.5 + t_0 return 1.0 / ((N * (((t_2 / N) * ((t_1 * (t_0 - 0.5)) / (N * N))) - -1.0)) / ((1.0 + ((t_1 * t_1) / (N * N))) - (-1.0 / (N / t_2))))
function code(N) t_0 = Float64(Float64(0.08333333333333333 + Float64(-0.041666666666666664 / N)) / N) t_1 = Float64(0.5 - t_0) t_2 = Float64(-0.5 + t_0) return Float64(1.0 / Float64(Float64(N * Float64(Float64(Float64(t_2 / N) * Float64(Float64(t_1 * Float64(t_0 - 0.5)) / Float64(N * N))) - -1.0)) / Float64(Float64(1.0 + Float64(Float64(t_1 * t_1) / Float64(N * N))) - Float64(-1.0 / Float64(N / t_2))))) end
function tmp = code(N) t_0 = (0.08333333333333333 + (-0.041666666666666664 / N)) / N; t_1 = 0.5 - t_0; t_2 = -0.5 + t_0; tmp = 1.0 / ((N * (((t_2 / N) * ((t_1 * (t_0 - 0.5)) / (N * N))) - -1.0)) / ((1.0 + ((t_1 * t_1) / (N * N))) - (-1.0 / (N / t_2)))); end
code[N_] := Block[{t$95$0 = N[(N[(0.08333333333333333 + N[(-0.041666666666666664 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]}, Block[{t$95$1 = N[(0.5 - t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(-0.5 + t$95$0), $MachinePrecision]}, N[(1.0 / N[(N[(N * N[(N[(N[(t$95$2 / N), $MachinePrecision] * N[(N[(t$95$1 * N[(t$95$0 - 0.5), $MachinePrecision]), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(1.0 + N[(N[(t$95$1 * t$95$1), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(-1.0 / N[(N / t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.08333333333333333 + \frac{-0.041666666666666664}{N}}{N}\\
t_1 := 0.5 - t\_0\\
t_2 := -0.5 + t\_0\\
\frac{1}{\frac{N \cdot \left(\frac{t\_2}{N} \cdot \frac{t\_1 \cdot \left(t\_0 - 0.5\right)}{N \cdot N} - -1\right)}{\left(1 + \frac{t\_1 \cdot t\_1}{N \cdot N}\right) - \frac{-1}{\frac{N}{t\_2}}}}
\end{array}
\end{array}
Initial program 23.5%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6423.5%
Simplified23.5%
Taylor expanded in N around inf
Simplified96.3%
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f6496.2%
Applied egg-rr96.2%
Taylor expanded in N around -inf
mul-1-negN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
*-lowering-*.f64N/A
Simplified96.6%
Applied egg-rr96.6%
Final simplification96.6%
(FPCore (N)
:precision binary64
(/
1.0
(*
N
(-
(/ (- 0.5 (/ (+ 0.08333333333333333 (/ -0.041666666666666664 N)) N)) N)
-1.0))))
double code(double N) {
return 1.0 / (N * (((0.5 - ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N) - -1.0));
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / (n * (((0.5d0 - ((0.08333333333333333d0 + ((-0.041666666666666664d0) / n)) / n)) / n) - (-1.0d0)))
end function
public static double code(double N) {
return 1.0 / (N * (((0.5 - ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N) - -1.0));
}
def code(N): return 1.0 / (N * (((0.5 - ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N) - -1.0))
function code(N) return Float64(1.0 / Float64(N * Float64(Float64(Float64(0.5 - Float64(Float64(0.08333333333333333 + Float64(-0.041666666666666664 / N)) / N)) / N) - -1.0))) end
function tmp = code(N) tmp = 1.0 / (N * (((0.5 - ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N) - -1.0)); end
code[N_] := N[(1.0 / N[(N * N[(N[(N[(0.5 - N[(N[(0.08333333333333333 + N[(-0.041666666666666664 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N \cdot \left(\frac{0.5 - \frac{0.08333333333333333 + \frac{-0.041666666666666664}{N}}{N}}{N} - -1\right)}
\end{array}
Initial program 23.5%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6423.5%
Simplified23.5%
Taylor expanded in N around inf
Simplified96.3%
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f6496.2%
Applied egg-rr96.2%
Taylor expanded in N around -inf
mul-1-negN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
*-lowering-*.f64N/A
Simplified96.6%
Final simplification96.6%
(FPCore (N)
:precision binary64
(/
(/
-1.0
(+
-1.0
(/ (+ -0.5 (/ (+ 0.08333333333333333 (/ -0.041666666666666664 N)) N)) N)))
N))
double code(double N) {
return (-1.0 / (-1.0 + ((-0.5 + ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N))) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = ((-1.0d0) / ((-1.0d0) + (((-0.5d0) + ((0.08333333333333333d0 + ((-0.041666666666666664d0) / n)) / n)) / n))) / n
end function
public static double code(double N) {
return (-1.0 / (-1.0 + ((-0.5 + ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N))) / N;
}
def code(N): return (-1.0 / (-1.0 + ((-0.5 + ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N))) / N
function code(N) return Float64(Float64(-1.0 / Float64(-1.0 + Float64(Float64(-0.5 + Float64(Float64(0.08333333333333333 + Float64(-0.041666666666666664 / N)) / N)) / N))) / N) end
function tmp = code(N) tmp = (-1.0 / (-1.0 + ((-0.5 + ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N))) / N; end
code[N_] := N[(N[(-1.0 / N[(-1.0 + N[(N[(-0.5 + N[(N[(0.08333333333333333 + N[(-0.041666666666666664 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{-1}{-1 + \frac{-0.5 + \frac{0.08333333333333333 + \frac{-0.041666666666666664}{N}}{N}}{N}}}{N}
\end{array}
Initial program 23.5%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6423.5%
Simplified23.5%
Taylor expanded in N around inf
Simplified96.3%
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f6496.2%
Applied egg-rr96.2%
Taylor expanded in N around -inf
mul-1-negN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
*-lowering-*.f64N/A
Simplified96.6%
associate-/r*N/A
frac-2negN/A
sub0-negN/A
remove-double-negN/A
/-lowering-/.f64N/A
Applied egg-rr96.6%
Final simplification96.6%
(FPCore (N) :precision binary64 (/ (/ -1.0 N) (+ -1.0 (/ (+ -0.5 (/ (+ 0.08333333333333333 (/ -0.041666666666666664 N)) N)) N))))
double code(double N) {
return (-1.0 / N) / (-1.0 + ((-0.5 + ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N));
}
real(8) function code(n)
real(8), intent (in) :: n
code = ((-1.0d0) / n) / ((-1.0d0) + (((-0.5d0) + ((0.08333333333333333d0 + ((-0.041666666666666664d0) / n)) / n)) / n))
end function
public static double code(double N) {
return (-1.0 / N) / (-1.0 + ((-0.5 + ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N));
}
def code(N): return (-1.0 / N) / (-1.0 + ((-0.5 + ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N))
function code(N) return Float64(Float64(-1.0 / N) / Float64(-1.0 + Float64(Float64(-0.5 + Float64(Float64(0.08333333333333333 + Float64(-0.041666666666666664 / N)) / N)) / N))) end
function tmp = code(N) tmp = (-1.0 / N) / (-1.0 + ((-0.5 + ((0.08333333333333333 + (-0.041666666666666664 / N)) / N)) / N)); end
code[N_] := N[(N[(-1.0 / N), $MachinePrecision] / N[(-1.0 + N[(N[(-0.5 + N[(N[(0.08333333333333333 + N[(-0.041666666666666664 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{-1}{N}}{-1 + \frac{-0.5 + \frac{0.08333333333333333 + \frac{-0.041666666666666664}{N}}{N}}{N}}
\end{array}
Initial program 23.5%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6423.5%
Simplified23.5%
Taylor expanded in N around inf
Simplified96.3%
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f6496.2%
Applied egg-rr96.2%
Taylor expanded in N around -inf
mul-1-negN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
*-lowering-*.f64N/A
Simplified96.6%
*-commutativeN/A
associate-/r*N/A
/-lowering-/.f64N/A
frac-2negN/A
metadata-evalN/A
sub0-negN/A
remove-double-negN/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
Applied egg-rr96.5%
Final simplification96.5%
(FPCore (N) :precision binary64 (/ 1.0 (/ (+ 0.041666666666666664 (* N (+ (* N (+ N 0.5)) -0.08333333333333333))) (* N N))))
double code(double N) {
return 1.0 / ((0.041666666666666664 + (N * ((N * (N + 0.5)) + -0.08333333333333333))) / (N * N));
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / ((0.041666666666666664d0 + (n * ((n * (n + 0.5d0)) + (-0.08333333333333333d0)))) / (n * n))
end function
public static double code(double N) {
return 1.0 / ((0.041666666666666664 + (N * ((N * (N + 0.5)) + -0.08333333333333333))) / (N * N));
}
def code(N): return 1.0 / ((0.041666666666666664 + (N * ((N * (N + 0.5)) + -0.08333333333333333))) / (N * N))
function code(N) return Float64(1.0 / Float64(Float64(0.041666666666666664 + Float64(N * Float64(Float64(N * Float64(N + 0.5)) + -0.08333333333333333))) / Float64(N * N))) end
function tmp = code(N) tmp = 1.0 / ((0.041666666666666664 + (N * ((N * (N + 0.5)) + -0.08333333333333333))) / (N * N)); end
code[N_] := N[(1.0 / N[(N[(0.041666666666666664 + N[(N * N[(N[(N * N[(N + 0.5), $MachinePrecision]), $MachinePrecision] + -0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{0.041666666666666664 + N \cdot \left(N \cdot \left(N + 0.5\right) + -0.08333333333333333\right)}{N \cdot N}}
\end{array}
Initial program 23.5%
--lowering--.f64N/A
+-commutativeN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
log-lowering-log.f6423.5%
Simplified23.5%
Taylor expanded in N around inf
Simplified96.3%
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f6496.2%
Applied egg-rr96.2%
Taylor expanded in N around -inf
mul-1-negN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
*-lowering-*.f64N/A
Simplified96.6%
Taylor expanded in N around 0
/-lowering-/.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
*-lowering-*.f6496.5%
Simplified96.5%
(FPCore (N) :precision binary64 (/ (+ 1.0 (/ (+ -0.5 (/ (- 0.3333333333333333 (/ 0.25 N)) N)) N)) N))
/* Herbie alternative: unconditional Taylor series of log(N + 1) - log(N)
 * in powers of 1/N, Horner form: 1/N - 1/(2N^2) + 1/(3N^3) - 1/(4N^4).
 * Derived by expansion around N = inf (per the derivation log), so it is
 * only meaningful for large N. */
double code(double N) {
return (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 + (((-0.5d0) + ((0.3333333333333333d0 - (0.25d0 / n)) / n)) / n)) / n
end function
public static double code(double N) {
return (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N;
}
def code(N): return (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N
function code(N) return Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 - Float64(0.25 / N)) / N)) / N)) / N) end
function tmp = code(N) tmp = (1.0 + ((-0.5 + ((0.3333333333333333 - (0.25 / N)) / N)) / N)) / N; end
code[N_] := N[(N[(1.0 + N[(N[(-0.5 + N[(N[(0.3333333333333333 - N[(0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 + \frac{-0.5 + \frac{0.3333333333333333 - \frac{0.25}{N}}{N}}{N}}{N}
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Taylor expanded in N around inf
Simplified: 96.3%
(FPCore (N) :precision binary64 (/ 1.0 (* N (+ (+ 1.0 (/ 0.5 N)) (/ -0.08333333333333333 (* N N))))))
/* Herbie rewrite of log(N + 1) - log(N): reciprocal-series form
 * 1 / (N * (1 + 1/(2 N) - 1/(12 N^2)))  (0.08333... = 1/12),
 * derived by Taylor expansion around N -> inf per the log below. */
double code(double N) {
return 1.0 / (N * ((1.0 + (0.5 / N)) + (-0.08333333333333333 / (N * N))));
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / (n * ((1.0d0 + (0.5d0 / n)) + ((-0.08333333333333333d0) / (n * n))))
end function
public static double code(double N) {
return 1.0 / (N * ((1.0 + (0.5 / N)) + (-0.08333333333333333 / (N * N))));
}
# Herbie rewrite of log(N + 1) - log(N): reciprocal-series form
# 1 / (N * (1 + 1/(2*N) - 1/(12*N**2)))  (0.08333... = 1/12).
def code(N): return 1.0 / (N * ((1.0 + (0.5 / N)) + (-0.08333333333333333 / (N * N))))
function code(N) return Float64(1.0 / Float64(N * Float64(Float64(1.0 + Float64(0.5 / N)) + Float64(-0.08333333333333333 / Float64(N * N))))) end
function tmp = code(N) tmp = 1.0 / (N * ((1.0 + (0.5 / N)) + (-0.08333333333333333 / (N * N)))); end
code[N_] := N[(1.0 / N[(N * N[(N[(1.0 + N[(0.5 / N), $MachinePrecision]), $MachinePrecision] + N[(-0.08333333333333333 / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N \cdot \left(\left(1 + \frac{0.5}{N}\right) + \frac{-0.08333333333333333}{N \cdot N}\right)}
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Taylor expanded in N around inf
Simplified: 96.3%
clear-num: N/A
/-lowering-/.f64: N/A
/-lowering-/.f64: N/A
+-lowering-+.f64: N/A
/-lowering-/.f64: N/A
+-lowering-+.f64: N/A
/-lowering-/.f64: N/A
--lowering--.f64: N/A
/-lowering-/.f64: 96.2%
Applied egg-rr: 96.2%
Taylor expanded in N around inf
*-lowering-*.f64: N/A
sub-neg: N/A
+-lowering-+.f64: N/A
+-lowering-+.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
/-lowering-/.f64: N/A
distribute-neg-frac: N/A
/-lowering-/.f64: N/A
metadata-eval: N/A
unpow2: N/A
*-lowering-*.f64: 95.4%
Simplified: 95.4%
(FPCore (N) :precision binary64 (/ -1.0 (* N (+ -1.0 (/ (+ -0.5 (/ 0.08333333333333333 N)) N)))))
/* Herbie rewrite of log(N + 1) - log(N): sign-factored reciprocal series,
 * -1 / (N * (-1 - 1/(2 N) + 1/(12 N^2))) — algebraically the same value as
 * 1 / (N * (1 + 1/(2 N) - 1/(12 N^2))) with the negation distributed. */
double code(double N) {
return -1.0 / (N * (-1.0 + ((-0.5 + (0.08333333333333333 / N)) / N)));
}
real(8) function code(n)
real(8), intent (in) :: n
code = (-1.0d0) / (n * ((-1.0d0) + (((-0.5d0) + (0.08333333333333333d0 / n)) / n)))
end function
public static double code(double N) {
return -1.0 / (N * (-1.0 + ((-0.5 + (0.08333333333333333 / N)) / N)));
}
# Herbie rewrite of log(N + 1) - log(N): sign-factored reciprocal series
# -1 / (N * (-1 - 1/(2*N) + 1/(12*N**2)))  (0.08333... = 1/12).
def code(N): return -1.0 / (N * (-1.0 + ((-0.5 + (0.08333333333333333 / N)) / N)))
function code(N) return Float64(-1.0 / Float64(N * Float64(-1.0 + Float64(Float64(-0.5 + Float64(0.08333333333333333 / N)) / N)))) end
function tmp = code(N) tmp = -1.0 / (N * (-1.0 + ((-0.5 + (0.08333333333333333 / N)) / N))); end
code[N_] := N[(-1.0 / N[(N * N[(-1.0 + N[(N[(-0.5 + N[(0.08333333333333333 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{N \cdot \left(-1 + \frac{-0.5 + \frac{0.08333333333333333}{N}}{N}\right)}
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Taylor expanded in N around inf
Simplified: 96.3%
clear-num: N/A
/-lowering-/.f64: N/A
/-lowering-/.f64: N/A
+-lowering-+.f64: N/A
/-lowering-/.f64: N/A
+-lowering-+.f64: N/A
/-lowering-/.f64: N/A
--lowering--.f64: N/A
/-lowering-/.f64: 96.2%
Applied egg-rr: 96.2%
Taylor expanded in N around -inf
mul-1-neg: N/A
*-commutative: N/A
distribute-rgt-neg-in: N/A
mul-1-neg: N/A
*-lowering-*.f64: N/A
Simplified: 96.6%
Taylor expanded in N around inf
/-lowering-/.f64: N/A
sub-neg: N/A
metadata-eval: N/A
+-lowering-+.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
/-lowering-/.f64: 95.4%
Simplified: 95.4%
Final simplification: 95.4%
(FPCore (N) :precision binary64 (/ 1.0 (/ N (+ 1.0 (/ (- (/ 0.3333333333333333 N) 0.5) N)))))
/* Herbie rewrite of log(N + 1) - log(N): three-term series
 * 1/N - 1/(2 N^2) + 1/(3 N^3) written as a double reciprocal,
 * 1 / (N / (1 + (1/(3 N) - 1/2) / N)). */
double code(double N) {
return 1.0 / (N / (1.0 + (((0.3333333333333333 / N) - 0.5) / N)));
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / (n / (1.0d0 + (((0.3333333333333333d0 / n) - 0.5d0) / n)))
end function
public static double code(double N) {
return 1.0 / (N / (1.0 + (((0.3333333333333333 / N) - 0.5) / N)));
}
# Herbie rewrite of log(N + 1) - log(N): three-term series
# 1/N - 1/(2*N**2) + 1/(3*N**3) written as a double reciprocal.
def code(N): return 1.0 / (N / (1.0 + (((0.3333333333333333 / N) - 0.5) / N)))
function code(N) return Float64(1.0 / Float64(N / Float64(1.0 + Float64(Float64(Float64(0.3333333333333333 / N) - 0.5) / N)))) end
function tmp = code(N) tmp = 1.0 / (N / (1.0 + (((0.3333333333333333 / N) - 0.5) / N))); end
code[N_] := N[(1.0 / N[(N / N[(1.0 + N[(N[(N[(0.3333333333333333 / N), $MachinePrecision] - 0.5), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{N}{1 + \frac{\frac{0.3333333333333333}{N} - 0.5}{N}}}
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Taylor expanded in N around inf
Simplified: 96.3%
clear-num: N/A
/-lowering-/.f64: N/A
/-lowering-/.f64: N/A
+-lowering-+.f64: N/A
/-lowering-/.f64: N/A
+-lowering-+.f64: N/A
/-lowering-/.f64: N/A
--lowering--.f64: N/A
/-lowering-/.f64: 96.2%
Applied egg-rr: 96.2%
Taylor expanded in N around -inf
mul-1-neg: N/A
unsub-neg: N/A
--lowering--.f64: N/A
/-lowering-/.f64: N/A
--lowering--.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
/-lowering-/.f64: 95.0%
Simplified: 95.0%
Final simplification: 95.0%
(FPCore (N) :precision binary64 (/ (+ 1.0 (/ (+ -0.5 (/ 0.3333333333333333 N)) N)) N))
/* Herbie rewrite of log(N + 1) - log(N): three-term Taylor series at
 * N -> inf, 1/N - 1/(2 N^2) + 1/(3 N^3), evaluated as nested divisions. */
double code(double N) {
return (1.0 + ((-0.5 + (0.3333333333333333 / N)) / N)) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 + (((-0.5d0) + (0.3333333333333333d0 / n)) / n)) / n
end function
public static double code(double N) {
return (1.0 + ((-0.5 + (0.3333333333333333 / N)) / N)) / N;
}
# Herbie rewrite of log(N + 1) - log(N): three-term Taylor series at
# N -> inf, 1/N - 1/(2*N**2) + 1/(3*N**3), evaluated as nested divisions.
def code(N): return (1.0 + ((-0.5 + (0.3333333333333333 / N)) / N)) / N
function code(N) return Float64(Float64(1.0 + Float64(Float64(-0.5 + Float64(0.3333333333333333 / N)) / N)) / N) end
function tmp = code(N) tmp = (1.0 + ((-0.5 + (0.3333333333333333 / N)) / N)) / N; end
code[N_] := N[(N[(1.0 + N[(N[(-0.5 + N[(0.3333333333333333 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 + \frac{-0.5 + \frac{0.3333333333333333}{N}}{N}}{N}
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Taylor expanded in N around inf
/-lowering-/.f64: N/A
Simplified: 95.0%
(FPCore (N) :precision binary64 (/ 1.0 (* N (+ 1.0 (/ 0.5 N)))))
/* Herbie rewrite of log(N + 1) - log(N): two-term reciprocal form
 * 1 / (N * (1 + 1/(2 N))), i.e. mathematically 1 / (N + 0.5). */
double code(double N) {
return 1.0 / (N * (1.0 + (0.5 / N)));
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / (n * (1.0d0 + (0.5d0 / n)))
end function
public static double code(double N) {
return 1.0 / (N * (1.0 + (0.5 / N)));
}
# Herbie rewrite of log(N + 1) - log(N): two-term reciprocal form
# 1 / (N * (1 + 1/(2*N))), i.e. mathematically 1 / (N + 0.5).
def code(N): return 1.0 / (N * (1.0 + (0.5 / N)))
function code(N) return Float64(1.0 / Float64(N * Float64(1.0 + Float64(0.5 / N)))) end
function tmp = code(N) tmp = 1.0 / (N * (1.0 + (0.5 / N))); end
code[N_] := N[(1.0 / N[(N * N[(1.0 + N[(0.5 / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N \cdot \left(1 + \frac{0.5}{N}\right)}
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Taylor expanded in N around inf
Simplified: 96.3%
clear-num: N/A
/-lowering-/.f64: N/A
/-lowering-/.f64: N/A
+-lowering-+.f64: N/A
/-lowering-/.f64: N/A
+-lowering-+.f64: N/A
/-lowering-/.f64: N/A
--lowering--.f64: N/A
/-lowering-/.f64: 96.2%
Applied egg-rr: 96.2%
Taylor expanded in N around inf
*-lowering-*.f64: N/A
+-lowering-+.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
/-lowering-/.f64: 92.9%
Simplified: 92.9%
(FPCore (N) :precision binary64 (/ (- 1.0 (/ 0.5 N)) N))
/* Herbie rewrite of log(N + 1) - log(N): two-term Taylor series at
 * N -> inf, 1/N - 1/(2 N^2), evaluated as (1 - 1/(2 N)) / N. */
double code(double N) {
return (1.0 - (0.5 / N)) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 - (0.5d0 / n)) / n
end function
public static double code(double N) {
return (1.0 - (0.5 / N)) / N;
}
# Herbie rewrite of log(N + 1) - log(N): two-term Taylor series at
# N -> inf, 1/N - 1/(2*N**2), evaluated as (1 - 1/(2*N)) / N.
def code(N): return (1.0 - (0.5 / N)) / N
function code(N) return Float64(Float64(1.0 - Float64(0.5 / N)) / N) end
function tmp = code(N) tmp = (1.0 - (0.5 / N)) / N; end
code[N_] := N[(N[(1.0 - N[(0.5 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \frac{0.5}{N}}{N}
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Taylor expanded in N around inf
/-lowering-/.f64: N/A
--lowering--.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
/-lowering-/.f64: 92.3%
Simplified: 92.3%
(FPCore (N) :precision binary64 (/ (+ N -0.5) (* N N)))
/* Herbie rewrite of log(N + 1) - log(N): the same two-term series
 * 1/N - 1/(2 N^2) written as the single rational (N - 1/2) / N^2. */
double code(double N) {
return (N + -0.5) / (N * N);
}
real(8) function code(n)
real(8), intent (in) :: n
code = (n + (-0.5d0)) / (n * n)
end function
public static double code(double N) {
return (N + -0.5) / (N * N);
}
# Herbie rewrite of log(N + 1) - log(N): the two-term series
# 1/N - 1/(2*N**2) written as the single rational (N - 0.5) / N**2.
def code(N): return (N + -0.5) / (N * N)
function code(N) return Float64(Float64(N + -0.5) / Float64(N * N)) end
function tmp = code(N) tmp = (N + -0.5) / (N * N); end
code[N_] := N[(N[(N + -0.5), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{N + -0.5}{N \cdot N}
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Taylor expanded in N around inf
/-lowering-/.f64: N/A
--lowering--.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
/-lowering-/.f64: 92.3%
Simplified: 92.3%
Taylor expanded in N around 0
/-lowering-/.f64: N/A
sub-neg: N/A
metadata-eval: N/A
+-lowering-+.f64: N/A
unpow2: N/A
*-lowering-*.f64: 92.0%
Simplified: 92.0%
(FPCore (N) :precision binary64 (/ 1.0 N))
/* Herbie rewrite of log(N + 1) - log(N): leading series term 1/N only
 * (first-order Taylor approximation at N -> inf). */
double code(double N) {
return 1.0 / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / n
end function
public static double code(double N) {
return 1.0 / N;
}
# Herbie rewrite of log(N + 1) - log(N): leading series term 1/N only
# (first-order Taylor approximation at N -> inf).
def code(N): return 1.0 / N
function code(N) return Float64(1.0 / N) end
function tmp = code(N) tmp = 1.0 / N; end
code[N_] := N[(1.0 / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N}
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Taylor expanded in N around inf
/-lowering-/.f64: 84.5%
Simplified: 84.5%
(FPCore (N) :precision binary64 0.0)
/* Degenerate Herbie candidate for log(N + 1) - log(N): the constant 0
 * (the expression's limit as N -> inf); listed with very low reported
 * accuracy in the derivation log below. */
double code(double N) {
return 0.0;
}
real(8) function code(n)
real(8), intent (in) :: n
code = 0.0d0
end function
public static double code(double N) {
return 0.0;
}
# Degenerate Herbie candidate for log(N + 1) - log(N): the constant 0
# (the expression's limit as N -> inf); very low reported accuracy.
def code(N): return 0.0
function code(N) return 0.0 end
function tmp = code(N) tmp = 0.0; end
code[N_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program: 23.5%
--lowering--.f64: N/A
+-commutative: N/A
log1p-define: N/A
log1p-lowering-log1p.f64: N/A
log-lowering-log.f64: 23.5%
Simplified: 23.5%
Applied egg-rr: 25.2%
Taylor expanded in N around inf
distribute-lft1-in: N/A
metadata-eval: N/A
mul0-lft: 3.3%
Simplified: 3.3%
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
/* Exact Herbie rewrite of log(N + 1) - log(N):
 * log(N + 1) - log(N) = log((N + 1)/N) = log1p(1/N).
 * log1p avoids the catastrophic cancellation of subtracting two nearly
 * equal logarithms when N is large. */
double code(double N) {
return log1p((1.0 / N));
}
public static double code(double N) {
return Math.log1p((1.0 / N));
}
# Exact Herbie rewrite: log(N + 1) - log(N) = log((N + 1)/N) = log1p(1/N).
# math.log1p avoids the cancellation of subtracting nearly equal logs.
def code(N): return math.log1p((1.0 / N))
function code(N) return log1p(Float64(1.0 / N)) end
code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
herbie shell --seed 2024161
;; Problem statement "2log (problem 3.3.6)": compute log(N + 1) - log(N)
;; in binary64 for N in (1, 1e40). The :alt hint log1p(1/N) is the
;; mathematically equivalent, cancellation-free form.
(FPCore (N)
:name "2log (problem 3.3.6)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+40))
:alt
(! :herbie-platform default (log1p (/ 1 N)))
(- (log (+ N 1.0)) (log N)))