?

Average Accuracy: 99.6% → 100.0%
Time: 2.1s
Precision: binary64
Cost: 12992

?

\[\log x - \log \log x \]
\[\log \left(\frac{x}{\log x}\right) \]
;; Original expression: log(x) - log(log(x))
(FPCore (x) :precision binary64 (- (log x) (log (log x))))
;; Accuracy-improved rewrite: log(x / log(x))
(FPCore (x) :precision binary64 (log (/ x (log x))))
double code(double x) {
	return log(x) - log(log(x));
}
double code(double x) {
	return log((x / log(x)));
}
real(8) function code(x)
    ! Original expression: log(x) - log(log(x)); log(x) computed once.
    real(8), intent (in) :: x
    real(8) :: lx
    lx = log(x)
    code = lx - log(lx)
end function
real(8) function code(x)
    ! Accuracy-improved rewrite: log(x / log(x)).
    real(8), intent (in) :: x
    code = log(x / log(x))
end function
/** Original expression: log(x) - log(log(x)), with log(x) computed once. */
public static double code(double x) {
	final double logX = Math.log(x);
	return logX - Math.log(logX);
}
/** Accuracy-improved rewrite: log(x / log(x)). */
public static double code(double x) {
	final double ratio = x / Math.log(x);
	return Math.log(ratio);
}
def code(x):
	"""Original expression: log(x) - log(log(x)); log(x) computed once."""
	lx = math.log(x)
	return lx - math.log(lx)
def code(x):
	"""Accuracy-improved rewrite: log(x / log(x))."""
	ratio = x / math.log(x)
	return math.log(ratio)
function code(x)
	# Original expression: log(x) - log(log(x)); result rounded to Float64.
	lx = log(x)
	return Float64(lx - log(lx))
end
function code(x)
	# Accuracy-improved rewrite: log(x / log(x)); quotient rounded to Float64.
	ratio = Float64(x / log(x))
	return log(ratio)
end
function tmp = code(x)
	% Original expression: log(x) - log(log(x)); log(x) computed once.
	lx = log(x);
	tmp = lx - log(lx);
end
function tmp = code(x)
	% Accuracy-improved rewrite: log(x / log(x)).
	ratio = x / log(x);
	tmp = log(ratio);
end
(* Original expression: log(x) - log(log(x)); each N[..., $MachinePrecision] wrapper mirrors one binary64 rounding step of the FPCore source. *)
code[x_] := N[(N[Log[x], $MachinePrecision] - N[Log[N[Log[x], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
(* Accuracy-improved rewrite: log(x / log(x)); N[..., $MachinePrecision] wrappers mirror the binary64 rounding of the rewritten FPCore. *)
code[x_] := N[Log[N[(x / N[Log[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\log x - \log \log x
\log \left(\frac{x}{\log x}\right)

Error?

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation?

  1. Initial program 99.6%

    \[\log x - \log \log x \]
  2. Taylor expanded in x around inf 99.6%

    \[\leadsto \color{blue}{-1 \cdot \log \left(\frac{1}{x}\right) - \log \left(-1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
  3. Simplified 100.0%

    \[\leadsto \color{blue}{\log \left(\frac{x}{\log x}\right)} \]
    Proof

    [Start] 99.6

    \[ -1 \cdot \log \left(\frac{1}{x}\right) - \log \left(-1 \cdot \log \left(\frac{1}{x}\right)\right) \]

    log-prod [=>] 0.0

    \[ -1 \cdot \log \left(\frac{1}{x}\right) - \color{blue}{\left(\log -1 + \log \log \left(\frac{1}{x}\right)\right)} \]

    mul-1-neg [=>] 0.0

    \[ \color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} - \left(\log -1 + \log \log \left(\frac{1}{x}\right)\right) \]

    log-rec [=>] 0.0

    \[ \left(-\color{blue}{\left(-\log x\right)}\right) - \left(\log -1 + \log \log \left(\frac{1}{x}\right)\right) \]

    remove-double-neg [=>] 0.0

    \[ \color{blue}{\log x} - \left(\log -1 + \log \log \left(\frac{1}{x}\right)\right) \]

    log-prod [<=] 99.6

    \[ \log x - \color{blue}{\log \left(-1 \cdot \log \left(\frac{1}{x}\right)\right)} \]

    mul-1-neg [=>] 99.6

    \[ \log x - \log \color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} \]

    log-rec [=>] 99.6

    \[ \log x - \log \left(-\color{blue}{\left(-\log x\right)}\right) \]

    remove-double-neg [=>] 99.6

    \[ \log x - \log \color{blue}{\log x} \]

    log-div [<=] 100.0

    \[ \color{blue}{\log \left(\frac{x}{\log x}\right)} \]
  4. Final simplification 100.0%

    \[\leadsto \log \left(\frac{x}{\log x}\right) \]

Reproduce?

herbie shell --seed 2023126 
; Paste this FPCore into the shell started by the command above to reproduce the result.
(FPCore (x)
  :name "Jmat.Real.lambertw, estimator"
  :precision binary64
  (- (log x) (log (log x))))