
(FPCore (x) :precision binary64 (- (log x) (log (log x))))
double code(double x) {
return log(x) - log(log(x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log(x) - log(log(x))
end function
public static double code(double x) {
return Math.log(x) - Math.log(Math.log(x));
}
def code(x): return math.log(x) - math.log(math.log(x))
function code(x) return Float64(log(x) - log(log(x))) end
function tmp = code(x) tmp = log(x) - log(log(x)); end
code[x_] := N[(N[Log[x], $MachinePrecision] - N[Log[N[Log[x], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\log x - \log \log x
\end{array}
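The name in the Reproduce section below ("Jmat.Real.lambertw, estimator") suggests this expression is the standard large-argument estimate of the Lambert W function, W(x) ≈ log x − log log x. As a minimal sketch (the helper name and test inputs are illustrative, not part of the report), the estimate can be checked against the defining relation W·e^W = x:

import math

def lambertw_estimate(x):
    # Large-argument estimate of the Lambert W function, matching the
    # report's initial program: log(x) - log(log(x)).
    return math.log(x) - math.log(math.log(x))

for x in (10.0, 1e3, 1e9, 1e100):
    w = lambertw_estimate(x)
    # Relative residual of the defining relation w * exp(w) = x;
    # it shrinks as x grows, as expected for an asymptotic estimate.
    residual = (w * math.exp(w) - x) / x
    print(f"x = {x:<10.3g} estimate = {w:10.6f} relative residual = {residual:+.3e}")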
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Initial program (99.6% accurate):
(FPCore (x) :precision binary64 (- (log x) (log (log x))))
double code(double x) {
return log(x) - log(log(x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log(x) - log(log(x))
end function
public static double code(double x) {
return Math.log(x) - Math.log(Math.log(x));
}
def code(x): return math.log(x) - math.log(math.log(x))
function code(x) return Float64(log(x) - log(log(x))) end
function tmp = code(x) tmp = log(x) - log(log(x)); end
code[x_] := N[(N[Log[x], $MachinePrecision] - N[Log[N[Log[x], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\log x - \log \log x
\end{array}
Alternative 1 (100.0% accurate):
(FPCore (x) :precision binary64 (log (/ x (log x))))
double code(double x) {
return log((x / log(x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((x / log(x)))
end function
public static double code(double x) {
return Math.log((x / Math.log(x)));
}
def code(x): return math.log((x / math.log(x)))
function code(x) return log(Float64(x / log(x))) end
function tmp = code(x) tmp = log((x / log(x))); end
code[x_] := N[Log[N[(x / N[Log[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\log \left(\frac{x}{\log x}\right)
\end{array}
Derivation:
Initial program  99.6%
lift-log.f64  99.6
unpow1  N/A
sqr-pow  N/A
lower-*.f64  N/A
lower-pow.f64  N/A
metadata-eval  N/A
lower-pow.f64  N/A
metadata-eval  99.6
Applied rewrites  99.6%
lift-log.f64  N/A
unpow1  N/A
metadata-eval  N/A
pow-prod-up  N/A
lift-pow.f64  N/A
lift-pow.f64  N/A
lift-*.f64  N/A
lift-log.f64  N/A
unpow1  N/A
metadata-eval  N/A
pow-prod-up  N/A
lift-pow.f64  N/A
lift-pow.f64  N/A
lift-*.f64  N/A
unpow-prod-down  N/A
Applied rewrites  100.0%
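The rewrite chain above amounts to the exact logarithm identity (valid for x > 1, where log x > 0), so the quotient form is mathematically equal to the initial program while avoiding the floating-point subtraction:

\begin{array}{l}
\log x - \log \log x = \log \left(\frac{x}{\log x}\right), \quad x > 1
\end{array}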
Alternative 2 (25.1% accurate):
(FPCore (x) :precision binary64 (log (* x (log x))))
double code(double x) {
return log((x * log(x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((x * log(x)))
end function
public static double code(double x) {
return Math.log((x * Math.log(x)));
}
def code(x): return math.log((x * math.log(x)))
function code(x) return log(Float64(x * log(x))) end
function tmp = code(x) tmp = log((x * log(x))); end
code[x_] := N[Log[N[(x * N[Log[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\log \left(x \cdot \log x\right)
\end{array}
Derivation:
Initial program  99.6%
lift-log.f64  N/A
diff-log  N/A
lower-log.f64  N/A
div-inv  N/A
inv-pow  N/A
pow-to-exp  N/A
lift-log.f64  N/A
*-commutative  N/A
neg-mul-1  N/A
neg-sub0  N/A
flip3--  N/A
metadata-eval  N/A
neg-sub0  N/A
distribute-frac-neg  N/A
Applied rewrites  25.1%
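Unlike the previous alternative, the product form is not equivalent to the initial program: expanding it flips the sign of the second term, so the two expressions agree only where log log x is close to zero (x near e). This is consistent with the much lower accuracy reported for this rewrite.

\begin{array}{l}
\log \left(x \cdot \log x\right) = \log x + \log \log x, \quad \text{whereas the initial program is } \log x - \log \log x
\end{array}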
Alternative 3 (0.0% accurate):
(FPCore (x) :precision binary64 (+ (log x) (/ 0.0 0.0)))
double code(double x) {
return log(x) + (0.0 / 0.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = log(x) + (0.0d0 / 0.0d0)
end function
public static double code(double x) {
return Math.log(x) + (0.0 / 0.0);
}
def code(x): return math.log(x) + (0.0 / 0.0)
function code(x) return Float64(log(x) + Float64(0.0 / 0.0)) end
function tmp = code(x) tmp = log(x) + (0.0 / 0.0); end
code[x_] := N[(N[Log[x], $MachinePrecision] + N[(0.0 / 0.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\log x + \frac{0}{0}
\end{array}
Derivation:
Initial program  99.6%
lift-log.f64  99.6
unpow1  N/A
sqr-pow  N/A
lower-*.f64  N/A
lower-pow.f64  N/A
metadata-eval  N/A
lower-pow.f64  N/A
metadata-eval  99.6
Applied rewrites  99.6%
lift-log.f64  N/A
lift-log.f64  N/A
unpow1  N/A
metadata-eval  N/A
pow-prod-up  N/A
lift-pow.f64  N/A
lift-pow.f64  N/A
lift-*.f64  N/A
lift-log.f64  N/A
unpow1  N/A
metadata-eval  N/A
pow-prod-up  N/A
lift-pow.f64  N/A
lift-pow.f64  N/A
lift-*.f64  N/A
unpow-prod-down  N/A
Applied rewrites  0.0%
Final simplification  0.0%
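This last alternative is degenerate: in IEEE 754 binary64 arithmetic, 0.0/0.0 evaluates to NaN, and NaN propagates through the addition, so the expression is NaN for every input, matching the 0.0% accuracy. Note that the Python translation above would actually raise ZeroDivisionError rather than produce NaN, since Python raises on float division by zero; a minimal sketch that builds the NaN explicitly behaves like the C/Fortran/Java translations:

import math

# 0.0 / 0.0 is NaN in IEEE 754; Python raises ZeroDivisionError instead,
# so construct the NaN directly to mirror the other translations.
nan = float("nan")

for x in (2.0, 10.0, 1e6):
    result = math.log(x) + nan   # NaN propagates through the addition
    print(x, result, math.isnan(result))   # always prints nan / True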
To reproduce, run the following command and enter the FPCore below at the prompt:
herbie shell --seed 2024214
(FPCore (x)
:name "Jmat.Real.lambertw, estimator"
:precision binary64
(- (log x) (log (log x))))