
FPCore:
(FPCore (x) :precision binary64 (- (log x) (log (log x))))

C:
double code(double x) {
    return log(x) - log(log(x));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = log(x) - log(log(x))
end function

Java:
public static double code(double x) {
    return Math.log(x) - Math.log(Math.log(x));
}

Python:
def code(x): return math.log(x) - math.log(math.log(x))

Julia:
function code(x) return Float64(log(x) - log(log(x))) end

MATLAB:
function tmp = code(x) tmp = log(x) - log(log(x)); end

Wolfram:
code[x_] := N[(N[Log[x], $MachinePrecision] - N[Log[N[Log[x], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]

TeX:
\begin{array}{l}
\log x - \log \log x
\end{array}
Sampling outcomes in binary64 precision:
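The accuracy percentages in this report are estimated over such sampled inputs by comparing the double-precision program against a high-precision reference. The Python sketch below illustrates that idea only; it is not Herbie's exact procedure. The sampling range, sample count, and relative-error measure are assumptions made for this sketch, and it requires the third-party mpmath package.

import math
import random
from mpmath import mp, mpf

mp.prec = 200  # high-precision reference arithmetic

def approx(x):
    # The initial program, evaluated entirely in binary64.
    return math.log(x) - math.log(math.log(x))

def reference(x):
    # The same expression evaluated with 200-bit mpmath arithmetic.
    xm = mpf(x)
    return mp.log(xm) - mp.log(mp.log(xm))

random.seed(0)
rel_errors = []
for _ in range(1000):
    # Draw x > 1 spread across many orders of magnitude, so that log(x) > 0.
    x = math.exp(random.uniform(1e-6, 700.0))
    exact = reference(x)
    rel_errors.append(float(abs(mpf(approx(x)) - exact) / abs(exact)))

print("mean relative error:", sum(rel_errors) / len(rel_errors))
print("max relative error: ", max(rel_errors))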
Herbie found 2 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Initial program:

FPCore:
(FPCore (x) :precision binary64 (- (log x) (log (log x))))

C:
double code(double x) {
    return log(x) - log(log(x));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = log(x) - log(log(x))
end function

Java:
public static double code(double x) {
    return Math.log(x) - Math.log(Math.log(x));
}

Python:
def code(x): return math.log(x) - math.log(math.log(x))

Julia:
function code(x) return Float64(log(x) - log(log(x))) end

MATLAB:
function tmp = code(x) tmp = log(x) - log(log(x)); end

Wolfram:
code[x_] := N[(N[Log[x], $MachinePrecision] - N[Log[N[Log[x], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]

TeX:
\begin{array}{l}
\log x - \log \log x
\end{array}
Alternative 1:

FPCore:
(FPCore (x) :precision binary64 (log (* x (/ 1.0 (log x)))))

C:
double code(double x) {
    return log((x * (1.0 / log(x))));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x * (1.0d0 / log(x))))
end function

Java:
public static double code(double x) {
    return Math.log((x * (1.0 / Math.log(x))));
}

Python:
def code(x): return math.log((x * (1.0 / math.log(x))))

Julia:
function code(x) return log(Float64(x * Float64(1.0 / log(x)))) end

MATLAB:
function tmp = code(x) tmp = log((x * (1.0 / log(x)))); end

Wolfram:
code[x_] := N[Log[N[(x * N[(1.0 / N[Log[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]

TeX:
\begin{array}{l}
\log \left(x \cdot \frac{1}{\log x}\right)
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 99.6% |
| add-cube-cbrt | 98.1% |
| *-un-lft-identity | 98.1% |
| prod-diff | 98.1% |
| *-commutative | 98.1% |
| *-un-lft-identity | 98.1% |
| fma-neg | 98.1% |
| add-cube-cbrt | 99.6% |
| diff-log | 100.0% |
| *-commutative | 100.0% |
| *-un-lft-identity | 100.0% |
| Applied egg-rr | 100.0% |
| fma-undefine | 100.0% |
| *-rgt-identity | 100.0% |
| +-commutative | 100.0% |
| unsub-neg | 100.0% |
| +-inverses | 100.0% |
| +-rgt-identity | 100.0% |
| Simplified | 100.0% |
| clear-num | 100.0% |
| associate-/r/ | 100.0% |
| Applied egg-rr | 100.0% |
| Final simplification | 100.0% |
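The pivotal step in this derivation is the diff-log rewrite, which merges the difference of logarithms into a single logarithm of a quotient; the surrounding identity and commutativity rules only reshape that quotient. In exact arithmetic the rewritten forms equal the original whenever x > 1 (so that log x > 0); that domain condition is my reading of the expression, not something stated in the report.

\begin{array}{l}
\log x - \log \log x \;=\; \log\!\left(\frac{x}{\log x}\right) \;=\; \log\!\left(x \cdot \frac{1}{\log x}\right), \qquad x > 1
\end{array}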
Alternative 2:

FPCore:
(FPCore (x) :precision binary64 (log (/ x (log x))))

C:
double code(double x) {
    return log((x / log(x)));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x / log(x)))
end function

Java:
public static double code(double x) {
    return Math.log((x / Math.log(x)));
}

Python:
def code(x): return math.log((x / math.log(x)))

Julia:
function code(x) return log(Float64(x / log(x))) end

MATLAB:
function tmp = code(x) tmp = log((x / log(x))); end

Wolfram:
code[x_] := N[Log[N[(x / N[Log[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]

TeX:
\begin{array}{l}
\log \left(\frac{x}{\log x}\right)
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 99.6% |
| add-cube-cbrt | 98.1% |
| *-un-lft-identity | 98.1% |
| prod-diff | 98.1% |
| *-commutative | 98.1% |
| *-un-lft-identity | 98.1% |
| fma-neg | 98.1% |
| add-cube-cbrt | 99.6% |
| diff-log | 100.0% |
| *-commutative | 100.0% |
| *-un-lft-identity | 100.0% |
| Applied egg-rr | 100.0% |
| fma-undefine | 100.0% |
| *-rgt-identity | 100.0% |
| +-commutative | 100.0% |
| unsub-neg | 100.0% |
| +-inverses | 100.0% |
| +-rgt-identity | 100.0% |
| Simplified | 100.0% |
| Final simplification | 100.0% |
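As a quick sanity check (not part of the Herbie report), the Python sketch below evaluates the original program and both alternatives at a few arbitrary inputs. The test points are assumptions chosen for illustration only; all three forms should agree up to double-precision rounding.

import math

def original(x):
    return math.log(x) - math.log(math.log(x))

def alternative_1(x):
    return math.log(x * (1.0 / math.log(x)))

def alternative_2(x):
    return math.log(x / math.log(x))

for x in (1.0 + 1e-9, 2.0, 10.0, 1e10, 1e300):
    print(f"x={x:.3e}  original={original(x):.17g}  "
          f"alt1={alternative_1(x):.17g}  alt2={alternative_2(x):.17g}")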
Reproduce:

herbie shell --seed 2024072
(FPCore (x)
  :name "Jmat.Real.lambertw, estimator"
  :precision binary64
  (- (log x) (log (log x))))
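To regenerate these results, start the shell with the seed shown above and paste the FPCore expression at the prompt; assuming a compatible Herbie version, the shell should read the FPCore from standard input and print the rewritten program.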