math.log/1 on complex, real part

Percentage Accurate: 52.2% → 99.7%
Time: 3.8s
Alternatives: 2
Speedup: 1.2×

Specification

\[\begin{array}{l} \\ \log \left(\sqrt{re \cdot re + im \cdot im}\right) \end{array} \]
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
double code(double re, double im) {
	return log(sqrt(((re * re) + (im * im))));
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = log(sqrt(((re * re) + (im * im))))
end function
public static double code(double re, double im) {
	return Math.log(Math.sqrt(((re * re) + (im * im))));
}
def code(re, im):
	return math.log(math.sqrt(((re * re) + (im * im))))
function code(re, im)
	return log(sqrt(Float64(Float64(re * re) + Float64(im * im))))
end
function tmp = code(re, im)
	tmp = log(sqrt(((re * re) + (im * im))));
end
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
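
Why the original program is inaccurate on a large share of sampled inputs: squaring re and im can overflow (or underflow) even when the final logarithm is a perfectly representable double. The snippet below is our own illustration, not part of the Herbie report; it uses math.hypot only as a well-behaved reference value, not as Herbie's suggested fix.

import math

# For large inputs, re*re + im*im overflows to infinity even though the
# true result log(|re + i*im|) is a small, representable number.
re = im = 1e200
naive = math.log(math.sqrt(re * re + im * im))  # inf: re*re already overflowed
reference = math.log(math.hypot(re, im))        # ~460.8636, the correct value
print(naive, reference)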

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable; the variable is chosen in the plot title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line shows the average and the dots show individual samples.

Accuracy vs Speed

Herbie found 2 alternatives:

Alternative      Accuracy   Speedup
Alternative 1    99.7%      1.0×
Alternative 2    99.3%      1.2×
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 52.2% accurate, 1.0× speedup

\[\begin{array}{l} \\ \log \left(\sqrt{re \cdot re + im \cdot im}\right) \end{array} \]
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
double code(double re, double im) {
	return log(sqrt(((re * re) + (im * im))));
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = log(sqrt(((re * re) + (im * im))))
end function
public static double code(double re, double im) {
	return Math.log(Math.sqrt(((re * re) + (im * im))));
}
def code(re, im):
	return math.log(math.sqrt(((re * re) + (im * im))))
function code(re, im)
	return log(sqrt(Float64(Float64(re * re) + Float64(im * im))))
end
function tmp = code(re, im)
	tmp = log(sqrt(((re * re) + (im * im))));
end
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}

Alternative 1: 99.7% accurate, 1.0× speedup

\[\begin{array}{l} im_m = \left|im\right| \\ re_m = \left|re\right| \\ [re_m, im_m] = \mathsf{sort}([re_m, im_m])\\ \\ \log \left(\mathsf{fma}\left(re_m, 0.5 \cdot \frac{re_m}{im_m}, im_m\right)\right) \end{array} \]
im_m = (fabs.f64 im)
re_m = (fabs.f64 re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
(FPCore (re_m im_m)
 :precision binary64
 (log (fma re_m (* 0.5 (/ re_m im_m)) im_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
double code(double re_m, double im_m) {
	return log(fma(re_m, (0.5 * (re_m / im_m)), im_m));
}
im_m = abs(im)
re_m = abs(re)
re_m, im_m = sort([re_m, im_m])
function code(re_m, im_m)
	return log(fma(re_m, Float64(0.5 * Float64(re_m / im_m)), im_m))
end
im_m = N[Abs[im], $MachinePrecision]
re_m = N[Abs[re], $MachinePrecision]
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
code[re$95$m_, im$95$m_] := N[Log[N[(re$95$m * N[(0.5 * N[(re$95$m / im$95$m), $MachinePrecision]), $MachinePrecision] + im$95$m), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\log \left(\mathsf{fma}\left(re_m, 0.5 \cdot \frac{re_m}{im_m}, im_m\right)\right)
\end{array}
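
For concreteness, here is one way to combine the stated preprocessing with the Alternative 1 body in Python. This is a sketch of ours, not Herbie output: the wrapper name alt1 is made up, math.fma requires Python 3.13 or newer, and the code assumes the larger magnitude is nonzero.

import math

def alt1(re, im):
    # Preprocessing from the report: take absolute values and sort so
    # that re_m <= im_m before evaluating the kernel.
    re_m, im_m = sorted((abs(re), abs(im)))
    # Alternative 1 body: log(fma(re_m, 0.5 * (re_m / im_m), im_m)).
    # math.fma exists from Python 3.13; on older versions the unfused
    # re_m * (0.5 * (re_m / im_m)) + im_m differs only by one extra rounding.
    return math.log(math.fma(re_m, 0.5 * (re_m / im_m), im_m))
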
Derivation
  1. Initial program 50.4%

    \[\log \left(\sqrt{re \cdot re + im \cdot im}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in re around 0 (see the expansion sketch after this derivation)

    \[\leadsto \log \color{blue}{\left(im + \frac{1}{2} \cdot \frac{{re}^{2}}{im}\right)} \]
  4. Step-by-step derivation
    1. +-commutative N/A

      \[\leadsto \log \color{blue}{\left(\frac{1}{2} \cdot \frac{{re}^{2}}{im} + im\right)} \]
    2. *-lft-identity N/A

      \[\leadsto \log \left(\frac{1}{2} \cdot \frac{\color{blue}{1 \cdot {re}^{2}}}{im} + im\right) \]
    3. associate-*l/ N/A

      \[\leadsto \log \left(\frac{1}{2} \cdot \color{blue}{\left(\frac{1}{im} \cdot {re}^{2}\right)} + im\right) \]
    4. associate-*l* N/A

      \[\leadsto \log \left(\color{blue}{\left(\frac{1}{2} \cdot \frac{1}{im}\right) \cdot {re}^{2}} + im\right) \]
    5. unpow2 N/A

      \[\leadsto \log \left(\left(\frac{1}{2} \cdot \frac{1}{im}\right) \cdot \color{blue}{\left(re \cdot re\right)} + im\right) \]
    6. associate-*r* N/A

      \[\leadsto \log \left(\color{blue}{\left(\left(\frac{1}{2} \cdot \frac{1}{im}\right) \cdot re\right) \cdot re} + im\right) \]
    7. *-commutative N/A

      \[\leadsto \log \left(\color{blue}{re \cdot \left(\left(\frac{1}{2} \cdot \frac{1}{im}\right) \cdot re\right)} + im\right) \]
    8. lower-fma.f64 N/A

      \[\leadsto \log \color{blue}{\left(\mathsf{fma}\left(re, \left(\frac{1}{2} \cdot \frac{1}{im}\right) \cdot re, im\right)\right)} \]
    9. associate-*l* N/A

      \[\leadsto \log \left(\mathsf{fma}\left(re, \color{blue}{\frac{1}{2} \cdot \left(\frac{1}{im} \cdot re\right)}, im\right)\right) \]
    10. /-rgt-identity N/A

      \[\leadsto \log \left(\mathsf{fma}\left(re, \frac{1}{2} \cdot \left(\frac{1}{im} \cdot \color{blue}{\frac{re}{1}}\right), im\right)\right) \]
    11. times-frac N/A

      \[\leadsto \log \left(\mathsf{fma}\left(re, \frac{1}{2} \cdot \color{blue}{\frac{1 \cdot re}{im \cdot 1}}, im\right)\right) \]
    12. *-lft-identity N/A

      \[\leadsto \log \left(\mathsf{fma}\left(re, \frac{1}{2} \cdot \frac{\color{blue}{re}}{im \cdot 1}, im\right)\right) \]
    13. *-rgt-identity N/A

      \[\leadsto \log \left(\mathsf{fma}\left(re, \frac{1}{2} \cdot \frac{re}{\color{blue}{im}}, im\right)\right) \]
    14. lower-*.f64 N/A

      \[\leadsto \log \left(\mathsf{fma}\left(re, \color{blue}{\frac{1}{2} \cdot \frac{re}{im}}, im\right)\right) \]
    15. lower-/.f64 99.8

      \[\leadsto \log \left(\mathsf{fma}\left(re, 0.5 \cdot \color{blue}{\frac{re}{im}}, im\right)\right) \]
  5. Applied rewrites 99.8%

    \[\leadsto \log \color{blue}{\left(\mathsf{fma}\left(re, 0.5 \cdot \frac{re}{im}, im\right)\right)} \]
  6. Add Preprocessing
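
For reference, the Taylor step above corresponds to the standard binomial expansion of the square root, valid when \(|re| \ll |im|\) (a sketch of ours, not part of Herbie's derivation):

\[\sqrt{re^{2} + im^{2}} = \left|im\right| \cdot \sqrt{1 + \left(\frac{re}{im}\right)^{2}} \approx \left|im\right| \cdot \left(1 + \frac{1}{2} \cdot \left(\frac{re}{im}\right)^{2}\right) = \left|im\right| + \frac{1}{2} \cdot \frac{re^{2}}{\left|im\right|} \]

Taking the logarithm of the right-hand side gives the expression in step 3, and the preprocessing in the final program enforces the small-over-large ordering that keeps the expansion valid.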

Alternative 2: 99.3% accurate, 1.2× speedup

\[\begin{array}{l} im_m = \left|im\right| \\ re_m = \left|re\right| \\ [re_m, im_m] = \mathsf{sort}([re_m, im_m])\\ \\ \log im_m \end{array} \]
im_m = (fabs.f64 im)
re_m = (fabs.f64 re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
(FPCore (re_m im_m) :precision binary64 (log im_m))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
double code(double re_m, double im_m) {
	return log(im_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
    real(8), intent (in) :: re_m
    real(8), intent (in) :: im_m
    code = log(im_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
	return Math.log(im_m);
}
im_m = math.fabs(im)
re_m = math.fabs(re)
re_m, im_m = sorted([re_m, im_m])
def code(re_m, im_m):
	return math.log(im_m)
im_m = abs(im)
re_m = abs(re)
re_m, im_m = sort([re_m, im_m])
function code(re_m, im_m)
	return log(im_m)
end
im_m = abs(im);
re_m = abs(re);
s = sort([re_m, im_m]); re_m = s(1); im_m = s(2);
function tmp = code(re_m, im_m)
	tmp = log(im_m);
end
im_m = N[Abs[im], $MachinePrecision]
re_m = N[Abs[re], $MachinePrecision]
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
code[re$95$m_, im$95$m_] := N[Log[im$95$m], $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\log im_m
\end{array}
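
Alternative 2 drops the smaller magnitude entirely and returns the logarithm of the larger one. A short bound (ours, not from the report) shows why this stays accurate whenever \(re_m \ll im_m\):

\[\log \sqrt{re^{2} + im^{2}} = \log im_m + \frac{1}{2} \cdot \log \left(1 + \left(\frac{re_m}{im_m}\right)^{2}\right) \le \log im_m + \frac{1}{2} \cdot \left(\frac{re_m}{im_m}\right)^{2} \]

using \(\log(1 + x) \le x\), so the absolute error of \(\log im_m\) is at most \(\frac{1}{2} \cdot (re_m / im_m)^{2}\).
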
Derivation
  1. Initial program 52.2%

    \[\log \left(\sqrt{re \cdot re + im \cdot im}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in re around 0

    \[\leadsto \color{blue}{\log im} \]
  4. Step-by-step derivation
    1. lower-log.f64 99.3

      \[\leadsto \color{blue}{\log im} \]
  5. Applied rewrites 99.3%

    \[\leadsto \color{blue}{\log im} \]
  6. Add Preprocessing
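
Both Taylor-based alternatives are least accurate when the two magnitudes are comparable; the reported percentages are averages over Herbie's sampled inputs. A quick spot-check at re = im (our code, not part of the report), using math.hypot as the reference value:

import math

re = im = 1.0
exact = math.log(math.hypot(re, im))    # 0.3466 = log(sqrt(2))
alt2 = math.log(max(abs(re), abs(im)))  # 0.0: off by ~0.35 at equal magnitudes
alt1 = math.log(abs(re) * (0.5 * (abs(re) / abs(im))) + abs(im))  # 0.4055, unfused form of Alternative 1
print(exact, alt1, alt2)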

Reproduce

herbie shell --seed 2024230 
(FPCore (re im)
  :name "math.log/1 on complex, real part"
  :precision binary64
  (log (sqrt (+ (* re re) (* im im)))))