math.log/2 on complex, real part

Percentage Accurate: 52.7% → 99.2%
Time: 26.7s
Alternatives: 5
Speedup: 2.7×

Specification

?
\[\begin{array}{l} \\ \frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \end{array} \]
(FPCore (re im base)
 :precision binary64
 (/
  (+ (* (log (sqrt (+ (* re re) (* im im)))) (log base)) (* (atan2 im re) 0.0))
  (+ (* (log base) (log base)) (* 0.0 0.0))))
// Real part of log(re + im*i) taken in base `base`:
// log(sqrt(re^2 + im^2)) * log(base) / log(base)^2 == log(|z|) / log(base).
// The zero-multiplied atan2 and 0.0*0.0 terms are kept verbatim from the
// FPCore specification above.
double code(double re, double im, double base) {
	return ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0)) / ((log(base) * log(base)) + (0.0 * 0.0));
}
! Real part of log(re + im*i) in base `base`; mirrors the FPCore spec,
! including the zero-multiplied atan2 term.
real(8) function code(re, im, base)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8), intent (in) :: base
    code = ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0d0)) / ((log(base) * log(base)) + (0.0d0 * 0.0d0))
end function
// Real part of log(re + im*i) in base `base`; direct rendering of the
// FPCore spec (zero-multiplied terms preserved verbatim).
public static double code(double re, double im, double base) {
	return ((Math.log(Math.sqrt(((re * re) + (im * im)))) * Math.log(base)) + (Math.atan2(im, re) * 0.0)) / ((Math.log(base) * Math.log(base)) + (0.0 * 0.0));
}
def code(re, im, base):
	# Real part of log(re + im*i) in base `base`, evaluated exactly in the
	# order given by the FPCore spec (zero-multiplied terms preserved).
	magnitude = math.sqrt((re * re) + (im * im))
	log_b = math.log(base)
	numerator = (math.log(magnitude) * log_b) + (math.atan2(im, re) * 0.0)
	denominator = (log_b * log_b) + (0.0 * 0.0)
	return numerator / denominator
# Real part of log(re + im*i) in base `base`. The Float64(...) wrappers force
# binary64 rounding after each operation; Julia's two-argument atan(im, re)
# is the atan2(im, re) of the other listings.
function code(re, im, base)
	return Float64(Float64(Float64(log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) * log(base)) + Float64(atan(im, re) * 0.0)) / Float64(Float64(log(base) * log(base)) + Float64(0.0 * 0.0)))
end
% Real part of log(re + im*i) in base `base`; mirrors the FPCore spec.
function tmp = code(re, im, base)
	tmp = ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0)) / ((log(base) * log(base)) + (0.0 * 0.0));
end
code[re_, im_, base_] := N[(N[(N[(N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] * N[Log[base], $MachinePrecision]), $MachinePrecision] + N[(N[ArcTan[im / re], $MachinePrecision] * 0.0), $MachinePrecision]), $MachinePrecision] / N[(N[(N[Log[base], $MachinePrecision] * N[Log[base], $MachinePrecision]), $MachinePrecision] + N[(0.0 * 0.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 5 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 52.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \end{array} \]
(FPCore (re im base)
 :precision binary64
 (/
  (+ (* (log (sqrt (+ (* re re) (* im im)))) (log base)) (* (atan2 im re) 0.0))
  (+ (* (log base) (log base)) (* 0.0 0.0))))
// Initial program (same listing as in the specification section):
// real part of log(re + im*i) in base `base` = log(|z|) / log(base).
double code(double re, double im, double base) {
	return ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0)) / ((log(base) * log(base)) + (0.0 * 0.0));
}
! Initial program: real part of log(re + im*i) in base `base`.
real(8) function code(re, im, base)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8), intent (in) :: base
    code = ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0d0)) / ((log(base) * log(base)) + (0.0d0 * 0.0d0))
end function
// Initial program: real part of log(re + im*i) in base `base`.
public static double code(double re, double im, double base) {
	return ((Math.log(Math.sqrt(((re * re) + (im * im)))) * Math.log(base)) + (Math.atan2(im, re) * 0.0)) / ((Math.log(base) * Math.log(base)) + (0.0 * 0.0));
}
# Initial program: real part of log(re + im*i) in base `base`,
# evaluated exactly as written in the FPCore spec.
def code(re, im, base):
	return ((math.log(math.sqrt(((re * re) + (im * im)))) * math.log(base)) + (math.atan2(im, re) * 0.0)) / ((math.log(base) * math.log(base)) + (0.0 * 0.0))
# Initial program; Float64(...) wrappers force binary64 rounding per step,
# and two-argument atan(im, re) is atan2(im, re).
function code(re, im, base)
	return Float64(Float64(Float64(log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) * log(base)) + Float64(atan(im, re) * 0.0)) / Float64(Float64(log(base) * log(base)) + Float64(0.0 * 0.0)))
end
% Initial program: real part of log(re + im*i) in base `base`.
function tmp = code(re, im, base)
	tmp = ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0)) / ((log(base) * log(base)) + (0.0 * 0.0));
end
code[re_, im_, base_] := N[(N[(N[(N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] * N[Log[base], $MachinePrecision]), $MachinePrecision] + N[(N[ArcTan[im / re], $MachinePrecision] * 0.0), $MachinePrecision]), $MachinePrecision] / N[(N[(N[Log[base], $MachinePrecision] * N[Log[base], $MachinePrecision]), $MachinePrecision] + N[(0.0 * 0.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0}
\end{array}

Alternative 1: 99.2% accurate, 0.8× speedup?

\[\begin{array}{l} im_m = \left|im\right| \\ \frac{\log \left(\mathsf{hypot}\left(e^{\log im\_m}, re\right)\right) \cdot \log base + 0}{{\log base}^{2}} \end{array} \]
im_m = (fabs.f64 im)
(FPCore (re im_m base)
 :precision binary64
 (/
  (+ (* (log (hypot (exp (log im_m)) re)) (log base)) 0.0)
  (pow (log base) 2.0)))
im_m = fabs(im);
// Alternative 1 (99.2% accurate): |z| computed as hypot(exp(log(im_m)), re),
// where im_m = fabs(im) is precomputed by the caller; the exp(log(.))
// round-trip comes from Herbie's derivation and is kept verbatim.
double code(double re, double im_m, double base) {
	return ((log(hypot(exp(log(im_m)), re)) * log(base)) + 0.0) / pow(log(base), 2.0);
}
im_m = Math.abs(im);
// Alternative 1: log(hypot(exp(log(im_m)), re)) * log(base) / log(base)^2,
// with im_m = Math.abs(im) precomputed by the caller.
public static double code(double re, double im_m, double base) {
	return ((Math.log(Math.hypot(Math.exp(Math.log(im_m)), re)) * Math.log(base)) + 0.0) / Math.pow(Math.log(base), 2.0);
}
im_m = math.fabs(im)
def code(re, im_m, base):
	# Alternative 1: magnitude via hypot; the exp(log(im_m)) round-trip is
	# kept verbatim from Herbie's derivation (im_m = |im| from preprocessing).
	log_b = math.log(base)
	magnitude = math.hypot(math.exp(math.log(im_m)), re)
	return ((math.log(magnitude) * log_b) + 0.0) / math.pow(log_b, 2.0)
im_m = abs(im)
# Alternative 1; `log(base) ^ 2.0` renders the spec's pow(log(base), 2).
function code(re, im_m, base)
	return Float64(Float64(Float64(log(hypot(exp(log(im_m)), re)) * log(base)) + 0.0) / (log(base) ^ 2.0))
end
im_m = abs(im);
% Alternative 1: log(hypot(exp(log(im_m)), re)) * log(base) / log(base)^2.
function tmp = code(re, im_m, base)
	tmp = ((log(hypot(exp(log(im_m)), re)) * log(base)) + 0.0) / (log(base) ^ 2.0);
end
im_m = N[Abs[im], $MachinePrecision]
code[re_, im$95$m_, base_] := N[(N[(N[(N[Log[N[Sqrt[N[Exp[N[Log[im$95$m], $MachinePrecision]], $MachinePrecision] ^ 2 + re ^ 2], $MachinePrecision]], $MachinePrecision] * N[Log[base], $MachinePrecision]), $MachinePrecision] + 0.0), $MachinePrecision] / N[Power[N[Log[base], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|

\\
\frac{\log \left(\mathsf{hypot}\left(e^{\log im\_m}, re\right)\right) \cdot \log base + 0}{{\log base}^{2}}
\end{array}
Derivation
  1. Initial program 54.6%

    \[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-sqrt.f64N/A

      \[\leadsto \frac{\log \color{blue}{\left(\sqrt{re \cdot re + im \cdot im}\right)} \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    2. lift-+.f64N/A

      \[\leadsto \frac{\log \left(\sqrt{\color{blue}{re \cdot re + im \cdot im}}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    3. +-commutativeN/A

      \[\leadsto \frac{\log \left(\sqrt{\color{blue}{im \cdot im + re \cdot re}}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    4. lift-*.f64N/A

      \[\leadsto \frac{\log \left(\sqrt{\color{blue}{im \cdot im} + re \cdot re}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    5. pow2N/A

      \[\leadsto \frac{\log \left(\sqrt{\color{blue}{{im}^{2}} + re \cdot re}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    6. pow-to-expN/A

      \[\leadsto \frac{\log \left(\sqrt{\color{blue}{e^{\log im \cdot 2}} + re \cdot re}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    7. exp-lft-sqrN/A

      \[\leadsto \frac{\log \left(\sqrt{\color{blue}{e^{\log im} \cdot e^{\log im}} + re \cdot re}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    8. lift-*.f64N/A

      \[\leadsto \frac{\log \left(\sqrt{e^{\log im} \cdot e^{\log im} + \color{blue}{re \cdot re}}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    9. lower-hypot.f64N/A

      \[\leadsto \frac{\log \color{blue}{\left(\mathsf{hypot}\left(e^{\log im}, re\right)\right)} \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    10. lower-exp.f64N/A

      \[\leadsto \frac{\log \left(\mathsf{hypot}\left(\color{blue}{e^{\log im}}, re\right)\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    11. lower-log.f6448.1

      \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\color{blue}{\log im}}, re\right)\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
  4. Applied rewrites48.1%

    \[\leadsto \frac{\log \color{blue}{\left(\mathsf{hypot}\left(e^{\log im}, re\right)\right)} \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
  5. Taylor expanded in im around 0

    \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + \color{blue}{0}}{\log base \cdot \log base + 0 \cdot 0} \]
  6. Step-by-step derivation
    1. Applied rewrites48.1%

      \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + \color{blue}{0}}{\log base \cdot \log base + 0 \cdot 0} \]
    2. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + 0}{\log base \cdot \log base + \color{blue}{0 \cdot 0}} \]
      2. metadata-eval48.1

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + 0}{\log base \cdot \log base + \color{blue}{0}} \]
    3. Applied rewrites48.1%

      \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + 0}{\log base \cdot \log base + \color{blue}{0}} \]
    4. Step-by-step derivation
      1. lift-+.f64N/A

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + 0}{\color{blue}{\log base \cdot \log base + 0}} \]
      2. +-rgt-identity48.1

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + 0}{\color{blue}{\log base \cdot \log base}} \]
      3. lift-*.f64N/A

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + 0}{\color{blue}{\log base \cdot \log base}} \]
      4. pow2N/A

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + 0}{\color{blue}{{\log base}^{2}}} \]
      5. lower-pow.f6448.1

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + 0}{\color{blue}{{\log base}^{2}}} \]
    5. Applied rewrites48.1%

      \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + 0}{\color{blue}{{\log base}^{2}}} \]
    6. Add Preprocessing

    Alternative 2: 99.2% accurate, 1.1× speedup?

    \[\begin{array}{l} im_m = \left|im\right| \\ \frac{0 + \log base \cdot \log \left(\mathsf{hypot}\left(im\_m, re\right)\right)}{{\log base}^{2}} \end{array} \]
    im_m = (fabs.f64 im)
    (FPCore (re im_m base)
     :precision binary64
     (/ (+ 0.0 (* (log base) (log (hypot im_m re)))) (pow (log base) 2.0)))
    im_m = fabs(im);
    // Alternative 2 (99.2% accurate): exp(log(.)) round-trip removed;
    // log(base) * log(hypot(im_m, re)) / log(base)^2, im_m = fabs(im).
    double code(double re, double im_m, double base) {
    	return (0.0 + (log(base) * log(hypot(im_m, re)))) / pow(log(base), 2.0);
    }
    
    im_m = Math.abs(im);
    // Alternative 2: log(base) * log(hypot(im_m, re)) / log(base)^2,
    // with im_m = Math.abs(im) precomputed by the caller.
    public static double code(double re, double im_m, double base) {
    	return (0.0 + (Math.log(base) * Math.log(Math.hypot(im_m, re)))) / Math.pow(Math.log(base), 2.0);
    }
    
    im_m = math.fabs(im)
    # Alternative 2: log(base) * log(hypot(im_m, re)) / log(base)^2,
    # with im_m = |im| precomputed by the caller.
    def code(re, im_m, base):
    	return (0.0 + (math.log(base) * math.log(math.hypot(im_m, re)))) / math.pow(math.log(base), 2.0)
    
    im_m = abs(im)
    # Alternative 2: log(base) * log(hypot(im_m, re)) / log(base)^2.
    function code(re, im_m, base)
    	return Float64(Float64(0.0 + Float64(log(base) * log(hypot(im_m, re)))) / (log(base) ^ 2.0))
    end
    
    im_m = abs(im);
    % Alternative 2: log(base) * log(hypot(im_m, re)) / log(base)^2.
    function tmp = code(re, im_m, base)
    	tmp = (0.0 + (log(base) * log(hypot(im_m, re)))) / (log(base) ^ 2.0);
    end
    
    im_m = N[Abs[im], $MachinePrecision]
    code[re_, im$95$m_, base_] := N[(N[(0.0 + N[(N[Log[base], $MachinePrecision] * N[Log[N[Sqrt[im$95$m ^ 2 + re ^ 2], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[N[Log[base], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    im_m = \left|im\right|
    
    \\
    \frac{0 + \log base \cdot \log \left(\mathsf{hypot}\left(im\_m, re\right)\right)}{{\log base}^{2}}
    \end{array}
    
    Derivation
    1. Initial program 54.6%

      \[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-sqrt.f64N/A

        \[\leadsto \frac{\log \color{blue}{\left(\sqrt{re \cdot re + im \cdot im}\right)} \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      2. lift-+.f64N/A

        \[\leadsto \frac{\log \left(\sqrt{\color{blue}{re \cdot re + im \cdot im}}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      3. +-commutativeN/A

        \[\leadsto \frac{\log \left(\sqrt{\color{blue}{im \cdot im + re \cdot re}}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      4. lift-*.f64N/A

        \[\leadsto \frac{\log \left(\sqrt{\color{blue}{im \cdot im} + re \cdot re}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      5. pow2N/A

        \[\leadsto \frac{\log \left(\sqrt{\color{blue}{{im}^{2}} + re \cdot re}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      6. pow-to-expN/A

        \[\leadsto \frac{\log \left(\sqrt{\color{blue}{e^{\log im \cdot 2}} + re \cdot re}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      7. exp-lft-sqrN/A

        \[\leadsto \frac{\log \left(\sqrt{\color{blue}{e^{\log im} \cdot e^{\log im}} + re \cdot re}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      8. lift-*.f64N/A

        \[\leadsto \frac{\log \left(\sqrt{e^{\log im} \cdot e^{\log im} + \color{blue}{re \cdot re}}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      9. lower-hypot.f64N/A

        \[\leadsto \frac{\log \color{blue}{\left(\mathsf{hypot}\left(e^{\log im}, re\right)\right)} \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      10. lower-exp.f64N/A

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(\color{blue}{e^{\log im}}, re\right)\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      11. lower-log.f6448.1

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\color{blue}{\log im}}, re\right)\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    4. Applied rewrites48.1%

      \[\leadsto \frac{\log \color{blue}{\left(\mathsf{hypot}\left(e^{\log im}, re\right)\right)} \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
    5. Taylor expanded in im around 0

      \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + \color{blue}{0}}{\log base \cdot \log base + 0 \cdot 0} \]
    6. Step-by-step derivation
      1. Applied rewrites48.1%

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\log im}, re\right)\right) \cdot \log base + \color{blue}{0}}{\log base \cdot \log base + 0 \cdot 0} \]
      2. Step-by-step derivation
        1. lift-exp.f64N/A

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(\color{blue}{e^{\log im}}, re\right)\right) \cdot \log base + 0}{\log base \cdot \log base + 0 \cdot 0} \]
        2. lift-log.f64N/A

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(e^{\color{blue}{\log im}}, re\right)\right) \cdot \log base + 0}{\log base \cdot \log base + 0 \cdot 0} \]
        3. rem-exp-log99.2

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(\color{blue}{im}, re\right)\right) \cdot \log base + 0}{\log base \cdot \log base + 0 \cdot 0} \]
      3. Applied rewrites99.2%

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(\color{blue}{im}, re\right)\right) \cdot \log base + 0}{\log base \cdot \log base + 0 \cdot 0} \]
      4. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(im, re\right)\right) \cdot \log base + 0}{\color{blue}{\log base \cdot \log base + 0 \cdot 0}} \]
        2. lift-*.f64N/A

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(im, re\right)\right) \cdot \log base + 0}{\log base \cdot \log base + \color{blue}{0 \cdot 0}} \]
        3. metadata-evalN/A

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(im, re\right)\right) \cdot \log base + 0}{\log base \cdot \log base + \color{blue}{0}} \]
        4. +-rgt-identity99.2

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(im, re\right)\right) \cdot \log base + 0}{\color{blue}{\log base \cdot \log base}} \]
        5. lift-*.f64N/A

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(im, re\right)\right) \cdot \log base + 0}{\color{blue}{\log base \cdot \log base}} \]
        6. pow2N/A

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(im, re\right)\right) \cdot \log base + 0}{\color{blue}{{\log base}^{2}}} \]
        7. lower-pow.f6499.2

          \[\leadsto \frac{\log \left(\mathsf{hypot}\left(im, re\right)\right) \cdot \log base + 0}{\color{blue}{{\log base}^{2}}} \]
      5. Applied rewrites99.2%

        \[\leadsto \frac{\log \left(\mathsf{hypot}\left(im, re\right)\right) \cdot \log base + 0}{\color{blue}{{\log base}^{2}}} \]
      6. Final simplification99.2%

        \[\leadsto \frac{0 + \log base \cdot \log \left(\mathsf{hypot}\left(im, re\right)\right)}{{\log base}^{2}} \]
      7. Add Preprocessing

      Alternative 3: 53.9% accurate, 2.4× speedup?

      \[\begin{array}{l} im_m = \left|im\right| \\ \frac{\log im\_m}{\log \left(\frac{1}{\frac{1}{base}}\right)} \end{array} \]
      im_m = (fabs.f64 im)
      (FPCore (re im_m base)
       :precision binary64
       (/ (log im_m) (log (/ 1.0 (/ 1.0 base)))))
      im_m = fabs(im);
      // Alternative 3 (53.9% accurate): Taylor expansion in re around 0 —
      // `re` is unused. The 1/(1/base) double reciprocal is Herbie's
      // literal rewrite, kept verbatim.
      double code(double re, double im_m, double base) {
      	return log(im_m) / log((1.0 / (1.0 / base)));
      }
      
      im_m = abs(im)
      ! Alternative 3: log(im_m) / log(1/(1/base)); `re` is unused.
      real(8) function code(re, im_m, base)
          real(8), intent (in) :: re
          real(8), intent (in) :: im_m
          real(8), intent (in) :: base
          code = log(im_m) / log((1.0d0 / (1.0d0 / base)))
      end function
      
      im_m = Math.abs(im);
      // Alternative 3: log(im_m) / log(1/(1/base)); `re` is unused.
      public static double code(double re, double im_m, double base) {
      	return Math.log(im_m) / Math.log((1.0 / (1.0 / base)));
      }
      
      im_m = math.fabs(im)
      # Alternative 3: log(im_m) / log(1/(1/base)); `re` is unused.
      def code(re, im_m, base):
      	return math.log(im_m) / math.log((1.0 / (1.0 / base)))
      
      im_m = abs(im)
      # Alternative 3: log(im_m) / log(1/(1/base)); `re` is unused.
      function code(re, im_m, base)
      	return Float64(log(im_m) / log(Float64(1.0 / Float64(1.0 / base))))
      end
      
      im_m = abs(im);
      % Alternative 3: log(im_m) / log(1/(1/base)); `re` is unused.
      function tmp = code(re, im_m, base)
      	tmp = log(im_m) / log((1.0 / (1.0 / base)));
      end
      
      im_m = N[Abs[im], $MachinePrecision]
      code[re_, im$95$m_, base_] := N[(N[Log[im$95$m], $MachinePrecision] / N[Log[N[(1.0 / N[(1.0 / base), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
      
      \begin{array}{l}
      im_m = \left|im\right|
      
      \\
      \frac{\log im\_m}{\log \left(\frac{1}{\frac{1}{base}}\right)}
      \end{array}
      
      Derivation
      1. Initial program 54.6%

        \[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
      2. Add Preprocessing
      3. Taylor expanded in re around 0

        \[\leadsto \color{blue}{\frac{\log im}{\log base}} \]
      4. Step-by-step derivation
        1. lower-/.f64N/A

          \[\leadsto \color{blue}{\frac{\log im}{\log base}} \]
        2. lower-log.f64N/A

          \[\leadsto \frac{\color{blue}{\log im}}{\log base} \]
        3. lower-log.f6429.1

          \[\leadsto \frac{\log im}{\color{blue}{\log base}} \]
      5. Applied rewrites29.1%

        \[\leadsto \color{blue}{\frac{\log im}{\log base}} \]
      6. Step-by-step derivation
        1. Applied rewrites29.1%

          \[\leadsto \frac{\log im}{\log \left(\frac{1}{\frac{1}{base}}\right)} \]
        2. Add Preprocessing

        Alternative 4: 53.9% accurate, 2.7× speedup?

        \[\begin{array}{l} im_m = \left|im\right| \\ \frac{\log im\_m}{\log base} \end{array} \]
        im_m = (fabs.f64 im)
        (FPCore (re im_m base) :precision binary64 (/ (log im_m) (log base)))
        im_m = fabs(im);
        // Alternative 4 (53.9% accurate): the change-of-base formula
        // log(im_m) / log(base); `re` is unused.
        double code(double re, double im_m, double base) {
        	return log(im_m) / log(base);
        }
        
        im_m = abs(im)
        ! Alternative 4: log(im_m) / log(base); `re` is unused.
        real(8) function code(re, im_m, base)
            real(8), intent (in) :: re
            real(8), intent (in) :: im_m
            real(8), intent (in) :: base
            code = log(im_m) / log(base)
        end function
        
        im_m = Math.abs(im);
        // Alternative 4: log(im_m) / log(base); `re` is unused.
        public static double code(double re, double im_m, double base) {
        	return Math.log(im_m) / Math.log(base);
        }
        
        im_m = math.fabs(im)
        # Alternative 4: log(im_m) / log(base); `re` is unused.
        def code(re, im_m, base):
        	return math.log(im_m) / math.log(base)
        
        im_m = abs(im)
        # Alternative 4: log(im_m) / log(base); `re` is unused.
        function code(re, im_m, base)
        	return Float64(log(im_m) / log(base))
        end
        
        im_m = abs(im);
        % Alternative 4: log(im_m) / log(base); `re` is unused.
        function tmp = code(re, im_m, base)
        	tmp = log(im_m) / log(base);
        end
        
        im_m = N[Abs[im], $MachinePrecision]
        code[re_, im$95$m_, base_] := N[(N[Log[im$95$m], $MachinePrecision] / N[Log[base], $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        im_m = \left|im\right|
        
        \\
        \frac{\log im\_m}{\log base}
        \end{array}
        
        Derivation
        1. Initial program 54.6%

          \[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
        2. Add Preprocessing
        3. Taylor expanded in re around 0

          \[\leadsto \color{blue}{\frac{\log im}{\log base}} \]
        4. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \color{blue}{\frac{\log im}{\log base}} \]
          2. lower-log.f64N/A

            \[\leadsto \frac{\color{blue}{\log im}}{\log base} \]
          3. lower-log.f6429.1

            \[\leadsto \frac{\log im}{\color{blue}{\log base}} \]
        5. Applied rewrites29.1%

          \[\leadsto \color{blue}{\frac{\log im}{\log base}} \]
        6. Add Preprocessing

        Alternative 5: 3.1% accurate, 562.0× speedup?

        \[\begin{array}{l} im_m = \left|im\right| \\ 0 \end{array} \]
        im_m = (fabs.f64 im)
        (FPCore (re im_m base) :precision binary64 0.0)
        im_m = fabs(im);
        // Alternative 5 (3.1% accurate): constant 0.0; all parameters unused.
        double code(double re, double im_m, double base) {
        	return 0.0;
        }
        
        im_m = abs(im)
        ! Alternative 5: constant 0.0; all parameters unused.
        real(8) function code(re, im_m, base)
            real(8), intent (in) :: re
            real(8), intent (in) :: im_m
            real(8), intent (in) :: base
            code = 0.0d0
        end function
        
        im_m = Math.abs(im);
        // Alternative 5: constant 0.0; all parameters unused.
        public static double code(double re, double im_m, double base) {
        	return 0.0;
        }
        
        im_m = math.fabs(im)
        # Alternative 5: constant 0.0; all parameters unused.
        def code(re, im_m, base):
        	return 0.0
        
        im_m = abs(im)
        # Alternative 5: constant 0.0; all parameters unused.
        function code(re, im_m, base)
        	return 0.0
        end
        
        im_m = abs(im);
        % Alternative 5: constant 0.0; all parameters unused.
        function tmp = code(re, im_m, base)
        	tmp = 0.0;
        end
        
        im_m = N[Abs[im], $MachinePrecision]
        code[re_, im$95$m_, base_] := 0.0
        
        \begin{array}{l}
        im_m = \left|im\right|
        
        \\
        0
        \end{array}
        
        Derivation
        1. Initial program 54.6%

          \[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
        2. Add Preprocessing
        3. Taylor expanded in re around 0

          \[\leadsto \color{blue}{\frac{\log im}{\log base}} \]
        4. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \color{blue}{\frac{\log im}{\log base}} \]
          2. lower-log.f64N/A

            \[\leadsto \frac{\color{blue}{\log im}}{\log base} \]
          3. lower-log.f6429.1

            \[\leadsto \frac{\log im}{\color{blue}{\log base}} \]
        5. Applied rewrites29.1%

          \[\leadsto \color{blue}{\frac{\log im}{\log base}} \]
        6. Applied rewrites3.1%

          \[\leadsto \color{blue}{0} \]
        7. Add Preprocessing

        Reproduce

        ?
        herbie shell --seed 2024222 
        (FPCore (re im base)
          :name "math.log/2 on complex, real part"
          :precision binary64
          (/ (+ (* (log (sqrt (+ (* re re) (* im im)))) (log base)) (* (atan2 im re) 0.0)) (+ (* (log base) (log base)) (* 0.0 0.0))))