math.log/2 on complex, real part

Percentage Accurate: 25.5% → 49.9%
Time: 9.2s
Alternatives: 4
Speedup: 3.4×

Specification

\[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
(FPCore (re im base)
  :precision binary64
  (/
   (+
    (* (log (sqrt (+ (* re re) (* im im)))) (log base))
    (* (atan2 im re) 0.0))
   (+ (* (log base) (log base)) (* 0.0 0.0))))
#include <math.h>

double code(double re, double im, double base) {
	return ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0)) / ((log(base) * log(base)) + (0.0 * 0.0));
}
real(8) function code(re, im, base)
use fmin_fmax_functions
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8), intent (in) :: base
    code = ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0d0)) / ((log(base) * log(base)) + (0.0d0 * 0.0d0))
end function
public static double code(double re, double im, double base) {
	return ((Math.log(Math.sqrt(((re * re) + (im * im)))) * Math.log(base)) + (Math.atan2(im, re) * 0.0)) / ((Math.log(base) * Math.log(base)) + (0.0 * 0.0));
}
import math

def code(re, im, base):
	return ((math.log(math.sqrt(((re * re) + (im * im)))) * math.log(base)) + (math.atan2(im, re) * 0.0)) / ((math.log(base) * math.log(base)) + (0.0 * 0.0))
function code(re, im, base)
	return Float64(Float64(Float64(log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) * log(base)) + Float64(atan(im, re) * 0.0)) / Float64(Float64(log(base) * log(base)) + Float64(0.0 * 0.0)))
end
function tmp = code(re, im, base)
	tmp = ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0)) / ((log(base) * log(base)) + (0.0 * 0.0));
end
code[re_, im_, base_] := N[(N[(N[(N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] * N[Log[base], $MachinePrecision]), $MachinePrecision] + N[(N[ArcTan[im / re], $MachinePrecision] * 0.0), $MachinePrecision]), $MachinePrecision] / N[(N[(N[Log[base], $MachinePrecision] * N[Log[base], $MachinePrecision]), $MachinePrecision] + N[(0.0 * 0.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0}
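
The specification is the real part of log(z)/log(base) for z = re + im·i, written out through complex division, which is where the terms multiplied by 0 come from. Its weak point is the explicit re·re + im·im: the squares overflow once |z| exceeds roughly 1.3e154, even though |z| and the final logarithm are comfortably representable. A minimal sketch of the failure mode in plain Python (not part of the Herbie report):

import math

re, im, base = 1e200, 1e200, 10.0

# re*re + im*im overflows to inf, so the numerator becomes log(inf) = inf.
naive = (math.log(math.sqrt(re * re + im * im)) * math.log(base)) / (math.log(base) * math.log(base))
print(naive)  # inf

# hypot rescales internally and avoids the intermediate overflow.
print(math.log(math.hypot(re, im)) / math.log(base))  # ≈ 200.1505 = log10(|z|)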

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable (the variable is chosen in the plot title); the vertical axis shows accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion; the two can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 4 alternatives:

Alternative      Accuracy    Speedup
Alternative 1    49.9%       1.7×
Alternative 2    49.9%       2.1×
Alternative 3    49.6%       2.6×
Alternative 4    49.6%       3.4×
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 25.5% accurate, 1.0× speedup

\[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
(FPCore (re im base)
  :precision binary64
  (/
   (+
    (* (log (sqrt (+ (* re re) (* im im)))) (log base))
    (* (atan2 im re) 0.0))
   (+ (* (log base) (log base)) (* 0.0 0.0))))
#include <math.h>

double code(double re, double im, double base) {
	return ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0)) / ((log(base) * log(base)) + (0.0 * 0.0));
}
real(8) function code(re, im, base)
use fmin_fmax_functions
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8), intent (in) :: base
    code = ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0d0)) / ((log(base) * log(base)) + (0.0d0 * 0.0d0))
end function
public static double code(double re, double im, double base) {
	return ((Math.log(Math.sqrt(((re * re) + (im * im)))) * Math.log(base)) + (Math.atan2(im, re) * 0.0)) / ((Math.log(base) * Math.log(base)) + (0.0 * 0.0));
}
import math

def code(re, im, base):
	return ((math.log(math.sqrt(((re * re) + (im * im)))) * math.log(base)) + (math.atan2(im, re) * 0.0)) / ((math.log(base) * math.log(base)) + (0.0 * 0.0))
function code(re, im, base)
	return Float64(Float64(Float64(log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) * log(base)) + Float64(atan(im, re) * 0.0)) / Float64(Float64(log(base) * log(base)) + Float64(0.0 * 0.0)))
end
function tmp = code(re, im, base)
	tmp = ((log(sqrt(((re * re) + (im * im)))) * log(base)) + (atan2(im, re) * 0.0)) / ((log(base) * log(base)) + (0.0 * 0.0));
end
code[re_, im_, base_] := N[(N[(N[(N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] * N[Log[base], $MachinePrecision]), $MachinePrecision] + N[(N[ArcTan[im / re], $MachinePrecision] * 0.0), $MachinePrecision]), $MachinePrecision] / N[(N[(N[Log[base], $MachinePrecision] * N[Log[base], $MachinePrecision]), $MachinePrecision] + N[(0.0 * 0.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0}

Alternative 1: 49.9% accurate, 1.7× speedup

\[\mathsf{copysign}\left(1, \log base\right) \cdot \frac{\log \left(\mathsf{hypot}\left(re, im\right)\right)}{\left|\log base\right|} \]
(FPCore (re im base)
  :precision binary64
  (*
   (copysign 1.0 (log base))
   (/ (log (hypot re im)) (fabs (log base)))))
#include <math.h>

double code(double re, double im, double base) {
	return copysign(1.0, log(base)) * (log(hypot(re, im)) / fabs(log(base)));
}
public static double code(double re, double im, double base) {
	return Math.copySign(1.0, Math.log(base)) * (Math.log(Math.hypot(re, im)) / Math.abs(Math.log(base)));
}
import math

def code(re, im, base):
	return math.copysign(1.0, math.log(base)) * (math.log(math.hypot(re, im)) / math.fabs(math.log(base)))
function code(re, im, base)
	return Float64(copysign(1.0, log(base)) * Float64(log(hypot(re, im)) / abs(log(base))))
end
function tmp = code(re, im, base)
	tmp = (sign(log(base)) * abs(1.0)) * (log(hypot(re, im)) / abs(log(base)));
end
code[re_, im_, base_] := N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[N[Log[base], $MachinePrecision]]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * N[(N[Log[N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]], $MachinePrecision] / N[Abs[N[Log[base], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\mathsf{copysign}\left(1, \log base\right) \cdot \frac{\log \left(\mathsf{hypot}\left(re, im\right)\right)}{\left|\log base\right|}
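
Factoring the sign of log base out through copysign leaves a division by the strictly positive |log base|, so the product equals log(hypot(re, im))/log base for bases on either side of 1. A quick spot-check in plain Python (alt1 is a hypothetical name for the code above):

import math

def alt1(re, im, base):
	return math.copysign(1.0, math.log(base)) * (math.log(math.hypot(re, im)) / math.fabs(math.log(base)))

for base in (10.0, 0.5):  # log(base) positive, then negative
	direct = math.log(math.hypot(3.0, 4.0)) / math.log(base)
	assert math.isclose(alt1(3.0, 4.0, base), direct)
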
Derivation
  1. Initial program (25.5%)

    \[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
  2. Step-by-step derivation
    1. lift-/.f64 (N/A)

      \[\leadsto \color{blue}{\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0}} \]
    2. lift-+.f64 (N/A)

      \[\leadsto \frac{\color{blue}{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}}{\log base \cdot \log base + 0 \cdot 0} \]
    3. lift-*.f64 (N/A)

      \[\leadsto \frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \color{blue}{\tan^{-1}_* \frac{im}{re} \cdot 0}}{\log base \cdot \log base + 0 \cdot 0} \]
    4. mul0-rgt (N/A)

      \[\leadsto \frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \color{blue}{0}}{\log base \cdot \log base + 0 \cdot 0} \]
    5. metadata-eval (N/A)

      \[\leadsto \frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \color{blue}{\left(\mathsf{neg}\left(0\right)\right)}}{\log base \cdot \log base + 0 \cdot 0} \]
    6. sub-flip-reverse (N/A)

      \[\leadsto \frac{\color{blue}{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base - 0}}{\log base \cdot \log base + 0 \cdot 0} \]
    7. --rgt-identity (N/A)

      \[\leadsto \frac{\color{blue}{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base}}{\log base \cdot \log base + 0 \cdot 0} \]
    8. lift-*.f64 (N/A)

      \[\leadsto \frac{\color{blue}{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base}}{\log base \cdot \log base + 0 \cdot 0} \]
    9. *-commutative (N/A)

      \[\leadsto \frac{\color{blue}{\log base \cdot \log \left(\sqrt{re \cdot re + im \cdot im}\right)}}{\log base \cdot \log base + 0 \cdot 0} \]
    10. lift-+.f64 (N/A)

      \[\leadsto \frac{\log base \cdot \log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\color{blue}{\log base \cdot \log base + 0 \cdot 0}} \]
    11. lift-*.f64 (N/A)

      \[\leadsto \frac{\log base \cdot \log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\log base \cdot \log base + \color{blue}{0 \cdot 0}} \]
    12. metadata-eval (N/A)

      \[\leadsto \frac{\log base \cdot \log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\log base \cdot \log base + \color{blue}{0}} \]
    13. +-rgt-identity (N/A)

      \[\leadsto \frac{\log base \cdot \log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\color{blue}{\log base \cdot \log base}} \]
    14. lift-*.f64 (N/A)

      \[\leadsto \frac{\log base \cdot \log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\color{blue}{\log base \cdot \log base}} \]
    15. sqr-abs-rev (N/A)

      \[\leadsto \frac{\log base \cdot \log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\color{blue}{\left|\log base\right| \cdot \left|\log base\right|}} \]
  3. Applied rewrites (25.5%)

    \[\leadsto \color{blue}{\mathsf{copysign}\left(1, \log base\right) \cdot \frac{\log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right)}{\left|\log base\right|}} \]
  4. Step-by-step derivation
    1. lift-sqrt.f64 (N/A)

      \[\leadsto \mathsf{copysign}\left(1, \log base\right) \cdot \frac{\log \color{blue}{\left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right)}}{\left|\log base\right|} \]
    2. lift-fma.f64 (N/A)

      \[\leadsto \mathsf{copysign}\left(1, \log base\right) \cdot \frac{\log \left(\sqrt{\color{blue}{im \cdot im + re \cdot re}}\right)}{\left|\log base\right|} \]
    3. +-commutative (N/A)

      \[\leadsto \mathsf{copysign}\left(1, \log base\right) \cdot \frac{\log \left(\sqrt{\color{blue}{re \cdot re + im \cdot im}}\right)}{\left|\log base\right|} \]
    4. lift-*.f64 (N/A)

      \[\leadsto \mathsf{copysign}\left(1, \log base\right) \cdot \frac{\log \left(\sqrt{\color{blue}{re \cdot re} + im \cdot im}\right)}{\left|\log base\right|} \]
    5. lift-hypot.f64 (49.9%)

      \[\leadsto \mathsf{copysign}\left(1, \log base\right) \cdot \frac{\log \color{blue}{\left(\mathsf{hypot}\left(re, im\right)\right)}}{\left|\log base\right|} \]
  5. Applied rewrites (49.9%)

    \[\leadsto \mathsf{copysign}\left(1, \log base\right) \cdot \frac{\log \color{blue}{\left(\mathsf{hypot}\left(re, im\right)\right)}}{\left|\log base\right|} \]
  6. Add Preprocessing

Alternative 2: 49.9% accurate, 2.1× speedup

\[\frac{1}{\log base} \cdot \log \left(\mathsf{hypot}\left(re, im\right)\right) \]
(FPCore (re im base)
  :precision binary64
  (* (/ 1.0 (log base)) (log (hypot re im))))
#include <math.h>

double code(double re, double im, double base) {
	return (1.0 / log(base)) * log(hypot(re, im));
}
public static double code(double re, double im, double base) {
	return (1.0 / Math.log(base)) * Math.log(Math.hypot(re, im));
}
import math

def code(re, im, base):
	return (1.0 / math.log(base)) * math.log(math.hypot(re, im))
function code(re, im, base)
	return Float64(Float64(1.0 / log(base)) * log(hypot(re, im)))
end
function tmp = code(re, im, base)
	tmp = (1.0 / log(base)) * log(hypot(re, im));
end
code[re_, im_, base_] := N[(N[(1.0 / N[Log[base], $MachinePrecision]), $MachinePrecision] * N[Log[N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\frac{1}{\log base} \cdot \log \left(\mathsf{hypot}\left(re, im\right)\right)
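
Compared with Alternative 1, this drops the copysign/fabs pair and folds everything into one reciprocal and one multiply, which is where the extra speedup comes from; the value agrees with direct division up to rounding. A hedged numeric check in plain Python (alt2 is a hypothetical name):

import math

def alt2(re, im, base):
	return (1.0 / math.log(base)) * math.log(math.hypot(re, im))

print(alt2(3.0, 4.0, 10.0))     # ≈ 0.69897 = log10(5)
print(alt2(1e300, 1e300, 2.0))  # ≈ 997.1; the original program overflows to inf here
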
Derivation
  1. Initial program (25.5%)

    \[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
  2. Step-by-step derivation
    1. lift-/.f64 (N/A)

      \[\leadsto \color{blue}{\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0}} \]
    2. mult-flip (N/A)

      \[\leadsto \color{blue}{\left(\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0\right) \cdot \frac{1}{\log base \cdot \log base + 0 \cdot 0}} \]
    3. lift-+.f64 (N/A)

      \[\leadsto \color{blue}{\left(\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0\right)} \cdot \frac{1}{\log base \cdot \log base + 0 \cdot 0} \]
    4. lift-*.f64 (N/A)

      \[\leadsto \left(\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \color{blue}{\tan^{-1}_* \frac{im}{re} \cdot 0}\right) \cdot \frac{1}{\log base \cdot \log base + 0 \cdot 0} \]
    5. mul0-rgt (N/A)

      \[\leadsto \left(\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \color{blue}{0}\right) \cdot \frac{1}{\log base \cdot \log base + 0 \cdot 0} \]
    6. +-rgt-identity (N/A)

      \[\leadsto \color{blue}{\left(\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base\right)} \cdot \frac{1}{\log base \cdot \log base + 0 \cdot 0} \]
    7. *-commutative (N/A)

      \[\leadsto \color{blue}{\frac{1}{\log base \cdot \log base + 0 \cdot 0} \cdot \left(\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base\right)} \]
    8. lower-*.f64 (N/A)

      \[\leadsto \color{blue}{\frac{1}{\log base \cdot \log base + 0 \cdot 0} \cdot \left(\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base\right)} \]
  3. Applied rewrites (25.5%)

    \[\leadsto \color{blue}{\frac{1}{\log base \cdot \log base} \cdot \left(\log base \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right)\right)} \]
  4. Step-by-step derivation
    1. lift-*.f64 (N/A)

      \[\leadsto \color{blue}{\frac{1}{\log base \cdot \log base} \cdot \left(\log base \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right)\right)} \]
    2. lift-*.f64 (N/A)

      \[\leadsto \frac{1}{\log base \cdot \log base} \cdot \color{blue}{\left(\log base \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right)\right)} \]
    3. associate-*r* (N/A)

      \[\leadsto \color{blue}{\left(\frac{1}{\log base \cdot \log base} \cdot \log base\right) \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right)} \]
    4. lift-/.f64 (N/A)

      \[\leadsto \left(\color{blue}{\frac{1}{\log base \cdot \log base}} \cdot \log base\right) \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    5. inv-pow (N/A)

      \[\leadsto \left(\color{blue}{{\left(\log base \cdot \log base\right)}^{-1}} \cdot \log base\right) \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    6. lift-*.f64 (N/A)

      \[\leadsto \left({\color{blue}{\left(\log base \cdot \log base\right)}}^{-1} \cdot \log base\right) \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    7. pow-prod-down (N/A)

      \[\leadsto \left(\color{blue}{\left({\log base}^{-1} \cdot {\log base}^{-1}\right)} \cdot \log base\right) \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    8. pow-prod-up (N/A)

      \[\leadsto \left(\color{blue}{{\log base}^{\left(-1 + -1\right)}} \cdot \log base\right) \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    9. metadata-eval (N/A)

      \[\leadsto \left({\log base}^{\color{blue}{-2}} \cdot \log base\right) \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    10. metadata-eval (N/A)

      \[\leadsto \left({\log base}^{\color{blue}{\left(\mathsf{neg}\left(2\right)\right)}} \cdot \log base\right) \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    11. pow-plus (N/A)

      \[\leadsto \color{blue}{{\log base}^{\left(\left(\mathsf{neg}\left(2\right)\right) + 1\right)}} \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    12. metadata-eval (N/A)

      \[\leadsto {\log base}^{\left(\color{blue}{-2} + 1\right)} \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    13. metadata-eval (N/A)

      \[\leadsto {\log base}^{\color{blue}{-1}} \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    14. inv-pow (N/A)

      \[\leadsto \color{blue}{\frac{1}{\log base}} \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
    15. lower-*.f64 (N/A)

      \[\leadsto \color{blue}{\frac{1}{\log base} \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right)} \]
    16. lower-/.f64 (25.5%)

      \[\leadsto \color{blue}{\frac{1}{\log base}} \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right) \]
  5. Applied rewrites (25.5%)

    \[\leadsto \color{blue}{\frac{1}{\log base} \cdot \log \left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right)} \]
  6. Step-by-step derivation
    1. lift-sqrt.f64 (N/A)

      \[\leadsto \frac{1}{\log base} \cdot \log \color{blue}{\left(\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}\right)} \]
    2. pow1/2 (N/A)

      \[\leadsto \frac{1}{\log base} \cdot \log \color{blue}{\left({\left(\mathsf{fma}\left(im, im, re \cdot re\right)\right)}^{\frac{1}{2}}\right)} \]
    3. lift-fma.f64 (N/A)

      \[\leadsto \frac{1}{\log base} \cdot \log \left({\color{blue}{\left(im \cdot im + re \cdot re\right)}}^{\frac{1}{2}}\right) \]
    4. +-commutative (N/A)

      \[\leadsto \frac{1}{\log base} \cdot \log \left({\color{blue}{\left(re \cdot re + im \cdot im\right)}}^{\frac{1}{2}}\right) \]
    5. lift-*.f64 (N/A)

      \[\leadsto \frac{1}{\log base} \cdot \log \left({\left(\color{blue}{re \cdot re} + im \cdot im\right)}^{\frac{1}{2}}\right) \]
    6. pow1/2 (N/A)

      \[\leadsto \frac{1}{\log base} \cdot \log \color{blue}{\left(\sqrt{re \cdot re + im \cdot im}\right)} \]
    7. lift-hypot.f64 (49.9%)

      \[\leadsto \frac{1}{\log base} \cdot \log \color{blue}{\left(\mathsf{hypot}\left(re, im\right)\right)} \]
  7. Applied rewrites (49.9%)

    \[\leadsto \frac{1}{\log base} \cdot \log \color{blue}{\left(\mathsf{hypot}\left(re, im\right)\right)} \]
  8. Add Preprocessing

Alternative 3: 49.6% accurate, 2.6× speedup

\[-1 \cdot \frac{\log \left(\frac{1}{\mathsf{max}\left(\left|re\right|, \left|im\right|\right)}\right)}{\log base} \]
(FPCore (re im base)
  :precision binary64
  (* -1.0 (/ (log (/ 1.0 (fmax (fabs re) (fabs im)))) (log base))))
#include <math.h>

double code(double re, double im, double base) {
	return -1.0 * (log((1.0 / fmax(fabs(re), fabs(im)))) / log(base));
}
real(8) function code(re, im, base)
use fmin_fmax_functions
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8), intent (in) :: base
    code = (-1.0d0) * (log((1.0d0 / fmax(abs(re), abs(im)))) / log(base))
end function
public static double code(double re, double im, double base) {
	return -1.0 * (Math.log((1.0 / Math.max(Math.abs(re), Math.abs(im)))) / Math.log(base));
}
import math

def code(re, im, base):
	return -1.0 * (math.log((1.0 / max(math.fabs(re), math.fabs(im)))) / math.log(base))
function code(re, im, base)
	return Float64(-1.0 * Float64(log(Float64(1.0 / max(abs(re), abs(im)))) / log(base)))
end
function tmp = code(re, im, base)
	tmp = -1.0 * (log((1.0 / max(abs(re), abs(im)))) / log(base));
end
code[re_, im_, base_] := N[(-1.0 * N[(N[Log[N[(1.0 / N[Max[N[Abs[re], $MachinePrecision], N[Abs[im], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[Log[base], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
-1 \cdot \frac{\log \left(\frac{1}{\mathsf{max}\left(\left|re\right|, \left|im\right|\right)}\right)}{\log base}
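
Because -log(1/x) = log(x), this expression is algebraically the same as Alternative 4 below; the extra reciprocal and negation only add a rounding step and a little cost. A sketch of the equivalence in plain Python (alt3 and alt4 are hypothetical names):

import math

def alt3(re, im, base):
	return -1.0 * (math.log(1.0 / max(abs(re), abs(im))) / math.log(base))

def alt4(re, im, base):
	return math.log(max(abs(re), abs(im))) / math.log(base)

print(alt3(3.0, 4.0, 10.0))  # ≈ 0.60206 = log10(4)
print(alt4(3.0, 4.0, 10.0))  # same value, up to an ulp from the extra reciprocal
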
Derivation
  1. Initial program (25.5%)

    \[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
  2. Taylor expanded in im around inf

    \[\leadsto \color{blue}{-1 \cdot \frac{\log \left(\frac{1}{im}\right)}{\log base}} \]
  3. Step-by-step derivation
    1. lower-*.f64 (N/A)

      \[\leadsto -1 \cdot \color{blue}{\frac{\log \left(\frac{1}{im}\right)}{\log base}} \]
    2. lower-/.f64 (N/A)

      \[\leadsto -1 \cdot \frac{\log \left(\frac{1}{im}\right)}{\color{blue}{\log base}} \]
    3. lower-log.f64 (N/A)

      \[\leadsto -1 \cdot \frac{\log \left(\frac{1}{im}\right)}{\log \color{blue}{base}} \]
    4. lower-/.f64 (N/A)

      \[\leadsto -1 \cdot \frac{\log \left(\frac{1}{im}\right)}{\log base} \]
    5. lower-log.f64 (13.4%)

      \[\leadsto -1 \cdot \frac{\log \left(\frac{1}{im}\right)}{\log base} \]
  4. Applied rewrites (13.4%)

    \[\leadsto \color{blue}{-1 \cdot \frac{\log \left(\frac{1}{im}\right)}{\log base}} \]
  5. Add Preprocessing

Alternative 4: 49.6% accurate, 3.4× speedup

\[\frac{\log \left(\mathsf{max}\left(\left|re\right|, \left|im\right|\right)\right)}{\log base} \]
(FPCore (re im base)
  :precision binary64
  (/ (log (fmax (fabs re) (fabs im))) (log base)))
#include <math.h>

double code(double re, double im, double base) {
	return log(fmax(fabs(re), fabs(im))) / log(base);
}
real(8) function code(re, im, base)
use fmin_fmax_functions
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8), intent (in) :: base
    code = log(fmax(abs(re), abs(im))) / log(base)
end function
public static double code(double re, double im, double base) {
	return Math.log(Math.max(Math.abs(re), Math.abs(im))) / Math.log(base);
}
import math

def code(re, im, base):
	return math.log(max(math.fabs(re), math.fabs(im))) / math.log(base)
function code(re, im, base)
	return Float64(log(max(abs(re), abs(im))) / log(base))
end
function tmp = code(re, im, base)
	tmp = log(max(abs(re), abs(im))) / log(base);
end
code[re_, im_, base_] := N[(N[Log[N[Max[N[Abs[re], $MachinePrecision], N[Abs[im], $MachinePrecision]], $MachinePrecision]], $MachinePrecision] / N[Log[base], $MachinePrecision]), $MachinePrecision]
\frac{\log \left(\mathsf{max}\left(\left|re\right|, \left|im\right|\right)\right)}{\log base}
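
Replacing hypot(re, im) with max(|re|, |im|) discards the smaller component (both this and Alternative 3 come from Taylor expansion in im around ±inf), so the result is an approximation: the absolute error is at most log(√2)/|log base|, reached when |re| = |im|, and it vanishes as one component dominates. A sketch of the bound in plain Python:

import math

base = 10.0

def exact(re, im):
	return math.log(math.hypot(re, im)) / math.log(base)

def approx(re, im):
	return math.log(max(abs(re), abs(im))) / math.log(base)

# Worst case |re| == |im|: hypot exceeds the max by a factor of sqrt(2).
bound = math.log(math.sqrt(2.0)) / abs(math.log(base))
print(exact(1.0, 1.0) - approx(1.0, 1.0), bound)  # 0.15051... == bound
print(exact(1e6, 1.0) - approx(1e6, 1.0))         # ≈ 2e-13: one component dominates
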
Derivation
  1. Initial program (25.5%)

    \[\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right) \cdot \log base + \tan^{-1}_* \frac{im}{re} \cdot 0}{\log base \cdot \log base + 0 \cdot 0} \]
  2. Taylor expanded in im around -inf

    \[\leadsto \color{blue}{-1 \cdot \frac{\log \left(\frac{-1}{im}\right)}{\log base}} \]
  3. Step-by-step derivation
    1. lower-*.f64 (N/A)

      \[\leadsto -1 \cdot \color{blue}{\frac{\log \left(\frac{-1}{im}\right)}{\log base}} \]
    2. lower-/.f64 (N/A)

      \[\leadsto -1 \cdot \frac{\log \left(\frac{-1}{im}\right)}{\color{blue}{\log base}} \]
    3. lower-log.f64 (N/A)

      \[\leadsto -1 \cdot \frac{\log \left(\frac{-1}{im}\right)}{\log \color{blue}{base}} \]
    4. lower-/.f64 (N/A)

      \[\leadsto -1 \cdot \frac{\log \left(\frac{-1}{im}\right)}{\log base} \]
    5. lower-log.f64 (13.6%)

      \[\leadsto -1 \cdot \frac{\log \left(\frac{-1}{im}\right)}{\log base} \]
  4. Applied rewrites (13.6%)

    \[\leadsto \color{blue}{-1 \cdot \frac{\log \left(\frac{-1}{im}\right)}{\log base}} \]
  5. Step-by-step derivation
    1. lift-*.f64 (N/A)

      \[\leadsto -1 \cdot \color{blue}{\frac{\log \left(\frac{-1}{im}\right)}{\log base}} \]
    2. lift-/.f64 (N/A)

      \[\leadsto -1 \cdot \frac{\log \left(\frac{-1}{im}\right)}{\color{blue}{\log base}} \]
    3. associate-*r/ (N/A)

      \[\leadsto \frac{-1 \cdot \log \left(\frac{-1}{im}\right)}{\color{blue}{\log base}} \]
    4. mul-1-neg (N/A)

      \[\leadsto \frac{\mathsf{neg}\left(\log \left(\frac{-1}{im}\right)\right)}{\log \color{blue}{base}} \]
    5. lower-/.f64 (N/A)

      \[\leadsto \frac{\mathsf{neg}\left(\log \left(\frac{-1}{im}\right)\right)}{\color{blue}{\log base}} \]
    6. lift-log.f64 (N/A)

      \[\leadsto \frac{\mathsf{neg}\left(\log \left(\frac{-1}{im}\right)\right)}{\log base} \]
    7. lift-/.f64 (N/A)

      \[\leadsto \frac{\mathsf{neg}\left(\log \left(\frac{-1}{im}\right)\right)}{\log base} \]
    8. log-div (N/A)

      \[\leadsto \frac{\mathsf{neg}\left(\left(\log \left(\left|-1\right|\right) - \log \left(\left|im\right|\right)\right)\right)}{\log base} \]
    9. metadata-eval (N/A)

      \[\leadsto \frac{\mathsf{neg}\left(\left(\log 1 - \log \left(\left|im\right|\right)\right)\right)}{\log base} \]
    10. metadata-eval (N/A)

      \[\leadsto \frac{\mathsf{neg}\left(\left(\log \left(\left|1\right|\right) - \log \left(\left|im\right|\right)\right)\right)}{\log base} \]
    11. log-div (N/A)

      \[\leadsto \frac{\mathsf{neg}\left(\log \left(\frac{1}{im}\right)\right)}{\log base} \]
    12. log-rec (N/A)

      \[\leadsto \frac{\mathsf{neg}\left(\left(\mathsf{neg}\left(\log im\right)\right)\right)}{\log base} \]
    13. remove-double-neg (N/A)

      \[\leadsto \frac{\log im}{\log \color{blue}{base}} \]
    14. lower-log.f64 (13.4%)

      \[\leadsto \frac{\log im}{\log \color{blue}{base}} \]
  6. Applied rewrites (13.4%)

    \[\leadsto \frac{\log im}{\color{blue}{\log base}} \]
  7. Add Preprocessing

Reproduce

herbie shell --seed 2025313 -o setup:search
(FPCore (re im base)
  :name "math.log/2 on complex, real part"
  :precision binary64
  (/ (+ (* (log (sqrt (+ (* re re) (* im im)))) (log base)) (* (atan2 im re) 0.0)) (+ (* (log base) (log base)) (* 0.0 0.0))))