math.abs on complex

Percentage Accurate: 53.8% → 100.0%
Time: 5.1s
Alternatives: 6
Speedup: 1.1×

Specification

\[ \sqrt{re \cdot re + im \cdot im} \]
(FPCore modulus (re im) :precision binary64 (sqrt (+ (* re re) (* im im))))
#include <math.h>

double modulus(double re, double im) {
	return sqrt(((re * re) + (im * im)));
}
real(8) function modulus(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    modulus = sqrt(((re * re) + (im * im)))
end function
public static double modulus(double re, double im) {
	return Math.sqrt(((re * re) + (im * im)));
}
import math

def modulus(re, im):
	return math.sqrt(((re * re) + (im * im)))
function modulus(re, im)
	return sqrt(Float64(Float64(re * re) + Float64(im * im)))
end
function tmp = modulus(re, im)
	tmp = sqrt(((re * re) + (im * im)));
end
modulus[re_, im_] := N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
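
The naive formula loses accuracy largely because the intermediate squares overflow or underflow even when the true modulus is representable in binary64. A minimal Python sketch of both failure modes (inputs chosen for illustration, not taken from the report's sample):

import math

# Large inputs: the true modulus is about 1.414e200, well inside the
# binary64 range (max ~1.8e308), but re*re overflows to inf first.
re, im = 1e200, 1e200
print(math.sqrt(re * re + im * im))  # inf

# Small inputs: re*re underflows to 0.0, so the result is 0.0
# instead of about 1.414e-200.
re, im = 1e-200, 1e-200
print(math.sqrt(re * re + im * im))  # 0.0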

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable (the variable is chosen in the plot title); the vertical axis shows accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The lines show averages, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 6 alternatives:

Alternative  Accuracy  Speedup
1            100.0%    0.2×
2            26.5%     0.7×
3            26.5%     1.0×
4            53.8%     1.1×
5            29.0%     1.5×
6            26.7%     8.0×
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, each blue circle shows an alternative, and the line shows the best available speed-accuracy tradeoffs.

Initial Program: 53.8% accurate, 1.0× speedup

\[ \sqrt{re \cdot re + im \cdot im} \]
(FPCore modulus (re im) :precision binary64 (sqrt (+ (* re re) (* im im))))
#include <math.h>

double modulus(double re, double im) {
	return sqrt(((re * re) + (im * im)));
}
real(8) function modulus(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    modulus = sqrt(((re * re) + (im * im)))
end function
public static double modulus(double re, double im) {
	return Math.sqrt(((re * re) + (im * im)));
}
import math

def modulus(re, im):
	return math.sqrt(((re * re) + (im * im)))
function modulus(re, im)
	return sqrt(Float64(Float64(re * re) + Float64(im * im)))
end
function tmp = modulus(re, im)
	tmp = sqrt(((re * re) + (im * im)));
end
modulus[re_, im_] := N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]

Alternative 1: 100.0% accurate, 0.2× speedup

\[ \mathsf{hypot}\left(re, im\right) \]
(FPCore modulus (re im) :precision binary64 (hypot re im))
#include <math.h>

double modulus(double re, double im) {
	return hypot(re, im);
}
public static double modulus(double re, double im) {
	return Math.hypot(re, im);
}
import math

def modulus(re, im):
	return math.hypot(re, im)
function modulus(re, im)
	return hypot(re, im)
end
function tmp = modulus(re, im)
	tmp = hypot(re, im);
end
modulus[re_, im_] := N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]
Derivation
  1. Initial program 58.7%

    \[\sqrt{re \cdot re + im \cdot im} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-sqrt.f64 N/A

      \[\leadsto \color{blue}{\sqrt{re \cdot re + im \cdot im}} \]
    2. lift-+.f64 N/A

      \[\leadsto \sqrt{\color{blue}{re \cdot re + im \cdot im}} \]
    3. lift-*.f64 N/A

      \[\leadsto \sqrt{\color{blue}{re \cdot re} + im \cdot im} \]
    4. lift-*.f64 N/A

      \[\leadsto \sqrt{re \cdot re + \color{blue}{im \cdot im}} \]
    5. lower-hypot.f64 100.0%

      \[\leadsto \color{blue}{\mathsf{hypot}\left(re, im\right)} \]
  4. Applied rewrites 100.0%

    \[\leadsto \color{blue}{\mathsf{hypot}\left(re, im\right)} \]
  5. Add Preprocessing
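
hypot evaluates the square root of re·re + im·im without forming the squares directly, which avoids the intermediate overflow and underflow entirely; that is what lifts the accuracy from 53.8% to 100.0%, at the cost of running about five times slower (0.2× speedup). A quick Python comparison on the same illustrative extremes:

import math

for re, im in [(1e200, 1e200), (1e-200, 1e-200), (3.0, 4.0)]:
    naive = math.sqrt(re * re + im * im)  # overflows or underflows on the extreme pairs
    safe = math.hypot(re, im)             # stays finite and accurate
    print(f"naive={naive!r}  hypot={safe!r}")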

Alternative 2: 26.5% accurate, 0.7× speedup

\[ \mathsf{fma}\left(\frac{1}{\frac{im}{re}} \cdot re, 0.5, im\right) \]
(FPCore modulus (re im)
 :precision binary64
 (fma (* (/ 1.0 (/ im re)) re) 0.5 im))
#include <math.h>

double modulus(double re, double im) {
	return fma(((1.0 / (im / re)) * re), 0.5, im);
}
function modulus(re, im)
	return fma(Float64(Float64(1.0 / Float64(im / re)) * re), 0.5, im)
end
modulus[re_, im_] := N[(N[(N[(1.0 / N[(im / re), $MachinePrecision]), $MachinePrecision] * re), $MachinePrecision] * 0.5 + im), $MachinePrecision]
Derivation
  1. Initial program 58.7%

    \[\sqrt{re \cdot re + im \cdot im} \]
  2. Add Preprocessing
  3. Taylor expanded in re around 0 (this expansion is worked out after this alternative)

    \[\leadsto \color{blue}{im + \frac{1}{2} \cdot \frac{{re}^{2}}{im}} \]
  4. Step-by-step derivation
    1. *-lft-identity N/A

      \[\leadsto im + \frac{1}{2} \cdot \frac{\color{blue}{1 \cdot {re}^{2}}}{im} \]
    2. associate-*l/ N/A

      \[\leadsto im + \frac{1}{2} \cdot \color{blue}{\left(\frac{1}{im} \cdot {re}^{2}\right)} \]
    3. associate-*l* N/A

      \[\leadsto im + \color{blue}{\left(\frac{1}{2} \cdot \frac{1}{im}\right) \cdot {re}^{2}} \]
    4. +-commutative N/A

      \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \frac{1}{im}\right) \cdot {re}^{2} + im} \]
    5. lower-fma.f64 N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{1}{2} \cdot \frac{1}{im}, {re}^{2}, im\right)} \]
    6. associate-*r/ N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{\frac{1}{2} \cdot 1}{im}}, {re}^{2}, im\right) \]
    7. metadata-eval N/A

      \[\leadsto \mathsf{fma}\left(\frac{\color{blue}{\frac{1}{2}}}{im}, {re}^{2}, im\right) \]
    8. lower-/.f64 N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{\frac{1}{2}}{im}}, {re}^{2}, im\right) \]
    9. unpow2 N/A

      \[\leadsto \mathsf{fma}\left(\frac{\frac{1}{2}}{im}, \color{blue}{re \cdot re}, im\right) \]
    10. lower-*.f64 22.3%

      \[\leadsto \mathsf{fma}\left(\frac{0.5}{im}, \color{blue}{re \cdot re}, im\right) \]
  5. Applied rewrites 22.3%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{0.5}{im}, re \cdot re, im\right)} \]
  6. Step-by-step derivation
    1. Applied rewrites 24.6%

      \[\leadsto \mathsf{fma}\left(\frac{re}{im} \cdot re, \color{blue}{0.5}, im\right) \]
    2. Step-by-step derivation
      1. Applied rewrites 24.6%

        \[\leadsto \mathsf{fma}\left(\frac{1}{\frac{im}{re}} \cdot re, 0.5, im\right) \]
      2. Add Preprocessing
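
The Taylor expansion in step 3 (reused in Alternatives 3 and 5) comes from factoring im out of the square root; for 0 < |re| ≪ im,

\[ \sqrt{{re}^{2} + {im}^{2}} = im \cdot \sqrt{1 + \left(\frac{re}{im}\right)^{2}} \approx im \cdot \left(1 + \frac{1}{2} \cdot \left(\frac{re}{im}\right)^{2}\right) = im + \frac{1}{2} \cdot \frac{{re}^{2}}{im} \]

The approximation only holds where |re| is much smaller than |im|, which is presumably why this alternative scores just 26.5% over the full sampled input space.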

Alternative 3: 26.5% accurate, 1.0× speedup

\[ \mathsf{fma}\left(\frac{re}{im} \cdot re, 0.5, im\right) \]
(FPCore modulus (re im) :precision binary64 (fma (* (/ re im) re) 0.5 im))
#include <math.h>

double modulus(double re, double im) {
	return fma(((re / im) * re), 0.5, im);
}
function modulus(re, im)
	return fma(Float64(Float64(re / im) * re), 0.5, im)
end
modulus[re_, im_] := N[(N[(N[(re / im), $MachinePrecision] * re), $MachinePrecision] * 0.5 + im), $MachinePrecision]
Derivation
  1. Initial program 58.7%

    \[\sqrt{re \cdot re + im \cdot im} \]
  2. Add Preprocessing
  3. Taylor expanded in re around 0 (the same expansion worked out after Alternative 2)

    \[\leadsto \color{blue}{im + \frac{1}{2} \cdot \frac{{re}^{2}}{im}} \]
  4. Step-by-step derivation
    1. *-lft-identity N/A

      \[\leadsto im + \frac{1}{2} \cdot \frac{\color{blue}{1 \cdot {re}^{2}}}{im} \]
    2. associate-*l/ N/A

      \[\leadsto im + \frac{1}{2} \cdot \color{blue}{\left(\frac{1}{im} \cdot {re}^{2}\right)} \]
    3. associate-*l* N/A

      \[\leadsto im + \color{blue}{\left(\frac{1}{2} \cdot \frac{1}{im}\right) \cdot {re}^{2}} \]
    4. +-commutative N/A

      \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \frac{1}{im}\right) \cdot {re}^{2} + im} \]
    5. lower-fma.f64 N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{1}{2} \cdot \frac{1}{im}, {re}^{2}, im\right)} \]
    6. associate-*r/ N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{\frac{1}{2} \cdot 1}{im}}, {re}^{2}, im\right) \]
    7. metadata-eval N/A

      \[\leadsto \mathsf{fma}\left(\frac{\color{blue}{\frac{1}{2}}}{im}, {re}^{2}, im\right) \]
    8. lower-/.f64 N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{\frac{1}{2}}{im}}, {re}^{2}, im\right) \]
    9. unpow2 N/A

      \[\leadsto \mathsf{fma}\left(\frac{\frac{1}{2}}{im}, \color{blue}{re \cdot re}, im\right) \]
    10. lower-*.f64 22.3%

      \[\leadsto \mathsf{fma}\left(\frac{0.5}{im}, \color{blue}{re \cdot re}, im\right) \]
  5. Applied rewrites 22.3%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{0.5}{im}, re \cdot re, im\right)} \]
  6. Step-by-step derivation
    1. Applied rewrites 24.6%

      \[\leadsto \mathsf{fma}\left(\frac{re}{im} \cdot re, \color{blue}{0.5}, im\right) \]
    2. Add Preprocessing

Alternative 4: 53.8% accurate, 1.1× speedup

\[ \sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)} \]
(FPCore modulus (re im) :precision binary64 (sqrt (fma im im (* re re))))
#include <math.h>

double modulus(double re, double im) {
	return sqrt(fma(im, im, (re * re)));
}
function modulus(re, im)
	return sqrt(fma(im, im, Float64(re * re)))
end
modulus[re_, im_] := N[Sqrt[N[(im * im + N[(re * re), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
Derivation
  1. Initial program 58.7%

    \[\sqrt{re \cdot re + im \cdot im} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-+.f64 N/A

      \[\leadsto \sqrt{\color{blue}{re \cdot re + im \cdot im}} \]
    2. +-commutative N/A

      \[\leadsto \sqrt{\color{blue}{im \cdot im + re \cdot re}} \]
    3. lift-*.f64 N/A

      \[\leadsto \sqrt{\color{blue}{im \cdot im} + re \cdot re} \]
    4. lower-fma.f64 58.7%

      \[\leadsto \sqrt{\color{blue}{\mathsf{fma}\left(im, im, re \cdot re\right)}} \]
  4. Applied rewrites 58.7%

    \[\leadsto \sqrt{\color{blue}{\mathsf{fma}\left(im, im, re \cdot re\right)}} \]
  5. Add Preprocessing
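
The fma rounds im·im + re·re once instead of twice, which can recover the last bit of the sum, but it does nothing about the squares overflowing or underflowing, so the accuracy stays at the original 53.8%. A quick empirical sketch of how often the single rounding matters (math.fma requires Python 3.13 or newer):

import math
import random

random.seed(0)
pairs = [(random.random(), random.random()) for _ in range(10_000)]
# Count pairs where the fused (one-rounding) and unfused (two-rounding)
# versions of im*im + re*re disagree in the last bit.
count = sum(im * im + re * re != math.fma(im, im, re * re)
            for re, im in pairs)
print(f"{count} of {len(pairs)} pairs round differently")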

Alternative 5: 29.0% accurate, 1.5× speedup

\[ \sqrt{im \cdot im} \]
(FPCore modulus (re im) :precision binary64 (sqrt (* im im)))
#include <math.h>

double modulus(double re, double im) {
	return sqrt((im * im));
}
real(8) function modulus(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    modulus = sqrt((im * im))
end function
public static double modulus(double re, double im) {
	return Math.sqrt((im * im));
}
import math

def modulus(re, im):
	return math.sqrt((im * im))
function modulus(re, im)
	return sqrt(Float64(im * im))
end
function tmp = modulus(re, im)
	tmp = sqrt((im * im));
end
modulus[re_, im_] := N[Sqrt[N[(im * im), $MachinePrecision]], $MachinePrecision]
Derivation
  1. Initial program 58.7%

    \[\sqrt{re \cdot re + im \cdot im} \]
  2. Add Preprocessing
  3. Taylor expanded in re around 0

    \[\leadsto \sqrt{\color{blue}{{im}^{2}}} \]
  4. Step-by-step derivation
    1. unpow2 N/A

      \[\leadsto \sqrt{\color{blue}{im \cdot im}} \]
    2. lower-*.f64 27.8%

      \[\leadsto \sqrt{\color{blue}{im \cdot im}} \]
  5. Applied rewrites 27.8%

    \[\leadsto \sqrt{\color{blue}{im \cdot im}} \]
  6. Add Preprocessing
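
In exact arithmetic this alternative is simply the absolute value of im, the zeroth-order term of the same expansion around re = 0:

\[ \sqrt{{re}^{2} + {im}^{2}} \approx \sqrt{{im}^{2}} = \left|im\right| \quad \text{for } re \approx 0 \]

Computing it as sqrt(im · im) still squares im, so the overflow and underflow problems remain on top of re being ignored entirely.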

Alternative 6: 26.7% accurate, 8.0× speedup

\[ -re \]
(FPCore modulus (re im) :precision binary64 (- re))
double modulus(double re, double im) {
	return -re;
}
real(8) function modulus(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    modulus = -re
end function
public static double modulus(double re, double im) {
	return -re;
}
def modulus(re, im):
	return -re
function modulus(re, im)
	return Float64(-re)
end
function tmp = modulus(re, im)
	tmp = -re;
end
modulus[re_, im_] := (-re)
Derivation
  1. Initial program 58.7%

    \[\sqrt{re \cdot re + im \cdot im} \]
  2. Add Preprocessing
  3. Taylor expanded in re around -inf

    \[\leadsto \color{blue}{-1 \cdot re} \]
  4. Step-by-step derivation
    1. mul-1-neg N/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(re\right)} \]
    2. lower-neg.f64 30.0%

      \[\leadsto \color{blue}{-re} \]
  5. Applied rewrites 30.0%

    \[\leadsto \color{blue}{-re} \]
  6. Add Preprocessing
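
The expansion around re = -∞ works the same way with the roles reversed: factoring |re| = -re out of the square root gives

\[ \sqrt{{re}^{2} + {im}^{2}} = \left|re\right| \cdot \sqrt{1 + \left(\frac{im}{re}\right)^{2}} \to \left|re\right| = -re \quad \text{as } re \to -\infty \]

With no math-library calls at all this is the fastest alternative (8.0× speedup), but it is only a reasonable approximation where |re| dominates |im|.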

Reproduce

herbie shell --seed 2024331
(FPCore modulus (re im)
  :name "math.abs on complex"
  :precision binary64
  (sqrt (+ (* re re) (* im im))))