
; Input program: binary64 complex magnitude, sqrt(re^2 + im^2).
(FPCore modulus (re im) :precision binary64 (sqrt (+ (* re re) (* im im))))
double modulus(double re, double im) {
return sqrt(((re * re) + (im * im)));
}
real(8) function modulus(re, im)
  ! Magnitude of the complex number re + i*im: sqrt(re**2 + im**2).
  implicit none
  real(8), intent(in) :: re, im
  modulus = sqrt(re*re + im*im)
end function modulus
/** Magnitude of the complex number re + i*im in binary64. */
public static double modulus(double re, double im) {
    final double sumOfSquares = (re * re) + (im * im);
    return Math.sqrt(sumOfSquares);
}
def modulus(re, im):
    """Magnitude of the complex number re + i*im: sqrt(re*re + im*im)."""
    sum_of_squares = (re * re) + (im * im)
    return math.sqrt(sum_of_squares)
# Magnitude of re + i*im in binary64, with explicit Float64 rounding steps.
function modulus(re, im)
    sum_of_squares = Float64(Float64(re * re) + Float64(im * im))
    return sqrt(sum_of_squares)
end
% Magnitude of the complex number re + i*im: sqrt(re^2 + im^2).
function tmp = modulus(re, im)
    tmp = sqrt((re * re) + (im * im));
end
(* Machine-precision evaluation of sqrt(re^2 + im^2), rounding each subexpression. *)
modulus[re_, im_] := N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{re \cdot re + im \cdot im}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 (same as input): sqrt(re^2 + im^2) in binary64.
(FPCore modulus (re im) :precision binary64 (sqrt (+ (* re re) (* im im))))
double modulus(double re, double im) {
return sqrt(((re * re) + (im * im)));
}
real(8) function modulus(re, im)
  ! |re + i*im| = sqrt(re**2 + im**2); addition reordered (commutative).
  implicit none
  real(8), intent(in) :: re
  real(8), intent(in) :: im
  modulus = sqrt(im*im + re*re)
end function modulus
/** |re + i*im| = sqrt(re^2 + im^2); addition reordered (commutative). */
public static double modulus(double re, double im) {
    return Math.sqrt((im * im) + (re * re));
}
def modulus(re, im):
    """Return sqrt(re*re + im*im); addition reordered (commutative)."""
    return math.sqrt((im * im) + (re * re))
# |re + i*im| in binary64; each product rounded explicitly to Float64.
function modulus(re, im)
    rr = Float64(re * re)
    ii = Float64(im * im)
    return sqrt(Float64(rr + ii))
end
% |re + i*im| = sqrt(re^2 + im^2); addition reordered (commutative).
function tmp = modulus(re, im)
    tmp = sqrt(im * im + re * re);
end
(* Alternative 1: machine-precision sqrt(re^2 + im^2) with per-step rounding. *)
modulus[re_, im_] := N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{re \cdot re + im \cdot im}
\end{array}
; Alternative 2: hypot avoids overflow/underflow in the intermediate squares.
(FPCore modulus (re im) :precision binary64 (hypot re im))
double modulus(double re, double im) {
return hypot(re, im);
}
/** Overflow-safe magnitude of re + i*im via Math.hypot. */
public static double modulus(double re, double im) {
    final double magnitude = Math.hypot(re, im);
    return magnitude;
}
def modulus(re, im):
    """Overflow-safe magnitude of re + i*im via math.hypot."""
    magnitude = math.hypot(re, im)
    return magnitude
# Overflow-safe magnitude of re + i*im via hypot.
function modulus(re, im)
    return hypot(re, im)
end
% Overflow-safe magnitude of re + i*im via hypot.
function tmp = modulus(re, im)
    tmp = hypot(re, im);
end
(* Alternative 2: no hypot in Mathematica output, so sqrt(re^2 + im^2) at machine precision. *)
modulus[re_, im_] := N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{hypot}\left(re, im\right)
\end{array}
Initial program 55.9%
lift-sqrt.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
lower-hypot.f64 100.0
Applied rewrites 100.0%
; Alternative 3 (Taylor-based): im + re^2/(2*im), written as fma(re, (re*0.5)/im, im).
(FPCore modulus (re im) :precision binary64 (fma re (/ (* re 0.5) im) im))
double modulus(double re, double im) {
return fma(re, ((re * 0.5) / im), im);
}
# Series-based alternative: fma(re, (re*0.5)/im, im) ~ im + re^2/(2*im).
function modulus(re, im)
    half_ratio = Float64(Float64(re * 0.5) / im)
    return fma(re, half_ratio, im)
end
(* Alternative 3: machine-precision im + re*(re*0.5)/im (fma is exact in Mathematica's arithmetic). *)
modulus[re_, im_] := N[(re * N[(N[(re * 0.5), $MachinePrecision] / im), $MachinePrecision] + im), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(re, \frac{re \cdot 0.5}{im}, im\right)
\end{array}
Initial program 55.9%
Taylor expanded in re around 0
+-commutative N/A
*-lft-identity N/A
associate-*l/ N/A
associate-*l* N/A
unpow2 N/A
associate-*r* N/A
*-commutative N/A
lower-fma.f64 N/A
associate-*r/ N/A
metadata-eval N/A
associate-*l/ N/A
lower-/.f64 N/A
*-commutative N/A
lower-*.f64 31.7
Applied rewrites 31.7%
; Alternative 4: fuse re*re into the sum with fma, saving one rounding.
(FPCore modulus (re im) :precision binary64 (sqrt (fma re re (* im im))))
double modulus(double re, double im) {
return sqrt(fma(re, re, (im * im)));
}
# sqrt(fma(re, re, im*im)): the fused multiply-add saves one rounding step.
function modulus(re, im)
    fused_sum = fma(re, re, Float64(im * im))
    return sqrt(fused_sum)
end
(* Alternative 4: re*re + im*im with the product re*re left unrounded (fma emulation). *)
modulus[re_, im_] := N[Sqrt[N[(re * re + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\mathsf{fma}\left(re, re, im \cdot im\right)}
\end{array}
Initial program 55.9%
lift-+.f64 N/A
lift-*.f64 N/A
lower-fma.f64 55.9
Applied rewrites 55.9%
; Alternative 5 (truncated series in re): sqrt(im*im) == |im|; re is dropped.
(FPCore modulus (re im) :precision binary64 (sqrt (* im im)))
double modulus(double re, double im) {
return sqrt((im * im));
}
real(8) function modulus(re, im)
  ! Truncated-series alternative: sqrt(im*im) == abs(im); re is unused.
  implicit none
  real(8), intent(in) :: re, im
  modulus = sqrt(im * im)
end function modulus
/** Truncated-series alternative: sqrt(im*im) == |im|; re is unused. */
public static double modulus(double re, double im) {
    final double imSquared = im * im;
    return Math.sqrt(imSquared);
}
def modulus(re, im):
    """Truncated-series alternative: sqrt(im*im) == abs(im); `re` is unused."""
    im_squared = im * im
    return math.sqrt(im_squared)
# Truncated-series alternative: sqrt(im*im) == abs(im); re is unused.
function modulus(re, im)
    return sqrt(Float64(im * im))
end
% Truncated-series alternative: sqrt(im*im) == abs(im); re is unused.
function tmp = modulus(re, im)
    tmp = sqrt(im * im);
end
(* Alternative 5: machine-precision sqrt(im*im) == Abs[im]; re is unused. *)
modulus[re_, im_] := N[Sqrt[N[(im * im), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{im \cdot im}
\end{array}
Initial program 55.9%
Taylor expanded in re around 0
unpow2 N/A
lower-*.f64 29.9
Applied rewrites 29.9%
; Alternative 6 (series around re -> -inf): just -re; im is dropped.
(FPCore modulus (re im) :precision binary64 (- re))
/* Asymptotic alternative (series around re -> -inf): -re; im is unused. */
double modulus(double re, double im) {
    double negated = -re;
    return negated;
}
real(8) function modulus(re, im)
  ! Asymptotic alternative (series around re -> -inf): -re; im is unused.
  implicit none
  real(8), intent(in) :: re, im
  modulus = -re
end function modulus
/** Asymptotic alternative (series around re -> -inf): -re; im is unused. */
public static double modulus(double re, double im) {
    final double negated = -re;
    return negated;
}
def modulus(re, im):
    """Asymptotic alternative (series around re -> -inf): -re; `im` is unused."""
    negated = -re
    return negated
# Asymptotic alternative (series around re -> -inf): -re; im is unused.
function modulus(re, im)
    return Float64(-re)
end
% Asymptotic alternative (series around re -> -inf): -re; im is unused.
function tmp = modulus(re, im)
    tmp = -re;
end
(* Alternative 6: -re; im is unused. *)
modulus[re_, im_] := (-re)
\begin{array}{l}
\\
-re
\end{array}
Initial program 55.9%
Taylor expanded in re around -inf
mul-1-neg N/A
lower-neg.f64 29.0
Applied rewrites 29.0%
herbie shell --seed 2024233
; Reproduction: the FPCore passed to the herbie shell invocation above.
(FPCore modulus (re im)
:name "math.abs on complex"
:precision binary64
(sqrt (+ (* re re) (* im im))))