
(FPCore modulus (re im) :precision binary64 (sqrt (+ (* re re) (* im im))))
double modulus(double re, double im) {
return sqrt(((re * re) + (im * im)));
}
! Modulus (magnitude) of the complex number re + i*im in binary64.
! Naive formulation: re*re + im*im may overflow/underflow for extreme
! inputs even when the true result is representable.
real(8) function modulus(re, im)
  implicit none
  real(8), intent (in) :: re
  real(8), intent (in) :: im
  modulus = sqrt(((re * re) + (im * im)))
end function modulus
/** Magnitude of the complex value re + i*im (naive sqrt of sum of squares). */
public static double modulus(double re, double im) {
    final double sumOfSquares = re * re + im * im;
    return Math.sqrt(sumOfSquares);
}
def modulus(re, im):
    """Magnitude of the complex value re + i*im (naive formulation)."""
    sum_of_squares = re * re + im * im
    return math.sqrt(sum_of_squares)
# Magnitude of re + i*im, naive formulation with every step in Float64.
function modulus(re, im)
    sum_of_squares = Float64(Float64(re * re) + Float64(im * im))
    return sqrt(sum_of_squares)
end
% Magnitude of the complex value re + i*im (naive sqrt of sum of squares).
function tmp = modulus(re, im) tmp = sqrt(((re * re) + (im * im))); end
(* Magnitude of re + i*im with each operation rounded to $MachinePrecision,
   mirroring the binary64 evaluation of the FPCore above. *)
modulus[re_, im_] := N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{re \cdot re + im \cdot im}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore modulus (re im) :precision binary64 (sqrt (+ (* re re) (* im im))))
/* Alternative 1: identical to the initial program (naive formulation). */
double modulus(double re, double im) {
return sqrt(((re * re) + (im * im)));
}
! Alternative 1: identical to the initial program (naive formulation).
real(8) function modulus(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
modulus = sqrt(((re * re) + (im * im)))
end function
// Alternative 1: identical to the initial program (naive formulation).
public static double modulus(double re, double im) {
return Math.sqrt(((re * re) + (im * im)));
}
# Alternative 1: identical to the initial program (naive formulation).
def modulus(re, im): return math.sqrt(((re * re) + (im * im)))
# Alternative 1: identical to the initial program (naive formulation).
function modulus(re, im) return sqrt(Float64(Float64(re * re) + Float64(im * im))) end
% Alternative 1: identical to the initial program (naive formulation).
function tmp = modulus(re, im) tmp = sqrt(((re * re) + (im * im))); end
(* Alternative 1: identical to the initial program (naive formulation). *)
modulus[re_, im_] := N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{re \cdot re + im \cdot im}
\end{array}
(FPCore modulus (re im) :precision binary64 (hypot re im))
double modulus(double re, double im) {
return hypot(re, im);
}
/** Magnitude of re + i*im via Math.hypot (no intermediate overflow/underflow). */
public static double modulus(double re, double im) {
    final double magnitude = Math.hypot(re, im);
    return magnitude;
}
def modulus(re, im):
    """Magnitude of the complex value re + i*im via overflow-safe math.hypot."""
    return math.hypot(re, im)
# Magnitude of re + i*im via hypot (avoids overflow/underflow of the squares).
function modulus(re, im)
    return hypot(re, im)
end
% Magnitude of re + i*im via hypot (avoids overflow/underflow of the squares).
function tmp = modulus(re, im) tmp = hypot(re, im); end
(* NOTE(review): this translation uses the direct Sqrt[re^2 + im^2] formula,
   not a hypot equivalent, so it does not share hypot's overflow protection
   at machine precision — confirm this is the intended rendering. *)
modulus[re_, im_] := N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{hypot}\left(re, im\right)
\end{array}
Initial program 49.5%
lift-sqrt.f64 — N/A
lift-+.f64 — N/A
lift-*.f64 — N/A
lift-*.f64 — N/A
lower-hypot.f64 — 100.0
Applied rewrites100.0%
(FPCore modulus (re im) :precision binary64 (fma (* (/ 0.5 im) re) re im))
/* Herbie alternative from a series expansion in re around 0:
   fma((0.5/im)*re, re, im) ~= im + re*re/(2*im). Only an approximation
   of the modulus (report accuracy ~27.6%); also divides by im. */
double modulus(double re, double im) {
return fma(((0.5 / im) * re), re, im);
}
# Herbie alternative (series in re around 0): fma((0.5/im)*re, re, im),
# an approximation of the modulus; report accuracy ~27.6%.
function modulus(re, im) return fma(Float64(Float64(0.5 / im) * re), re, im) end
(* Herbie alternative (series in re around 0). NOTE(review): this rendering
   is a plain multiply-add, not a fused fma as in the FPCore — confirm the
   intended semantics. *)
modulus[re_, im_] := N[(N[(N[(0.5 / im), $MachinePrecision] * re), $MachinePrecision] * re + im), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{0.5}{im} \cdot re, re, im\right)
\end{array}
Initial program 49.5%
Taylor expanded in re around 0
+-commutative — N/A
*-lft-identity — N/A
associate-*l/ — N/A
associate-*l* — N/A
lower-fma.f64 — N/A
associate-*r/ — N/A
metadata-eval — N/A
lower-/.f64 — N/A
unpow2 — N/A
lower-*.f64 — 26.9
Applied rewrites26.9%
Applied rewrites27.6%
(FPCore modulus (re im) :precision binary64 (sqrt (fma re re (* im im))))
/* Herbie alternative: fold re*re into the addition with fma, removing one
   rounding step; same reported accuracy as the original (49.5%). */
double modulus(double re, double im) {
return sqrt(fma(re, re, (im * im)));
}
# Herbie alternative: fma fuses re*re into the addition (one fewer rounding).
function modulus(re, im) return sqrt(fma(re, re, Float64(im * im))) end
(* Herbie alternative. NOTE(review): re*re here is not wrapped in N[...],
   mimicking the unrounded product inside fma — confirm intended rendering. *)
modulus[re_, im_] := N[Sqrt[N[(re * re + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\mathsf{fma}\left(re, re, im \cdot im\right)}
\end{array}
Initial program 49.5%
lift-+.f64 — N/A
lift-*.f64 — N/A
lower-fma.f64 — 49.5
Applied rewrites49.5%
(FPCore modulus (re im) :precision binary64 (sqrt (* im im)))
/* Herbie alternative (Taylor expansion in re around 0): sqrt(im*im),
   i.e. ~|im| barring overflow/underflow of the square; ignores re.
   Reported accuracy ~30%. */
double modulus(double re, double im) {
return sqrt((im * im));
}
! Herbie alternative (Taylor expansion in re around 0): sqrt(im*im),
! i.e. ~|im| barring overflow/underflow of the square; ignores re.
! Reported accuracy ~30%.
real(8) function modulus(re, im)
  implicit none
  real(8), intent (in) :: re
  real(8), intent (in) :: im
  modulus = sqrt((im * im))
end function modulus
// Herbie alternative (series in re around 0): ~|im|; ignores re entirely.
public static double modulus(double re, double im) {
return Math.sqrt((im * im));
}
# Herbie alternative (series in re around 0): ~abs(im); ignores re entirely.
def modulus(re, im): return math.sqrt((im * im))
# Herbie alternative (series in re around 0): ~abs(im); ignores re entirely.
function modulus(re, im) return sqrt(Float64(im * im)) end
% Herbie alternative (series in re around 0): ~abs(im); ignores re entirely.
function tmp = modulus(re, im) tmp = sqrt((im * im)); end
(* Herbie alternative (series in re around 0): ~|im|; ignores re entirely. *)
modulus[re_, im_] := N[Sqrt[N[(im * im), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{im \cdot im}
\end{array}
Initial program 49.5%
Taylor expanded in re around 0
unpow2 — N/A
lower-*.f64 — 30.0
Applied rewrites30.0%
(FPCore modulus (re im) :precision binary64 (- re))
/* Herbie alternative (Taylor expansion in re around -inf): returns -re,
   a good approximation only when re is large and negative.
   Reported accuracy ~28%. */
double modulus(double re, double im) {
return -re;
}
! Herbie alternative (Taylor expansion in re around -inf): returns -re,
! a good approximation only when re is large and negative.
! Reported accuracy ~28%.
real(8) function modulus(re, im)
  implicit none
  real(8), intent (in) :: re
  real(8), intent (in) :: im
  modulus = -re
end function modulus
// Herbie alternative (series in re around -inf): -re; valid only for
// large negative re.
public static double modulus(double re, double im) {
return -re;
}
# Herbie alternative (series in re around -inf): -re; valid only for
# large negative re.
def modulus(re, im): return -re
# Herbie alternative (series in re around -inf): -re; valid only for
# large negative re.
function modulus(re, im) return Float64(-re) end
% Herbie alternative (series in re around -inf): -re; valid only for
% large negative re.
function tmp = modulus(re, im) tmp = -re; end
(* Herbie alternative (series in re around -inf): -re; valid only for
   large negative re. *)
modulus[re_, im_] := (-re)
\begin{array}{l}
\\
-re
\end{array}
Initial program 49.5%
Taylor expanded in re around -inf
mul-1-neg — N/A
lower-neg.f64 — 28.0
Applied rewrites28.0%
herbie shell --seed 2024308
(FPCore modulus (re im)
:name "math.abs on complex"
:precision binary64
(sqrt (+ (* re re) (* im im))))