
Initial program:

```
(FPCore modulus (re im) :precision binary64 (sqrt (+ (* re re) (* im im))))
```
```c
#include <math.h>

double modulus(double re, double im) {
    return sqrt((re * re) + (im * im));
}
```
```fortran
real(8) function modulus(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    modulus = sqrt((re * re) + (im * im))
end function
```
```java
public static double modulus(double re, double im) {
    return Math.sqrt((re * re) + (im * im));
}
```
```python
import math

def modulus(re, im):
    return math.sqrt((re * re) + (im * im))
```
```julia
function modulus(re, im)
    return sqrt(Float64(Float64(re * re) + Float64(im * im)))
end
```
```matlab
function tmp = modulus(re, im)
    tmp = sqrt((re * re) + (im * im));
end
```
```wolfram
modulus[re_, im_] := N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
```

$$\sqrt{re \cdot re + im \cdot im}$$

Sampling outcomes in binary64 precision.

Herbie found 6 alternatives:

| Alternative | Accuracy |
|---|---|
| 1: `hypot(re, im)` | 100.0% |
| 2: `fma((1.0 / (im / re)) * re, 0.5, im)` | 24.6% |
| 3: `fma((re / im) * re, 0.5, im)` | 24.6% |
| 4: `sqrt(fma(im, im, re * re))` | 58.7% |
| 5: `sqrt(im * im)` | 27.8% |
| 6: `-re` | 30.0% |

Alternative 1:

```
(FPCore modulus (re im) :precision binary64 (hypot re im))
```
```c
#include <math.h>

double modulus(double re, double im) {
    return hypot(re, im);
}
```
```java
public static double modulus(double re, double im) {
    return Math.hypot(re, im);
}
```
```python
import math

def modulus(re, im):
    return math.hypot(re, im)
```
```julia
function modulus(re, im)
    return hypot(re, im)
end
```
```matlab
function tmp = modulus(re, im)
    tmp = hypot(re, im);
end
```
```wolfram
modulus[re_, im_] := N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]
```

$$\mathsf{hypot}\left(re, im\right)$$

Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 58.7% |
| `lift-sqrt.f64` | N/A |
| `lift-+.f64` | N/A |
| `lift-*.f64` | N/A |
| `lift-*.f64` | N/A |
| `lower-hypot.f64` | 100.0% |
| Applied rewrites | 100.0% |
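
`hypot` computes $\sqrt{re^2 + im^2}$ without letting the intermediate squares overflow or underflow, which is what lifts this alternative to 100.0% accuracy. A minimal C sketch of the difference, with arbitrarily chosen extreme inputs:

```c
#include <math.h>
#include <stdio.h>

int main(void) {
    /* Large inputs: re * re overflows to infinity, so the naive
       formula returns inf even though the true modulus (~1.41e200)
       is representable in binary64. */
    double re = 1e200, im = 1e200;
    printf("naive: %g\n", sqrt(re * re + im * im)); /* inf */
    printf("hypot: %g\n", hypot(re, im));           /* ~1.41421e+200 */

    /* Small inputs: both squares underflow to zero, so the naive
       formula returns 0 instead of ~1.41e-200. */
    re = 1e-200;
    im = 1e-200;
    printf("naive: %g\n", sqrt(re * re + im * im)); /* 0 */
    printf("hypot: %g\n", hypot(re, im));           /* ~1.41421e-200 */
    return 0;
}
```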

Alternative 2:

```
(FPCore modulus (re im) :precision binary64 (fma (* (/ 1.0 (/ im re)) re) 0.5 im))
```
```c
#include <math.h>

double modulus(double re, double im) {
    return fma((1.0 / (im / re)) * re, 0.5, im);
}
```
```julia
function modulus(re, im)
    return fma(Float64(Float64(1.0 / Float64(im / re)) * re), 0.5, im)
end
```
```wolfram
modulus[re_, im_] := N[(N[(N[(1.0 / N[(im / re), $MachinePrecision]), $MachinePrecision] * re), $MachinePrecision] * 0.5 + im), $MachinePrecision]
```

$$\mathsf{fma}\left(\frac{1}{\frac{im}{re}} \cdot re, 0.5, im\right)$$

Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 58.7% |
| Taylor expanded in re around 0 | |
| `*-lft-identity` | N/A |
| `associate-*l/` | N/A |
| `associate-*l*` | N/A |
| `+-commutative` | N/A |
| `lower-fma.f64` | N/A |
| `associate-*r/` | N/A |
| `metadata-eval` | N/A |
| `lower-/.f64` | N/A |
| `unpow2` | N/A |
| `lower-*.f64` | 22.3% |
| Applied rewrites | 22.3% |
| Applied rewrites | 24.6% |
| Applied rewrites | 24.6% |

Alternative 3:

```
(FPCore modulus (re im) :precision binary64 (fma (* (/ re im) re) 0.5 im))
```
```c
#include <math.h>

double modulus(double re, double im) {
    return fma((re / im) * re, 0.5, im);
}
```
```julia
function modulus(re, im)
    return fma(Float64(Float64(re / im) * re), 0.5, im)
end
```
```wolfram
modulus[re_, im_] := N[(N[(N[(re / im), $MachinePrecision] * re), $MachinePrecision] * 0.5 + im), $MachinePrecision]
```

$$\mathsf{fma}\left(\frac{re}{im} \cdot re, 0.5, im\right)$$

Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 58.7% |
| Taylor expanded in re around 0 | |
| `*-lft-identity` | N/A |
| `associate-*l/` | N/A |
| `associate-*l*` | N/A |
| `+-commutative` | N/A |
| `lower-fma.f64` | N/A |
| `associate-*r/` | N/A |
| `metadata-eval` | N/A |
| `lower-/.f64` | N/A |
| `unpow2` | N/A |
| `lower-*.f64` | 22.3% |
| Applied rewrites | 22.3% |
| Applied rewrites | 24.6% |
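
Alternatives 2 and 3 are two arrangements of the same first-order Taylor expansion of the modulus in re around 0. A sketch of the algebra, assuming $im > 0$ and $|re| \ll |im|$:

$$\sqrt{re^2 + im^2} = im\sqrt{1 + \left(\frac{re}{im}\right)^2} \approx im + \frac{re^2}{2\,im} = \mathsf{fma}\left(\frac{re}{im} \cdot re,\ 0.5,\ im\right)$$

Alternative 2 merely writes $\frac{re}{im}$ as $1 / \frac{im}{re}$. The approximation only holds near $re = 0$ (and uses $im$ where the exact result needs $|im|$), which is consistent with the low overall accuracy of both variants.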

Alternative 4:

```
(FPCore modulus (re im) :precision binary64 (sqrt (fma im im (* re re))))
```
```c
#include <math.h>

double modulus(double re, double im) {
    return sqrt(fma(im, im, re * re));
}
```
```julia
function modulus(re, im)
    return sqrt(fma(im, im, Float64(re * re)))
end
```
```wolfram
modulus[re_, im_] := N[Sqrt[N[(im * im + N[(re * re), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
```

$$\sqrt{\mathsf{fma}\left(im, im, re \cdot re\right)}$$

Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 58.7% |
| `lift-+.f64` | N/A |
| `+-commutative` | N/A |
| `lift-*.f64` | N/A |
| `lower-fma.f64` | 58.7% |
| Applied rewrites | 58.7% |
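
`fma(im, im, re * re)` adds $im \cdot im$ to $re \cdot re$ with a single rounding on the fused multiply-add, which is why the rewrite preserves the initial program's 58.7% accuracy while committing one fewer rounding. A minimal C sketch of that fused rounding, using the standard product-error idiom (the input value is arbitrary):

```c
#include <math.h>
#include <stdio.h>

int main(void) {
    double a = 1.0 / 3.0;
    /* a * a is rounded to the nearest double. fma evaluates
       a * a - round(a * a) exactly, exposing the rounding error
       that the plain product commits. */
    double err = fma(a, a, -(a * a));
    printf("rounding error of a*a: %g\n", err); /* tiny but nonzero */
    return 0;
}
```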

Alternative 5:

```
(FPCore modulus (re im) :precision binary64 (sqrt (* im im)))
```
```c
#include <math.h>

double modulus(double re, double im) {
    return sqrt(im * im);
}
```
```fortran
real(8) function modulus(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    modulus = sqrt(im * im)
end function
```
```java
public static double modulus(double re, double im) {
    return Math.sqrt(im * im);
}
```
```python
import math

def modulus(re, im):
    return math.sqrt(im * im)
```
```julia
function modulus(re, im)
    return sqrt(Float64(im * im))
end
```
```matlab
function tmp = modulus(re, im)
    tmp = sqrt(im * im);
end
```
```wolfram
modulus[re_, im_] := N[Sqrt[N[(im * im), $MachinePrecision]], $MachinePrecision]
```

$$\sqrt{im \cdot im}$$

Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 58.7% |
| Taylor expanded in re around 0 | |
| `unpow2` | N/A |
| `lower-*.f64` | 27.8% |
| Applied rewrites | 27.8% |
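
This is the zeroth-order version of the expansion behind alternatives 2 and 3: when $|re| \ll |im|$,

$$\sqrt{re^2 + im^2} \approx \sqrt{im^2} = |im|$$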

Alternative 6:

```
(FPCore modulus (re im) :precision binary64 (- re))
```
```c
double modulus(double re, double im) {
    return -re;
}
```
```fortran
real(8) function modulus(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    modulus = -re
end function
```
```java
public static double modulus(double re, double im) {
    return -re;
}
```
```python
def modulus(re, im):
    return -re
```
```julia
function modulus(re, im)
    return Float64(-re)
end
```
```matlab
function tmp = modulus(re, im)
    tmp = -re;
end
```
```wolfram
modulus[re_, im_] := (-re)
```

$$-re$$

Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 58.7% |
| Taylor expanded in re around -inf | |
| `mul-1-neg` | N/A |
| `lower-neg.f64` | 30.0% |
| Applied rewrites | 30.0% |
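
The expansion around $-\infty$ keeps only the dominant term: as $re \to -\infty$ with $im$ fixed,

$$\sqrt{re^2 + im^2} = |re|\sqrt{1 + \left(\frac{im}{re}\right)^2} \approx |re| = -re$$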

Reproduce with:

```
herbie shell --seed 2024331
(FPCore modulus (re im)
  :name "math.abs on complex"
  :precision binary64
  (sqrt (+ (* re re) (* im im))))
```