
(FPCore modulus (re im) :precision binary64 (sqrt (+ (* re re) (* im im))))
/* Complex modulus |re + i*im| via the naive formula sqrt(re*re + im*im).
 * NOTE(review): re*re / im*im overflow to +inf when |re| or |im| exceeds
 * ~1.34e154 (sqrt(DBL_MAX)) and lose precision to underflow for tiny
 * inputs; hypot(re, im) avoids both (see the 100%-accuracy alternative
 * below in this report). */
double modulus(double re, double im) {
return sqrt(((re * re) + (im * im)));
}
! Complex modulus |re + i*im| via the naive formula sqrt(re*re + im*im).
! NOTE(review): the squares overflow for |x| > ~1.34e154 and underflow for
! tiny inputs; the F2008 intrinsic hypot(re, im) (or norm2([re, im])) is
! robust. Generated code: no implicit none, nonstandard real(8) kind.
real(8) function modulus(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
modulus = sqrt(((re * re) + (im * im)))
end function
/** Complex modulus |re + i*im| via the naive sqrt(re*re + im*im).
 *  NOTE(review): the squares overflow/underflow for extreme inputs;
 *  Math.hypot(re, im) is the robust form. */
public static double modulus(double re, double im) {
return Math.sqrt(((re * re) + (im * im)));
}
def modulus(re, im):
    """Complex modulus |re + i*im| via the naive formula sqrt(re*re + im*im).

    Note: the intermediate squares can overflow/underflow for extreme
    inputs; math.hypot(re, im) is the robust alternative.
    """
    return math.sqrt((re * re) + (im * im))
# Complex modulus via naive sqrt(re^2 + im^2); the squares can overflow/underflow — hypot(re, im) is safer.
function modulus(re, im) return sqrt(Float64(Float64(re * re) + Float64(im * im))) end
% Complex modulus via naive sqrt(re^2 + im^2); hypot(re, im) avoids overflow of the squares.
function tmp = modulus(re, im) tmp = sqrt(((re * re) + (im * im))); end
(* Complex modulus via naive Sqrt[re^2 + im^2], each operation rounded to $MachinePrecision. *)
modulus[re_, im_] := N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{re \cdot re + im \cdot im}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore modulus (re im) :precision binary64 (sqrt (+ (* re re) (* im im))))
/* Input program repeated as alternative #1: naive sqrt(re*re + im*im);
 * the squares can overflow/underflow for extreme inputs. */
double modulus(double re, double im) {
return sqrt(((re * re) + (im * im)));
}
! Input program repeated as alternative #1: naive sqrt(re*re + im*im).
real(8) function modulus(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
modulus = sqrt(((re * re) + (im * im)))
end function
/** Input program repeated as alternative #1: naive sqrt(re*re + im*im). */
public static double modulus(double re, double im) {
return Math.sqrt(((re * re) + (im * im)));
}
def modulus(re, im):
    """Input program repeated as alternative #1: naive sqrt(re*re + im*im)."""
    square_sum = (re * re) + (im * im)
    return math.sqrt(square_sum)
# Input program repeated as alternative #1: naive sqrt(re^2 + im^2).
function modulus(re, im) return sqrt(Float64(Float64(re * re) + Float64(im * im))) end
% Input program repeated as alternative #1: naive sqrt(re^2 + im^2).
function tmp = modulus(re, im) tmp = sqrt(((re * re) + (im * im))); end
(* Input program repeated as alternative #1: naive Sqrt[re^2 + im^2]. *)
modulus[re_, im_] := N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{re \cdot re + im \cdot im}
\end{array}
(FPCore modulus (re im) :precision binary64 (hypot re im))
/* Alternative #2 (100% reported accuracy): hypot computes
 * sqrt(re^2 + im^2) without intermediate overflow/underflow. */
double modulus(double re, double im) {
return hypot(re, im);
}
/** Alternative #2 (100% reported accuracy): overflow-safe Math.hypot. */
public static double modulus(double re, double im) {
return Math.hypot(re, im);
}
def modulus(re, im): return math.hypot(re, im)
# Alternative #2 (100% reported accuracy): overflow-safe hypot.
function modulus(re, im) return hypot(re, im) end
% Alternative #2 (100% reported accuracy): overflow-safe hypot.
function tmp = modulus(re, im) tmp = hypot(re, im); end
(* NOTE(review): the Mathematica rendering of the hypot alternative falls back to a plain Sqrt[re^2 + im^2] at $MachinePrecision, not a hypot-style algorithm — confirm this is the intended translation. *)
modulus[re_, im_] := N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{hypot}\left(re, im\right)
\end{array}
Initial program 56.0%
lift-sqrt.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
lower-hypot.f64 100.0
Applied rewrites 100.0%
(FPCore modulus (re im) :precision binary64 (let* ((t_0 (fma (* (/ 0.5 im) re) re (- im)))) (* t_0 (/ (fma (* 0.5 re) (/ re im) im) t_0))))
/* Alternative #3, from a Taylor expansion in re around 0 (per the trace
 * below): approximates sqrt(re^2 + im^2) as im + re^2/(2*im), so it is
 * only meaningful for |re| << im (reported accuracy ~27.6%). Divides by
 * im, so im == 0 produces inf/NaN. The t_0 * (x / t_0) shape is
 * Herbie's way of pinning a particular evaluation order — do not
 * "simplify" it away. */
double modulus(double re, double im) {
double t_0 = fma(((0.5 / im) * re), re, -im);
return t_0 * (fma((0.5 * re), (re / im), im) / t_0);
}
# Alternative #3: series approximation im + re^2/(2*im) (valid only for |re| << im); the t_0*(x/t_0) shape pins evaluation order.
function modulus(re, im) t_0 = fma(Float64(Float64(0.5 / im) * re), re, Float64(-im)) return Float64(t_0 * Float64(fma(Float64(0.5 * re), Float64(re / im), im) / t_0)) end
(* Alternative #3: series approximation im + re^2/(2*im), valid only for |re| << im. *)
modulus[re_, im_] := Block[{t$95$0 = N[(N[(N[(0.5 / im), $MachinePrecision] * re), $MachinePrecision] * re + (-im)), $MachinePrecision]}, N[(t$95$0 * N[(N[(N[(0.5 * re), $MachinePrecision] * N[(re / im), $MachinePrecision] + im), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\frac{0.5}{im} \cdot re, re, -im\right)\\
t\_0 \cdot \frac{\mathsf{fma}\left(0.5 \cdot re, \frac{re}{im}, im\right)}{t\_0}
\end{array}
\end{array}
Initial program 56.0%
Taylor expanded in re around 0
+-commutative N/A
*-lft-identity N/A
associate-*l/ N/A
associate-*l* N/A
lower-fma.f64 N/A
associate-*r/ N/A
metadata-eval N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 27.5
Applied rewrites 27.5%
Applied rewrites 28.3%
Applied rewrites 27.6%
Applied rewrites 27.6%
(FPCore modulus (re im) :precision binary64 (fma (* (/ 0.5 im) re) re im))
/* Alternative #4, from the same Taylor expansion in re around 0:
 * im + re^2/(2*im) in a single fma. Valid only for |re| << im
 * (reported accuracy ~28.3%); im == 0 produces inf/NaN. */
double modulus(double re, double im) {
return fma(((0.5 / im) * re), re, im);
}
# Alternative #4: single-fma series approximation im + re^2/(2*im), valid only for |re| << im.
function modulus(re, im) return fma(Float64(Float64(0.5 / im) * re), re, im) end
(* Alternative #4: series approximation im + re^2/(2*im), valid only for |re| << im. *)
modulus[re_, im_] := N[(N[(N[(0.5 / im), $MachinePrecision] * re), $MachinePrecision] * re + im), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{0.5}{im} \cdot re, re, im\right)
\end{array}
Initial program 56.0%
Taylor expanded in re around 0
+-commutative N/A
*-lft-identity N/A
associate-*l/ N/A
associate-*l* N/A
lower-fma.f64 N/A
associate-*r/ N/A
metadata-eval N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 27.5
Applied rewrites 27.5%
Applied rewrites 28.3%
(FPCore modulus (re im) :precision binary64 (sqrt (fma re re (* im im))))
/* Alternative #5: same naive formula but with re*re + (im*im) fused into
 * one fma, saving a rounding step. Same reported accuracy as the input
 * (56.0%) — still overflows/underflows for extreme inputs. */
double modulus(double re, double im) {
return sqrt(fma(re, re, (im * im)));
}
# Alternative #5: naive formula with the sum fused into one fma; same accuracy as the input.
function modulus(re, im) return sqrt(fma(re, re, Float64(im * im))) end
(* Alternative #5: naive formula with re*re + im*im computed as one fused step. *)
modulus[re_, im_] := N[Sqrt[N[(re * re + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\mathsf{fma}\left(re, re, im \cdot im\right)}
\end{array}
Initial program 56.0%
lift-+.f64 N/A
lift-*.f64 N/A
lower-fma.f64 56.0
Applied rewrites 56.0%
(FPCore modulus (re im) :precision binary64 (sqrt (* im im)))
/* Alternative #6, from a Taylor expansion in re around 0 with re dropped
 * entirely: sqrt(im*im), i.e. |im| up to overflow/underflow of the
 * square. Only meaningful when |re| is negligible (reported accuracy
 * ~32.5%). */
double modulus(double re, double im) {
return sqrt((im * im));
}
! Alternative #6: sqrt(im*im), i.e. |im|; re is dropped entirely, so this
! is only meaningful when |re| is negligible.
real(8) function modulus(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
modulus = sqrt((im * im))
end function
/** Alternative #6: sqrt(im*im), i.e. |im|; re is dropped entirely. */
public static double modulus(double re, double im) {
return Math.sqrt((im * im));
}
def modulus(re, im):
    """Alternative #6: sqrt(im*im), i.e. |im| — re is dropped entirely."""
    return math.sqrt(im * im)
# Alternative #6: sqrt(im*im), i.e. |im|; re is dropped entirely.
function modulus(re, im) return sqrt(Float64(im * im)) end
% Alternative #6: sqrt(im*im), i.e. |im|; re is dropped entirely.
function tmp = modulus(re, im) tmp = sqrt((im * im)); end
(* Alternative #6: Sqrt[im^2], i.e. |im|; re is dropped entirely. *)
modulus[re_, im_] := N[Sqrt[N[(im * im), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{im \cdot im}
\end{array}
Initial program 56.0%
Taylor expanded in re around 0
unpow2 N/A
lower-*.f64 32.5
Applied rewrites 32.5%
(FPCore modulus (re im) :precision binary64 (- re))
/* Alternative #7, from a Taylor expansion in re around -inf: returns -re,
 * the asymptotic value of sqrt(re^2 + im^2) as re -> -inf. Wrong sign for
 * positive re; lowest reported accuracy (~23.9%). */
double modulus(double re, double im) {
return -re;
}
! Alternative #7: -re, the asymptotic value of the modulus as re -> -inf.
real(8) function modulus(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
modulus = -re
end function
/** Alternative #7: -re, the asymptotic value of the modulus as re -> -inf. */
public static double modulus(double re, double im) {
return -re;
}
def modulus(re, im):
    """Alternative #7: -re, the asymptotic modulus as re -> -inf."""
    return -re
# Alternative #7: -re, the asymptotic modulus as re -> -inf.
function modulus(re, im) return Float64(-re) end
% Alternative #7: -re, the asymptotic modulus as re -> -inf.
function tmp = modulus(re, im) tmp = -re; end
(* Alternative #7: -re, the asymptotic modulus as re -> -inf. *)
modulus[re_, im_] := (-re)
\begin{array}{l}
\\
-re
\end{array}
Initial program 56.0%
Taylor expanded in re around -inf
mul-1-neg N/A
lower-neg.f64 23.9
Applied rewrites 23.9%
herbie shell --seed 2024339
(FPCore modulus (re im)
:name "math.abs on complex"
:precision binary64
(sqrt (+ (* re re) (* im im))))