
;; math.square on complex numbers, real part: Re((re + i*im)^2) = re^2 - im^2.
(FPCore re_sqr (re im) :precision binary64 (- (* re re) (* im im)))
/* Real part of (re + i*im)^2, i.e. re*re - im*im, in double precision. */
double re_sqr(double re, double im) {
    const double re_sq = re * re;
    const double im_sq = im * im;
    return re_sq - im_sq;
}
real(8) function re_sqr(re, im)
    ! Real part of the square of the complex number (re + i*im):
    ! Re((re + i*im)^2) = re*re - im*im.
    implicit none
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    re_sqr = re*re - im*im
end function
/**
 * Real part of (re + i*im) squared, i.e. re*re - im*im.
 *
 * @param re real component
 * @param im imaginary component
 * @return re*re - im*im in double precision
 */
public static double re_sqr(double re, double im) {
    final double reSq = re * re;
    final double imSq = im * im;
    return reSq - imSq;
}
def re_sqr(re, im):
    """Real part of (re + i*im) squared: re*re - im*im."""
    re_sq = re * re
    im_sq = im * im
    return re_sq - im_sq
# Re((re + i*im)^2) = re*re - im*im, with each operation rounded to Float64.
function re_sqr(re, im) return Float64(Float64(re * re) - Float64(im * im)) end
% Re((re + i*im)^2) = re*re - im*im.
function tmp = re_sqr(re, im) tmp = (re * re) - (im * im); end
(* Re((re + i*im)^2) = re*re - im*im; each operation rounded to $MachinePrecision. *)
re$95$sqr[re_, im_] := N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
re \cdot re - im \cdot im
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1: same expression as the input program, re^2 - im^2.
(FPCore re_sqr (re im) :precision binary64 (- (* re re) (* im im)))
/* Alternative 1 (same as the input): re^2 - im^2 computed directly. */
double re_sqr(double re, double im) {
return (re * re) - (im * im);
}
! Alternative 1 (same as the input): re^2 - im^2 computed directly.
real(8) function re_sqr(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = (re * re) - (im * im)
end function
/** Alternative 1 (same as the input): re^2 - im^2 computed directly. */
public static double re_sqr(double re, double im) {
return (re * re) - (im * im);
}
# Alternative 1 (same as the input): re^2 - im^2 computed directly.
def re_sqr(re, im): return (re * re) - (im * im)
# Alternative 1 (same as the input): re^2 - im^2, rounded to Float64 per step.
function re_sqr(re, im) return Float64(Float64(re * re) - Float64(im * im)) end
% Alternative 1 (same as the input): re^2 - im^2 computed directly.
function tmp = re_sqr(re, im) tmp = (re * re) - (im * im); end
(* Alternative 1 (same as the input): re^2 - im^2, rounded to $MachinePrecision per step. *)
re$95$sqr[re_, im_] := N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
re \cdot re - im \cdot im
\end{array}
;; Alternative 2: difference-of-squares factoring, re^2 - im^2 = (re - im)(im + re).
(FPCore re_sqr (re im) :precision binary64 (* (- re im) (+ im re)))
/* Alternative 2: difference-of-squares factoring, re^2 - im^2 = (re - im)(im + re). */
double re_sqr(double re, double im) {
return (re - im) * (im + re);
}
! Alternative 2: difference-of-squares factoring, re^2 - im^2 = (re - im)(im + re).
real(8) function re_sqr(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = (re - im) * (im + re)
end function
/** Alternative 2: difference-of-squares factoring, re^2 - im^2 = (re - im)(im + re). */
public static double re_sqr(double re, double im) {
return (re - im) * (im + re);
}
# Alternative 2: difference-of-squares factoring, re^2 - im^2 = (re - im)(im + re).
def re_sqr(re, im): return (re - im) * (im + re)
# Alternative 2: difference-of-squares factoring, rounded to Float64 per step.
function re_sqr(re, im) return Float64(Float64(re - im) * Float64(im + re)) end
% Alternative 2: difference-of-squares factoring, re^2 - im^2 = (re - im)(im + re).
function tmp = re_sqr(re, im) tmp = (re - im) * (im + re); end
(* Alternative 2: difference-of-squares factoring, rounded to $MachinePrecision per step. *)
re$95$sqr[re_, im_] := N[(N[(re - im), $MachinePrecision] * N[(im + re), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(re - im\right) \cdot \left(im + re\right)
\end{array}
Initial program 94.9%
lift--.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
difference-of-squares N/A
*-commutative N/A
lower-*.f64 N/A
lower--.f64 N/A
+-commutative N/A
lower-+.f64 100.0
Applied rewrites100.0%
;; Alternative 3: branch-selected approximation of re^2 - im^2 -- keep only the
;; dominant term on each side of the cut.  NOTE(review): the -2e-322 cut sits
;; near the smallest subnormal doubles; confirm against the regime inference
;; before reusing this threshold.
(FPCore re_sqr (re im) :precision binary64 (if (<= (- (* re re) (* im im)) -2e-322) (* (- im) im) (* re re)))
/*
 * Alternative 3: branch-selected approximation of re^2 - im^2.
 * When the full difference is <= -2e-322 only -(im*im) is returned;
 * otherwise only re*re is returned.
 * NOTE(review): -2e-322 is near the smallest subnormal doubles; the cut
 * comes from the report's regime inference -- confirm before reuse.
 */
double re_sqr(double re, double im) {
double tmp;
if (((re * re) - (im * im)) <= -2e-322) {
tmp = -im * im;
} else {
tmp = re * re;
}
return tmp;
}
! Alternative 3: branch-selected approximation of re^2 - im^2.
! Returns -im*im when the full difference is <= -2d-322, else re*re.
! NOTE(review): the -2d-322 cut is near the smallest subnormal doubles;
! it comes from the report's regime inference -- confirm before reuse.
real(8) function re_sqr(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
real(8) :: tmp
if (((re * re) - (im * im)) <= (-2d-322)) then
tmp = -im * im
else
tmp = re * re
end if
re_sqr = tmp
end function
/**
 * Alternative 3: branch-selected approximation of re^2 - im^2.
 * Returns -im*im when the full difference is at most -2e-322, else re*re.
 * NOTE(review): the -2e-322 cut is near the smallest subnormal doubles;
 * it comes from the report's regime inference -- confirm before reuse.
 */
public static double re_sqr(double re, double im) {
double tmp;
if (((re * re) - (im * im)) <= -2e-322) {
tmp = -im * im;
} else {
tmp = re * re;
}
return tmp;
}
def re_sqr(re, im):
    """Alternative 3: branch-selected approximation of re*re - im*im.

    Returns -im*im when the full difference is <= -2e-322 (the im^2 term
    dominates), otherwise re*re.
    NOTE(review): the -2e-322 cut is near the smallest subnormal doubles;
    it comes from the report's regime inference -- confirm before reuse.
    """
    # The report emitted this body collapsed onto one line, which is not
    # valid Python syntax; reformatted into proper blocks, logic unchanged.
    tmp = 0
    if ((re * re) - (im * im)) <= -2e-322:
        tmp = -im * im
    else:
        tmp = re * re
    return tmp
# Alternative 3: branch-selected approximation of re*re - im*im.
# Returns -im*im when the full difference is <= -2e-322, else re*re.
# Fixed: the report's one-line form had no statement separator after
# `tmp = 0.0` and did not parse; reformatted, logic unchanged.
function re_sqr(re, im)
    tmp = 0.0
    if (Float64(Float64(re * re) - Float64(im * im)) <= -2e-322)
        tmp = Float64(Float64(-im) * im)
    else
        tmp = Float64(re * re)
    end
    return tmp
end
% Alternative 3: returns -im*im when re^2 - im^2 <= -2e-322, else re*re.
function tmp_2 = re_sqr(re, im) tmp = 0.0; if (((re * re) - (im * im)) <= -2e-322) tmp = -im * im; else tmp = re * re; end tmp_2 = tmp; end
(* Alternative 3: returns -im*im when re^2 - im^2 <= -2*^-322, else re*re. *)
(* Fixed: "-2e-322" is not Wolfram scientific notation (it parses as -2*e - 322
   with a symbolic e); the correct literal is -2*^-322. *)
re$95$sqr[re_, im_] := If[LessEqual[N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision], -2*^-322], N[((-im) * im), $MachinePrecision], N[(re * re), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;re \cdot re - im \cdot im \leq -2 \cdot 10^{-322}:\\
\;\;\;\;\left(-im\right) \cdot im\\
\mathbf{else}:\\
\;\;\;\;re \cdot re\\
\end{array}
\end{array}
if (-.f64 (*.f64 re re) (*.f64 im im)) < -1.97626e-322
Initial program 100.0%
Taylor expanded in re around 0
mul-1-neg N/A
unpow2 N/A
distribute-lft-neg-in N/A
lower-*.f64 N/A
lower-neg.f64 99.5
Applied rewrites99.5%
if -1.97626e-322 < (-.f64 (*.f64 re re) (*.f64 im im))
Initial program 89.9%
Taylor expanded in re around 0
mul-1-neg N/A
unpow2 N/A
distribute-lft-neg-in N/A
lower-*.f64 N/A
lower-neg.f64 19.4
Applied rewrites19.4%
Taylor expanded in re around inf
unpow2 N/A
lower-*.f64 95.3
Applied rewrites95.3%
;; Alternative 4: keep only the re*re term of re^2 - im^2.
(FPCore re_sqr (re im) :precision binary64 (* re re))
/* Alternative 4: keep only the re*re term of re^2 - im^2; im is ignored. */
double re_sqr(double re, double im) {
return re * re;
}
! Alternative 4: keep only the re*re term of re^2 - im^2; im is ignored.
real(8) function re_sqr(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = re * re
end function
/** Alternative 4: keep only the re*re term of re^2 - im^2; im is ignored. */
public static double re_sqr(double re, double im) {
return re * re;
}
# Alternative 4: keep only the re*re term of re^2 - im^2; im is ignored.
def re_sqr(re, im): return re * re
# Alternative 4: keep only the re*re term of re^2 - im^2; im is ignored.
function re_sqr(re, im) return Float64(re * re) end
% Alternative 4: keep only the re*re term of re^2 - im^2; im is ignored.
function tmp = re_sqr(re, im) tmp = re * re; end
(* Alternative 4: keep only the re*re term of re^2 - im^2; im is ignored. *)
re$95$sqr[re_, im_] := N[(re * re), $MachinePrecision]
\begin{array}{l}
\\
re \cdot re
\end{array}
Initial program 94.9%
Taylor expanded in re around 0
mul-1-neg N/A
unpow2 N/A
distribute-lft-neg-in N/A
lower-*.f64 N/A
lower-neg.f64 59.2
Applied rewrites59.2%
Taylor expanded in re around inf
unpow2 N/A
lower-*.f64 49.8
Applied rewrites49.8%
herbie shell --seed 2024339
;; Input program as given to the Herbie shell: real part of the complex square.
(FPCore re_sqr (re im)
:name "math.square on complex, real part"
:precision binary64
(- (* re re) (* im im)))