
(FPCore re_sqr (re im) :precision binary64 (- (* re re) (* im im)))
/* Real part of (re + i*im)^2, evaluated directly as re^2 - im^2. */
double re_sqr(double re, double im) {
    double re2 = re * re;
    double im2 = im * im;
    return re2 - im2;
}
! Real part of (re + i*im)**2, evaluated directly as re*re - im*im.
! Added implicit none: the generated code relied on implicit typing rules.
real(8) function re_sqr(re, im)
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = (re * re) - (im * im)
end function
/** Real part of (re + i*im)^2, evaluated directly as re*re - im*im. */
public static double re_sqr(double re, double im) {
    final double a = re * re;
    final double b = im * im;
    return a - b;
}
def re_sqr(re, im):
    """Real part of (re + 1j*im)**2, evaluated directly as re*re - im*im."""
    square_re = re * re
    square_im = im * im
    return square_re - square_im
function re_sqr(re, im)
    # Real part of (re + im*i)^2; each intermediate is rounded to Float64.
    a = Float64(re * re)
    b = Float64(im * im)
    return Float64(a - b)
end
function tmp = re_sqr(re, im)
% Real part of (re + 1i*im)^2, evaluated directly as re*re - im*im.
% Reformatted: a MATLAB function declaration must be on its own line,
% so the generated single-line form was not valid MATLAB.
    tmp = (re * re) - (im * im);
end
(* Real part of (re + I*im)^2: re*re - im*im, with each step rounded via N at $MachinePrecision. *)
re$95$sqr[re_, im_] := N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
re \cdot re - im \cdot im
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore re_sqr (re im) :precision binary64 (- (* re re) (* im im)))
/* Real part of (re + i*im)^2, evaluated directly as re*re - im*im. */
double re_sqr(double re, double im) {
return (re * re) - (im * im);
}
! Real part of (re + i*im)**2, evaluated directly as re*re - im*im.
! NOTE(review): no implicit none here - relies on explicit declarations only.
real(8) function re_sqr(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = (re * re) - (im * im)
end function
/** Real part of (re + i*im)^2, evaluated directly as re*re - im*im. */
public static double re_sqr(double re, double im) {
return (re * re) - (im * im);
}
# Real part of (re + 1j*im)**2, evaluated directly as re*re - im*im.
def re_sqr(re, im): return (re * re) - (im * im)
# Real part of (re + im*i)^2; each intermediate rounded to Float64.
function re_sqr(re, im) return Float64(Float64(re * re) - Float64(im * im)) end
function tmp = re_sqr(re, im)
% Real part of (re + 1i*im)^2, evaluated directly as re*re - im*im.
% Reformatted: a MATLAB function declaration must be on its own line,
% so the generated single-line form was not valid MATLAB.
    tmp = (re * re) - (im * im);
end
(* Real part of (re + I*im)^2: re*re - im*im, with each step rounded via N at $MachinePrecision. *)
re$95$sqr[re_, im_] := N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
re \cdot re - im \cdot im
\end{array}
(FPCore re_sqr (re im) :precision binary64 (* (- re im) (+ re im)))
/* Real part of (re + i*im)^2 via the factored form (re - im) * (re + im),
 * which avoids cancellation between the two squared terms. */
double re_sqr(double re, double im) {
    double diff = re - im;
    double sum = re + im;
    return diff * sum;
}
! Real part of (re + i*im)**2 via the factored form (re - im)*(re + im).
! Added implicit none: the generated code relied on implicit typing rules.
real(8) function re_sqr(re, im)
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = (re - im) * (re + im)
end function
/** Real part of (re + i*im)^2 via the factored form (re - im) * (re + im). */
public static double re_sqr(double re, double im) {
    final double diff = re - im;
    final double sum = re + im;
    return diff * sum;
}
def re_sqr(re, im):
    """Real part of (re + 1j*im)**2 via the factored form (re-im)*(re+im)."""
    diff = re - im
    total = re + im
    return diff * total
function re_sqr(re, im)
    # Factored difference of squares: (re - im) * (re + im), rounded stepwise.
    d = Float64(re - im)
    s = Float64(re + im)
    return Float64(d * s)
end
function tmp = re_sqr(re, im)
% Real part of (re + 1i*im)^2 via the factored form (re - im) * (re + im).
% Reformatted: a MATLAB function declaration must be on its own line,
% so the generated single-line form was not valid MATLAB.
    tmp = (re - im) * (re + im);
end
(* Real part of (re + I*im)^2 via the factored form (re - im)*(re + im), rounded stepwise via N. *)
re$95$sqr[re_, im_] := N[(N[(re - im), $MachinePrecision] * N[(re + im), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(re - im\right) \cdot \left(re + im\right)
\end{array}
Initial program 94.5%
difference-of-squares N/A
*-commutative N/A
*-lowering-*.f64 N/A
--lowering--.f64 N/A
+-lowering-+.f64 100.0
Applied egg-rr 100.0%
(FPCore re_sqr (re im) :precision binary64 (if (<= (- (* re re) (* im im)) -1e-285) (* im (- 0.0 im)) (* re re)))
/* Branchy approximation of re^2 - im^2: when the exact difference is
 * strongly negative (<= -1e-285) im dominates and the result is -im^2;
 * otherwise re dominates and the result is re^2. */
double re_sqr(double re, double im) {
    const double diff = (re * re) - (im * im);
    if (diff <= -1e-285) {
        return im * (0.0 - im); /* im-dominated branch */
    }
    return re * re; /* re-dominated branch */
}
! Branchy approximation of re**2 - im**2: when the difference is strongly
! negative im dominates (result -im**2), otherwise re dominates (result re**2).
! Added implicit none: the generated code relied on implicit typing rules.
real(8) function re_sqr(re, im)
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
real(8) :: tmp
if (((re * re) - (im * im)) <= (-1d-285)) then
tmp = im * (0.0d0 - im)
else
tmp = re * re
end if
re_sqr = tmp
end function
/**
 * Branchy approximation of re*re - im*im: when the exact difference is
 * strongly negative (<= -1e-285) im dominates and the result is -im*im;
 * otherwise re dominates and the result is re*re.
 */
public static double re_sqr(double re, double im) {
    final double exact = (re * re) - (im * im);
    if (exact <= -1e-285) {
        return im * (0.0 - im); // im-dominated branch
    }
    return re * re; // re-dominated branch
}
def re_sqr(re, im):
    """Branchy approximation of re*re - im*im.

    When the exact difference is strongly negative (<= -1e-285) im dominates
    and the result is -im*im; otherwise re dominates and the result is re*re.
    Reformatted: the generated one-line body was not valid Python syntax.
    """
    if ((re * re) - (im * im)) <= -1e-285:
        return im * (0.0 - im)
    return re * re
function re_sqr(re, im)
    # Branchy approximation: -im^2 when im*im dominates, else re^2.
    # Reformatted: the collapsed single-line body did not parse as Julia.
    if Float64(Float64(re * re) - Float64(im * im)) <= -1e-285
        return Float64(im * Float64(0.0 - im))
    else
        return Float64(re * re)
    end
end
function tmp_2 = re_sqr(re, im)
% Branchy approximation of re*re - im*im: -im^2 when im*im dominates,
% otherwise re^2.
% Reformatted: the collapsed single-line body was not valid MATLAB.
    if ((re * re) - (im * im)) <= -1e-285
        tmp = im * (0.0 - im);
    else
        tmp = re * re;
    end
    tmp_2 = tmp;
end
(* Branchy approximation of re*re - im*im: -im^2 when the difference is
   strongly negative, otherwise re^2.  Fixed threshold literal: the generated
   "-1e-285" is not Wolfram scientific notation (it would parse as 1*e - 285);
   the correct input form is -1.*^-285. *)
re$95$sqr[re_, im_] := If[LessEqual[N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision], -1.*^-285], N[(im * N[(0.0 - im), $MachinePrecision]), $MachinePrecision], N[(re * re), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;re \cdot re - im \cdot im \leq -1 \cdot 10^{-285}:\\
\;\;\;\;im \cdot \left(0 - im\right)\\
\mathbf{else}:\\
\;\;\;\;re \cdot re\\
\end{array}
\end{array}
if (-.f64 (*.f64 re re) (*.f64 im im)) < -1.00000000000000007e-285
Initial program 100.0%
Taylor expanded in re around 0
mul-1-neg N/A
neg-sub0 N/A
--lowering--.f64 N/A
+-rgt-identity N/A
unpow2 N/A
accelerator-lowering-fma.f64 98.0
Simplified 98.0%
+-rgt-identity N/A
*-lowering-*.f64 98.0
Applied egg-rr 98.0%
if -1.00000000000000007e-285 < (-.f64 (*.f64 re re) (*.f64 im im))
Initial program 90.2%
Taylor expanded in re around inf
+-rgt-identity N/A
unpow2 N/A
accelerator-lowering-fma.f64 95.6
Simplified 95.6%
+-rgt-identity N/A
*-lowering-*.f64 95.6
Applied egg-rr 95.6%
Final simplification 96.7%
(FPCore re_sqr (re im) :precision binary64 (* re re))
/* Dominant-term approximation of re^2 - im^2: the im term is dropped. */
double re_sqr(double re, double im) {
    (void)im; /* unused by this approximation */
    double sq = re * re;
    return sq;
}
! Dominant-term approximation of re**2 - im**2: the im term is dropped.
! Added implicit none: the generated code relied on implicit typing rules.
real(8) function re_sqr(re, im)
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = re * re
end function
/** Dominant-term approximation of re*re - im*im: the im term is dropped. */
public static double re_sqr(double re, double im) {
    final double sq = re * re;
    return sq;
}
def re_sqr(re, im):
    """Dominant-term approximation of re*re - im*im: im is ignored."""
    square = re * re
    return square
function re_sqr(re, im)
    # Dominant-term approximation of re^2 - im^2: im is ignored.
    return Float64(re * re)
end
function tmp = re_sqr(re, im)
% Dominant-term approximation of re*re - im*im: im is ignored.
% Reformatted: a MATLAB function declaration must be on its own line,
% so the generated single-line form was not valid MATLAB.
    tmp = re * re;
end
(* Dominant-term approximation of re*re - im*im: im is ignored, result is re^2. *)
re$95$sqr[re_, im_] := N[(re * re), $MachinePrecision]
\begin{array}{l}
\\
re \cdot re
\end{array}
Initial program 94.5%
Taylor expanded in re around inf
+-rgt-identity N/A
unpow2 N/A
accelerator-lowering-fma.f64 54.4
Simplified 54.4%
+-rgt-identity N/A
*-lowering-*.f64 54.4
Applied egg-rr 54.4%
herbie shell --seed 2024196
; Real part of the complex square (re + i*im)^2 = re^2 - im^2, in binary64.
(FPCore re_sqr (re im)
:name "math.square on complex, real part"
:precision binary64
(- (* re re) (* im im)))