
;; Initial program: real part of the complex square (re + i*im)^2 = re^2 - im^2.
(FPCore re_sqr (re im) :precision binary64 (- (* re re) (* im im)))
/* Real part of the complex square (re + i*im)^2, i.e. re^2 - im^2. */
double re_sqr(double re, double im) {
    double re_squared = re * re;
    double im_squared = im * im;
    return re_squared - im_squared;
}
! Real part of the complex square (re + i*im)**2, i.e. re**2 - im**2.
real(8) function re_sqr(re, im)
  implicit none
  real(8), intent(in) :: re, im
  re_sqr = re*re - im*im
end function re_sqr
/** Real part of the complex square (re + i*im)^2: re*re - im*im. */
public static double re_sqr(double re, double im) {
    final double reSq = re * re;
    final double imSq = im * im;
    return reSq - imSq;
}
def re_sqr(re, im):
    """Real part of the complex square (re + 1j*im)**2: re*re - im*im."""
    squared_real = re * re
    squared_imag = im * im
    return squared_real - squared_imag
# Real part of the complex square (re + i*im)^2, evaluated in Float64.
function re_sqr(re, im)
    a = Float64(re * re)
    b = Float64(im * im)
    return Float64(a - b)
end
% Real part of the complex square (re + i*im)^2: re^2 - im^2.
function tmp = re_sqr(re, im)
  tmp = (re * re) - (im * im);
end
(* Real part of the complex square: re*re - im*im, rounded at machine precision. *)
re$95$sqr[re_, im_] := N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
re \cdot re - im \cdot im
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1 (identical to the initial program): re^2 - im^2.
(FPCore re_sqr (re im) :precision binary64 (- (* re re) (* im im)))
/* Alternative 1: direct evaluation of re^2 - im^2. */
double re_sqr(double re, double im) {
    double a = re * re;
    double b = im * im;
    return a - b;
}
! Alternative 1: direct evaluation of re**2 - im**2.
real(8) function re_sqr(re, im)
  implicit none
  real(8), intent(in) :: re, im
  re_sqr = re*re - im*im
end function re_sqr
/** Alternative 1: direct evaluation of re*re - im*im. */
public static double re_sqr(double re, double im) {
    final double a = re * re;
    final double b = im * im;
    return a - b;
}
def re_sqr(re, im):
    """Alternative 1: direct evaluation of re*re - im*im."""
    a = re * re
    b = im * im
    return a - b
# Alternative 1: direct evaluation of re^2 - im^2 in Float64.
function re_sqr(re, im)
    a = Float64(re * re)
    b = Float64(im * im)
    return Float64(a - b)
end
% Alternative 1: direct evaluation of re^2 - im^2.
function tmp = re_sqr(re, im)
  tmp = (re * re) - (im * im);
end
(* Alternative 1: direct evaluation of re*re - im*im at machine precision. *)
re$95$sqr[re_, im_] := N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
re \cdot re - im \cdot im
\end{array}
;; Alternative 2: factored (difference-of-squares) form (re - im) * (re + im).
(FPCore re_sqr (re im) :precision binary64 (* (- re im) (+ re im)))
/* Alternative 2: factored form (re - im) * (re + im). */
double re_sqr(double re, double im) {
    double diff = re - im;
    double sum = re + im;
    return diff * sum;
}
! Alternative 2: factored form (re - im) * (re + im).
real(8) function re_sqr(re, im)
  implicit none
  real(8), intent(in) :: re, im
  re_sqr = (re - im)*(re + im)
end function re_sqr
/** Alternative 2: factored form (re - im) * (re + im). */
public static double re_sqr(double re, double im) {
    final double diff = re - im;
    final double sum = re + im;
    return diff * sum;
}
def re_sqr(re, im):
    """Alternative 2: factored form (re - im) * (re + im)."""
    diff = re - im
    total = re + im
    return diff * total
# Alternative 2: factored form (re - im) * (re + im) in Float64.
function re_sqr(re, im)
    diff = Float64(re - im)
    total = Float64(re + im)
    return Float64(diff * total)
end
% Alternative 2: factored form (re - im) * (re + im).
function tmp = re_sqr(re, im)
  tmp = (re - im) * (re + im);
end
(* Alternative 2: factored form (re - im) * (re + im) at machine precision. *)
re$95$sqr[re_, im_] := N[(N[(re - im), $MachinePrecision] * N[(re + im), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(re - im\right) \cdot \left(re + im\right)
\end{array}
Initial program — 95.7%
difference-of-squares — N/A
*-commutative — N/A
*-lowering-*.f64 — N/A
--lowering--.f64 — N/A
+-lowering-+.f64 — 100.0%
Applied egg-rr — 100.0%
;; Alternative 3: branch on re*re; below the 3.4e192 cutoff return -(im*im), above it re*re.
(FPCore re_sqr (re im) :precision binary64 (if (<= (* re re) 3.4e+192) (- 0.0 (* im im)) (* re re)))
/* Alternative 3 (Herbie-generated): returns 0 - im*im when re*re is at or
 * below the 3.4e192 cutoff, otherwise re*re. */
double re_sqr(double re, double im) {
    const double cutoff = 3.4e+192;
    double result;
    if (re * re <= cutoff) {
        result = 0.0 - (im * im);  /* keep 0.0 - x (not -x) to preserve signed-zero behavior */
    } else {
        result = re * re;
    }
    return result;
}
! Alternative 3 (Herbie-generated): 0 - im*im when re*re is at or below
! the 3.4d+192 cutoff, otherwise re*re.
real(8) function re_sqr(re, im)
  implicit none
  real(8), intent(in) :: re, im
  real(8) :: result
  if (re*re <= 3.4d+192) then
    result = 0.0d0 - im*im
  else
    result = re*re
  end if
  re_sqr = result
end function re_sqr
/** Alternative 3 (Herbie-generated): returns 0 - im*im when re*re is at or
 *  below the 3.4e192 cutoff, otherwise re*re. */
public static double re_sqr(double re, double im) {
    final double cutoff = 3.4e+192;
    double result;
    if (re * re <= cutoff) {
        result = 0.0 - (im * im);  // keep 0.0 - x to preserve signed-zero behavior
    } else {
        result = re * re;
    }
    return result;
}
def re_sqr(re, im):
    """Alternative 3 (Herbie-generated): 0 - im*im when re*re is at or
    below the 3.4e192 cutoff, otherwise re*re.

    Bug fix: the original flattened the if/else onto one line, which is
    not valid Python syntax; reformatted into a proper block.
    """
    tmp = 0
    if (re * re) <= 3.4e+192:
        tmp = 0.0 - (im * im)
    else:
        tmp = re * re
    return tmp
# Alternative 3 (Herbie-generated): 0 - im*im when re*re is at or below
# the 3.4e192 cutoff, otherwise re*re.
# Bug fix: the original juxtaposed statements on one line without separators,
# which Julia rejects; reformatted into a proper block.
function re_sqr(re, im)
    tmp = 0.0
    if Float64(re * re) <= 3.4e+192
        tmp = Float64(0.0 - Float64(im * im))
    else
        tmp = Float64(re * re)
    end
    return tmp
end
% Alternative 3 (Herbie-generated): 0 - im*im when re*re is at or below
% the 3.4e192 cutoff, otherwise re*re.
function tmp_2 = re_sqr(re, im)
  tmp = 0.0;
  if ((re * re) <= 3.4e+192)
    tmp = 0.0 - (im * im);
  else
    tmp = re * re;
  end
  tmp_2 = tmp;
end
(* Alternative 3 (Herbie-generated): 0 - im*im when re*re is at or below the
   cutoff, otherwise re*re.
   Bug fix: "3.4e+192" is not valid Wolfram Language number syntax (it parses
   as 3.4*e + 192); replaced with the *^ exponent notation 3.4*^192. *)
re$95$sqr[re_, im_] := If[LessEqual[N[(re * re), $MachinePrecision], 3.4*^192], N[(0.0 - N[(im * im), $MachinePrecision]), $MachinePrecision], N[(re * re), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;re \cdot re \leq 3.4 \cdot 10^{+192}:\\
\;\;\;\;0 - im \cdot im\\
\mathbf{else}:\\
\;\;\;\;re \cdot re\\
\end{array}
\end{array}
if (*.f64 re re) < 3.39999999999999996e192 — Initial program 100.0%
Taylor expanded in re around 0
mul-1-neg — N/A
neg-sub0 — N/A
--lowering--.f64 — N/A
unpow2 — N/A
*-lowering-*.f64 — 75.2%
Simplified — 75.2%
sub0-neg — N/A
neg-lowering-neg.f64 — N/A
*-lowering-*.f64 — 75.2%
Applied egg-rr — 75.2%
if 3.39999999999999996e192 < (*.f64 re re) — Initial program 86.9%
Taylor expanded in re around inf
unpow2 — N/A
*-lowering-*.f64 — 91.7%
Simplified — 91.7%
Final simplification — 80.6%
;; Alternative 4: drop the im*im term entirely, keeping only re*re.
(FPCore re_sqr (re im) :precision binary64 (* re re))
/* Alternative 4: keeps only the re*re term; im is accepted but unused. */
double re_sqr(double re, double im) {
    (void)im;  /* parameter kept for interface compatibility */
    double result = re * re;
    return result;
}
! Alternative 4: keeps only the re*re term; im is accepted but unused.
real(8) function re_sqr(re, im)
  implicit none
  real(8), intent(in) :: re, im
  re_sqr = re*re
end function re_sqr
/** Alternative 4: keeps only the re*re term; im is accepted but unused. */
public static double re_sqr(double re, double im) {
    final double result = re * re;
    return result;
}
def re_sqr(re, im):
    """Alternative 4: keeps only the re*re term; im is accepted but unused."""
    result = re * re
    return result
# Alternative 4: keeps only the re*re term; im is accepted but unused.
function re_sqr(re, im)
    result = Float64(re * re)
    return result
end
% Alternative 4: keeps only the re*re term; im is accepted but unused.
function tmp = re_sqr(re, im)
  tmp = re * re;
end
(* Alternative 4: keeps only the re*re term at machine precision; im is unused. *)
re$95$sqr[re_, im_] := N[(re * re), $MachinePrecision]
\begin{array}{l}
\\
re \cdot re
\end{array}
Initial program — 95.7%
Taylor expanded in re around inf
unpow2 — N/A
*-lowering-*.f64 — 50.4%
Simplified — 50.4%
herbie shell --seed 2024139
;; Original input program: real part of math.square on a complex number,
;; re^2 - im^2 in binary64.
(FPCore re_sqr (re im)
:name "math.square on complex, real part"
:precision binary64
(- (* re re) (* im im)))