
; Input program: real part of the square of the complex number (re + i*im), in binary64.
(FPCore re_sqr (re im) :precision binary64 (- (* re re) (* im im)))
/* Real part of (re + i*im)^2, computed as re^2 - im^2. */
double re_sqr(double re, double im) {
    double sq_re = re * re; /* re^2 */
    double sq_im = im * im; /* im^2 */
    return sq_re - sq_im;
}
! Real part of (re + i*im)**2, computed as re**2 - im**2.
! Fix: add implicit none so undeclared names cannot slip in silently.
real(8) function re_sqr(re, im)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = (re * re) - (im * im)
end function
/** Real part of the square of the complex number re + i*im (re^2 - im^2). */
public static double re_sqr(double re, double im) {
    final double reSquared = re * re;
    final double imSquared = im * im;
    return reSquared - imSquared;
}
def re_sqr(re, im):
    """Real part of the square of the complex number re + i*im."""
    re_sq = re * re
    im_sq = im * im
    return re_sq - im_sq
# Real part of the square of the complex number re + i*im.
function re_sqr(re, im)
    re_sq = Float64(re * re)
    im_sq = Float64(im * im)
    return Float64(re_sq - im_sq)
end
% Real part of the square of the complex number re + 1i*im.
function tmp = re_sqr(re, im)
    re_sq = re * re;
    im_sq = im * im;
    tmp = re_sq - im_sq;
end
(* Real part of the square of complex re + i*im, evaluated at machine precision. *)
re$95$sqr[re_, im_] := N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision]
re \cdot re - im \cdot im
Herbie found 2 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: textually identical to the input program.
(FPCore re_sqr (re im) :precision binary64 (- (* re re) (* im im)))
/* Real part of (re + i*im)^2: re^2 - im^2 (same as the input program). */
double re_sqr(double re, double im) {
return (re * re) - (im * im);
}
! Real part of (re + i*im)**2: re**2 - im**2 (same as the input program).
real(8) function re_sqr(re, im)
use fmin_fmax_functions
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = (re * re) - (im * im)
end function
/** Real part of the square of complex re + i*im (same as the input program). */
public static double re_sqr(double re, double im) {
return (re * re) - (im * im);
}
# Real part of the square of complex re + i*im (same as the input program).
def re_sqr(re, im): return (re * re) - (im * im)
# Real part of the square of complex re + i*im (same as the input program).
function re_sqr(re, im) return Float64(Float64(re * re) - Float64(im * im)) end
% Real part of the square of complex re + 1i*im (same as the input program).
function tmp = re_sqr(re, im) tmp = (re * re) - (im * im); end
(* Real part of the square of complex re + i*im (same as the input program). *)
re$95$sqr[re_, im_] := N[(N[(re * re), $MachinePrecision] - N[(im * im), $MachinePrecision]), $MachinePrecision]
re \cdot re - im \cdot im
; Alternative 2: difference-of-squares factoring, (re - im) * (im + re).
(FPCore re_sqr (re im) :precision binary64 (* (- re im) (+ im re)))
/* Difference-of-squares factoring of re^2 - im^2: (re - im) * (im + re). */
double re_sqr(double re, double im) {
    double diff = re - im;
    double sum = im + re;
    return diff * sum;
}
! Difference-of-squares factoring of re**2 - im**2: (re - im) * (im + re).
! Fix: add implicit none so undeclared names cannot slip in silently.
real(8) function re_sqr(re, im)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = (re - im) * (im + re)
end function
/** Difference-of-squares factoring of re^2 - im^2: (re - im) * (im + re). */
public static double re_sqr(double re, double im) {
    final double difference = re - im;
    final double sum = im + re;
    return difference * sum;
}
def re_sqr(re, im):
    """Difference-of-squares factoring of re*re - im*im: (re - im) * (im + re)."""
    diff = re - im
    total = im + re
    return diff * total
# Difference-of-squares factoring of re^2 - im^2.
function re_sqr(re, im)
    diff = Float64(re - im)
    s = Float64(im + re)
    return Float64(diff * s)
end
% Difference-of-squares factoring of re^2 - im^2.
function tmp = re_sqr(re, im)
    d = re - im;
    s = im + re;
    tmp = d * s;
end
(* Difference-of-squares factoring of re^2 - im^2: (re - im) * (im + re). *)
re$95$sqr[re_, im_] := N[(N[(re - im), $MachinePrecision] * N[(im + re), $MachinePrecision]), $MachinePrecision]
\left(re - im\right) \cdot \left(im + re\right)
Initial program 93.8%
lift--.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
difference-of-squares N/A
*-commutative N/A
lower-*.f64 N/A
lower--.f64 N/A
+-commutative N/A
lower-+.f64 100.0%
Applied rewrites 100.0%
; Alternative: (|re| - |im|) * |im|, produced by Taylor expansion in re around 0.
; NOTE(review): this is an approximation (57.2% accuracy below), not algebraically equal to re^2 - im^2.
(FPCore re_sqr (re im) :precision binary64 (* (- (fabs re) (fabs im)) (fabs im)))
/* Herbie approximation of re^2 - im^2: (|re| - |im|) * |im|.
 * NOTE(review): not algebraically equal to the input expression. */
double re_sqr(double re, double im) {
    double abs_re = fabs(re);
    double abs_im = fabs(im);
    return (abs_re - abs_im) * abs_im;
}
! Herbie approximation of re**2 - im**2: (|re| - |im|) * |im|.
! NOTE(review): approximation only (Taylor expanded in re around 0).
! Fix: add implicit none so undeclared names cannot slip in silently.
real(8) function re_sqr(re, im)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
re_sqr = (abs(re) - abs(im)) * abs(im)
end function
/** Herbie approximation of re^2 - im^2: (|re| - |im|) * |im| (not algebraically equal). */
public static double re_sqr(double re, double im) {
    final double absRe = Math.abs(re);
    final double absIm = Math.abs(im);
    return (absRe - absIm) * absIm;
}
def re_sqr(re, im):
    """Herbie approximation of re*re - im*im: (|re| - |im|) * |im| (not algebraically equal)."""
    abs_re = math.fabs(re)
    abs_im = math.fabs(im)
    return (abs_re - abs_im) * abs_im
# Herbie approximation of re^2 - im^2: (|re| - |im|) * |im| (not algebraically equal).
function re_sqr(re, im)
    abs_re = abs(re)
    abs_im = abs(im)
    return Float64(Float64(abs_re - abs_im) * abs_im)
end
% Herbie approximation of re^2 - im^2: (|re| - |im|) * |im| (not algebraically equal).
function tmp = re_sqr(re, im)
    a = abs(re);
    b = abs(im);
    tmp = (a - b) * b;
end
(* Herbie approximation of re^2 - im^2: (|re| - |im|) * |im|; not algebraically equal to the input. *)
re$95$sqr[re_, im_] := N[(N[(N[Abs[re], $MachinePrecision] - N[Abs[im], $MachinePrecision]), $MachinePrecision] * N[Abs[im], $MachinePrecision]), $MachinePrecision]
\left(\left|re\right| - \left|im\right|\right) \cdot \left|im\right|
Initial program 93.8%
lift--.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
difference-of-squares N/A
*-commutative N/A
lower-*.f64 N/A
lower--.f64 N/A
+-commutative N/A
lower-+.f64 100.0%
Applied rewrites 100.0%
Taylor expanded in re around 0
Applied rewrites 57.2%
herbie shell --seed 2025313 -o setup:search
; Input program as echoed by Herbie, including the :name metadata.
(FPCore re_sqr (re im)
:name "math.square on complex, real part"
:precision binary64
(- (* re re) (* im im)))