
(FPCore (x y) :precision binary64 (+ (+ (+ (* x x) (* y y)) (* y y)) (* y y)))
// Herbie-generated baseline: computes x^2 + 3*y^2 in binary64,
// evaluated strictly left-to-right as (((x*x) + (y*y)) + (y*y)) + (y*y).
// The association order matters for rounding — do not reassociate.
double code(double x, double y) {
return (((x * x) + (y * y)) + (y * y)) + (y * y);
}
! Herbie-generated baseline: computes x**2 + 3*y**2 in real(8),
! evaluated strictly left-to-right; association order affects rounding.
! NOTE(review): generated code has no implicit none / named end — left as emitted.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (((x * x) + (y * y)) + (y * y)) + (y * y)
end function
// Herbie-generated baseline: x^2 + 3*y^2 in double, evaluated left-to-right.
public static double code(double x, double y) {
return (((x * x) + (y * y)) + (y * y)) + (y * y);
}
# Herbie-generated baseline: x**2 + 3*y**2, evaluated left-to-right.
def code(x, y): return (((x * x) + (y * y)) + (y * y)) + (y * y)
# Herbie-generated baseline: x^2 + 3y^2 with every intermediate rounded to Float64.
function code(x, y) return Float64(Float64(Float64(Float64(x * x) + Float64(y * y)) + Float64(y * y)) + Float64(y * y)) end
% Herbie-generated baseline: x^2 + 3*y^2, evaluated left-to-right.
function tmp = code(x, y) tmp = (((x * x) + (y * y)) + (y * y)) + (y * y); end
(* Herbie-generated baseline: x^2 + 3 y^2 with each step rounded to $MachinePrecision. *)
code[x_, y_] := N[(N[(N[(N[(x * x), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x \cdot x + y \cdot y\right) + y \cdot y\right) + y \cdot y
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (+ (+ (+ (* x x) (* y y)) (* y y)) (* y y)))
// Alternative 1 (same as the input program): x^2 + 3*y^2 evaluated left-to-right.
double code(double x, double y) {
return (((x * x) + (y * y)) + (y * y)) + (y * y);
}
! Alternative 1 (same as the input program): x**2 + 3*y**2 evaluated left-to-right.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (((x * x) + (y * y)) + (y * y)) + (y * y)
end function
// Alternative 1 (same as the input program): x^2 + 3*y^2 evaluated left-to-right.
public static double code(double x, double y) {
return (((x * x) + (y * y)) + (y * y)) + (y * y);
}
# Alternative 1 (same as the input program): x**2 + 3*y**2 evaluated left-to-right.
def code(x, y): return (((x * x) + (y * y)) + (y * y)) + (y * y)
# Alternative 1 (same as the input program): x^2 + 3y^2, intermediates rounded to Float64.
function code(x, y) return Float64(Float64(Float64(Float64(x * x) + Float64(y * y)) + Float64(y * y)) + Float64(y * y)) end
% Alternative 1 (same as the input program): x^2 + 3*y^2 evaluated left-to-right.
function tmp = code(x, y) tmp = (((x * x) + (y * y)) + (y * y)) + (y * y); end
(* Alternative 1 (same as the input program): x^2 + 3 y^2, each step rounded to $MachinePrecision. *)
code[x_, y_] := N[(N[(N[(N[(x * x), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x \cdot x + y \cdot y\right) + y \cdot y\right) + y \cdot y
\end{array}
(FPCore (x y) :precision binary64 (fma (* y 2.0) y (pow (hypot x y) 2.0)))
// Alternative 2: fma(2y, y, hypot(x, y)^2) == 2y^2 + (x^2 + y^2) == x^2 + 3y^2.
// fma fuses multiply-add with a single rounding; hypot computes sqrt(x^2+y^2)
// without intermediate overflow/underflow.
double code(double x, double y) {
return fma((y * 2.0), y, pow(hypot(x, y), 2.0));
}
# Alternative 2: fma(2y, y, hypot(x, y)^2) == x^2 + 3y^2 (single rounding in the fma).
function code(x, y) return fma(Float64(y * 2.0), y, (hypot(x, y) ^ 2.0)) end
(* Alternative 2: (2 y) y + (Sqrt[x^2 + y^2])^2 == x^2 + 3 y^2, rounded stepwise. *)
code[x_, y_] := N[(N[(y * 2.0), $MachinePrecision] * y + N[Power[N[Sqrt[x ^ 2 + y ^ 2], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y \cdot 2, y, {\left(\mathsf{hypot}\left(x, y\right)\right)}^{2}\right)
\end{array}
Initial program 99.9%
associate-+r+ 99.9%
+-commutative 99.9%
count-2 99.9%
associate-*r* 99.9%
fma-define 100.0%
*-commutative 100.0%
fma-define 100.0%
add-sqr-sqrt 100.0%
pow2 100.0%
fma-define 100.0%
hypot-define 100.0%
Applied egg-rr 100.0%
(FPCore (x y) :precision binary64 (fma y y (fma x x (* 2.0 (* y y)))))
// Alternative 3: fma(y, y, fma(x, x, 2*(y*y))) == y^2 + x^2 + 2y^2 == x^2 + 3y^2,
// using nested fused multiply-adds (one rounding per fma).
double code(double x, double y) {
return fma(y, y, fma(x, x, (2.0 * (y * y))));
}
# Alternative 3: nested fmas — fma(y, y, fma(x, x, 2*(y*y))) == x^2 + 3y^2.
function code(x, y) return fma(y, y, fma(x, x, Float64(2.0 * Float64(y * y)))) end
(* Alternative 3: y y + (x x + 2 (y y)) == x^2 + 3 y^2, rounded stepwise. *)
code[x_, y_] := N[(y * y + N[(x * x + N[(2.0 * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y, y, \mathsf{fma}\left(x, x, 2 \cdot \left(y \cdot y\right)\right)\right)
\end{array}
Initial program 99.9%
+-commutative 99.9%
sqr-neg 99.9%
+-commutative 99.9%
sqr-neg 99.9%
+-commutative 99.9%
fma-define 100.0%
sqr-neg 100.0%
sqr-neg 100.0%
associate-+l+ 100.0%
fma-define 100.0%
count-2 100.0%
Simplified 100.0%
(FPCore (x y) :precision binary64 (+ (fma x x (* y y)) (* y (* y 2.0))))
// Alternative 4: fma(x, x, y*y) + y*(y*2) == (x^2 + y^2) + 2y^2 == x^2 + 3y^2.
double code(double x, double y) {
return fma(x, x, (y * y)) + (y * (y * 2.0));
}
# Alternative 4: fma(x, x, y*y) + y*(y*2) == x^2 + 3y^2, intermediates rounded to Float64.
function code(x, y) return Float64(fma(x, x, Float64(y * y)) + Float64(y * Float64(y * 2.0))) end
(* Alternative 4: (x x + y y) + y (y 2) == x^2 + 3 y^2, rounded stepwise. *)
code[x_, y_] := N[(N[(x * x + N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(y * N[(y * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, y \cdot y\right) + y \cdot \left(y \cdot 2\right)
\end{array}
Initial program 99.9%
associate-+l+ 99.9%
fma-define 99.9%
Simplified 99.9%
distribute-lft-out 99.9%
*-commutative 99.9%
count-2 99.9%
*-commutative 99.9%
Applied egg-rr 99.9%
Final simplification 99.9%
(FPCore (x y) :precision binary64 (+ (* y y) (+ (* y y) (+ (* y y) (* x x)))))
// Alternative 5: right-associated form y^2 + (y^2 + (y^2 + x^2)) == x^2 + 3y^2.
// Same operations as the baseline in a different association order.
double code(double x, double y) {
return (y * y) + ((y * y) + ((y * y) + (x * x)));
}
! Alternative 5: right-associated form y**2 + (y**2 + (y**2 + x**2)) == x**2 + 3*y**2.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (y * y) + ((y * y) + ((y * y) + (x * x)))
end function
// Alternative 5: right-associated form y^2 + (y^2 + (y^2 + x^2)) == x^2 + 3y^2.
public static double code(double x, double y) {
return (y * y) + ((y * y) + ((y * y) + (x * x)));
}
# Alternative 5: right-associated form y**2 + (y**2 + (y**2 + x**2)) == x**2 + 3*y**2.
def code(x, y): return (y * y) + ((y * y) + ((y * y) + (x * x)))
# Alternative 5: right-associated form y^2 + (y^2 + (y^2 + x^2)), intermediates rounded to Float64.
function code(x, y) return Float64(Float64(y * y) + Float64(Float64(y * y) + Float64(Float64(y * y) + Float64(x * x)))) end
% Alternative 5: right-associated form y^2 + (y^2 + (y^2 + x^2)) == x^2 + 3*y^2.
function tmp = code(x, y) tmp = (y * y) + ((y * y) + ((y * y) + (x * x))); end
(* Alternative 5: y y + (y y + (y y + x x)) == x^2 + 3 y^2, rounded stepwise. *)
code[x_, y_] := N[(N[(y * y), $MachinePrecision] + N[(N[(y * y), $MachinePrecision] + N[(N[(y * y), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
y \cdot y + \left(y \cdot y + \left(y \cdot y + x \cdot x\right)\right)
\end{array}
Initial program 99.9%
Final simplification 99.9%
(FPCore (x y) :precision binary64 (* y (* y 3.0)))
// Alternative 6: y * (y * 3) == 3y^2 — an approximation that drops the x^2 term
// (per the report's derivation log: Taylor expansion in y around inf; ~59.5% accuracy).
double code(double x, double y) {
return y * (y * 3.0);
}
! Alternative 6: y * (y * 3) == 3*y**2 — approximation dropping the x**2 term
! (Taylor expansion in y around inf per the report; x is intentionally unused).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y * (y * 3.0d0)
end function
// Alternative 6: y * (y * 3) == 3y^2 — approximation dropping the x^2 term; x is unused.
public static double code(double x, double y) {
return y * (y * 3.0);
}
# Alternative 6: y * (y * 3) == 3*y**2 — approximation dropping the x**2 term; x is unused.
def code(x, y): return y * (y * 3.0)
# Alternative 6: y * (y * 3) == 3y^2 — approximation dropping the x^2 term; x is unused.
function code(x, y) return Float64(y * Float64(y * 3.0)) end
% Alternative 6: y * (y * 3) == 3*y^2 — approximation dropping the x^2 term; x is unused.
function tmp = code(x, y) tmp = y * (y * 3.0); end
(* Alternative 6: y (y 3) == 3 y^2 — approximation dropping the x^2 term; x is unused. *)
code[x_, y_] := N[(y * N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
y \cdot \left(y \cdot 3\right)
\end{array}
Initial program 99.9%
associate-+r+ 99.9%
+-commutative 99.9%
count-2 99.9%
associate-*r* 99.9%
fma-define 100.0%
*-commutative 100.0%
fma-define 100.0%
add-sqr-sqrt 100.0%
pow2 100.0%
fma-define 100.0%
hypot-define 100.0%
Applied egg-rr 100.0%
Applied egg-rr 99.7%
Taylor expanded in y around inf 59.5%
unpow-prod-down 59.4%
pow2 59.4%
pow2 59.4%
rem-square-sqrt 59.7%
*-commutative 59.7%
associate-*l* 59.7%
metadata-eval 59.7%
distribute-lft1-in 59.7%
*-un-lft-identity 59.7%
distribute-rgt-out 59.7%
metadata-eval 59.7%
Applied egg-rr 59.7%
Final simplification 59.7%
(FPCore (x y) :precision binary64 0.0)
// Degenerate alternative: constant 0.0 — ignores both inputs
// (listed by the report with very low accuracy; kept only for completeness).
double code(double x, double y) {
return 0.0;
}
! Degenerate alternative: constant 0.0 — ignores both inputs (low-accuracy entry).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.0d0
end function
// Degenerate alternative: constant 0.0 — ignores both inputs (low-accuracy entry).
public static double code(double x, double y) {
return 0.0;
}
# Degenerate alternative: constant 0.0 — ignores both inputs (low-accuracy entry).
def code(x, y): return 0.0
# Degenerate alternative: constant 0.0 — ignores both inputs (low-accuracy entry).
function code(x, y) return 0.0 end
% Degenerate alternative: constant 0.0 — ignores both inputs (low-accuracy entry).
function tmp = code(x, y) tmp = 0.0; end
(* Degenerate alternative: constant 0.0 — ignores both inputs (low-accuracy entry). *)
code[x_, y_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 99.9%
associate-+r+ 99.9%
flip3-+ 19.2%
flip-+ 0.0%
frac-add 0.0%
Applied egg-rr 0.0%
Simplified 11.6%
(FPCore (x y) :precision binary64 (+ (* x x) (* y (+ y (+ y y)))))
// Reference alternative: x*x + y*(y + (y + y)) == x^2 + 3y^2,
// factoring the three y^2 terms as y * (y + y + y).
double code(double x, double y) {
return (x * x) + (y * (y + (y + y)));
}
! Reference alternative: x*x + y*(y + (y + y)) == x**2 + 3*y**2.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x * x) + (y * (y + (y + y)))
end function
// Reference alternative: x*x + y*(y + (y + y)) == x^2 + 3y^2.
public static double code(double x, double y) {
return (x * x) + (y * (y + (y + y)));
}
# Reference alternative: x*x + y*(y + (y + y)) == x**2 + 3*y**2.
def code(x, y): return (x * x) + (y * (y + (y + y)))
# Reference alternative: x*x + y*(y + (y + y)) == x^2 + 3y^2, intermediates rounded to Float64.
function code(x, y) return Float64(Float64(x * x) + Float64(y * Float64(y + Float64(y + y)))) end
% Reference alternative: x*x + y*(y + (y + y)) == x^2 + 3*y^2.
function tmp = code(x, y) tmp = (x * x) + (y * (y + (y + y))); end
(* Reference alternative: x x + y (y + (y + y)) == x^2 + 3 y^2, rounded stepwise. *)
code[x_, y_] := N[(N[(x * x), $MachinePrecision] + N[(y * N[(y + N[(y + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot x + y \cdot \left(y + \left(y + y\right)\right)
\end{array}
herbie shell --seed 2024143
(FPCore (x y)
:name "Linear.Quaternion:$c/ from linear-1.19.1.3, E"
:precision binary64
:alt
(! :herbie-platform default (+ (* x x) (* y (+ y (+ y y)))))
(+ (+ (+ (* x x) (* y y)) (* y y)) (* y y)))