
; x^2 + 3*y^2 accumulated left-to-right as ((x*x + y*y) + y*y) + y*y, binary64.
(FPCore (x y) :precision binary64 (+ (+ (+ (* x x) (* y y)) (* y y)) (* y y)))
/* Computes x*x + 3*y*y, accumulated as ((x^2 + y^2) + y^2) + y^2 in binary64. */
double code(double x, double y) {
    double sq_x = x * x;
    double sq_y = y * y;
    return ((sq_x + sq_y) + sq_y) + sq_y;
}
! Computes x**2 + 3*y**2, accumulated as ((x*x + y*y) + y*y) + y*y in binary64.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: sq_x, sq_y
sq_x = x * x
sq_y = y * y
code = ((sq_x + sq_y) + sq_y) + sq_y
end function
/** Computes x*x + 3*y*y, accumulated as ((x^2 + y^2) + y^2) + y^2 in binary64. */
public static double code(double x, double y) {
    final double sqX = x * x;
    final double sqY = y * y;
    return ((sqX + sqY) + sqY) + sqY;
}
def code(x, y):
    """Return x*x + 3*y*y, accumulated as ((x^2 + y^2) + y^2) + y^2."""
    sq_x = x * x
    sq_y = y * y
    return ((sq_x + sq_y) + sq_y) + sq_y
# Computes x^2 + 3y^2 with an explicit Float64 rounding after every operation.
function code(x, y)
    sq_x = Float64(x * x)
    sq_y = Float64(y * y)
    return Float64(Float64(Float64(sq_x + sq_y) + sq_y) + sq_y)
end
% Computes x^2 + 3*y^2, accumulated as ((x*x + y*y) + y*y) + y*y.
function tmp = code(x, y)
    sq_x = x * x;
    sq_y = y * y;
    tmp = ((sq_x + sq_y) + sq_y) + sq_y;
end
(* x^2 + 3 y^2, rounding every intermediate result to machine precision. *)
code[x_, y_] := N[(N[(N[(N[(x * x), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x \cdot x + y \cdot y\right) + y \cdot y\right) + y \cdot y
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; x^2 + 3*y^2 accumulated left-to-right as ((x*x + y*y) + y*y) + y*y, binary64.
(FPCore (x y) :precision binary64 (+ (+ (+ (* x x) (* y y)) (* y y)) (* y y)))
/* Computes x*x + 3*y*y, accumulated as ((x^2 + y^2) + y^2) + y^2 in binary64. */
double code(double x, double y) {
    double sq_x = x * x;
    double sq_y = y * y;
    return ((sq_x + sq_y) + sq_y) + sq_y;
}
! Computes x**2 + 3*y**2, accumulated as ((x*x + y*y) + y*y) + y*y in binary64.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: sq_x, sq_y
sq_x = x * x
sq_y = y * y
code = ((sq_x + sq_y) + sq_y) + sq_y
end function
/** Computes x*x + 3*y*y, accumulated as ((x^2 + y^2) + y^2) + y^2 in binary64. */
public static double code(double x, double y) {
    final double sqX = x * x;
    final double sqY = y * y;
    return ((sqX + sqY) + sqY) + sqY;
}
def code(x, y):
    """Return x*x + 3*y*y, accumulated as ((x^2 + y^2) + y^2) + y^2."""
    sq_x = x * x
    sq_y = y * y
    return ((sq_x + sq_y) + sq_y) + sq_y
# Computes x^2 + 3y^2 with an explicit Float64 rounding after every operation.
function code(x, y)
    sq_x = Float64(x * x)
    sq_y = Float64(y * y)
    return Float64(Float64(Float64(sq_x + sq_y) + sq_y) + sq_y)
end
% Computes x^2 + 3*y^2, accumulated as ((x*x + y*y) + y*y) + y*y.
function tmp = code(x, y)
    sq_x = x * x;
    sq_y = y * y;
    tmp = ((sq_x + sq_y) + sq_y) + sq_y;
end
(* x^2 + 3 y^2, rounding every intermediate result to machine precision. *)
code[x_, y_] := N[(N[(N[(N[(x * x), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x \cdot x + y \cdot y\right) + y \cdot y\right) + y \cdot y
\end{array}
; fma form of x^2 + 3*y^2: y*y + (x*x + 2*(y*y)), each sum fused.
(FPCore (x y) :precision binary64 (fma y y (fma x x (* 2.0 (* y y)))))
double code(double x, double y) {
return fma(y, y, fma(x, x, (2.0 * (y * y))));
}
# fma form of x^2 + 3y^2: fma(y, y, fma(x, x, 2*y^2)).
function code(x, y)
    two_yy = Float64(2.0 * Float64(y * y))
    return fma(y, y, fma(x, x, two_yy))
end
(* fma form of x^2 + 3 y^2: y*y + (x*x + 2*y*y), machine precision at each step. *)
code[x_, y_] := N[(y * y + N[(x * x + N[(2.0 * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y, y, \mathsf{fma}\left(x, x, 2 \cdot \left(y \cdot y\right)\right)\right)
\end{array}
Initial program 99.9%
+-commutative 99.9%
sqr-neg 99.9%
+-commutative 99.9%
sqr-neg 99.9%
+-commutative 99.9%
fma-define 99.9%
sqr-neg 99.9%
sqr-neg 99.9%
associate-+l+ 99.9%
fma-define 100.0%
count-2 100.0%
Simplified 100.0%
; fma form of x^2 + 3*y^2: x*x + y*(y*3), the addition fused.
(FPCore (x y) :precision binary64 (fma x x (* y (* y 3.0))))
double code(double x, double y) {
return fma(x, x, (y * (y * 3.0)));
}
# fma form of x^2 + 3y^2: fma(x, x, y*(y*3)).
function code(x, y)
    three_yy = Float64(y * Float64(y * 3.0))
    return fma(x, x, three_yy)
end
(* fma form of x^2 + 3 y^2: x*x + y*(y*3), machine precision at each step. *)
code[x_, y_] := N[(x * x + N[(y * N[(y * 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, y \cdot \left(y \cdot 3\right)\right)
\end{array}
Initial program 99.9%
associate-+l+ 99.9%
associate-+l+ 99.9%
fma-define 99.9%
*-lft-identity 99.9%
metadata-eval 99.9%
count-2 99.9%
distribute-rgt-out 99.9%
metadata-eval 99.9%
metadata-eval 99.9%
Simplified 99.9%
add-sqr-sqrt 99.7%
sqrt-unprod 87.1%
swap-sqr 87.1%
pow2 87.1%
pow2 87.1%
pow-prod-up 87.1%
metadata-eval 87.1%
metadata-eval 87.1%
Applied egg-rr 87.1%
sqrt-prod 87.1%
sqrt-pow1 99.9%
metadata-eval 99.9%
pow2 99.9%
associate-*l* 99.9%
metadata-eval 99.9%
Applied egg-rr 99.9%
; Piecewise approximation: returns 3*y^2 when x^2 <= 6.5e-72, or when
; 1.95e+61 < x^2 <= 1.7e+105; otherwise returns x^2.
(FPCore (x y)
:precision binary64
(if (or (<= (* x x) 6.5e-72)
(and (not (<= (* x x) 1.95e+61)) (<= (* x x) 1.7e+105)))
(* y (* y 3.0))
(* x x)))
/* Piecewise approximation: returns 3*y^2 when x^2 <= 6.5e-72 or when
 * 1.95e+61 < x^2 <= 1.7e+105; otherwise returns x^2. */
double code(double x, double y) {
    const double x_sq = x * x;  /* hoisted: the branch tests x*x three times */
    if (x_sq <= 6.5e-72 || (!(x_sq <= 1.95e+61) && x_sq <= 1.7e+105)) {
        return y * (y * 3.0);
    }
    return x_sq;
}
! Piecewise approximation: returns 3*y**2 when x**2 <= 6.5e-72 or when
! 1.95e+61 < x**2 <= 1.7e+105; otherwise returns x**2.
! Note: .and. binds tighter than .or., matching the C rendering.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: x_sq
x_sq = x * x
if ((x_sq <= 6.5d-72) .or. (.not. (x_sq <= 1.95d+61)) .and. (x_sq <= 1.7d+105)) then
    code = y * (y * 3.0d0)
else
    code = x_sq
end if
end function
/** Piecewise approximation: returns 3*y*y when x*x &lt;= 6.5e-72 or when
 *  1.95e+61 &lt; x*x &lt;= 1.7e+105; otherwise returns x*x. */
public static double code(double x, double y) {
    final double xSq = x * x;  // hoisted: the branch tests x*x three times
    if (xSq <= 6.5e-72 || (!(xSq <= 1.95e+61) && xSq <= 1.7e+105)) {
        return y * (y * 3.0);
    }
    return xSq;
}
def code(x, y):
    """Piecewise approximation: return 3*y*y when x*x <= 6.5e-72 or when
    1.95e+61 < x*x <= 1.7e+105; otherwise return x*x.

    Bug fix: the original emitted the whole if/else body on a single line,
    which is invalid Python syntax; this restores the intended structure.
    """
    tmp = 0
    if ((x * x) <= 6.5e-72) or (not ((x * x) <= 1.95e+61) and ((x * x) <= 1.7e+105)):
        tmp = y * (y * 3.0)
    else:
        tmp = x * x
    return tmp
# Piecewise approximation: 3y^2 when x^2 <= 6.5e-72 or 1.95e61 < x^2 <= 1.7e105, else x^2.
function code(x, y)
    x_sq = Float64(x * x)
    if (x_sq <= 6.5e-72) || (!(x_sq <= 1.95e+61) && (x_sq <= 1.7e+105))
        return Float64(y * Float64(y * 3.0))
    end
    return x_sq
end
% Piecewise approximation: 3*y^2 when x^2 <= 6.5e-72 or 1.95e61 < x^2 <= 1.7e105, else x^2.
function tmp_2 = code(x, y)
    x_sq = x * x;
    if ((x_sq <= 6.5e-72) || (~(x_sq <= 1.95e+61) && (x_sq <= 1.7e+105)))
        tmp_2 = y * (y * 3.0);
    else
        tmp_2 = x_sq;
    end
end
(* Piecewise: 3 y^2 when x^2 <= 6.5e-72 or 1.95e61 < x^2 <= 1.7e105, else x^2. *)
code[x_, y_] := If[Or[LessEqual[N[(x * x), $MachinePrecision], 6.5e-72], And[N[Not[LessEqual[N[(x * x), $MachinePrecision], 1.95e+61]], $MachinePrecision], LessEqual[N[(x * x), $MachinePrecision], 1.7e+105]]], N[(y * N[(y * 3.0), $MachinePrecision]), $MachinePrecision], N[(x * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \cdot x \leq 6.5 \cdot 10^{-72} \lor \neg \left(x \cdot x \leq 1.95 \cdot 10^{+61}\right) \land x \cdot x \leq 1.7 \cdot 10^{+105}:\\
\;\;\;\;y \cdot \left(y \cdot 3\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot x\\
\end{array}
\end{array}
if (*.f64 x x) < 6.4999999999999997e-72 or 1.94999999999999994e61 < (*.f64 x x) < 1.7e105
Initial program 99.8%
associate-+l+ 99.8%
associate-+l+ 99.8%
fma-define 99.8%
*-lft-identity 99.8%
metadata-eval 99.8%
count-2 99.8%
distribute-rgt-out 99.8%
metadata-eval 99.8%
metadata-eval 99.8%
Simplified 99.8%
add-sqr-sqrt 99.5%
fma-undefine 99.5%
add-sqr-sqrt 99.5%
hypot-define 99.5%
sqrt-prod 99.6%
sqrt-prod 53.0%
add-sqr-sqrt 99.6%
fma-undefine 99.6%
add-sqr-sqrt 99.6%
hypot-define 99.6%
sqrt-prod 99.4%
sqrt-prod 52.9%
add-sqr-sqrt 99.4%
Applied egg-rr 99.4%
Simplified 99.4%
Taylor expanded in x around 0 85.8%
unpow-prod-down 85.7%
pow2 85.7%
pow2 85.7%
rem-square-sqrt 86.2%
associate-*l* 86.2%
Applied egg-rr 86.2%
if 6.4999999999999997e-72 < (*.f64 x x) < 1.94999999999999994e61 or 1.7e105 < (*.f64 x x)
Initial program 100.0%
Taylor expanded in x around inf 85.7%
pow2 99.9%
Applied egg-rr 85.7%
Final simplification 86.0%
; 3*y^2 + x^2 accumulated as y*y + (y*(y*2) + x*x), binary64.
(FPCore (x y) :precision binary64 (+ (* y y) (+ (* y (* y 2.0)) (* x x))))
/* Computes 3*y^2 + x^2 as y*y + (y*(y*2) + x*x) in binary64. */
double code(double x, double y) {
    const double head = y * y;
    const double tail = (y * (y * 2.0)) + (x * x);
    return head + tail;
}
! Computes 3*y**2 + x**2 as y*y + (y*(y*2) + x*x) in binary64.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: head, tail
head = y * y
tail = (y * (y * 2.0d0)) + (x * x)
code = head + tail
end function
/** Computes 3*y*y + x*x as y*y + (y*(y*2) + x*x) in binary64. */
public static double code(double x, double y) {
    final double head = y * y;
    final double tail = (y * (y * 2.0)) + (x * x);
    return head + tail;
}
def code(x, y):
    """Return y*y + (y*(y*2.0) + x*x), i.e. 3*y^2 + x^2."""
    head = y * y
    tail = (y * (y * 2.0)) + (x * x)
    return head + tail
# 3y^2 + x^2 accumulated as y*y + (y*(y*2) + x*x), rounding every step.
function code(x, y)
    head = Float64(y * y)
    tail = Float64(Float64(y * Float64(y * 2.0)) + Float64(x * x))
    return Float64(head + tail)
end
% Computes 3*y^2 + x^2 as y*y + (y*(y*2) + x*x).
function tmp = code(x, y)
    head = y * y;
    tail = (y * (y * 2.0)) + (x * x);
    tmp = head + tail;
end
(* 3 y^2 + x^2 as y*y + (y*(y*2) + x*x), machine precision at each step. *)
code[x_, y_] := N[(N[(y * y), $MachinePrecision] + N[(N[(y * N[(y * 2.0), $MachinePrecision]), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
y \cdot y + \left(y \cdot \left(y \cdot 2\right) + x \cdot x\right)
\end{array}
Initial program 99.9%
associate-+l+ 99.9%
+-commutative 99.9%
count-2 99.9%
add-sqr-sqrt 99.8%
fma-define 99.8%
*-commutative 99.8%
sqrt-prod 99.8%
sqrt-prod 51.5%
add-sqr-sqrt 75.7%
*-commutative 75.7%
sqrt-prod 75.7%
sqrt-prod 51.4%
add-sqr-sqrt 99.7%
pow2 99.7%
Applied egg-rr 99.7%
Simplified 99.7%
pow2 99.7%
Applied egg-rr 99.7%
unpow-prod-down 99.6%
pow2 99.6%
pow2 99.6%
rem-square-sqrt 99.9%
associate-*l* 99.9%
*-commutative 99.9%
Applied egg-rr 99.9%
Final simplification 99.9%
; Truncated approximation: keeps only the x^2 term; y is ignored.
(FPCore (x y) :precision binary64 (* x x))
/* Truncated approximation: returns only x*x; the y parameter is unused. */
double code(double x, double y) {
return x * x;
}
! Truncated approximation: returns only x**2; the y argument is unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * x
end function
/** Truncated approximation: returns only x*x; the y parameter is unused. */
public static double code(double x, double y) {
return x * x;
}
def code(x, y):
    """Truncated approximation: return only x*x; y is ignored."""
    return x * x
# Truncated approximation: returns only x*x; y is ignored.
function code(x, y) return Float64(x * x) end
% Truncated approximation: returns only x*x; y is ignored.
function tmp = code(x, y) tmp = x * x; end
(* Truncated approximation: only x*x; y is ignored. *)
code[x_, y_] := N[(x * x), $MachinePrecision]
\begin{array}{l}
\\
x \cdot x
\end{array}
Initial program 99.9%
Taylor expanded in x around inf 58.8%
pow2 99.7%
Applied egg-rr 58.8%
; x^2 + 3*y^2 with 3*y formed by repeated addition: x*x + y*(y + (y + y)).
(FPCore (x y) :precision binary64 (+ (* x x) (* y (+ y (+ y y)))))
/* Computes x^2 + 3*y^2 with 3y built by repeated addition: x*x + y*(y + (y + y)). */
double code(double x, double y) {
    const double triple_y = y + (y + y);
    return (x * x) + (y * triple_y);
}
! Computes x**2 + 3*y**2 with 3y built by repeated addition: x*x + y*(y + (y + y)).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: triple_y
triple_y = y + (y + y)
code = (x * x) + (y * triple_y)
end function
/** Computes x*x + 3*y*y with 3y built by repeated addition: x*x + y*(y + (y + y)). */
public static double code(double x, double y) {
    final double tripleY = y + (y + y);
    return (x * x) + (y * tripleY);
}
def code(x, y):
    """Return x*x + y*(y + (y + y)), i.e. x^2 + 3*y^2 with 3y via repeated addition."""
    triple_y = y + (y + y)
    return (x * x) + (y * triple_y)
# x^2 + 3y^2 with 3y built by repeated addition, rounding every step.
function code(x, y)
    triple_y = Float64(y + Float64(y + y))
    return Float64(Float64(x * x) + Float64(y * triple_y))
end
% Computes x^2 + 3*y^2 with 3y built by repeated addition: x*x + y*(y + (y + y)).
function tmp = code(x, y)
    triple_y = y + (y + y);
    tmp = (x * x) + (y * triple_y);
end
(* x^2 + 3 y^2 with 3y built by repeated addition, machine precision at each step. *)
code[x_, y_] := N[(N[(x * x), $MachinePrecision] + N[(y * N[(y + N[(y + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot x + y \cdot \left(y + \left(y + y\right)\right)
\end{array}
herbie shell --seed 2024110
; Original problem statement: x^2 + 3*y^2 accumulated left-to-right,
; with the repeated-addition form recorded as the :alt annotation.
(FPCore (x y)
:name "Linear.Quaternion:$c/ from linear-1.19.1.3, E"
:precision binary64
:alt
(+ (* x x) (* y (+ y (+ y y))))
(+ (+ (+ (* x x) (* y y)) (* y y)) (* y y)))