
(FPCore (x y) :precision binary64 (- (pow x 4.0) (pow y 4.0)))
/* x^4 - y^4 computed directly with pow.
 * NOTE(review): the report samples this at ~84.8% accuracy; the
 * subtraction cancels catastrophically when x is close to y. */
double code(double x, double y) {
return pow(x, 4.0) - pow(y, 4.0);
}
! x**4 - y**4 computed directly; loses accuracy to cancellation when x ~ y.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x ** 4.0d0) - (y ** 4.0d0)
end function
// x^4 - y^4 computed directly with Math.pow; cancels when x is close to y
// (report accuracy ~84.8%).
public static double code(double x, double y) {
return Math.pow(x, 4.0) - Math.pow(y, 4.0);
}
def code(x, y):
    """x**4 - y**4, evaluated directly via math.pow (binary64)."""
    x4 = math.pow(x, 4.0)
    y4 = math.pow(y, 4.0)
    return x4 - y4
# x^4 - y^4 computed directly in Float64.
function code(x, y) return Float64((x ^ 4.0) - (y ^ 4.0)) end
% x^4 - y^4 computed directly.
function tmp = code(x, y) tmp = (x ^ 4.0) - (y ^ 4.0); end
(* x^4 - y^4 evaluated directly at $MachinePrecision. *)
code[x_, y_] := N[(N[Power[x, 4.0], $MachinePrecision] - N[Power[y, 4.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{x}^{4} - {y}^{4}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (- (pow x 4.0) (pow y 4.0)))
/* x^4 - y^4 computed directly with pow.
 * NOTE(review): the report samples this at ~84.8% accuracy; the
 * subtraction cancels catastrophically when x is close to y. */
double code(double x, double y) {
return pow(x, 4.0) - pow(y, 4.0);
}
! x**4 - y**4 computed directly; loses accuracy to cancellation when x ~ y.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x ** 4.0d0) - (y ** 4.0d0)
end function
// x^4 - y^4 computed directly with Math.pow; cancels when x is close to y
// (report accuracy ~84.8%).
public static double code(double x, double y) {
return Math.pow(x, 4.0) - Math.pow(y, 4.0);
}
def code(x, y):
    """x**4 - y**4, evaluated directly via math.pow (binary64)."""
    fourth_x = math.pow(x, 4.0)
    fourth_y = math.pow(y, 4.0)
    return fourth_x - fourth_y
# x^4 - y^4 computed directly in Float64.
function code(x, y) return Float64((x ^ 4.0) - (y ^ 4.0)) end
% x^4 - y^4 computed directly.
function tmp = code(x, y) tmp = (x ^ 4.0) - (y ^ 4.0); end
(* x^4 - y^4 evaluated directly at $MachinePrecision. *)
code[x_, y_] := N[(N[Power[x, 4.0], $MachinePrecision] - N[Power[y, 4.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{x}^{4} - {y}^{4}
\end{array}
(FPCore (x y) :precision binary64 (/ (+ x y) (/ 1.0 (* (fma x x (* y y)) (- x y)))))
/* Herbie alternative: the factored form (x+y)*(x^2+y^2)*(x-y) of
 * x^4 - y^4, expressed as (x+y) divided by the reciprocal of
 * fma(x,x,y*y)*(x-y); the fma fuses x*x + y*y in one rounding. */
double code(double x, double y) {
return (x + y) / (1.0 / (fma(x, x, (y * y)) * (x - y)));
}
# Factored x^4 - y^4 = (x+y)(x^2+y^2)(x-y), via division by a reciprocal; fma fuses x*x + y*y.
function code(x, y) return Float64(Float64(x + y) / Float64(1.0 / Float64(fma(x, x, Float64(y * y)) * Float64(x - y)))) end
(* Factored x^4 - y^4 = (x+y)(x^2+y^2)(x-y), written as (x+y) over a reciprocal. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(1.0 / N[(N[(x * x + N[(y * y), $MachinePrecision]), $MachinePrecision] * N[(x - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{\frac{1}{\mathsf{fma}\left(x, x, y \cdot y\right) \cdot \left(x - y\right)}}
\end{array}
Initial program 84.8%
sqr-powN/A
sqr-powN/A
difference-of-squaresN/A
*-lowering-*.f64N/A
metadata-evalN/A
unpow2N/A
accelerator-lowering-fma.f64N/A
metadata-evalN/A
unpow2N/A
*-lowering-*.f64N/A
metadata-evalN/A
unpow2N/A
metadata-evalN/A
unpow2N/A
difference-of-squaresN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
--lowering--.f6499.8
Applied egg-rr99.8%
flip-+N/A
div-invN/A
*-lowering-*.f64N/A
difference-of-squaresN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
--lowering--.f6499.8
Applied egg-rr99.8%
*-commutativeN/A
associate-*r*N/A
difference-of-squaresN/A
div-invN/A
flip-+N/A
flip3-+N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
--lowering--.f64N/A
clear-numN/A
Applied egg-rr99.8%
associate-/r/N/A
/-rgt-identityN/A
*-commutativeN/A
remove-double-divN/A
un-div-invN/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
--lowering--.f6499.9
Applied egg-rr99.9%
(FPCore (x y) :precision binary64 (if (<= (- (pow x 4.0) (pow y 4.0)) -1e-288) (* (* y (* y y)) (- y)) (* x (* x (* x x)))))
/* Herbie alternative: piecewise approximation of x^4 - y^4 (per the
 * trace, Taylor expansions in x around 0 and around inf).
 * If the directly-computed difference is <= -1e-288 (y dominates),
 * return -y^4 as (y*(y*y))*(-y); otherwise return x^4. */
double code(double x, double y) {
double tmp;
if ((pow(x, 4.0) - pow(y, 4.0)) <= -1e-288) {
tmp = (y * (y * y)) * -y;
} else {
tmp = x * (x * (x * x));
}
return tmp;
}
! Piecewise approximation of x**4 - y**4: returns -y**4 (as
! (y*(y*y))*(-y)) when the computed difference is <= -1d-288,
! else x**4.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (((x ** 4.0d0) - (y ** 4.0d0)) <= (-1d-288)) then
tmp = (y * (y * y)) * -y
else
tmp = x * (x * (x * x))
end if
code = tmp
end function
// Piecewise approximation of x^4 - y^4: returns -y^4 (as (y*(y*y))*(-y))
// when the computed difference is <= -1e-288, else x^4.
public static double code(double x, double y) {
double tmp;
if ((Math.pow(x, 4.0) - Math.pow(y, 4.0)) <= -1e-288) {
tmp = (y * (y * y)) * -y;
} else {
tmp = x * (x * (x * x));
}
return tmp;
}
def code(x, y):
    """Piecewise approximation of x**4 - y**4 (Herbie alternative).

    Returns -y**4 (as (y*(y*y))*(-y)) when the directly-computed
    difference is <= -1e-288, i.e. when y dominates; otherwise x**4.
    """
    # BUG FIX: the generated one-liner flattened this if/else onto a
    # single line, which is not valid Python syntax; restored the
    # multi-line form matching the C/Java/Julia translations.
    if (math.pow(x, 4.0) - math.pow(y, 4.0)) <= -1e-288:
        tmp = (y * (y * y)) * -y
    else:
        tmp = x * (x * (x * x))
    return tmp
# Piecewise: -y^4 (as (y*(y*y))*(-y)) when the computed difference <= -1e-288, else x^4.
function code(x, y) tmp = 0.0 if (Float64((x ^ 4.0) - (y ^ 4.0)) <= -1e-288) tmp = Float64(Float64(y * Float64(y * y)) * Float64(-y)); else tmp = Float64(x * Float64(x * Float64(x * x))); end return tmp end
% Piecewise: -y^4 (as (y*(y*y))*(-y)) when the computed difference <= -1e-288, else x^4.
function tmp_2 = code(x, y) tmp = 0.0; if (((x ^ 4.0) - (y ^ 4.0)) <= -1e-288) tmp = (y * (y * y)) * -y; else tmp = x * (x * (x * x)); end tmp_2 = tmp; end
(* Piecewise: -y^4 (as (y*(y*y))*(-y)) when the computed difference <= -1e-288, else x^4. *)
code[x_, y_] := If[LessEqual[N[(N[Power[x, 4.0], $MachinePrecision] - N[Power[y, 4.0], $MachinePrecision]), $MachinePrecision], -1e-288], N[(N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision] * (-y)), $MachinePrecision], N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;{x}^{4} - {y}^{4} \leq -1 \cdot 10^{-288}:\\
\;\;\;\;\left(y \cdot \left(y \cdot y\right)\right) \cdot \left(-y\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(x \cdot \left(x \cdot x\right)\right)\\
\end{array}
\end{array}
if (-.f64 (pow.f64 x #s(literal 4 binary64)) (pow.f64 y #s(literal 4 binary64))) < -1.00000000000000006e-288
Initial program 100.0%
Taylor expanded in x around 0
mul-1-negN/A
neg-lowering-neg.f64N/A
pow-lowering-pow.f64100.0
Simplified100.0%
metadata-evalN/A
pow-sqrN/A
pow2N/A
pow2N/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
neg-lowering-neg.f6499.8
Applied egg-rr99.8%
if -1.00000000000000006e-288 < (-.f64 (pow.f64 x #s(literal 4 binary64)) (pow.f64 y #s(literal 4 binary64)))
Initial program 74.0%
Taylor expanded in x around inf
pow-lowering-pow.f6487.3
Simplified87.3%
metadata-evalN/A
pow-plusN/A
cube-unmultN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6487.3
Applied egg-rr87.3%
Final simplification92.4%
(FPCore (x y) :precision binary64 (if (<= (- (pow x 4.0) (pow y 4.0)) -1e-288) (* (* y y) (- (* y y))) (* x (* x (* x x)))))
/* Herbie alternative: piecewise approximation of x^4 - y^4.
 * If the directly-computed difference is <= -1e-288 (y dominates),
 * return -y^4 as (y*y)*(-(y*y)); otherwise return x^4. */
double code(double x, double y) {
double tmp;
if ((pow(x, 4.0) - pow(y, 4.0)) <= -1e-288) {
tmp = (y * y) * -(y * y);
} else {
tmp = x * (x * (x * x));
}
return tmp;
}
! Piecewise approximation of x**4 - y**4: returns -y**4 (as
! (y*y)*(-(y*y))) when the computed difference is <= -1d-288,
! else x**4.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (((x ** 4.0d0) - (y ** 4.0d0)) <= (-1d-288)) then
tmp = (y * y) * -(y * y)
else
tmp = x * (x * (x * x))
end if
code = tmp
end function
// Piecewise approximation of x^4 - y^4: returns -y^4 (as (y*y)*(-(y*y)))
// when the computed difference is <= -1e-288, else x^4.
public static double code(double x, double y) {
double tmp;
if ((Math.pow(x, 4.0) - Math.pow(y, 4.0)) <= -1e-288) {
tmp = (y * y) * -(y * y);
} else {
tmp = x * (x * (x * x));
}
return tmp;
}
def code(x, y):
    """Piecewise approximation of x**4 - y**4 (Herbie alternative).

    Returns -y**4 (as (y*y)*(-(y*y))) when the directly-computed
    difference is <= -1e-288, i.e. when y dominates; otherwise x**4.
    """
    # BUG FIX: the generated one-liner flattened this if/else onto a
    # single line, which is not valid Python syntax; restored the
    # multi-line form matching the C/Java/Julia translations.
    if (math.pow(x, 4.0) - math.pow(y, 4.0)) <= -1e-288:
        tmp = (y * y) * -(y * y)
    else:
        tmp = x * (x * (x * x))
    return tmp
# Piecewise: -y^4 (as (y*y)*(-(y*y))) when the computed difference <= -1e-288, else x^4.
function code(x, y) tmp = 0.0 if (Float64((x ^ 4.0) - (y ^ 4.0)) <= -1e-288) tmp = Float64(Float64(y * y) * Float64(-Float64(y * y))); else tmp = Float64(x * Float64(x * Float64(x * x))); end return tmp end
% Piecewise: -y^4 (as (y*y)*(-(y*y))) when the computed difference <= -1e-288, else x^4.
function tmp_2 = code(x, y) tmp = 0.0; if (((x ^ 4.0) - (y ^ 4.0)) <= -1e-288) tmp = (y * y) * -(y * y); else tmp = x * (x * (x * x)); end tmp_2 = tmp; end
(* Piecewise: -y^4 (as (y*y)*(-(y*y))) when the computed difference <= -1e-288, else x^4. *)
code[x_, y_] := If[LessEqual[N[(N[Power[x, 4.0], $MachinePrecision] - N[Power[y, 4.0], $MachinePrecision]), $MachinePrecision], -1e-288], N[(N[(y * y), $MachinePrecision] * (-N[(y * y), $MachinePrecision])), $MachinePrecision], N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;{x}^{4} - {y}^{4} \leq -1 \cdot 10^{-288}:\\
\;\;\;\;\left(y \cdot y\right) \cdot \left(-y \cdot y\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(x \cdot \left(x \cdot x\right)\right)\\
\end{array}
\end{array}
if (-.f64 (pow.f64 x #s(literal 4 binary64)) (pow.f64 y #s(literal 4 binary64))) < -1.00000000000000006e-288
Initial program 100.0%
Taylor expanded in x around 0
mul-1-negN/A
neg-lowering-neg.f64N/A
pow-lowering-pow.f64100.0
Simplified100.0%
metadata-evalN/A
pow-sqrN/A
pow2N/A
pow2N/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
neg-lowering-neg.f64N/A
*-lowering-*.f6499.7
Applied egg-rr99.7%
if -1.00000000000000006e-288 < (-.f64 (pow.f64 x #s(literal 4 binary64)) (pow.f64 y #s(literal 4 binary64)))
Initial program 74.0%
Taylor expanded in x around inf
pow-lowering-pow.f6487.3
Simplified87.3%
metadata-evalN/A
pow-plusN/A
cube-unmultN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6487.3
Applied egg-rr87.3%
Final simplification92.4%
(FPCore (x y) :precision binary64 (* (fma x x (* y y)) (* (+ x y) (- x y))))
/* Herbie alternative: difference of squares,
 * x^4 - y^4 = (x^2 + y^2) * (x + y) * (x - y),
 * with x^2 + y^2 fused into a single rounding via fma. */
double code(double x, double y) {
return fma(x, x, (y * y)) * ((x + y) * (x - y));
}
# Difference of squares: x^4 - y^4 = (x^2+y^2)(x+y)(x-y), with fma fusing x*x + y*y.
function code(x, y) return Float64(fma(x, x, Float64(y * y)) * Float64(Float64(x + y) * Float64(x - y))) end
(* Difference of squares: x^4 - y^4 = (x^2+y^2)(x+y)(x-y). *)
code[x_, y_] := N[(N[(x * x + N[(y * y), $MachinePrecision]), $MachinePrecision] * N[(N[(x + y), $MachinePrecision] * N[(x - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, y \cdot y\right) \cdot \left(\left(x + y\right) \cdot \left(x - y\right)\right)
\end{array}
Initial program 84.8%
sqr-powN/A
sqr-powN/A
difference-of-squaresN/A
*-lowering-*.f64N/A
metadata-evalN/A
unpow2N/A
accelerator-lowering-fma.f64N/A
metadata-evalN/A
unpow2N/A
*-lowering-*.f64N/A
metadata-evalN/A
unpow2N/A
metadata-evalN/A
unpow2N/A
difference-of-squaresN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
--lowering--.f6499.8
Applied egg-rr99.8%
(FPCore (x y) :precision binary64 (* x (* x (* x x))))
/* Herbie alternative: x^4 only, dropping the y term (per the trace,
 * Taylor expansion in x around inf; overall accuracy 52.0%). */
double code(double x, double y) {
return x * (x * (x * x));
}
! x**4 only, dropping the y term (large-x approximation; y is unused).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * (x * (x * x))
end function
// x^4 only, dropping the y term (large-x approximation; y is unused).
public static double code(double x, double y) {
return x * (x * (x * x));
}
def code(x, y):
    """x**4 only, dropping the y term (y is unused)."""
    squared = x * x
    return x * (x * squared)
# x^4 only, dropping the y term (y is unused).
function code(x, y) return Float64(x * Float64(x * Float64(x * x))) end
% x^4 only, dropping the y term (y is unused).
function tmp = code(x, y) tmp = x * (x * (x * x)); end
(* x^4 only, dropping the y term (y is unused). *)
code[x_, y_] := N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \left(x \cdot x\right)\right)
\end{array}
Initial program 84.8%
Taylor expanded in x around inf
pow-lowering-pow.f6452.0
Simplified52.0%
metadata-evalN/A
pow-plusN/A
cube-unmultN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6452.0
Applied egg-rr52.0%
Final simplification52.0%
(FPCore (x y) :precision binary64 (* (* x x) (* x x)))
/* Herbie alternative: x^4 as (x*x)*(x*x), dropping the y term
 * (large-x approximation; overall accuracy 51.9% per the report). */
double code(double x, double y) {
return (x * x) * (x * x);
}
! x**4 as (x*x)*(x*x), dropping the y term (y is unused).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x * x) * (x * x)
end function
// x^4 as (x*x)*(x*x), dropping the y term (y is unused).
public static double code(double x, double y) {
return (x * x) * (x * x);
}
def code(x, y):
    """x**4 as (x*x)*(x*x), dropping the y term (y is unused)."""
    squared = x * x
    return squared * squared
# x^4 as (x*x)*(x*x), dropping the y term (y is unused).
function code(x, y) return Float64(Float64(x * x) * Float64(x * x)) end
% x^4 as (x*x)*(x*x), dropping the y term (y is unused).
function tmp = code(x, y) tmp = (x * x) * (x * x); end
(* x^4 as (x*x)*(x*x), dropping the y term (y is unused). *)
code[x_, y_] := N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(x \cdot x\right)
\end{array}
Initial program 84.8%
Taylor expanded in x around inf
pow-lowering-pow.f6452.0
Simplified52.0%
metadata-evalN/A
pow-sqrN/A
pow2N/A
pow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6451.9
Applied egg-rr51.9%
herbie shell --seed 2024201
(FPCore (x y)
:name "Radioactive exchange between two surfaces"
:precision binary64
(- (pow x 4.0) (pow y 4.0)))