Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX \[{x}^{4} - {y}^{4}
\]
↓
\[\left(x \cdot x + y \cdot y\right) \cdot \left(x \cdot x - y \cdot y\right)
\]
(FPCore (x y) :precision binary64 (- (pow x 4.0) (pow y 4.0))) ↓
(FPCore (x y) :precision binary64 (* (+ (* x x) (* y y)) (- (* x x) (* y y)))) double code(double x, double y) {
return pow(x, 4.0) - pow(y, 4.0); /* original form: x^4 - y^4 computed directly via pow */
}
↓
/* Factored form of x^4 - y^4: (x^2 + y^2) * (x^2 - y^2). */
double code(double x, double y) {
    double sq_x = x * x;
    double sq_y = y * y;
    return (sq_x + sq_y) * (sq_x - sq_y);
}
! Original form: x**4 - y**4 computed directly.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: fourth_x, fourth_y
    fourth_x = x ** 4.0d0
    fourth_y = y ** 4.0d0
    code = fourth_x - fourth_y
end function
↓
! Factored form of x**4 - y**4: (x**2 + y**2) * (x**2 - y**2).
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: sq_x, sq_y
    sq_x = x * x
    sq_y = y * y
    code = (sq_x + sq_y) * (sq_x - sq_y)
end function
/** Original form: x^4 - y^4 computed directly via Math.pow. */
public static double code(double x, double y) {
    double fourthX = Math.pow(x, 4.0);
    double fourthY = Math.pow(y, 4.0);
    return fourthX - fourthY;
}
↓
/** Factored form of x^4 - y^4: (x^2 + y^2) * (x^2 - y^2). */
public static double code(double x, double y) {
    double sqX = x * x;
    double sqY = y * y;
    return (sqX + sqY) * (sqX - sqY);
}
def code(x, y):
    """Original form: x**4 - y**4 computed directly via math.pow."""
    fourth_x = math.pow(x, 4.0)
    fourth_y = math.pow(y, 4.0)
    return fourth_x - fourth_y
↓
def code(x, y):
    """Factored form of x**4 - y**4: (x**2 + y**2) * (x**2 - y**2)."""
    sq_x = x * x
    sq_y = y * y
    return (sq_x + sq_y) * (sq_x - sq_y)
# Original form: x^4 - y^4 computed directly, result coerced to Float64.
function code(x, y)
    fourth_x = x ^ 4.0
    fourth_y = y ^ 4.0
    return Float64(fourth_x - fourth_y)
end
↓
# Factored form of x^4 - y^4: (x^2 + y^2) * (x^2 - y^2).
# Every intermediate is explicitly rounded to Float64, matching the
# original's nested Float64(...) coercions step for step.
function code(x, y)
    sq_x = Float64(x * x)
    sq_y = Float64(y * y)
    s = Float64(sq_x + sq_y)
    d = Float64(sq_x - sq_y)
    return Float64(s * d)
end
% Original form: x^4 - y^4 computed directly.
function tmp = code(x, y)
    fourth_x = x ^ 4.0;
    fourth_y = y ^ 4.0;
    tmp = fourth_x - fourth_y;
end
↓
% Factored form of x^4 - y^4: (x^2 + y^2) * (x^2 - y^2).
function tmp = code(x, y)
    sq_x = x * x;
    sq_y = y * y;
    tmp = (sq_x + sq_y) * (sq_x - sq_y);
end
(* Original form: x^4 - y^4 via Power, each step rounded to machine precision. *)
code[x_, y_] := With[{fx = N[Power[x, 4.0], $MachinePrecision], fy = N[Power[y, 4.0], $MachinePrecision]}, N[(fx - fy), $MachinePrecision]]
↓
(* Factored form of x^4 - y^4: (x^2 + y^2)(x^2 - y^2), with the same N[..., $MachinePrecision] rounding at every intermediate as the original. *)
code[x_, y_] := With[{sx = N[(x * x), $MachinePrecision], sy = N[(y * y), $MachinePrecision]}, N[(N[(sx + sy), $MachinePrecision] * N[(sx - sy), $MachinePrecision]), $MachinePrecision]]
{x}^{4} - {y}^{4}
↓
\left(x \cdot x + y \cdot y\right) \cdot \left(x \cdot x - y \cdot y\right)