Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX

\[x \cdot y + \left(x - 1\right) \cdot z\]
↓
\[x \cdot \left(y + z\right) - z\]
(FPCore (x y z) :precision binary64 (+ (* x y) (* (- x 1.0) z)))
↓
(FPCore (x y z) :precision binary64 (- (* x (+ y z)) z))

double code(double x, double y, double z) {
	return (x * y) + ((x - 1.0) * z);
}
↓
/* Herbie-rewritten form x*(y+z) - z, algebraically equal to x*y + (x-1)*z.
 * The intermediate values keep the exact same FP operation tree as the
 * original one-liner, so every rounding step is unchanged. */
double code(double x, double y, double z) {
	const double total = y + z;    /* y+z rounded once */
	const double scaled = x * total;
	return scaled - z;
}
!> Evaluate x*y + (x - 1)*z in double precision (original expression form).
!> Named temporaries keep the same FP operation tree as the one-line version.
real(8) function code(x, y, z)
    implicit none
    real(8), intent(in) :: x, y, z
    real(8) :: product_term, shifted_term

    product_term = x * y                 ! x*y, rounded once
    shifted_term = (x - 1.0d0) * z       ! (x-1)*z
    code = product_term + shifted_term   ! final sum, same rounding order
end function
↓
!> Evaluate x*(y + z) - z, the Herbie-rewritten equivalent of x*y + (x - 1)*z.
!> The temporary preserves the exact FP operation tree of the one-line form.
real(8) function code(x, y, z)
    implicit none
    real(8), intent(in) :: x, y, z
    real(8) :: folded

    folded = x * (y + z)   ! y+z rounded once, then scaled by x
    code = folded - z      ! final subtraction, same rounding order
end function
/**
 * Evaluates x*y + (x - 1)*z in IEEE-754 double precision (original form).
 * Intermediate locals keep the identical FP operation tree, so rounding
 * behavior matches the one-line expression exactly.
 */
public static double code(double x, double y, double z) {
	final double productTerm = x * y;          // x*y
	final double shiftedTerm = (x - 1.0) * z;  // (x-1)*z
	return productTerm + shiftedTerm;
}
↓
/**
 * Evaluates x*(y + z) - z, the Herbie-rewritten equivalent of
 * x*y + (x - 1)*z, preserving the rewritten FP operation tree exactly.
 */
public static double code(double x, double y, double z) {
	final double combined = x * (y + z);  // y+z rounded once, then scaled
	return combined - z;
}
def code(x, y, z):
    """Evaluate x*y + (x - 1.0)*z (original form of the Herbie rewrite pair).

    The intermediate names keep the same floating-point operation tree as
    the one-line expression, so rounding is unchanged.
    """
    product_term = x * y          # x*y
    shifted_term = (x - 1.0) * z  # (x-1)*z
    return product_term + shifted_term
↓
def code(x, y, z):
    """Evaluate x*(y + z) - z, algebraically equal to x*y + (x - 1.0)*z.

    Same floating-point operation tree as the one-line rewritten form.
    """
    total = y + z  # single rounding of y+z
    return x * total - z
# Evaluate x*y + (x - 1.0)*z, forcing Float64 rounding after every step
# exactly where the original one-liner did (original Herbie input form).
function code(x, y, z)
    product_term = Float64(x * y)
    shifted_term = Float64(Float64(x - 1.0) * z)
    return Float64(product_term + shifted_term)
end
↓
# Evaluate x*(y + z) - z, forcing Float64 rounding after every step
# exactly where the rewritten one-liner did (Herbie output form).
function code(x, y, z)
    total = Float64(y + z)
    scaled = Float64(x * total)
    return Float64(scaled - z)
end
function tmp = code(x, y, z)
% Evaluate x*y + (x - 1)*z in double precision (original expression form).
% Intermediate variables keep the identical operation tree and rounding.
	productTerm = x * y;          % x*y
	shiftedTerm = (x - 1.0) * z;  % (x-1)*z
	tmp = productTerm + shiftedTerm;
end
↓
function tmp = code(x, y, z)
% Evaluate x*(y + z) - z, the Herbie-rewritten equivalent of x*y + (x-1)*z.
% The temporary preserves the rewritten form's exact operation tree.
	total = y + z;        % single rounding of y+z
	tmp = x * total - z;
end
(* Evaluate x*y + (x - 1.0)*z, rounding each intermediate to $MachinePrecision
   via N[...] — the original form of the Herbie rewrite pair. *)
code[x_, y_, z_] := N[(N[(x * y), $MachinePrecision] + N[(N[(x - 1.0), $MachinePrecision] * z), $MachinePrecision]), $MachinePrecision]
↓
(* Evaluate x*(y + z) - z, rounding each intermediate to $MachinePrecision
   via N[...] — the Herbie-rewritten equivalent of x*y + (x - 1.0)*z. *)
code[x_, y_, z_] := N[(N[(x * N[(y + z), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
x \cdot y + \left(x - 1\right) \cdot z
↓
x \cdot \left(y + z\right) - z