Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX

\[x - y \cdot z\]
↓
\[x - y \cdot z\]
(FPCore (x y z) :precision binary64 (- x (* y z)))
↓
(FPCore (x y z) :precision binary64 (- x (* y z)))

double code(double x, double y, double z) {
return x - (y * z);
}
↓
double code(double x, double y, double z) {
return x - (y * z);
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x - (y * z)
end function
↓
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x - (y * z)
end function
public static double code(double x, double y, double z) {
return x - (y * z);
}
↓
public static double code(double x, double y, double z) {
return x - (y * z);
}
def code(x, y, z):
return x - (y * z)
↓
def code(x, y, z):
return x - (y * z)
function code(x, y, z)
return Float64(x - Float64(y * z))
end
↓
function code(x, y, z)
return Float64(x - Float64(y * z))
end
function tmp = code(x, y, z)
tmp = x - (y * z);
end
↓
function tmp = code(x, y, z)
tmp = x - (y * z);
end
code[x_, y_, z_] := N[(x - N[(y * z), $MachinePrecision]), $MachinePrecision]
↓
code[x_, y_, z_] := N[(x - N[(y * z), $MachinePrecision]), $MachinePrecision]
x - y \cdot z
↓
x - y \cdot z