Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX

\[\frac{x \cdot y}{y + 1}\]
↓
\[x \cdot \frac{y}{y + 1}\]
(FPCore (x y) :precision binary64 (/ (* x y) (+ y 1.0))) ↓
(FPCore (x y) :precision binary64 (* x (/ y (+ y 1.0)))) double code(double x, double y) {
    // Original form: (x * y) / (y + 1) in binary64; the rewritten version
    // appears after the arrow below.
    return (x * y) / (y + 1.0);
}
↓
double code(double x, double y) {
    // Rearranged form x * (y / (y + 1)): algebraically equivalent to
    // (x * y) / (y + 1) but with a different floating-point evaluation order.
    return x * (y / (y + 1.0));
}
! Original form: computes (x * y) / (y + 1) in double precision.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (x * y) / (y + 1.0d0)
end function
↓
! Rearranged form: x * (y / (y + 1)), algebraically equivalent to
! (x * y) / (y + 1) but evaluated in a different floating-point order.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x * (y / (y + 1.0d0))
end function
// Original form: computes (x * y) / (y + 1) in double precision.
public static double code(double x, double y) {
    return (x * y) / (y + 1.0);
}
↓
// Rearranged form x * (y / (y + 1)): algebraically equivalent to the
// original, with a different floating-point evaluation order.
public static double code(double x, double y) {
    return x * (y / (y + 1.0));
}
def code(x, y):
    # Original form: (x * y) / (y + 1).
    return (x * y) / (y + 1.0)
↓
def code(x, y):
    # Rearranged form x * (y / (y + 1)): algebraically equivalent to
    # (x * y) / (y + 1), evaluated in a different floating-point order.
    return x * (y / (y + 1.0))
# Original form: (x * y) / (y + 1), with each intermediate rounded to Float64.
function code(x, y)
    return Float64(Float64(x * y) / Float64(y + 1.0))
end
↓
# Rearranged form x * (y / (y + 1)): algebraically equivalent to the
# original, with each intermediate rounded to Float64.
function code(x, y)
    return Float64(x * Float64(y / Float64(y + 1.0)))
end
% Original form: computes (x * y) / (y + 1).
function tmp = code(x, y)
    tmp = (x * y) / (y + 1.0);
end
↓
% Rearranged form x * (y / (y + 1)): algebraically equivalent to the
% original, with a different floating-point evaluation order.
function tmp = code(x, y)
    tmp = x * (y / (y + 1.0));
end
(* Original form: (x * y) / (y + 1), each intermediate evaluated at $MachinePrecision. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] / N[(y + 1.0), $MachinePrecision]), $MachinePrecision]
↓
(* Rearranged form x * (y / (y + 1)): algebraically equivalent to the original,
   each intermediate evaluated at $MachinePrecision. *)
code[x_, y_] := N[(x * N[(y / N[(y + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\frac{x \cdot y}{y + 1}
↓
x \cdot \frac{y}{y + 1}