Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX
\[\frac{x + y}{x - y}
\]
↓
\[1 - \frac{x + y}{y - x} \cdot \left(1 + \frac{y - x}{x + y}\right)
\]
;; FPCore source expression (x + y) / (x - y) and its rewritten,
;; algebraically equivalent form 1 - ((x+y)/(y-x)) * (1 + (y-x)/(x+y)),
;; both declared :precision binary64.
(FPCore (x y) :precision binary64 (/ (+ x y) (- x y))) ↓
(FPCore (x y)
:precision binary64
(- 1.0 (* (/ (+ x y) (- y x)) (+ 1.0 (/ (- y x) (+ x y)))))) double code(double x, double y) {
// C translation of the original expression (x + y) / (x - y).
return (x + y) / (x - y);
}
↓
/* Rewritten, algebraically equivalent form of (x + y) / (x - y):
 * 1 - ((x+y)/(y-x)) * (1 + (y-x)/(x+y)).
 * The intermediates below perform exactly the same floating-point
 * operations in the same order as the original single expression. */
double code(double x, double y) {
	double ratio = (x + y) / (y - x);
	double correction = 1.0 + ((y - x) / (x + y));
	return 1.0 - ratio * correction;
}
! Computes (x + y) / (x - y) in double precision (real(8)).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x + y) / (x - y)
end function
↓
! Rewritten, algebraically equivalent form of (x + y) / (x - y):
! 1 - ((x+y)/(y-x)) * (1 + (y-x)/(x+y)), in double precision with
! d0 literals. NOTE(review): the parenthesization fixes the
! floating-point evaluation order; keep the expression as written.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 - (((x + y) / (y - x)) * (1.0d0 + ((y - x) / (x + y))))
end function
/** Returns (x + y) / (x - y), computed in double precision. */
public static double code(double x, double y) {
	double sum = x + y;
	double difference = x - y;
	return sum / difference;
}
↓
/**
 * Returns 1 - ((x+y)/(y-x)) * (1 + (y-x)/(x+y)), which is algebraically
 * equal to (x + y) / (x - y). The intermediates below perform exactly
 * the same floating-point operations in the same order as the original
 * single expression.
 */
public static double code(double x, double y) {
	double ratio = (x + y) / (y - x);
	double correction = 1.0 + ((y - x) / (x + y));
	return 1.0 - ratio * correction;
}
def code(x, y):
    """Return (x + y) / (x - y)."""
    numerator = x + y
    denominator = x - y
    return numerator / denominator
↓
def code(x, y):
    """Return 1 - ((x+y)/(y-x)) * (1 + (y-x)/(x+y)).

    Algebraically equal to (x + y) / (x - y); the intermediates below
    perform exactly the same floating-point operations in the same
    order as the original single expression.
    """
    ratio = (x + y) / (y - x)
    correction = 1.0 + ((y - x) / (x + y))
    return 1.0 - ratio * correction
# Computes (x + y) / (x - y); each intermediate result is explicitly
# rounded to Float64 via the Float64(...) wrappers.
function code(x, y)
return Float64(Float64(x + y) / Float64(x - y))
end
↓
# Rewritten, algebraically equivalent form of (x + y) / (x - y):
# 1 - ((x+y)/(y-x)) * (1 + (y-x)/(x+y)). Every intermediate is
# explicitly rounded to Float64; the nesting fixes the evaluation
# order, so keep the expression as written.
function code(x, y)
return Float64(1.0 - Float64(Float64(Float64(x + y) / Float64(y - x)) * Float64(1.0 + Float64(Float64(y - x) / Float64(x + y)))))
end
% Computes (x + y) / (x - y).
function tmp = code(x, y)
tmp = (x + y) / (x - y);
end
↓
% Rewritten, algebraically equivalent form of (x + y) / (x - y):
% 1 - ((x+y)/(y-x)) * (1 + (y-x)/(x+y)). The parenthesization fixes
% the floating-point evaluation order; keep the expression as written.
function tmp = code(x, y)
tmp = 1.0 - (((x + y) / (y - x)) * (1.0 + ((y - x) / (x + y))));
end
(* Computes (x + y) / (x - y); each subexpression is evaluated
   numerically via N[..., $MachinePrecision]. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(x - y), $MachinePrecision]), $MachinePrecision]
↓
(* Rewritten, algebraically equivalent form of (x + y) / (x - y):
   1 - ((x+y)/(y-x)) * (1 + (y-x)/(x+y)). Each subexpression is
   evaluated via N[..., $MachinePrecision]; the nesting fixes the
   evaluation order, so keep the expression as written. *)
code[x_, y_] := N[(1.0 - N[(N[(N[(x + y), $MachinePrecision] / N[(y - x), $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(N[(y - x), $MachinePrecision] / N[(x + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\frac{x + y}{x - y}
↓
1 - \frac{x + y}{y - x} \cdot \left(1 + \frac{y - x}{x + y}\right)