\[\log \left(1 + e^{x}\right) - x \cdot y
\]
↓
\[\log \left(1 + e^{x}\right) - x \cdot y
\]
; log(1 + e^x) - x*y evaluated in IEEE binary64 (logistic-loss style expression)
(FPCore (x y) :precision binary64 (- (log (+ 1.0 (exp x))) (* x y)))
↓
; log(1 + e^x) - x*y evaluated in IEEE binary64 (logistic-loss style expression)
(FPCore (x y) :precision binary64 (- (log (+ 1.0 (exp x))) (* x y)))
double code(double x, double y) {
return log((1.0 + exp(x))) - (x * y);
}
↓
double code(double x, double y) {
return log((1.0 + exp(x))) - (x * y);
}
! Compute log(1 + exp(x)) - x*y in double precision (real(8)).
real(8) function code(x, y)
! Inputs are read-only; 1.0d0 keeps the literal in double precision.
real(8), intent (in) :: x
real(8), intent (in) :: y
code = log((1.0d0 + exp(x))) - (x * y)
end function
↓
! Compute log(1 + exp(x)) - x*y in double precision (real(8)).
real(8) function code(x, y)
! Inputs are read-only; 1.0d0 keeps the literal in double precision.
real(8), intent (in) :: x
real(8), intent (in) :: y
code = log((1.0d0 + exp(x))) - (x * y)
end function
/**
 * Compute log(1 + e^x) - x*y in double precision.
 * Operation order matches the original expression, so results are identical.
 */
public static double code(double x, double y) {
    double s = 1.0 + Math.exp(x);
    return Math.log(s) - (x * y);
}
↓
/**
 * Compute log(1 + e^x) - x*y in double precision.
 * Operation order matches the original expression, so results are identical.
 */
public static double code(double x, double y) {
    double s = 1.0 + Math.exp(x);
    return Math.log(s) - (x * y);
}
def code(x, y):
    """Compute log(1 + e**x) - x*y (same operation order as the original)."""
    s = 1.0 + math.exp(x)
    return math.log(s) - (x * y)
↓
def code(x, y):
    """Compute log(1 + e**x) - x*y (same operation order as the original)."""
    s = 1.0 + math.exp(x)
    return math.log(s) - (x * y)
# Compute log(1 + e^x) - x*y, rounding each intermediate to Float64
# exactly as the original expression does.
function code(x, y)
    s = Float64(1.0 + exp(x))
    p = Float64(x * y)
    return Float64(log(s) - p)
end
↓
# Compute log(1 + e^x) - x*y, rounding each intermediate to Float64
# exactly as the original expression does.
function code(x, y)
    s = Float64(1.0 + exp(x))
    p = Float64(x * y)
    return Float64(log(s) - p)
end
function tmp = code(x, y)
    % Compute log(1 + e^x) - x*y in double precision
    % (same operation order as the original expression).
    s = 1.0 + exp(x);
    tmp = log(s) - (x * y);
end
↓
function tmp = code(x, y)
    % Compute log(1 + e^x) - x*y in double precision
    % (same operation order as the original expression).
    s = 1.0 + exp(x);
    tmp = log(s) - (x * y);
end
(* Compute log(1 + E^x) - x*y, rounding every intermediate with N[..., $MachinePrecision] to mimic binary64 evaluation. *)
code[x_, y_] := N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]
↓
(* Compute log(1 + E^x) - x*y, rounding every intermediate with N[..., $MachinePrecision] to mimic binary64 evaluation. *)
code[x_, y_] := N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]
\log \left(1 + e^{x}\right) - x \cdot y
↓
\log \left(1 + e^{x}\right) - x \cdot y