
Initial program:
(FPCore (x y) :precision binary64 (- (log (+ 1.0 (exp x))) (* x y)))
#include <math.h>

double code(double x, double y) {
    return log((1.0 + exp(x))) - (x * y);
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = log((1.0d0 + exp(x))) - (x * y)
end function
public static double code(double x, double y) {
    return Math.log((1.0 + Math.exp(x))) - (x * y);
}
import math

def code(x, y):
    return math.log((1.0 + math.exp(x))) - (x * y)
function code(x, y) return Float64(log(Float64(1.0 + exp(x))) - Float64(x * y)) end
function tmp = code(x, y)
    tmp = log((1.0 + exp(x))) - (x * y);
end
code[x_, y_] := N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]
\log \left(1 + e^{x}\right) - x \cdot y
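For large positive x, the direct form computes exp(x) before the log, and exp(x) leaves the binary64 range once x exceeds about 709.78, even though log(1 + e^x) ≈ x is itself representable. A minimal Python sketch of the failure mode (illustrative only; naive and stable are hypothetical helper names, and stable uses the branch rewrite shown in the target program below):

import math

def naive(x, y):
    # Direct translation of the initial program; math.exp raises
    # OverflowError once x exceeds about 709.78 in binary64.
    return math.log(1.0 + math.exp(x)) - (x * y)

def stable(x, y):
    # Branch rewrite: for x > 0, use log(1 + e^x) = x + log(1 + e^-x),
    # so exp only ever sees a non-positive argument.
    if x <= 0.0:
        return math.log(1.0 + math.exp(x)) - (x * y)
    return math.log(1.0 + math.exp(-x)) - (-x) * (1.0 - y)

try:
    print(naive(800.0, 0.5))
except OverflowError:
    print("naive overflows")   # exp(800.0) is out of binary64 range
print(stable(800.0, 0.5))      # 400.0: log(1 + e^-800) ~ 0 and x * (1 - y) = 400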
Sampling outcomes in binary64 precision: (outcome table omitted)
Herbie found 1 alternative:

| Alternative | Accuracy | Speedup |
|---|---|---|
Alternative 1:
(FPCore (x y) :precision binary64 (- (log (+ (exp x) 1.0)) (* y x)))
#include <math.h>

double code(double x, double y) {
    return log((exp(x) + 1.0)) - (y * x);
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = log((exp(x) + 1.0d0)) - (y * x)
end function
public static double code(double x, double y) {
    return Math.log((Math.exp(x) + 1.0)) - (y * x);
}
import math

def code(x, y):
    return math.log((math.exp(x) + 1.0)) - (y * x)
function code(x, y) return Float64(log(Float64(exp(x) + 1.0)) - Float64(y * x)) end
function tmp = code(x, y)
    tmp = log((exp(x) + 1.0)) - (y * x);
end
code[x_, y_] := N[(N[Log[N[(N[Exp[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision] - N[(y * x), $MachinePrecision]), $MachinePrecision]
\log \left(e^{x} + 1\right) - y \cdot x
Derivation:
Initial program: 100.0% accuracy
Final simplification: 100.0% accuracy
Target program (the :alt annotation from the input specification below):
(FPCore (x y) :precision binary64 (if (<= x 0.0) (- (log (+ 1.0 (exp x))) (* x y)) (- (log (+ 1.0 (exp (- x)))) (* (- x) (- 1.0 y)))))
#include <math.h>

double code(double x, double y) {
    double tmp;
    if (x <= 0.0) {
        tmp = log((1.0 + exp(x))) - (x * y);
    } else {
        tmp = log((1.0 + exp(-x))) - (-x * (1.0 - y));
    }
    return tmp;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: tmp
    if (x <= 0.0d0) then
        tmp = log((1.0d0 + exp(x))) - (x * y)
    else
        tmp = log((1.0d0 + exp(-x))) - (-x * (1.0d0 - y))
    end if
    code = tmp
end function
public static double code(double x, double y) {
    double tmp;
    if (x <= 0.0) {
        tmp = Math.log((1.0 + Math.exp(x))) - (x * y);
    } else {
        tmp = Math.log((1.0 + Math.exp(-x))) - (-x * (1.0 - y));
    }
    return tmp;
}
import math

def code(x, y):
    if x <= 0.0:
        tmp = math.log((1.0 + math.exp(x))) - (x * y)
    else:
        tmp = math.log((1.0 + math.exp(-x))) - (-x * (1.0 - y))
    return tmp
function code(x, y)
    tmp = 0.0
    if x <= 0.0
        tmp = Float64(log(Float64(1.0 + exp(x))) - Float64(x * y))
    else
        tmp = Float64(log(Float64(1.0 + exp(Float64(-x)))) - Float64(Float64(-x) * Float64(1.0 - y)))
    end
    return tmp
end
function tmp_2 = code(x, y)
    tmp = 0.0;
    if (x <= 0.0)
        tmp = log((1.0 + exp(x))) - (x * y);
    else
        tmp = log((1.0 + exp(-x))) - (-x * (1.0 - y));
    end
    tmp_2 = tmp;
end
code[x_, y_] := If[LessEqual[x, 0.0], N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision], N[(N[Log[N[(1.0 + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[((-x) * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;x \leq 0:\\
\;\;\;\;\log \left(1 + e^{x}\right) - x \cdot y\\
\mathbf{else}:\\
\;\;\;\;\log \left(1 + e^{-x}\right) - \left(-x\right) \cdot \left(1 - y\right)\\
\end{array}
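The else branch follows from a standard identity (shown here as a sanity check; this derivation is not part of the report):

\begin{array}{l}
\log \left(1 + e^{x}\right) = \log \left(e^{x} \cdot \left(e^{-x} + 1\right)\right) = x + \log \left(1 + e^{-x}\right)\\
\log \left(1 + e^{x}\right) - x \cdot y = \log \left(1 + e^{-x}\right) + x \cdot \left(1 - y\right) = \log \left(1 + e^{-x}\right) - \left(-x\right) \cdot \left(1 - y\right)
\end{array}

For x > 0 the rewritten branch only evaluates e^{-x} \in (0, 1], so the exponential can no longer overflow.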
Reproduce:
herbie shell --seed 2024337
(FPCore (x y)
:name "Logistic regression 2"
:precision binary64
:alt
(! :herbie-platform default (if (<= x 0) (- (log (+ 1 (exp x))) (* x y)) (- (log (+ 1 (exp (- x)))) (* (- x) (- 1 y)))))
(- (log (+ 1.0 (exp x))) (* x y)))
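To reproduce this result, launch the shell with the seed above and paste in the FPCore specification: herbie shell reads FPCore expressions from standard input and prints an improved program for each (the documented behavior of Herbie's shell subcommand).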