?

Average Error: 0.63% → 0.67%
Time: 9.4s
Precision: binary64
Cost: 6980

?

\[\log \left(1 + e^{x}\right) - x \cdot y \]
\[\begin{array}{l} \mathbf{if}\;x \leq -1.4:\\ \;\;\;\;x \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(0.5 - y\right) + \log 2\\ \end{array} \]
; Original program in binary64: log(1 + e^x) - x*y.
(FPCore (x y) :precision binary64 (- (log (+ 1.0 (exp x))) (* x y)))
; Herbie's rewrite: linear regime x*(-y) for x <= -1.4, Taylor form
; x*(0.5 - y) + log 2 (expansion around 0) otherwise.
(FPCore (x y)
 :precision binary64
 (if (<= x -1.4) (* x (- y)) (+ (* x (- 0.5 y)) (log 2.0))))
double code(double x, double y) {
	return log((1.0 + exp(x))) - (x * y);
}
/* Regime-split form: linear branch below -1.4, Taylor form otherwise. */
double code(double x, double y) {
	if (x <= -1.4) {
		return x * -y;
	}
	return (x * (0.5 - y)) + log(2.0);
}
! Direct binary64 evaluation of log(1 + exp(x)) - x*y.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: softplus
    softplus = log(1.0d0 + exp(x))
    code = softplus - x * y
end function
! Regime-split form: linear branch for x <= -1.4, Taylor form otherwise.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    if (x <= (-1.4d0)) then
        code = x * (-y)
    else
        code = x * (0.5d0 - y) + log(2.0d0)
    end if
end function
/** Direct binary64 evaluation of log(1 + e^x) - x*y. */
public static double code(double x, double y) {
	final double softplus = Math.log(1.0 + Math.exp(x));
	return softplus - x * y;
}
/** Regime-split form: linear branch below -1.4, Taylor form otherwise. */
public static double code(double x, double y) {
	if (x <= -1.4) {
		return x * -y;
	}
	return x * (0.5 - y) + Math.log(2.0);
}
def code(x, y):
	"""Direct binary64 evaluation of log(1 + e**x) - x*y."""
	softplus = math.log(1.0 + math.exp(x))
	return softplus - x * y
def code(x, y):
	"""Regime-split form: linear branch for x <= -1.4, Taylor form otherwise."""
	if x <= -1.4:
		return x * -y
	return x * (0.5 - y) + math.log(2.0)
# Direct binary64 evaluation of log(1 + e^x) - x*y.
function code(x, y)
	inner = Float64(1.0 + exp(x))
	return Float64(log(inner) - Float64(x * y))
end
# Regime-split form: linear branch for x <= -1.4, Taylor form otherwise.
function code(x, y)
	if x <= -1.4
		return Float64(x * Float64(-y))
	end
	return Float64(Float64(x * Float64(0.5 - y)) + log(2.0))
end
function tmp = code(x, y)
	% Direct evaluation of log(1 + exp(x)) - x*y.
	softplus = log(1.0 + exp(x));
	tmp = softplus - (x * y);
end
function tmp_2 = code(x, y)
	% Regime-split form: linear branch for x <= -1.4, Taylor form otherwise.
	if (x <= -1.4)
		tmp_2 = x * (-y);
	else
		tmp_2 = (x * (0.5 - y)) + log(2.0);
	end
end
(* Direct evaluation of log(1 + e^x) - x*y; each N[..., $MachinePrecision]
   wrapper rounds the intermediate to machine precision, mimicking binary64
   step-by-step rounding. Generated code — wrapper order is significant. *)
code[x_, y_] := N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]
(* Regime-split form: x*(-y) for x <= -1.4, Taylor form x*(0.5 - y) + Log[2]
   otherwise; N[..., $MachinePrecision] wrappers mimic binary64 rounding.
   Generated code — wrapper order is significant. *)
code[x_, y_] := If[LessEqual[x, -1.4], N[(x * (-y)), $MachinePrecision], N[(N[(x * N[(0.5 - y), $MachinePrecision]), $MachinePrecision] + N[Log[2.0], $MachinePrecision]), $MachinePrecision]]
\log \left(1 + e^{x}\right) - x \cdot y
\begin{array}{l}
\mathbf{if}\;x \leq -1.4:\\
\;\;\;\;x \cdot \left(-y\right)\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(0.5 - y\right) + \log 2\\


\end{array}

Error?

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Target

Original: 0.63%
Target: 0.05%
Herbie: 0.67%
\[\begin{array}{l} \mathbf{if}\;x \leq 0:\\ \;\;\;\;\log \left(1 + e^{x}\right) - x \cdot y\\ \mathbf{else}:\\ \;\;\;\;\log \left(1 + e^{-x}\right) - \left(-x\right) \cdot \left(1 - y\right)\\ \end{array} \]

Derivation?

  1. Split input into 2 regimes
  2. if x < -1.3999999999999999

    1. Initial program 0.09

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
  2. Simplified — error 0

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      Proof

      [Start]0.09

      \[ \log \left(1 + e^{x}\right) - x \cdot y \]

      log1p-def [=>]0

      \[ \color{blue}{\mathsf{log1p}\left(e^{x}\right)} - x \cdot y \]
    3. Taylor expanded in x around inf 0.37

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot x\right)} \]
    4. Simplified — error 0.37

      \[\leadsto \color{blue}{x \cdot \left(-y\right)} \]
      Proof

      [Start]0.37

      \[ -1 \cdot \left(y \cdot x\right) \]

      *-commutative [=>]0.37

      \[ -1 \cdot \color{blue}{\left(x \cdot y\right)} \]

      mul-1-neg [=>]0.37

      \[ \color{blue}{-x \cdot y} \]

      distribute-rgt-neg-out [<=]0.37

      \[ \color{blue}{x \cdot \left(-y\right)} \]

    if -1.3999999999999999 < x

    1. Initial program 0.82

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
  2. Simplified — error 0.81

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      Proof

      [Start]0.82

      \[ \log \left(1 + e^{x}\right) - x \cdot y \]

      log1p-def [=>]0.81

      \[ \color{blue}{\mathsf{log1p}\left(e^{x}\right)} - x \cdot y \]
    3. Taylor expanded in x around 0 0.78

      \[\leadsto \color{blue}{\left(0.5 - y\right) \cdot x + \log 2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification — error 0.67

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.4:\\ \;\;\;\;x \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(0.5 - y\right) + \log 2\\ \end{array} \]

Alternatives

Alternative 1
Error: 0.6%
Cost: 13120
\[\mathsf{log1p}\left(e^{x}\right) - x \cdot y \]
Alternative 2
Error: 1.17%
Cost: 6852
\[\begin{array}{l} \mathbf{if}\;x \leq -82:\\ \;\;\;\;x \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;\log 2 - x \cdot y\\ \end{array} \]
Alternative 3
Error: 18.6%
Cost: 6728
\[\begin{array}{l} \mathbf{if}\;x \leq -3.2 \cdot 10^{-34}:\\ \;\;\;\;x \cdot \left(-y\right)\\ \mathbf{elif}\;x \leq 1.55 \cdot 10^{-40}:\\ \;\;\;\;\log 2\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(0.5 - y\right)\\ \end{array} \]
Alternative 4
Error: 54.55%
Cost: 256
\[x \cdot \left(-y\right) \]
Alternative 5
Error: 96.47%
Cost: 192
\[x \cdot 0.5 \]

Error

Reproduce?

herbie shell --seed 2023102 
; Input for `herbie shell`: the original program plus a hand-written
; :herbie-target (a symmetric regime split at x = 0 using the reflection
; log(1+e^x) = log(1+e^-x) + x) that Herbie's result is compared against.
(FPCore (x y)
  :name "Logistic regression 2"
  :precision binary64

  :herbie-target
  (if (<= x 0.0) (- (log (+ 1.0 (exp x))) (* x y)) (- (log (+ 1.0 (exp (- x)))) (* (- x) (- 1.0 y))))

  (- (log (+ 1.0 (exp x))) (* x y)))