?

Average Accuracy: 99.1% → 98.7%
Time: 10.4s
Precision: binary64
Cost: 6980

?

\[\log \left(1 + e^{x}\right) - x \cdot y \]
\[\begin{array}{l} \mathbf{if}\;x \leq -200000000:\\ \;\;\;\;x \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(0.5 - y\right) + \log 2\\ \end{array} \]
(FPCore (x y) :precision binary64 (- (log (+ 1.0 (exp x))) (* x y)))
(FPCore (x y)
 :precision binary64
 (if (<= x -200000000.0) (* x (- y)) (+ (* x (- 0.5 y)) (log 2.0))))
double code(double x, double y) {
	return log((1.0 + exp(x))) - (x * y);
}
double code(double x, double y) {
	/* Piecewise form: x*(-y) in the far-negative regime,
	   x*(0.5 - y) + log(2) otherwise. */
	const double cutoff = -200000000.0;
	return (x <= cutoff) ? (x * -y)
	                     : ((x * (0.5 - y)) + log(2.0));
}
real(8) function code(x, y)
    ! log(1 + exp(x)) - x*y, evaluated directly in double precision.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: softplus
    softplus = log((1.0d0 + exp(x)))
    code = softplus - (x * y)
end function
real(8) function code(x, y)
    ! Piecewise approximation of log(1 + exp(x)) - x*y:
    !   x <= -2e8 : x * (-y)
    !   otherwise : x * (0.5 - y) + log(2)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: tmp
    if (x <= (-200000000.0d0)) then
        ! Fixed: "x * -y" is invalid standard Fortran (two adjacent
        ! operators); the unary minus must be parenthesized.
        tmp = x * (-y)
    else
        tmp = (x * (0.5d0 - y)) + log(2.0d0)
    end if
    code = tmp
end function
public static double code(double x, double y) {
	// log(1 + e^x) - x*y, evaluated directly in double precision.
	final double softplus = Math.log(1.0 + Math.exp(x));
	return softplus - x * y;
}
public static double code(double x, double y) {
	// Piecewise: x*(-y) below the -2e8 cutoff, x*(0.5 - y) + log(2) above.
	return (x <= -200000000.0)
			? x * -y
			: x * (0.5 - y) + Math.log(2.0);
}
def code(x, y):
	"""Evaluate log(1 + e**x) - x*y directly in float64."""
	softplus = math.log(1.0 + math.exp(x))
	return softplus - x * y
def code(x, y):
	"""Piecewise form: x*(-y) when x <= -2e8, else x*(0.5 - y) + log(2)."""
	if x <= -200000000.0:
		return x * -y
	return x * (0.5 - y) + math.log(2.0)
function code(x, y)
	# log(1 + e^x) - x*y, with each intermediate rounded to Float64.
	lhs = log(Float64(1.0 + exp(x)))
	rhs = Float64(x * y)
	return Float64(lhs - rhs)
end
function code(x, y)
	# Piecewise: x*(-y) in the far-negative regime (x <= -2e8),
	# x*(0.5 - y) + log(2) otherwise.
	x <= -200000000.0 && return Float64(x * Float64(-y))
	return Float64(Float64(x * Float64(0.5 - y)) + log(2.0))
end
function tmp = code(x, y)
	% log(1 + e^x) - x*y, evaluated directly.
	softplus = log(1.0 + exp(x));
	tmp = softplus - (x * y);
end
function tmp_2 = code(x, y)
	% Piecewise: x*(-y) below the -2e8 cutoff, x*(0.5 - y) + log(2) above.
	if (x <= -200000000.0)
		tmp_2 = x * (-y);
	else
		tmp_2 = (x * (0.5 - y)) + log(2.0);
	end
end
(* log(1 + e^x) - x*y, with every intermediate explicitly rounded to
   machine precision via N[..., $MachinePrecision]. *)
code[x_, y_] := N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]
(* Rewritten piecewise form: x*(-y) when x <= -2e8, else x*(0.5 - y) + log(2).
   NOTE(review): both definitions use the identical pattern code[x_, y_], so if
   loaded together this second definition replaces the first. *)
code[x_, y_] := If[LessEqual[x, -200000000.0], N[(x * (-y)), $MachinePrecision], N[(N[(x * N[(0.5 - y), $MachinePrecision]), $MachinePrecision] + N[Log[2.0], $MachinePrecision]), $MachinePrecision]]
\log \left(1 + e^{x}\right) - x \cdot y
\begin{array}{l}
\mathbf{if}\;x \leq -200000000:\\
\;\;\;\;x \cdot \left(-y\right)\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(0.5 - y\right) + \log 2\\


\end{array}

Error?

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Target

Original: 99.1%
Target: 99.9%
Herbie: 98.7%
\[\begin{array}{l} \mathbf{if}\;x \leq 0:\\ \;\;\;\;\log \left(1 + e^{x}\right) - x \cdot y\\ \mathbf{else}:\\ \;\;\;\;\log \left(1 + e^{-x}\right) - \left(-x\right) \cdot \left(1 - y\right)\\ \end{array} \]

Derivation?

  1. Split input into 2 regimes
  2. if x < -2e8

    1. Initial program 100.0%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      Proof

      [Start]100.0

      \[ \log \left(1 + e^{x}\right) - x \cdot y \]

      log1p-def [=>]100.0

      \[ \color{blue}{\mathsf{log1p}\left(e^{x}\right)} - x \cdot y \]
    3. Taylor expanded in x around inf 100.0%

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot x\right)} \]
    4. Simplified 100.0%

      \[\leadsto \color{blue}{x \cdot \left(-y\right)} \]
      Proof

      [Start]100.0

      \[ -1 \cdot \left(y \cdot x\right) \]

      *-commutative [=>]100.0

      \[ -1 \cdot \color{blue}{\left(x \cdot y\right)} \]

      mul-1-neg [=>]100.0

      \[ \color{blue}{-x \cdot y} \]

      distribute-rgt-neg-out [<=]100.0

      \[ \color{blue}{x \cdot \left(-y\right)} \]

    if -2e8 < x

    1. Initial program 98.8%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Simplified 98.9%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      Proof

      [Start]98.8

      \[ \log \left(1 + e^{x}\right) - x \cdot y \]

      log1p-def [=>]98.9

      \[ \color{blue}{\mathsf{log1p}\left(e^{x}\right)} - x \cdot y \]
    3. Taylor expanded in x around 0 98.2%

      \[\leadsto \color{blue}{\left(0.5 - y\right) \cdot x + \log 2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 98.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -200000000:\\ \;\;\;\;x \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(0.5 - y\right) + \log 2\\ \end{array} \]

Alternatives

Alternative 1
Accuracy: 99.2%
Cost: 13120
\[\mathsf{log1p}\left(e^{x}\right) - x \cdot y \]
Alternative 2
Accuracy: 98.2%
Cost: 6852
\[\begin{array}{l} \mathbf{if}\;x \leq -200000000:\\ \;\;\;\;x \cdot \left(-y\right)\\ \mathbf{else}:\\ \;\;\;\;\log 2 - x \cdot y\\ \end{array} \]
Alternative 3
Accuracy: 80.4%
Cost: 6728
\[\begin{array}{l} \mathbf{if}\;x \leq -5.8 \cdot 10^{-42}:\\ \;\;\;\;x \cdot \left(-y\right)\\ \mathbf{elif}\;x \leq 5.2 \cdot 10^{-80}:\\ \;\;\;\;\log 2\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(0.5 - y\right)\\ \end{array} \]
Alternative 4
Accuracy: 46.8%
Cost: 256
\[x \cdot \left(-y\right) \]
Alternative 5
Accuracy: 3.5%
Cost: 192
\[x \cdot 0.5 \]

Error

Reproduce?

herbie shell --seed 2023126 
(FPCore (x y)
  :name "Logistic regression 2"
  :precision binary64

  :herbie-target
  (if (<= x 0.0) (- (log (+ 1.0 (exp x))) (* x y)) (- (log (+ 1.0 (exp (- x)))) (* (- x) (- 1.0 y))))

  (- (log (+ 1.0 (exp x))) (* x y)))