Average Error: 0.6 → 0.7
Time: 6.9s
Precision: binary64
Cost: 6980
\[\log \left(1 + e^{x}\right) - x \cdot y \]
\[\begin{array}{l} \mathbf{if}\;x \leq -1.4:\\ \;\;\;\;y \cdot \left(-x\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(0.5 - y\right) + \log 2\\ \end{array} \]
;; Original program: log(1 + e^x) - x*y in binary64.
(FPCore (x y) :precision binary64 (- (log (+ 1.0 (exp x))) (* x y)))
;; Herbie's rewrite: y*(-x) when x <= -1.4, otherwise x*(0.5 - y) + log 2.
(FPCore (x y)
 :precision binary64
 (if (<= x -1.4) (* y (- x)) (+ (* x (- 0.5 y)) (log 2.0))))
double code(double x, double y) {
	return log((1.0 + exp(x))) - (x * y);
}
/* Herbie rewrite: piecewise approximation of log(1 + e^x) - x*y. */
double code(double x, double y) {
	if (x <= -1.4)
		return y * -x;          /* large negative x: softplus ~ 0 */
	return x * (0.5 - y) + log(2.0); /* Taylor expansion around x = 0 */
}
! Reference implementation: log(1 + exp(x)) - x*y in double precision.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: softplus
    softplus = log(1.0d0 + exp(x))
    code = softplus - x * y
end function
! Herbie rewrite: piecewise approximation of log(1 + exp(x)) - x*y.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    if (x <= (-1.4d0)) then
        ! large negative x: the softplus term is negligible
        code = y * (-x)
    else
        ! Taylor expansion of the softplus term around x = 0
        code = x * (0.5d0 - y) + log(2.0d0)
    end if
end function
/** Reference implementation: the softplus term log(1 + e^x) minus x*y. */
public static double code(double x, double y) {
	final double softplus = Math.log(1.0 + Math.exp(x));
	return softplus - x * y;
}
/** Herbie rewrite: piecewise approximation of log(1 + e^x) - x*y. */
public static double code(double x, double y) {
	if (x <= -1.4) {
		// large negative x: the softplus term is negligible
		return y * -x;
	}
	// Taylor expansion of the softplus term around x = 0
	return x * (0.5 - y) + Math.log(2.0);
}
def code(x, y):
	"""Reference implementation: the softplus term log(1 + e**x) minus x*y."""
	softplus = math.log(1.0 + math.exp(x))
	return softplus - x * y
def code(x, y):
	"""Herbie rewrite: piecewise approximation of log(1 + e**x) - x*y."""
	if x <= -1.4:
		# large negative x: the softplus term is negligible
		return y * -x
	# Taylor expansion of the softplus term around x = 0
	return x * (0.5 - y) + math.log(2.0)
# Reference implementation: log(1 + exp(x)) - x*y, with the same explicit
# Float64 roundings as the generated original.
function code(x, y)
	inner = Float64(1.0 + exp(x))
	product = Float64(x * y)
	return Float64(log(inner) - product)
end
# Herbie rewrite: piecewise approximation of log(1 + exp(x)) - x*y,
# preserving the generated code's explicit Float64 roundings.
function code(x, y)
	if (x <= -1.4)
		# large negative x: the softplus term is negligible
		return Float64(y * Float64(-x))
	end
	# Taylor expansion of the softplus term around x = 0
	return Float64(Float64(x * Float64(0.5 - y)) + log(2.0))
end
function tmp = code(x, y)
	% Reference implementation: the softplus term log(1 + exp(x)) minus x*y.
	softplus = log(1.0 + exp(x));
	tmp = softplus - (x * y);
end
function tmp_2 = code(x, y)
	% Herbie rewrite: piecewise approximation of log(1 + exp(x)) - x*y.
	if (x <= -1.4)
		% large negative x: the softplus term is negligible
		tmp_2 = y * -x;
	else
		% Taylor expansion of the softplus term around x = 0
		tmp_2 = (x * (0.5 - y)) + log(2.0);
	end
end
(* Reference implementation: log(1 + E^x) - x*y, with each intermediate rounded to $MachinePrecision. *)
code[x_, y_] := N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]
(* Herbie rewrite: y*(-x) when x <= -1.4 (softplus negligible), otherwise the Taylor expansion x*(0.5 - y) + Log[2]. *)
code[x_, y_] := If[LessEqual[x, -1.4], N[(y * (-x)), $MachinePrecision], N[(N[(x * N[(0.5 - y), $MachinePrecision]), $MachinePrecision] + N[Log[2.0], $MachinePrecision]), $MachinePrecision]]
\log \left(1 + e^{x}\right) - x \cdot y
\begin{array}{l}
\mathbf{if}\;x \leq -1.4:\\
\;\;\;\;y \cdot \left(-x\right)\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(0.5 - y\right) + \log 2\\


\end{array}

Error

Try it out

Your Program's Arguments

Results

Enter valid numbers for all inputs

Target

Original 0.6
Target 0.1
Herbie 0.7
\[\begin{array}{l} \mathbf{if}\;x \leq 0:\\ \;\;\;\;\log \left(1 + e^{x}\right) - x \cdot y\\ \mathbf{else}:\\ \;\;\;\;\log \left(1 + e^{-x}\right) - \left(-x\right) \cdot \left(1 - y\right)\\ \end{array} \]

Derivation

  1. Split input into 2 regimes
  2. if x < -1.3999999999999999

    1. Initial program 0.2

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Simplified 0.0

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      Proof
      (-.f64 (log1p.f64 (exp.f64 x)) (*.f64 x y)): 0 points increase in error, 0 points decrease in error
      (-.f64 (Rewrite<= log1p-def_binary64 (log.f64 (+.f64 1 (exp.f64 x)))) (*.f64 x y)): 0 points increase in error, 0 points decrease in error
    3. Taylor expanded in x around inf 0.3

      \[\leadsto \color{blue}{-1 \cdot \left(y \cdot x\right)} \]
    4. Simplified 0.3

      \[\leadsto \color{blue}{x \cdot \left(-y\right)} \]
      Proof
      (*.f64 x (neg.f64 y)): 0 points increase in error, 0 points decrease in error
      (Rewrite=> *-commutative_binary64 (*.f64 (neg.f64 y) x)): 0 points increase in error, 0 points decrease in error
      (*.f64 (Rewrite=> neg-mul-1_binary64 (*.f64 -1 y)) x): 0 points increase in error, 0 points decrease in error
      (Rewrite<= associate-*r*_binary64 (*.f64 -1 (*.f64 y x))): 0 points increase in error, 0 points decrease in error

    if -1.3999999999999999 < x

    1. Initial program 0.7

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Simplified 0.7

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      Proof
      (-.f64 (log1p.f64 (exp.f64 x)) (*.f64 x y)): 0 points increase in error, 0 points decrease in error
      (-.f64 (Rewrite<= log1p-def_binary64 (log.f64 (+.f64 1 (exp.f64 x)))) (*.f64 x y)): 0 points increase in error, 0 points decrease in error
    3. Taylor expanded in x around 0 0.8

      \[\leadsto \color{blue}{\left(0.5 - y\right) \cdot x + \log 2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 0.7

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.4:\\ \;\;\;\;y \cdot \left(-x\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(0.5 - y\right) + \log 2\\ \end{array} \]

Alternatives

Alternative 1
Error 0.5
Cost 13120
\[\mathsf{log1p}\left(e^{x}\right) - x \cdot y \]
Alternative 2
Error 13.6
Cost 6992
\[\begin{array}{l} t_0 := y \cdot \left(-x\right)\\ \mathbf{if}\;x \leq -5.6 \cdot 10^{-30}:\\ \;\;\;\;t_0\\ \mathbf{elif}\;x \leq -2.65 \cdot 10^{-79}:\\ \;\;\;\;\log 2\\ \mathbf{elif}\;x \leq -3.2 \cdot 10^{-96}:\\ \;\;\;\;t_0\\ \mathbf{elif}\;x \leq 3 \cdot 10^{-97}:\\ \;\;\;\;\log 2\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(0.5 - y\right)\\ \end{array} \]
Alternative 3
Error 0.9
Cost 6852
\[\begin{array}{l} \mathbf{if}\;x \leq -140:\\ \;\;\;\;y \cdot \left(-x\right)\\ \mathbf{else}:\\ \;\;\;\;\log 2 - x \cdot y\\ \end{array} \]
Alternative 4
Error 34.7
Cost 256
\[y \cdot \left(-x\right) \]
Alternative 5
Error 61.7
Cost 192
\[x \cdot 0.5 \]

Error

Reproduce

herbie shell --seed 2022331 
;; Input used to reproduce this report, including the hand-written target expression.
(FPCore (x y)
  :name "Logistic regression 2"
  :precision binary64

  :herbie-target
  (if (<= x 0.0) (- (log (+ 1.0 (exp x))) (* x y)) (- (log (+ 1.0 (exp (- x)))) (* (- x) (- 1.0 y))))

  (- (log (+ 1.0 (exp x))) (* x y)))