Logistic regression 2

Percentage Accurate: 99.1% → 99.2%
Time: 13.0s
Alternatives: 10
Speedup: 1.8×

Specification

\[\log \left(1 + e^{x}\right) - x \cdot y \]
(FPCore (x y) :precision binary64 (- (log (+ 1.0 (exp x))) (* x y)))
double code(double x, double y) {
	return log((1.0 + exp(x))) - (x * y);
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = log((1.0d0 + exp(x))) - (x * y)
end function
public static double code(double x, double y) {
	return Math.log((1.0 + Math.exp(x))) - (x * y);
}
def code(x, y):
	return math.log((1.0 + math.exp(x))) - (x * y)
function code(x, y)
	return Float64(log(Float64(1.0 + exp(x))) - Float64(x * y))
end
function tmp = code(x, y)
	tmp = log((1.0 + exp(x))) - (x * y);
end
code[x_, y_] := N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\log \left(1 + e^{x}\right) - x \cdot y
\end{array}
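
One way to see why the specification needs rewriting: for large positive x, exp(x) exceeds the binary64 range (roughly x > 709) even though log(1 + e^x) itself is then essentially x. The sketch below is illustrative only, not part of Herbie's output; the names naive and stable are ad hoc, and stable uses the standard identity log(1 + e^x) = x + log1p(e^-x) for positive x.

import math

def naive(x, y):
	# Direct transcription of the specification.
	return math.log(1.0 + math.exp(x)) - x * y

def stable(x, y):
	# Hand rewrite for illustration: when x > 0, use
	# log(1 + e^x) = x + log1p(e^-x) so exp never overflows.
	if x > 0.0:
		return x + math.log1p(math.exp(-x)) - x * y
	return math.log1p(math.exp(x)) - x * y

try:
	print(naive(800.0, 1.0))
except OverflowError:
	print("naive overflows: exp(800) is out of binary64 range")
print(stable(800.0, 1.0))  # 0.0, since log(1 + e^800) is essentially 800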

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 10 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.1% accurate, 1.0× speedup

\[\log \left(1 + e^{x}\right) - x \cdot y \]
(FPCore (x y) :precision binary64 (- (log (+ 1.0 (exp x))) (* x y)))
double code(double x, double y) {
	return log((1.0 + exp(x))) - (x * y);
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = log((1.0d0 + exp(x))) - (x * y)
end function
public static double code(double x, double y) {
	return Math.log((1.0 + Math.exp(x))) - (x * y);
}
def code(x, y):
	return math.log((1.0 + math.exp(x))) - (x * y)
function code(x, y)
	return Float64(log(Float64(1.0 + exp(x))) - Float64(x * y))
end
function tmp = code(x, y)
	tmp = log((1.0 + exp(x))) - (x * y);
end
code[x_, y_] := N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\log \left(1 + e^{x}\right) - x \cdot y
\end{array}

Alternative 1: 99.2% accurate, 1.7× speedup

\[\begin{array}{l} \mathbf{if}\;x \leq -3.55:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2 + x \cdot \left(0.5 + \left(x \cdot \left(0.125 + -0.005208333333333333 \cdot \left(x \cdot x\right)\right) - y\right)\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= x -3.55)
   (- 0.0 (* y x))
   (+
    (log 2.0)
    (* x (+ 0.5 (- (* x (+ 0.125 (* -0.005208333333333333 (* x x)))) y))))))
double code(double x, double y) {
	double tmp;
	if (x <= -3.55) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = log(2.0) + (x * (0.5 + ((x * (0.125 + (-0.005208333333333333 * (x * x)))) - y)));
	}
	return tmp;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: tmp
    if (x <= (-3.55d0)) then
        tmp = 0.0d0 - (y * x)
    else
        tmp = log(2.0d0) + (x * (0.5d0 + ((x * (0.125d0 + ((-0.005208333333333333d0) * (x * x)))) - y)))
    end if
    code = tmp
end function
public static double code(double x, double y) {
	double tmp;
	if (x <= -3.55) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = Math.log(2.0) + (x * (0.5 + ((x * (0.125 + (-0.005208333333333333 * (x * x)))) - y)));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if x <= -3.55:
		tmp = 0.0 - (y * x)
	else:
		tmp = math.log(2.0) + (x * (0.5 + ((x * (0.125 + (-0.005208333333333333 * (x * x)))) - y)))
	return tmp
function code(x, y)
	tmp = 0.0
	if (x <= -3.55)
		tmp = Float64(0.0 - Float64(y * x));
	else
		tmp = Float64(log(2.0) + Float64(x * Float64(0.5 + Float64(Float64(x * Float64(0.125 + Float64(-0.005208333333333333 * Float64(x * x)))) - y))));
	end
	return tmp
end
function tmp_2 = code(x, y)
	tmp = 0.0;
	if (x <= -3.55)
		tmp = 0.0 - (y * x);
	else
		tmp = log(2.0) + (x * (0.5 + ((x * (0.125 + (-0.005208333333333333 * (x * x)))) - y)));
	end
	tmp_2 = tmp;
end
code[x_, y_] := If[LessEqual[x, -3.55], N[(0.0 - N[(y * x), $MachinePrecision]), $MachinePrecision], N[(N[Log[2.0], $MachinePrecision] + N[(x * N[(0.5 + N[(N[(x * N[(0.125 + N[(-0.005208333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;x \leq -3.55:\\
\;\;\;\;0 - y \cdot x\\
\mathbf{else}:\\
\;\;\;\;\log 2 + x \cdot \left(0.5 + \left(x \cdot \left(0.125 + -0.005208333333333333 \cdot \left(x \cdot x\right)\right) - y\right)\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -3.5499999999999998

    1. Initial program 100.0%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f64100.0%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 100.0%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot y\right)} \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{neg}\left(x \cdot y\right) \]
      2. distribute-rgt-neg-inN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\mathsf{neg}\left(y\right)\right)}\right) \]
      4. neg-sub0N/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(0 - \color{blue}{y}\right)\right) \]
      5. --lowering--.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{\_.f64}\left(0, \color{blue}{y}\right)\right) \]
    7. Simplified 100.0%

      \[\leadsto \color{blue}{x \cdot \left(0 - y\right)} \]
    8. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(\mathsf{neg}\left(y\right)\right)\right) \]
      2. neg-lowering-neg.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{neg.f64}\left(y\right)\right) \]
    9. Applied egg-rr 100.0%

      \[\leadsto x \cdot \color{blue}{\left(-y\right)} \]

    if -3.5499999999999998 < x

    1. Initial program 98.2%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f6498.2%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 98.2%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\log 2 + x \cdot \left(\left(\frac{1}{2} + x \cdot \left(\frac{1}{8} + \frac{-1}{192} \cdot {x}^{2}\right)\right) - y\right)} \]
    6. Step-by-step derivation
      1. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\log 2, \color{blue}{\left(x \cdot \left(\left(\frac{1}{2} + x \cdot \left(\frac{1}{8} + \frac{-1}{192} \cdot {x}^{2}\right)\right) - y\right)\right)}\right) \]
      2. log-lowering-log.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \left(\color{blue}{x} \cdot \left(\left(\frac{1}{2} + x \cdot \left(\frac{1}{8} + \frac{-1}{192} \cdot {x}^{2}\right)\right) - y\right)\right)\right) \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \color{blue}{\left(\left(\frac{1}{2} + x \cdot \left(\frac{1}{8} + \frac{-1}{192} \cdot {x}^{2}\right)\right) - y\right)}\right)\right) \]
      4. associate--l+N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \left(\frac{1}{2} + \color{blue}{\left(x \cdot \left(\frac{1}{8} + \frac{-1}{192} \cdot {x}^{2}\right) - y\right)}\right)\right)\right) \]
      5. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left(x \cdot \left(\frac{1}{8} + \frac{-1}{192} \cdot {x}^{2}\right) - y\right)}\right)\right)\right) \]
      6. --lowering--.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\left(x \cdot \left(\frac{1}{8} + \frac{-1}{192} \cdot {x}^{2}\right)\right), \color{blue}{y}\right)\right)\right)\right) \]
      7. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \left(\frac{1}{8} + \frac{-1}{192} \cdot {x}^{2}\right)\right), y\right)\right)\right)\right) \]
      8. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{8}, \left(\frac{-1}{192} \cdot {x}^{2}\right)\right)\right), y\right)\right)\right)\right) \]
      9. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{8}, \mathsf{*.f64}\left(\frac{-1}{192}, \left({x}^{2}\right)\right)\right)\right), y\right)\right)\right)\right) \]
      10. unpow2N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{8}, \mathsf{*.f64}\left(\frac{-1}{192}, \left(x \cdot x\right)\right)\right)\right), y\right)\right)\right)\right) \]
      11. *-lowering-*.f64100.0%

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{8}, \mathsf{*.f64}\left(\frac{-1}{192}, \mathsf{*.f64}\left(x, x\right)\right)\right)\right), y\right)\right)\right)\right) \]
    7. Simplified 100.0%

      \[\leadsto \color{blue}{\log 2 + x \cdot \left(0.5 + \left(x \cdot \left(0.125 + -0.005208333333333333 \cdot \left(x \cdot x\right)\right) - y\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -3.55:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2 + x \cdot \left(0.5 + \left(x \cdot \left(0.125 + -0.005208333333333333 \cdot \left(x \cdot x\right)\right) - y\right)\right)\\ \end{array} \]
  5. Add Preprocessing
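
The else branch above is the degree-4 Maclaurin series of log(1 + e^x), namely log 2 + x/2 + x^2/8 - x^4/192 (the constant -0.005208333333333333 is -1/192), with the x*y term folded into the Horner form. A quick numeric check of that expansion, as a standard-library sketch (alt1_else and spec are ad hoc names):

import math

def alt1_else(x, y):
	# Alternative 1's else branch, transcribed as-is.
	return math.log(2.0) + x * (0.5 + (x * (0.125 + (-1.0 / 192.0) * (x * x)) - y))

def spec(x, y):
	return math.log(1.0 + math.exp(x)) - x * y

for x in (0.01, 0.1, 0.5, 1.0):
	print(x, abs(alt1_else(x, 0.0) - spec(x, 0.0)))  # error shrinks like x**6 as x -> 0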

Alternative 2: 99.2% accurate, 0.7× speedup

\[\mathsf{fma}\left(0 - y, x, \mathsf{log1p}\left(e^{x}\right)\right) \]
(FPCore (x y) :precision binary64 (fma (- 0.0 y) x (log1p (exp x))))
double code(double x, double y) {
	return fma((0.0 - y), x, log1p(exp(x)));
}
function code(x, y)
	return fma(Float64(0.0 - y), x, log1p(exp(x)))
end
code[x_, y_] := N[(N[(0.0 - y), $MachinePrecision] * x + N[Log[1 + N[Exp[x], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\mathsf{fma}\left(0 - y, x, \mathsf{log1p}\left(e^{x}\right)\right)
\end{array}
Derivation
  1. Initial program 98.8%

    \[\log \left(1 + e^{x}\right) - x \cdot y \]
  2. Step-by-step derivation
    1. --lowering--.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
    2. log1p-defineN/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
    3. log1p-lowering-log1p.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
    4. exp-lowering-exp.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
    5. *-lowering-*.f6498.8%

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
  3. Simplified 98.8%

    \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
  4. Add Preprocessing
  5. Step-by-step derivation
    1. sub-negN/A

      \[\leadsto \log \left(1 + e^{x}\right) + \color{blue}{\left(\mathsf{neg}\left(x \cdot y\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto \left(\mathsf{neg}\left(x \cdot y\right)\right) + \color{blue}{\log \left(1 + e^{x}\right)} \]
    3. *-commutativeN/A

      \[\leadsto \left(\mathsf{neg}\left(y \cdot x\right)\right) + \log \left(\color{blue}{1} + e^{x}\right) \]
    4. distribute-lft-neg-inN/A

      \[\leadsto \left(\mathsf{neg}\left(y\right)\right) \cdot x + \log \color{blue}{\left(1 + e^{x}\right)} \]
    5. fma-defineN/A

      \[\leadsto \mathsf{fma}\left(\mathsf{neg}\left(y\right), \color{blue}{x}, \log \left(1 + e^{x}\right)\right) \]
    6. fma-lowering-fma.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\left(\mathsf{neg}\left(y\right)\right), \color{blue}{x}, \log \left(1 + e^{x}\right)\right) \]
    7. neg-sub0N/A

      \[\leadsto \mathsf{fma.f64}\left(\left(0 - y\right), x, \log \left(1 + e^{x}\right)\right) \]
    8. --lowering--.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{\_.f64}\left(0, y\right), x, \log \left(1 + e^{x}\right)\right) \]
    9. log1p-defineN/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{\_.f64}\left(0, y\right), x, \left(\mathsf{log1p}\left(e^{x}\right)\right)\right) \]
    10. log1p-lowering-log1p.f64N/A

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{\_.f64}\left(0, y\right), x, \mathsf{log1p.f64}\left(\left(e^{x}\right)\right)\right) \]
    11. exp-lowering-exp.f6498.8%

      \[\leadsto \mathsf{fma.f64}\left(\mathsf{\_.f64}\left(0, y\right), x, \mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right)\right) \]
  6. Applied egg-rr 98.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(0 - y, x, \mathsf{log1p}\left(e^{x}\right)\right)} \]
  7. Add Preprocessing
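
Alternative 2 hinges on the fused multiply-add: fma(0 - y, x, log1p(e^x)) rounds the product and the sum once, which is where its accuracy comes from at a cost in speed. The C and Julia listings above call fma directly; a Python transcription needs math.fma, which to my knowledge is only available from Python 3.13 onward, so treat this sketch as version-dependent:

import math

def alt2(x, y):
	# fma((0 - y), x, log1p(exp(x))): a single rounding for (-y)*x + log1p(e^x).
	# Requires Python >= 3.13 for math.fma; exp(x) still overflows for x > ~709.
	return math.fma(0.0 - y, x, math.log1p(math.exp(x)))

print(alt2(0.5, 0.25))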

Alternative 3: 99.1% accurate, 1.0× speedup

\[\mathsf{log1p}\left(e^{x}\right) - y \cdot x \]
(FPCore (x y) :precision binary64 (- (log1p (exp x)) (* y x)))
double code(double x, double y) {
	return log1p(exp(x)) - (y * x);
}
public static double code(double x, double y) {
	return Math.log1p(Math.exp(x)) - (y * x);
}
def code(x, y):
	return math.log1p(math.exp(x)) - (y * x)
function code(x, y)
	return Float64(log1p(exp(x)) - Float64(y * x))
end
code[x_, y_] := N[(N[Log[1 + N[Exp[x], $MachinePrecision]], $MachinePrecision] - N[(y * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\mathsf{log1p}\left(e^{x}\right) - y \cdot x
\end{array}
Derivation
  1. Initial program 98.8%

    \[\log \left(1 + e^{x}\right) - x \cdot y \]
  2. Step-by-step derivation
    1. --lowering--.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
    2. log1p-defineN/A

      \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
    3. log1p-lowering-log1p.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
    4. exp-lowering-exp.f64N/A

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
    5. *-lowering-*.f6498.8%

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
  3. Simplified 98.8%

    \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
  4. Add Preprocessing
  5. Final simplification 98.8%

    \[\leadsto \mathsf{log1p}\left(e^{x}\right) - y \cdot x \]
  6. Add Preprocessing
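
The point of log1p here: for very negative x, 1 + e^x rounds to exactly 1.0 in binary64, so the naive logarithm returns 0, while log1p(e^x) keeps the small result. A short demonstration (the value -40 is chosen for illustration):

import math

x = -40.0
print(math.log(1.0 + math.exp(x)))  # 0.0: 1 + 4.25e-18 rounds to exactly 1.0
print(math.log1p(math.exp(x)))      # ~4.248e-18, close to the true value of log(1 + e^-40)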

Alternative 4: 99.3% accurate, 1.8× speedup

\[\begin{array}{l} \mathbf{if}\;x \leq -80:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(x \cdot 0.125 - y\right) + \left(\log 2 + x \cdot 0.5\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= x -80.0)
   (- 0.0 (* y x))
   (+ (* x (- (* x 0.125) y)) (+ (log 2.0) (* x 0.5)))))
double code(double x, double y) {
	double tmp;
	if (x <= -80.0) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = (x * ((x * 0.125) - y)) + (log(2.0) + (x * 0.5));
	}
	return tmp;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: tmp
    if (x <= (-80.0d0)) then
        tmp = 0.0d0 - (y * x)
    else
        tmp = (x * ((x * 0.125d0) - y)) + (log(2.0d0) + (x * 0.5d0))
    end if
    code = tmp
end function
public static double code(double x, double y) {
	double tmp;
	if (x <= -80.0) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = (x * ((x * 0.125) - y)) + (Math.log(2.0) + (x * 0.5));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if x <= -80.0:
		tmp = 0.0 - (y * x)
	else:
		tmp = (x * ((x * 0.125) - y)) + (math.log(2.0) + (x * 0.5))
	return tmp
function code(x, y)
	tmp = 0.0
	if (x <= -80.0)
		tmp = Float64(0.0 - Float64(y * x));
	else
		tmp = Float64(Float64(x * Float64(Float64(x * 0.125) - y)) + Float64(log(2.0) + Float64(x * 0.5)));
	end
	return tmp
end
function tmp_2 = code(x, y)
	tmp = 0.0;
	if (x <= -80.0)
		tmp = 0.0 - (y * x);
	else
		tmp = (x * ((x * 0.125) - y)) + (log(2.0) + (x * 0.5));
	end
	tmp_2 = tmp;
end
code[x_, y_] := If[LessEqual[x, -80.0], N[(0.0 - N[(y * x), $MachinePrecision]), $MachinePrecision], N[(N[(x * N[(N[(x * 0.125), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision] + N[(N[Log[2.0], $MachinePrecision] + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;x \leq -80:\\
\;\;\;\;0 - y \cdot x\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(x \cdot 0.125 - y\right) + \left(\log 2 + x \cdot 0.5\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -80

    1. Initial program 100.0%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f64100.0%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 100.0%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot y\right)} \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{neg}\left(x \cdot y\right) \]
      2. distribute-rgt-neg-inN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\mathsf{neg}\left(y\right)\right)}\right) \]
      4. neg-sub0N/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(0 - \color{blue}{y}\right)\right) \]
      5. --lowering--.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{\_.f64}\left(0, \color{blue}{y}\right)\right) \]
    7. Simplified 100.0%

      \[\leadsto \color{blue}{x \cdot \left(0 - y\right)} \]
    8. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(\mathsf{neg}\left(y\right)\right)\right) \]
      2. neg-lowering-neg.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{neg.f64}\left(y\right)\right) \]
    9. Applied egg-rr 100.0%

      \[\leadsto x \cdot \color{blue}{\left(-y\right)} \]

    if -80 < x

    1. Initial program 98.2%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f6498.2%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 98.2%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\log 2 + x \cdot \left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)} \]
    6. Step-by-step derivation
      1. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\log 2, \color{blue}{\left(x \cdot \left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)\right)}\right) \]
      2. log-lowering-log.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \left(\color{blue}{x} \cdot \left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)\right)\right) \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \color{blue}{\left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)}\right)\right) \]
      4. associate--l+N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \left(\frac{1}{2} + \color{blue}{\left(\frac{1}{8} \cdot x - y\right)}\right)\right)\right) \]
      5. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left(\frac{1}{8} \cdot x - y\right)}\right)\right)\right) \]
      6. --lowering--.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\left(\frac{1}{8} \cdot x\right), \color{blue}{y}\right)\right)\right)\right) \]
      7. *-commutativeN/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\left(x \cdot \frac{1}{8}\right), y\right)\right)\right)\right) \]
      8. *-lowering-*.f6499.9%

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \frac{1}{8}\right), y\right)\right)\right)\right) \]
    7. Simplified 99.9%

      \[\leadsto \color{blue}{\log 2 + x \cdot \left(0.5 + \left(x \cdot 0.125 - y\right)\right)} \]
    8. Step-by-step derivation
      1. metadata-evalN/A

        \[\leadsto \log \left(1 + 1\right) + x \cdot \left(\frac{1}{2} + \left(x \cdot \frac{1}{8} - y\right)\right) \]
      2. distribute-lft-inN/A

        \[\leadsto \log \left(1 + 1\right) + \left(x \cdot \frac{1}{2} + \color{blue}{x \cdot \left(x \cdot \frac{1}{8} - y\right)}\right) \]
      3. associate-+r+N/A

        \[\leadsto \left(\log \left(1 + 1\right) + x \cdot \frac{1}{2}\right) + \color{blue}{x \cdot \left(x \cdot \frac{1}{8} - y\right)} \]
      4. *-commutativeN/A

        \[\leadsto \left(\log \left(1 + 1\right) + \frac{1}{2} \cdot x\right) + x \cdot \left(x \cdot \frac{1}{8} - y\right) \]
      5. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\left(\log \left(1 + 1\right) + \frac{1}{2} \cdot x\right), \color{blue}{\left(x \cdot \left(x \cdot \frac{1}{8} - y\right)\right)}\right) \]
      6. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(\log \left(1 + 1\right), \left(\frac{1}{2} \cdot x\right)\right), \left(\color{blue}{x} \cdot \left(x \cdot \frac{1}{8} - y\right)\right)\right) \]
      7. log-lowering-log.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(\mathsf{log.f64}\left(\left(1 + 1\right)\right), \left(\frac{1}{2} \cdot x\right)\right), \left(x \cdot \left(x \cdot \frac{1}{8} - y\right)\right)\right) \]
      8. metadata-evalN/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \left(\frac{1}{2} \cdot x\right)\right), \left(x \cdot \left(x \cdot \frac{1}{8} - y\right)\right)\right) \]
      9. *-commutativeN/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \left(x \cdot \frac{1}{2}\right)\right), \left(x \cdot \left(x \cdot \frac{1}{8} - y\right)\right)\right) \]
      10. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \frac{1}{2}\right)\right), \left(x \cdot \left(x \cdot \frac{1}{8} - y\right)\right)\right) \]
      11. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \frac{1}{2}\right)\right), \mathsf{*.f64}\left(x, \color{blue}{\left(x \cdot \frac{1}{8} - y\right)}\right)\right) \]
      12. --lowering--.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \frac{1}{2}\right)\right), \mathsf{*.f64}\left(x, \mathsf{\_.f64}\left(\left(x \cdot \frac{1}{8}\right), \color{blue}{y}\right)\right)\right) \]
      13. *-lowering-*.f6499.9%

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \frac{1}{2}\right)\right), \mathsf{*.f64}\left(x, \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \frac{1}{8}\right), y\right)\right)\right) \]
    9. Applied egg-rr 99.9%

      \[\leadsto \color{blue}{\left(\log 2 + x \cdot 0.5\right) + x \cdot \left(x \cdot 0.125 - y\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -80:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(x \cdot 0.125 - y\right) + \left(\log 2 + x \cdot 0.5\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 99.3% accurate, 1.8× speedup

\[\begin{array}{l} \mathbf{if}\;x \leq -102:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2 + x \cdot \left(0.5 + \left(x \cdot 0.125 - y\right)\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= x -102.0)
   (- 0.0 (* y x))
   (+ (log 2.0) (* x (+ 0.5 (- (* x 0.125) y))))))
double code(double x, double y) {
	double tmp;
	if (x <= -102.0) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = log(2.0) + (x * (0.5 + ((x * 0.125) - y)));
	}
	return tmp;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: tmp
    if (x <= (-102.0d0)) then
        tmp = 0.0d0 - (y * x)
    else
        tmp = log(2.0d0) + (x * (0.5d0 + ((x * 0.125d0) - y)))
    end if
    code = tmp
end function
public static double code(double x, double y) {
	double tmp;
	if (x <= -102.0) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = Math.log(2.0) + (x * (0.5 + ((x * 0.125) - y)));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if x <= -102.0:
		tmp = 0.0 - (y * x)
	else:
		tmp = math.log(2.0) + (x * (0.5 + ((x * 0.125) - y)))
	return tmp
function code(x, y)
	tmp = 0.0
	if (x <= -102.0)
		tmp = Float64(0.0 - Float64(y * x));
	else
		tmp = Float64(log(2.0) + Float64(x * Float64(0.5 + Float64(Float64(x * 0.125) - y))));
	end
	return tmp
end
function tmp_2 = code(x, y)
	tmp = 0.0;
	if (x <= -102.0)
		tmp = 0.0 - (y * x);
	else
		tmp = log(2.0) + (x * (0.5 + ((x * 0.125) - y)));
	end
	tmp_2 = tmp;
end
code[x_, y_] := If[LessEqual[x, -102.0], N[(0.0 - N[(y * x), $MachinePrecision]), $MachinePrecision], N[(N[Log[2.0], $MachinePrecision] + N[(x * N[(0.5 + N[(N[(x * 0.125), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;x \leq -102:\\
\;\;\;\;0 - y \cdot x\\
\mathbf{else}:\\
\;\;\;\;\log 2 + x \cdot \left(0.5 + \left(x \cdot 0.125 - y\right)\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -102

    1. Initial program 100.0%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f64100.0%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 100.0%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot y\right)} \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{neg}\left(x \cdot y\right) \]
      2. distribute-rgt-neg-inN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\mathsf{neg}\left(y\right)\right)}\right) \]
      4. neg-sub0N/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(0 - \color{blue}{y}\right)\right) \]
      5. --lowering--.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{\_.f64}\left(0, \color{blue}{y}\right)\right) \]
    7. Simplified 100.0%

      \[\leadsto \color{blue}{x \cdot \left(0 - y\right)} \]
    8. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(\mathsf{neg}\left(y\right)\right)\right) \]
      2. neg-lowering-neg.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{neg.f64}\left(y\right)\right) \]
    9. Applied egg-rr 100.0%

      \[\leadsto x \cdot \color{blue}{\left(-y\right)} \]

    if -102 < x

    1. Initial program 98.2%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f6498.2%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 98.2%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\log 2 + x \cdot \left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)} \]
    6. Step-by-step derivation
      1. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\log 2, \color{blue}{\left(x \cdot \left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)\right)}\right) \]
      2. log-lowering-log.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \left(\color{blue}{x} \cdot \left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)\right)\right) \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \color{blue}{\left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)}\right)\right) \]
      4. associate--l+N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \left(\frac{1}{2} + \color{blue}{\left(\frac{1}{8} \cdot x - y\right)}\right)\right)\right) \]
      5. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left(\frac{1}{8} \cdot x - y\right)}\right)\right)\right) \]
      6. --lowering--.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\left(\frac{1}{8} \cdot x\right), \color{blue}{y}\right)\right)\right)\right) \]
      7. *-commutativeN/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\left(x \cdot \frac{1}{8}\right), y\right)\right)\right)\right) \]
      8. *-lowering-*.f6499.9%

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \frac{1}{8}\right), y\right)\right)\right)\right) \]
    7. Simplified 99.9%

      \[\leadsto \color{blue}{\log 2 + x \cdot \left(0.5 + \left(x \cdot 0.125 - y\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -102:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2 + x \cdot \left(0.5 + \left(x \cdot 0.125 - y\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 99.1% accurate, 1.8× speedup

\[\begin{array}{l} \mathbf{if}\;x \leq -3.55:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2 + x \cdot \left(0.5 - y\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= x -3.55) (- 0.0 (* y x)) (+ (log 2.0) (* x (- 0.5 y)))))
double code(double x, double y) {
	double tmp;
	if (x <= -3.55) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = log(2.0) + (x * (0.5 - y));
	}
	return tmp;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: tmp
    if (x <= (-3.55d0)) then
        tmp = 0.0d0 - (y * x)
    else
        tmp = log(2.0d0) + (x * (0.5d0 - y))
    end if
    code = tmp
end function
public static double code(double x, double y) {
	double tmp;
	if (x <= -3.55) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = Math.log(2.0) + (x * (0.5 - y));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if x <= -3.55:
		tmp = 0.0 - (y * x)
	else:
		tmp = math.log(2.0) + (x * (0.5 - y))
	return tmp
function code(x, y)
	tmp = 0.0
	if (x <= -3.55)
		tmp = Float64(0.0 - Float64(y * x));
	else
		tmp = Float64(log(2.0) + Float64(x * Float64(0.5 - y)));
	end
	return tmp
end
function tmp_2 = code(x, y)
	tmp = 0.0;
	if (x <= -3.55)
		tmp = 0.0 - (y * x);
	else
		tmp = log(2.0) + (x * (0.5 - y));
	end
	tmp_2 = tmp;
end
code[x_, y_] := If[LessEqual[x, -3.55], N[(0.0 - N[(y * x), $MachinePrecision]), $MachinePrecision], N[(N[Log[2.0], $MachinePrecision] + N[(x * N[(0.5 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;x \leq -3.55:\\
\;\;\;\;0 - y \cdot x\\
\mathbf{else}:\\
\;\;\;\;\log 2 + x \cdot \left(0.5 - y\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -3.5499999999999998

    1. Initial program 100.0%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f64100.0%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 100.0%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot y\right)} \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{neg}\left(x \cdot y\right) \]
      2. distribute-rgt-neg-inN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\mathsf{neg}\left(y\right)\right)}\right) \]
      4. neg-sub0N/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(0 - \color{blue}{y}\right)\right) \]
      5. --lowering--.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{\_.f64}\left(0, \color{blue}{y}\right)\right) \]
    7. Simplified 100.0%

      \[\leadsto \color{blue}{x \cdot \left(0 - y\right)} \]
    8. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(\mathsf{neg}\left(y\right)\right)\right) \]
      2. neg-lowering-neg.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{neg.f64}\left(y\right)\right) \]
    9. Applied egg-rr 100.0%

      \[\leadsto x \cdot \color{blue}{\left(-y\right)} \]

    if -3.5499999999999998 < x

    1. Initial program 98.2%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f6498.2%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 98.2%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\log 2 + x \cdot \left(\frac{1}{2} - y\right)} \]
    6. Step-by-step derivation
      1. +-lowering-+.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\log 2, \color{blue}{\left(x \cdot \left(\frac{1}{2} - y\right)\right)}\right) \]
      2. log-lowering-log.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \left(\color{blue}{x} \cdot \left(\frac{1}{2} - y\right)\right)\right) \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{2} - y\right)}\right)\right) \]
      4. --lowering--.f6499.7%

        \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{\_.f64}\left(\frac{1}{2}, \color{blue}{y}\right)\right)\right) \]
    7. Simplified 99.7%

      \[\leadsto \color{blue}{\log 2 + x \cdot \left(0.5 - y\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 99.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -3.55:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2 + x \cdot \left(0.5 - y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 98.7% accurate, 1.9× speedup

\[\begin{array}{l} \mathbf{if}\;x \leq -102:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(1\right) - y \cdot x\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= x -102.0) (- 0.0 (* y x)) (- (log1p 1.0) (* y x))))
double code(double x, double y) {
	double tmp;
	if (x <= -102.0) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = log1p(1.0) - (y * x);
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (x <= -102.0) {
		tmp = 0.0 - (y * x);
	} else {
		tmp = Math.log1p(1.0) - (y * x);
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if x <= -102.0:
		tmp = 0.0 - (y * x)
	else:
		tmp = math.log1p(1.0) - (y * x)
	return tmp
function code(x, y)
	tmp = 0.0
	if (x <= -102.0)
		tmp = Float64(0.0 - Float64(y * x));
	else
		tmp = Float64(log1p(1.0) - Float64(y * x));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[x, -102.0], N[(0.0 - N[(y * x), $MachinePrecision]), $MachinePrecision], N[(N[Log[1 + 1.0], $MachinePrecision] - N[(y * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;x \leq -102:\\
\;\;\;\;0 - y \cdot x\\
\mathbf{else}:\\
\;\;\;\;\mathsf{log1p}\left(1\right) - y \cdot x\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -102

    1. Initial program 100.0%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f64100.0%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 100.0%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot y\right)} \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{neg}\left(x \cdot y\right) \]
      2. distribute-rgt-neg-inN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\mathsf{neg}\left(y\right)\right)}\right) \]
      4. neg-sub0N/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(0 - \color{blue}{y}\right)\right) \]
      5. --lowering--.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{\_.f64}\left(0, \color{blue}{y}\right)\right) \]
    7. Simplified 100.0%

      \[\leadsto \color{blue}{x \cdot \left(0 - y\right)} \]
    8. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(\mathsf{neg}\left(y\right)\right)\right) \]
      2. neg-lowering-neg.f64100.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{neg.f64}\left(y\right)\right) \]
    9. Applied egg-rr 100.0%

      \[\leadsto x \cdot \color{blue}{\left(-y\right)} \]

    if -102 < x

    1. Initial program 98.2%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-defineN/A

        \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f6498.2%

        \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 98.2%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0

      \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\color{blue}{1}\right), \mathsf{*.f64}\left(x, y\right)\right) \]
    6. Step-by-step derivation
      1. Simplified 98.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{1}\right) - x \cdot y \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 99.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -102:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(1\right) - y \cdot x\\ \end{array} \]
  5. Add Preprocessing
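
Note that log1p(1) in the else branch is simply the constant log 2 ≈ 0.6931; Herbie emits it in this form, and on typical math libraries both spellings return the same correctly rounded double (a quick check, assuming CPython's libm-backed math module):

import math

print(math.log1p(1.0))  # 0.6931471805599453
print(math.log(2.0))    # the same correctly rounded value of log 2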

Alternative 8: 82.3% accurate, 1.9× speedup

    \[\begin{array}{l} \mathbf{if}\;x \leq -4.2 \cdot 10^{-6}:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2 + x \cdot 0.5\\ \end{array} \]
    (FPCore (x y)
     :precision binary64
     (if (<= x -4.2e-6) (- 0.0 (* y x)) (+ (log 2.0) (* x 0.5))))
    double code(double x, double y) {
    	double tmp;
    	if (x <= -4.2e-6) {
    		tmp = 0.0 - (y * x);
    	} else {
    		tmp = log(2.0) + (x * 0.5);
    	}
    	return tmp;
    }
    
    real(8) function code(x, y)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        real(8) :: tmp
        if (x <= (-4.2d-6)) then
            tmp = 0.0d0 - (y * x)
        else
            tmp = log(2.0d0) + (x * 0.5d0)
        end if
        code = tmp
    end function
    
    public static double code(double x, double y) {
    	double tmp;
    	if (x <= -4.2e-6) {
    		tmp = 0.0 - (y * x);
    	} else {
    		tmp = Math.log(2.0) + (x * 0.5);
    	}
    	return tmp;
    }
    
    def code(x, y):
    	tmp = 0
    	if x <= -4.2e-6:
    		tmp = 0.0 - (y * x)
    	else:
    		tmp = math.log(2.0) + (x * 0.5)
    	return tmp
    
    function code(x, y)
    	tmp = 0.0
    	if (x <= -4.2e-6)
    		tmp = Float64(0.0 - Float64(y * x));
    	else
    		tmp = Float64(log(2.0) + Float64(x * 0.5));
    	end
    	return tmp
    end
    
    function tmp_2 = code(x, y)
    	tmp = 0.0;
    	if (x <= -4.2e-6)
    		tmp = 0.0 - (y * x);
    	else
    		tmp = log(2.0) + (x * 0.5);
    	end
    	tmp_2 = tmp;
    end
    
    code[x_, y_] := If[LessEqual[x, -4.2e-6], N[(0.0 - N[(y * x), $MachinePrecision]), $MachinePrecision], N[(N[Log[2.0], $MachinePrecision] + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]]
    
    \begin{array}{l}
    \mathbf{if}\;x \leq -4.2 \cdot 10^{-6}:\\
    \;\;\;\;0 - y \cdot x\\
    \mathbf{else}:\\
    \;\;\;\;\log 2 + x \cdot 0.5\\
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if x < -4.1999999999999996e-6

      1. Initial program 100.0%

        \[\log \left(1 + e^{x}\right) - x \cdot y \]
      2. Step-by-step derivation
        1. --lowering--.f64N/A

          \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
        2. log1p-defineN/A

          \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
        3. log1p-lowering-log1p.f64N/A

          \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
        4. exp-lowering-exp.f64N/A

          \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
        5. *-lowering-*.f64100.0%

          \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
      3. Simplified 100.0%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      4. Add Preprocessing
      5. Taylor expanded in x around inf

        \[\leadsto \color{blue}{-1 \cdot \left(x \cdot y\right)} \]
      6. Step-by-step derivation
        1. mul-1-negN/A

          \[\leadsto \mathsf{neg}\left(x \cdot y\right) \]
        2. distribute-rgt-neg-inN/A

          \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
        3. *-lowering-*.f64N/A

          \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\mathsf{neg}\left(y\right)\right)}\right) \]
        4. neg-sub0N/A

          \[\leadsto \mathsf{*.f64}\left(x, \left(0 - \color{blue}{y}\right)\right) \]
        5. --lowering--.f64100.0%

          \[\leadsto \mathsf{*.f64}\left(x, \mathsf{\_.f64}\left(0, \color{blue}{y}\right)\right) \]
      7. Simplified 100.0%

        \[\leadsto \color{blue}{x \cdot \left(0 - y\right)} \]
      8. Step-by-step derivation
        1. sub0-negN/A

          \[\leadsto \mathsf{*.f64}\left(x, \left(\mathsf{neg}\left(y\right)\right)\right) \]
        2. neg-lowering-neg.f64100.0%

          \[\leadsto \mathsf{*.f64}\left(x, \mathsf{neg.f64}\left(y\right)\right) \]
      9. Applied egg-rr 100.0%

        \[\leadsto x \cdot \color{blue}{\left(-y\right)} \]

      if -4.1999999999999996e-6 < x

      1. Initial program 98.2%

        \[\log \left(1 + e^{x}\right) - x \cdot y \]
      2. Step-by-step derivation
        1. --lowering--.f64N/A

          \[\leadsto \mathsf{\_.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
        2. log1p-defineN/A

          \[\leadsto \mathsf{\_.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
        3. log1p-lowering-log1p.f64N/A

          \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
        4. exp-lowering-exp.f64N/A

          \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
        5. *-lowering-*.f6498.2%

          \[\leadsto \mathsf{\_.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
      3. Simplified 98.2%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      4. Add Preprocessing
      5. Taylor expanded in x around 0

        \[\leadsto \color{blue}{\log 2 + x \cdot \left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)} \]
      6. Step-by-step derivation
        1. +-lowering-+.f64N/A

          \[\leadsto \mathsf{+.f64}\left(\log 2, \color{blue}{\left(x \cdot \left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)\right)}\right) \]
        2. log-lowering-log.f64N/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \left(\color{blue}{x} \cdot \left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)\right)\right) \]
        3. *-lowering-*.f64N/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \color{blue}{\left(\left(\frac{1}{2} + \frac{1}{8} \cdot x\right) - y\right)}\right)\right) \]
        4. associate--l+N/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \left(\frac{1}{2} + \color{blue}{\left(\frac{1}{8} \cdot x - y\right)}\right)\right)\right) \]
        5. +-lowering-+.f64N/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left(\frac{1}{8} \cdot x - y\right)}\right)\right)\right) \]
        6. --lowering--.f64N/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\left(\frac{1}{8} \cdot x\right), \color{blue}{y}\right)\right)\right)\right) \]
        7. *-commutativeN/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\left(x \cdot \frac{1}{8}\right), y\right)\right)\right)\right) \]
        8. *-lowering-*.f6499.9%

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{\_.f64}\left(\mathsf{*.f64}\left(x, \frac{1}{8}\right), y\right)\right)\right)\right) \]
      7. Simplified 99.9%

        \[\leadsto \color{blue}{\log 2 + x \cdot \left(0.5 + \left(x \cdot 0.125 - y\right)\right)} \]
      8. Taylor expanded in y around 0

        \[\leadsto \color{blue}{\log 2 + x \cdot \left(\frac{1}{2} + \frac{1}{8} \cdot x\right)} \]
      9. Step-by-step derivation
        1. +-lowering-+.f64N/A

          \[\leadsto \mathsf{+.f64}\left(\log 2, \color{blue}{\left(x \cdot \left(\frac{1}{2} + \frac{1}{8} \cdot x\right)\right)}\right) \]
        2. log-lowering-log.f64N/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \left(\color{blue}{x} \cdot \left(\frac{1}{2} + \frac{1}{8} \cdot x\right)\right)\right) \]
        3. *-lowering-*.f64 N/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \color{blue}{\left(\frac{1}{2} + \frac{1}{8} \cdot x\right)}\right)\right) \]
        4. +-lowering-+.f64 N/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \color{blue}{\left(\frac{1}{8} \cdot x\right)}\right)\right)\right) \]
        5. *-commutative N/A

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \left(x \cdot \color{blue}{\frac{1}{8}}\right)\right)\right)\right) \]
        6. *-lowering-*.f64 74.2%

          \[\leadsto \mathsf{+.f64}\left(\mathsf{log.f64}\left(2\right), \mathsf{*.f64}\left(x, \mathsf{+.f64}\left(\frac{1}{2}, \mathsf{*.f64}\left(x, \color{blue}{\frac{1}{8}}\right)\right)\right)\right) \]
      10. Simplified 74.2%

        \[\leadsto \color{blue}{\log 2 + x \cdot \left(0.5 + x \cdot 0.125\right)} \]
      11. Taylor expanded in x around 0

        \[\leadsto \color{blue}{\log 2 + \frac{1}{2} \cdot x} \]
      12. Step-by-step derivation
        1. +-commutative N/A

          \[\leadsto \frac{1}{2} \cdot x + \color{blue}{\log 2} \]
        2. fma-define N/A

          \[\leadsto \mathsf{fma}\left(\frac{1}{2}, \color{blue}{x}, \log 2\right) \]
        3. *-rgt-identity N/A

          \[\leadsto \mathsf{fma}\left(\frac{1}{2}, x \cdot \color{blue}{1}, \log 2\right) \]
        4. rgt-mult-inverse N/A

          \[\leadsto \mathsf{fma}\left(\frac{1}{2}, x \cdot \left(x \cdot \color{blue}{\frac{1}{x}}\right), \log 2\right) \]
        5. associate-*l* N/A

          \[\leadsto \mathsf{fma}\left(\frac{1}{2}, \left(x \cdot x\right) \cdot \color{blue}{\frac{1}{x}}, \log 2\right) \]
        6. unpow2 N/A

          \[\leadsto \mathsf{fma}\left(\frac{1}{2}, {x}^{2} \cdot \frac{\color{blue}{1}}{x}, \log 2\right) \]
        7. fma-define N/A

          \[\leadsto \frac{1}{2} \cdot \left({x}^{2} \cdot \frac{1}{x}\right) + \color{blue}{\log 2} \]
        8. *-commutative N/A

          \[\leadsto \left({x}^{2} \cdot \frac{1}{x}\right) \cdot \frac{1}{2} + \log \color{blue}{2} \]
        9. associate-*r* N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{x} \cdot \frac{1}{2}\right) + \log \color{blue}{2} \]
        10. *-commutative N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + \log 2 \]
        11. remove-double-neg N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + \left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\log 2\right)\right)\right)\right) \]
        12. mul-1-neg N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + \left(\mathsf{neg}\left(-1 \cdot \log 2\right)\right) \]
        13. mul-1-neg N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + -1 \cdot \color{blue}{\left(-1 \cdot \log 2\right)} \]
        14. associate-*r* N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + \left(-1 \cdot -1\right) \cdot \color{blue}{\log 2} \]
        15. metadata-eval N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + 1 \cdot \log \color{blue}{2} \]
        16. *-commutative N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + \log 2 \cdot \color{blue}{1} \]
        17. *-inverses N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + \log 2 \cdot \frac{x}{\color{blue}{x}} \]
        18. associate-/l* N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + \frac{\log 2 \cdot x}{\color{blue}{x}} \]
        19. associate-*l/ N/A

          \[\leadsto {x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right) + \frac{\log 2}{x} \cdot \color{blue}{x} \]
        20. +-lowering-+.f64 N/A

          \[\leadsto \mathsf{+.f64}\left(\left({x}^{2} \cdot \left(\frac{1}{2} \cdot \frac{1}{x}\right)\right), \color{blue}{\left(\frac{\log 2}{x} \cdot x\right)}\right) \]
      13. Simplified 74.0%

        \[\leadsto \color{blue}{x \cdot 0.5 + \log 2} \]
    3. Recombined 2 regimes into one program.
    4. Final simplification 83.3% (spot-checked in the sketch after this derivation)

      \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -4.2 \cdot 10^{-6}:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2 + x \cdot 0.5\\ \end{array} \]
    5. Add Preprocessing
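
    The recombined program keeps only the constant and linear terms of the near-zero series, which is why the final accuracy lands at 83.3% even though each regime scored well on its own samples. As a quick numeric spot-check (an editorial sketch, not Herbie output; mpmath is assumed as a high-precision reference evaluator, and the sample points are arbitrary):

    import math
    import mpmath

    def piecewise_approx(x, y):
        # The recombined program from the derivation above.
        if x <= -4.2e-6:
            return 0.0 - y * x
        return math.log(2.0) + x * 0.5

    def reference(x, y):
        # High-precision evaluation of log(1 + e^x) - x*y.
        mpmath.mp.prec = 200
        xm, ym = mpmath.mpf(x), mpmath.mpf(y)
        return float(mpmath.log(1 + mpmath.exp(xm)) - xm * ym)

    for x, y in [(-1e-7, 0.5), (1e-7, 2.0), (0.5, 1.0), (3.0, 0.25)]:
        print(f"x={x:+.1e} y={y}: approx={piecewise_approx(x, y):.6g} "
              f"exact={reference(x, y):.6g}")

    The two agree closely for tiny |x| but drift apart as soon as the dropped quadratic term matters, consistent with the sampled accuracy above.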

    Alternative 9: 82.0% accurate, 2.0× speedup

    \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -4.4 \cdot 10^{-11}:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2\\ \end{array} \end{array} \]
    (FPCore (x y)
     :precision binary64
     (if (<= x -4.4e-11) (- 0.0 (* y x)) (log 2.0)))
    double code(double x, double y) {
    	double tmp;
    	if (x <= -4.4e-11) {
    		tmp = 0.0 - (y * x);
    	} else {
    		tmp = log(2.0);
    	}
    	return tmp;
    }
    
    real(8) function code(x, y)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        real(8) :: tmp
        if (x <= (-4.4d-11)) then
            tmp = 0.0d0 - (y * x)
        else
            tmp = log(2.0d0)
        end if
        code = tmp
    end function
    
    public static double code(double x, double y) {
    	double tmp;
    	if (x <= -4.4e-11) {
    		tmp = 0.0 - (y * x);
    	} else {
    		tmp = Math.log(2.0);
    	}
    	return tmp;
    }
    
    def code(x, y):
    	tmp = 0
    	if x <= -4.4e-11:
    		tmp = 0.0 - (y * x)
    	else:
    		tmp = math.log(2.0)
    	return tmp
    
    function code(x, y)
    	tmp = 0.0
    	if (x <= -4.4e-11)
    		tmp = Float64(0.0 - Float64(y * x));
    	else
    		tmp = log(2.0);
    	end
    	return tmp
    end
    
    function tmp_2 = code(x, y)
    	tmp = 0.0;
    	if (x <= -4.4e-11)
    		tmp = 0.0 - (y * x);
    	else
    		tmp = log(2.0);
    	end
    	tmp_2 = tmp;
    end
    
    code[x_, y_] := If[LessEqual[x, -4.4e-11], N[(0.0 - N[(y * x), $MachinePrecision]), $MachinePrecision], N[Log[2.0], $MachinePrecision]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    \mathbf{if}\;x \leq -4.4 \cdot 10^{-11}:\\
    \;\;\;\;0 - y \cdot x\\
    
    \mathbf{else}:\\
    \;\;\;\;\log 2\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if x < -4.4000000000000003e-11

      1. Initial program 100.0%

        \[\log \left(1 + e^{x}\right) - x \cdot y \]
      2. Step-by-step derivation
        1. --lowering--.f64 N/A

          \[\leadsto \mathsf{-.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
        2. log1p-define N/A

          \[\leadsto \mathsf{-.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
        3. log1p-lowering-log1p.f64 N/A

          \[\leadsto \mathsf{-.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
        4. exp-lowering-exp.f64 N/A

          \[\leadsto \mathsf{-.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
        5. *-lowering-*.f64 100.0%

          \[\leadsto \mathsf{-.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
      3. Simplified 100.0%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      4. Add Preprocessing
      5. Taylor expanded in x around -inf

        \[\leadsto \color{blue}{-1 \cdot \left(x \cdot y\right)} \]
      6. Step-by-step derivation
        1. mul-1-neg N/A

          \[\leadsto \mathsf{neg}\left(x \cdot y\right) \]
        2. distribute-rgt-neg-in N/A

          \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
        3. *-lowering-*.f64 N/A

          \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\mathsf{neg}\left(y\right)\right)}\right) \]
        4. neg-sub0 N/A

          \[\leadsto \mathsf{*.f64}\left(x, \left(0 - \color{blue}{y}\right)\right) \]
        5. --lowering--.f64 100.0%

          \[\leadsto \mathsf{*.f64}\left(x, \mathsf{-.f64}\left(0, \color{blue}{y}\right)\right) \]
      7. Simplified 100.0%

        \[\leadsto \color{blue}{x \cdot \left(0 - y\right)} \]
      8. Step-by-step derivation
        1. sub0-neg N/A

          \[\leadsto \mathsf{*.f64}\left(x, \left(\mathsf{neg}\left(y\right)\right)\right) \]
        2. neg-lowering-neg.f64 100.0%

          \[\leadsto \mathsf{*.f64}\left(x, \mathsf{neg.f64}\left(y\right)\right) \]
      9. Applied egg-rr 100.0%

        \[\leadsto x \cdot \color{blue}{\left(-y\right)} \]

      if -4.4000000000000003e-11 < x

      1. Initial program 98.2%

        \[\log \left(1 + e^{x}\right) - x \cdot y \]
      2. Step-by-step derivation
        1. --lowering--.f64 N/A

          \[\leadsto \mathsf{-.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
        2. log1p-define N/A

          \[\leadsto \mathsf{-.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
        3. log1p-lowering-log1p.f64 N/A

          \[\leadsto \mathsf{-.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
        4. exp-lowering-exp.f64 N/A

          \[\leadsto \mathsf{-.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
        5. *-lowering-*.f64 98.2%

          \[\leadsto \mathsf{-.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
      3. Simplified 98.2%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
      4. Add Preprocessing
      5. Taylor expanded in x around 0

        \[\leadsto \color{blue}{\log 2} \]
      6. Step-by-step derivation
        1. log-lowering-log.f64 73.2%

          \[\leadsto \mathsf{log.f64}\left(2\right) \]
      7. Simplified 73.2%

        \[\leadsto \color{blue}{\log 2} \]
    3. Recombined 2 regimes into one program.
    4. Final simplification 82.9% (the truncated series are summarized after this derivation)

      \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -4.4 \cdot 10^{-11}:\\ \;\;\;\;0 - y \cdot x\\ \mathbf{else}:\\ \;\;\;\;\log 2\\ \end{array} \]
    5. Add Preprocessing
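
    Both branches of Alternative 9 are order-zero truncations of the softplus term; the series being truncated (an editorial summary, not Herbie output) are

    \[\log\left(1 + e^{x}\right) = e^{x} + O\left(e^{2x}\right) \;\; (x \to -\infty), \qquad \log\left(1 + e^{x}\right) = \log 2 + \frac{x}{2} + \frac{x^{2}}{8} - \frac{x^{4}}{192} + O\left(x^{6}\right) \;\; (x \to 0),\]

    so far to the left the whole expression is dominated by -x·y, while near zero dropping every x-term leaves just the constant log 2, which is what costs the right-hand regime its accuracy (73.2%).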

    Alternative 10: 50.3% accurate, 41.4× speedup

    \[\begin{array}{l} \\ 0 - y \cdot x \end{array} \]
    (FPCore (x y) :precision binary64 (- 0.0 (* y x)))
    double code(double x, double y) {
    	return 0.0 - (y * x);
    }
    
    real(8) function code(x, y)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        code = 0.0d0 - (y * x)
    end function
    
    public static double code(double x, double y) {
    	return 0.0 - (y * x);
    }
    
    def code(x, y):
    	return 0.0 - (y * x)
    
    function code(x, y)
    	return Float64(0.0 - Float64(y * x))
    end
    
    function tmp = code(x, y)
    	tmp = 0.0 - (y * x);
    end
    
    code[x_, y_] := N[(0.0 - N[(y * x), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    0 - y \cdot x
    \end{array}
    
    Derivation
    1. Initial program 98.8%

      \[\log \left(1 + e^{x}\right) - x \cdot y \]
    2. Step-by-step derivation
      1. --lowering--.f64 N/A

        \[\leadsto \mathsf{-.f64}\left(\log \left(1 + e^{x}\right), \color{blue}{\left(x \cdot y\right)}\right) \]
      2. log1p-define N/A

        \[\leadsto \mathsf{-.f64}\left(\left(\mathsf{log1p}\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      3. log1p-lowering-log1p.f64 N/A

        \[\leadsto \mathsf{-.f64}\left(\mathsf{log1p.f64}\left(\left(e^{x}\right)\right), \left(\color{blue}{x} \cdot y\right)\right) \]
      4. exp-lowering-exp.f64 N/A

        \[\leadsto \mathsf{-.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \left(x \cdot y\right)\right) \]
      5. *-lowering-*.f64 98.8%

        \[\leadsto \mathsf{-.f64}\left(\mathsf{log1p.f64}\left(\mathsf{exp.f64}\left(x\right)\right), \mathsf{*.f64}\left(x, \color{blue}{y}\right)\right) \]
    3. Simplified 98.8%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{x}\right) - x \cdot y} \]
    4. Add Preprocessing
    5. Taylor expanded in x around -inf (see the sketch after this derivation)

      \[\leadsto \color{blue}{-1 \cdot \left(x \cdot y\right)} \]
    6. Step-by-step derivation
      1. mul-1-neg N/A

        \[\leadsto \mathsf{neg}\left(x \cdot y\right) \]
      2. distribute-rgt-neg-in N/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(y\right)\right)} \]
      3. *-lowering-*.f64 N/A

        \[\leadsto \mathsf{*.f64}\left(x, \color{blue}{\left(\mathsf{neg}\left(y\right)\right)}\right) \]
      4. neg-sub0 N/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(0 - \color{blue}{y}\right)\right) \]
      5. --lowering--.f64 53.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{-.f64}\left(0, \color{blue}{y}\right)\right) \]
    7. Simplified 53.0%

      \[\leadsto \color{blue}{x \cdot \left(0 - y\right)} \]
    8. Step-by-step derivation
      1. sub0-neg N/A

        \[\leadsto \mathsf{*.f64}\left(x, \left(\mathsf{neg}\left(y\right)\right)\right) \]
      2. neg-lowering-neg.f64 53.0%

        \[\leadsto \mathsf{*.f64}\left(x, \mathsf{neg.f64}\left(y\right)\right) \]
    9. Applied egg-rr 53.0%

      \[\leadsto x \cdot \color{blue}{\left(-y\right)} \]
    10. Final simplification 53.0%

      \[\leadsto 0 - y \cdot x \]
    11. Add Preprocessing
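
    Alternative 10 applies the far-left truncation everywhere, trading most of the accuracy (50.3%) for the 41.4× speedup: the discarded log1p(exp(x)) term decays like e^x for very negative x but dominates for x ≥ 0. A small sketch of where the shortcut holds (editorial, not Herbie output; the sample points are arbitrary):

    import math

    def alt10(x, y):
        # Alternative 10: drop the softplus term entirely.
        return 0.0 - y * x

    def original(x, y):
        return math.log1p(math.exp(x)) - x * y

    y = 0.5
    for x in (-40.0, -5.0, 0.0, 5.0):
        print(f"x={x:+5.0f}: alt10={alt10(x, y):+.6g} original={original(x, y):+.6g}")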

    Developer Target 1: 99.9% accurate, 1.0× speedup

    \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 0:\\ \;\;\;\;\log \left(1 + e^{x}\right) - x \cdot y\\ \mathbf{else}:\\ \;\;\;\;\log \left(1 + e^{-x}\right) - \left(-x\right) \cdot \left(1 - y\right)\\ \end{array} \end{array} \]
    (FPCore (x y)
     :precision binary64
     (if (<= x 0.0)
       (- (log (+ 1.0 (exp x))) (* x y))
       (- (log (+ 1.0 (exp (- x)))) (* (- x) (- 1.0 y)))))
    double code(double x, double y) {
    	double tmp;
    	if (x <= 0.0) {
    		tmp = log((1.0 + exp(x))) - (x * y);
    	} else {
    		tmp = log((1.0 + exp(-x))) - (-x * (1.0 - y));
    	}
    	return tmp;
    }
    
    real(8) function code(x, y)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        real(8) :: tmp
        if (x <= 0.0d0) then
            tmp = log((1.0d0 + exp(x))) - (x * y)
        else
            tmp = log((1.0d0 + exp(-x))) - (-x * (1.0d0 - y))
        end if
        code = tmp
    end function
    
    public static double code(double x, double y) {
    	double tmp;
    	if (x <= 0.0) {
    		tmp = Math.log((1.0 + Math.exp(x))) - (x * y);
    	} else {
    		tmp = Math.log((1.0 + Math.exp(-x))) - (-x * (1.0 - y));
    	}
    	return tmp;
    }
    
    def code(x, y):
    	tmp = 0
    	if x <= 0.0:
    		tmp = math.log((1.0 + math.exp(x))) - (x * y)
    	else:
    		tmp = math.log((1.0 + math.exp(-x))) - (-x * (1.0 - y))
    	return tmp
    
    function code(x, y)
    	tmp = 0.0
    	if (x <= 0.0)
    		tmp = Float64(log(Float64(1.0 + exp(x))) - Float64(x * y));
    	else
    		tmp = Float64(log(Float64(1.0 + exp(Float64(-x)))) - Float64(Float64(-x) * Float64(1.0 - y)));
    	end
    	return tmp
    end
    
    function tmp_2 = code(x, y)
    	tmp = 0.0;
    	if (x <= 0.0)
    		tmp = log((1.0 + exp(x))) - (x * y);
    	else
    		tmp = log((1.0 + exp(-x))) - (-x * (1.0 - y));
    	end
    	tmp_2 = tmp;
    end
    
    code[x_, y_] := If[LessEqual[x, 0.0], N[(N[Log[N[(1.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(x * y), $MachinePrecision]), $MachinePrecision], N[(N[Log[N[(1.0 + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[((-x) * N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    \mathbf{if}\;x \leq 0:\\
    \;\;\;\;\log \left(1 + e^{x}\right) - x \cdot y\\
    
    \mathbf{else}:\\
    \;\;\;\;\log \left(1 + e^{-x}\right) - \left(-x\right) \cdot \left(1 - y\right)\\
    
    
    \end{array}
    \end{array}
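
    The positive branch of this target follows from factoring e^x out of the softplus term, which keeps the argument of exp nonpositive on both branches and so avoids overflow for large x; the algebra (filled in here editorially) is

    \[\log\left(1 + e^{x}\right) - x \cdot y = x + \log\left(1 + e^{-x}\right) - x \cdot y = \log\left(1 + e^{-x}\right) - \left(-x\right) \cdot \left(1 - y\right).\]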
    

    Reproduce

    To reproduce this result, start a Herbie shell pinned to the same random seed and paste in the FPCore below:

    herbie shell --seed 2024163
    (FPCore (x y)
      :name "Logistic regression 2"
      :precision binary64
    
      :alt
      (! :herbie-platform default (if (<= x 0) (- (log (+ 1 (exp x))) (* x y)) (- (log (+ 1 (exp (- x)))) (* (- x) (- 1 y)))))
    
      (- (log (+ 1.0 (exp x))) (* x y)))
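
    The :alt clause in this FPCore supplies the developer target shown above so that Herbie scores it alongside its own alternatives, and the --seed flag pins the input sampling so the accuracy figures can be regenerated.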