symmetry log of sum of exp

Percentage Accurate: 53.5% → 98.3%
Time: 10.8s
Alternatives: 9
Speedup: 2.5×

Specification

\[\begin{array}{l} \\ \log \left(e^{a} + e^{b}\right) \end{array} \]
(FPCore (a b) :precision binary64 (log (+ (exp a) (exp b))))
double code(double a, double b) {
	return log((exp(a) + exp(b)));
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(a, b)
use fmin_fmax_functions
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = log((exp(a) + exp(b)))
end function
public static double code(double a, double b) {
	return Math.log((Math.exp(a) + Math.exp(b)));
}
def code(a, b):
	return math.log((math.exp(a) + math.exp(b)))
function code(a, b)
	return log(Float64(exp(a) + exp(b)))
end
function tmp = code(a, b)
	tmp = log((exp(a) + exp(b)));
end
code[a_, b_] := N[Log[N[(N[Exp[a], $MachinePrecision] + N[Exp[b], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(e^{a} + e^{b}\right)
\end{array}
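
Why the original program is only 53.5% accurate: in binary64, exp overflows for arguments above roughly 709.8 and underflows to zero below roughly -745, so the sum inside the log becomes infinite or zero even when the true value of log(e^a + e^b) is perfectly representable. The short Python sketch below illustrates that failure mode; it is not part of the Herbie report, and the function name is invented.

import math

def naive_logsumexp2(a, b):
    # Direct transcription of the specification: log(e^a + e^b).
    return math.log(math.exp(a) + math.exp(b))

print(naive_logsumexp2(1.0, 2.0))      # ~2.3133, fine for moderate inputs
# naive_logsumexp2(800.0, 800.0)       # OverflowError; the true value is 800 + log(2)
# naive_logsumexp2(-800.0, -750.0)     # both exp calls underflow to 0, so log(0) raises ValueError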

Sampling outcomes were computed in binary64 precision.

Local Percentage Accuracy vs. Input Value

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (named in the plot title); the vertical axis shows accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion. The line is an average, while the dots represent individual samples.

Accuracy vs. Speed

Herbie found 9 alternatives:

Alternative   Accuracy   Speedup
1             98.3%      1.0×
2             98.3%      1.4×
3             98.0%      1.4×
4             97.7%      1.5×
5             97.0%      2.5×
6             49.1%      2.8×
7             48.8%      2.9×
8             48.3%      3.0×
9             2.6%       50.7×
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 53.5% accurate, 1.0× speedup

\[\begin{array}{l} \\ \log \left(e^{a} + e^{b}\right) \end{array} \]
(FPCore (a b) :precision binary64 (log (+ (exp a) (exp b))))
double code(double a, double b) {
	return log((exp(a) + exp(b)));
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(a, b)
use fmin_fmax_functions
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = log((exp(a) + exp(b)))
end function
public static double code(double a, double b) {
	return Math.log((Math.exp(a) + Math.exp(b)));
}
def code(a, b):
	return math.log((math.exp(a) + math.exp(b)))
function code(a, b)
	return log(Float64(exp(a) + exp(b)))
end
function tmp = code(a, b)
	tmp = log((exp(a) + exp(b)));
end
code[a_, b_] := N[Log[N[(N[Exp[a], $MachinePrecision] + N[Exp[b], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(e^{a} + e^{b}\right)
\end{array}

Alternative 1: 98.3% accurate, 1.0× speedup

\[\begin{array}{l} [a, b] = \mathsf{sort}([a, b])\\ \\ \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right) \end{array} \]
NOTE: a and b should be sorted in increasing order before calling this function.
(FPCore (a b) :precision binary64 (+ (/ b (+ (exp a) 1.0)) (log1p (exp a))))
assert(a < b);
double code(double a, double b) {
	return (b / (exp(a) + 1.0)) + log1p(exp(a));
}
assert a < b;
public static double code(double a, double b) {
	return (b / (Math.exp(a) + 1.0)) + Math.log1p(Math.exp(a));
}
[a, b] = sort([a, b])
def code(a, b):
	return (b / (math.exp(a) + 1.0)) + math.log1p(math.exp(a))
a, b = sort([a, b])
function code(a, b)
	return Float64(Float64(b / Float64(exp(a) + 1.0)) + log1p(exp(a)))
end
NOTE: a and b should be sorted in increasing order before calling this function.
code[a_, b_] := N[(N[(b / N[(N[Exp[a], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] + N[Log[1 + N[Exp[a], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
[a, b] = \mathsf{sort}([a, b])\\
\\
\frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right)
\end{array}
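
The sketch below (an illustration, not Herbie output) applies the required preprocessing, sorting the arguments so that a <= b, before evaluating the Alternative 1 expression; the function name is invented.

import math

def alt1_logsumexp2(a, b):
    a, b = sorted((a, b))  # per the NOTE above: a and b in increasing order
    return b / (math.exp(a) + 1.0) + math.log1p(math.exp(a))

print(alt1_logsumexp2(-750.0, -800.0))  # -750.0, where the original program would take log(0)
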
Derivation
  1. Initial program 54.8%

    \[\log \left(e^{a} + e^{b}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in b around 0 (the expansion is written out after this derivation)

    \[\leadsto \color{blue}{\log \left(1 + e^{a}\right) + \frac{b}{1 + e^{a}}} \]
  4. Step-by-step derivation
    1. *-rgt-identity N/A

      \[\leadsto \log \left(1 + e^{a}\right) + \frac{\color{blue}{b \cdot 1}}{1 + e^{a}} \]
    2. associate-*r/ N/A

      \[\leadsto \log \left(1 + e^{a}\right) + \color{blue}{b \cdot \frac{1}{1 + e^{a}}} \]
    3. +-commutative N/A

      \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
    4. lower-+.f64 N/A

      \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
    5. associate-*r/ N/A

      \[\leadsto \color{blue}{\frac{b \cdot 1}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
    6. *-rgt-identity N/A

      \[\leadsto \frac{\color{blue}{b}}{1 + e^{a}} + \log \left(1 + e^{a}\right) \]
    7. lower-/.f64 N/A

      \[\leadsto \color{blue}{\frac{b}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
    8. +-commutative N/A

      \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
    9. lower-+.f64 N/A

      \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
    10. lower-exp.f64 N/A

      \[\leadsto \frac{b}{\color{blue}{e^{a}} + 1} + \log \left(1 + e^{a}\right) \]
    11. lower-log1p.f64 N/A

      \[\leadsto \frac{b}{e^{a} + 1} + \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
    12. lower-exp.f64 75.2

      \[\leadsto \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
  5. Applied rewrites 75.2%

    \[\leadsto \color{blue}{\frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right)} \]
  6. Add Preprocessing
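
The Taylor expansion in step 3 above is the standard first-order expansion of f(b) = log(e^a + e^b) around b = 0, written out here as a quick check (not part of the Herbie report):

\[ f(0) = \log \left(1 + e^{a}\right), \qquad f'(b) = \frac{e^{b}}{e^{a} + e^{b}}, \qquad f'(0) = \frac{1}{1 + e^{a}}, \]
\[ f(b) \approx f(0) + f'(0) \cdot b = \log \left(1 + e^{a}\right) + \frac{b}{1 + e^{a}}. \]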

Alternative 2: 98.3% accurate, 1.4× speedup

\[\begin{array}{l} [a, b] = \mathsf{sort}([a, b])\\ \\ \begin{array}{l} \mathbf{if}\;a \leq -250:\\ \;\;\;\;\frac{b}{1 + e^{a}}\\ \mathbf{else}:\\ \;\;\;\;0.5 \cdot b + \mathsf{log1p}\left(e^{a}\right)\\ \end{array} \end{array} \]
NOTE: a and b should be sorted in increasing order before calling this function.
(FPCore (a b)
 :precision binary64
 (if (<= a -250.0) (/ b (+ 1.0 (exp a))) (+ (* 0.5 b) (log1p (exp a)))))
assert(a < b);
double code(double a, double b) {
	double tmp;
	if (a <= -250.0) {
		tmp = b / (1.0 + exp(a));
	} else {
		tmp = (0.5 * b) + log1p(exp(a));
	}
	return tmp;
}
assert a < b;
public static double code(double a, double b) {
	double tmp;
	if (a <= -250.0) {
		tmp = b / (1.0 + Math.exp(a));
	} else {
		tmp = (0.5 * b) + Math.log1p(Math.exp(a));
	}
	return tmp;
}
[a, b] = sort([a, b])
def code(a, b):
	tmp = 0
	if a <= -250.0:
		tmp = b / (1.0 + math.exp(a))
	else:
		tmp = (0.5 * b) + math.log1p(math.exp(a))
	return tmp
a, b = sort([a, b])
function code(a, b)
	tmp = 0.0
	if (a <= -250.0)
		tmp = Float64(b / Float64(1.0 + exp(a)));
	else
		tmp = Float64(Float64(0.5 * b) + log1p(exp(a)));
	end
	return tmp
end
NOTE: a and b should be sorted in increasing order before calling this function.
code[a_, b_] := If[LessEqual[a, -250.0], N[(b / N[(1.0 + N[Exp[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(0.5 * b), $MachinePrecision] + N[Log[1 + N[Exp[a], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
[a, b] = \mathsf{sort}([a, b])\\
\\
\begin{array}{l}
\mathbf{if}\;a \leq -250:\\
\;\;\;\;\frac{b}{1 + e^{a}}\\

\mathbf{else}:\\
\;\;\;\;0.5 \cdot b + \mathsf{log1p}\left(e^{a}\right)\\


\end{array}
\end{array}
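
A brief note on the first branch (not part of the Herbie report): once a <= -250, log1p(e^a) equals e^a to machine precision, and e^{-250} is about 2.7e-109, which is negligible next to the b / (1 + e^a) term over the sampled inputs; that is why the derivation below can drop it:

\[ \frac{b}{1 + e^{a}} + \mathsf{log1p}\left(e^{a}\right) \;\approx\; \frac{b}{1 + e^{a}}. \]
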
Derivation
  1. Split input into 2 regimes
  2. if a < -250

    1. Initial program 10.9%

      \[\log \left(e^{a} + e^{b}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in b around 0

      \[\leadsto \color{blue}{\log \left(1 + e^{a}\right) + \frac{b}{1 + e^{a}}} \]
    4. Step-by-step derivation
      1. *-rgt-identityN/A

        \[\leadsto \log \left(1 + e^{a}\right) + \frac{\color{blue}{b \cdot 1}}{1 + e^{a}} \]
      2. associate-*r/N/A

        \[\leadsto \log \left(1 + e^{a}\right) + \color{blue}{b \cdot \frac{1}{1 + e^{a}}} \]
      3. +-commutativeN/A

        \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
      4. lower-+.f64N/A

        \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
      5. associate-*r/N/A

        \[\leadsto \color{blue}{\frac{b \cdot 1}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
      6. *-rgt-identityN/A

        \[\leadsto \frac{\color{blue}{b}}{1 + e^{a}} + \log \left(1 + e^{a}\right) \]
      7. lower-/.f64N/A

        \[\leadsto \color{blue}{\frac{b}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
      8. +-commutativeN/A

        \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
      9. lower-+.f64N/A

        \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
      10. lower-exp.f64N/A

        \[\leadsto \frac{b}{\color{blue}{e^{a}} + 1} + \log \left(1 + e^{a}\right) \]
      11. lower-log1p.f64N/A

        \[\leadsto \frac{b}{e^{a} + 1} + \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
      12. lower-exp.f64100.0

        \[\leadsto \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
    5. Applied rewrites100.0%

      \[\leadsto \color{blue}{\frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right)} \]
    6. Step-by-step derivation
      1. Applied rewrites59.2%

        \[\leadsto \frac{{\left(\frac{b}{1 + e^{a}}\right)}^{2} - {\left(\mathsf{log1p}\left(e^{a}\right)\right)}^{2}}{\color{blue}{\frac{b}{1 + e^{a}} - \mathsf{log1p}\left(e^{a}\right)}} \]
      2. Taylor expanded in b around inf

        \[\leadsto \frac{b}{\color{blue}{1 + e^{a}}} \]
      3. Step-by-step derivation
        1. Applied rewrites100.0%

          \[\leadsto \frac{b}{\color{blue}{1 + e^{a}}} \]

  3. if -250 < a

        1. Initial program 69.2%

          \[\log \left(e^{a} + e^{b}\right) \]
        2. Add Preprocessing
        3. Taylor expanded in b around 0

          \[\leadsto \color{blue}{\log \left(1 + e^{a}\right) + \frac{b}{1 + e^{a}}} \]
        4. Step-by-step derivation
          1. *-rgt-identityN/A

            \[\leadsto \log \left(1 + e^{a}\right) + \frac{\color{blue}{b \cdot 1}}{1 + e^{a}} \]
          2. associate-*r/N/A

            \[\leadsto \log \left(1 + e^{a}\right) + \color{blue}{b \cdot \frac{1}{1 + e^{a}}} \]
          3. +-commutativeN/A

            \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
          4. lower-+.f64N/A

            \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
          5. associate-*r/N/A

            \[\leadsto \color{blue}{\frac{b \cdot 1}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
          6. *-rgt-identityN/A

            \[\leadsto \frac{\color{blue}{b}}{1 + e^{a}} + \log \left(1 + e^{a}\right) \]
          7. lower-/.f64N/A

            \[\leadsto \color{blue}{\frac{b}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
          8. +-commutativeN/A

            \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
          9. lower-+.f64N/A

            \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
          10. lower-exp.f64N/A

            \[\leadsto \frac{b}{\color{blue}{e^{a}} + 1} + \log \left(1 + e^{a}\right) \]
          11. lower-log1p.f64N/A

            \[\leadsto \frac{b}{e^{a} + 1} + \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
          12. lower-exp.f6467.1

            \[\leadsto \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
        5. Applied rewrites67.1%

          \[\leadsto \color{blue}{\frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right)} \]
        6. Taylor expanded in a around 0 (written out after this derivation)

          \[\leadsto \frac{1}{2} \cdot b + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
        7. Step-by-step derivation
          1. Applied rewrites67.1%

            \[\leadsto 0.5 \cdot b + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
  4. Recombined 2 regimes into one program.
  5. Add Preprocessing
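
The else branch of Alternative 2 replaces b / (e^a + 1) by its value at a = 0 while keeping the log1p term exact; this is the expansion used in step 6 of the second regime above, written out as a quick check (not part of the Herbie report):

\[ \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right) \;\approx\; \frac{b}{e^{0} + 1} + \mathsf{log1p}\left(e^{a}\right) = 0.5 \cdot b + \mathsf{log1p}\left(e^{a}\right) \quad (a \text{ near } 0). \]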

Alternative 3: 98.0% accurate, 1.4× speedup

        \[\begin{array}{l} [a, b] = \mathsf{sort}([a, b])\\ \\ \begin{array}{l} \mathbf{if}\;a \leq -36:\\ \;\;\;\;\frac{b}{1 + e^{a}}\\ \mathbf{else}:\\ \;\;\;\;\log \left(e^{a} + \left(1 + b\right)\right)\\ \end{array} \end{array} \]
        NOTE: a and b should be sorted in increasing order before calling this function.
        (FPCore (a b)
         :precision binary64
         (if (<= a -36.0) (/ b (+ 1.0 (exp a))) (log (+ (exp a) (+ 1.0 b)))))
        assert(a < b);
        double code(double a, double b) {
        	double tmp;
        	if (a <= -36.0) {
        		tmp = b / (1.0 + exp(a));
        	} else {
        		tmp = log((exp(a) + (1.0 + b)));
        	}
        	return tmp;
        }
        
        NOTE: a and b should be sorted in increasing order before calling this function.
        module fmin_fmax_functions
            implicit none
            private
            public fmax
            public fmin
        
            interface fmax
                module procedure fmax88
                module procedure fmax44
                module procedure fmax84
                module procedure fmax48
            end interface
            interface fmin
                module procedure fmin88
                module procedure fmin44
                module procedure fmin84
                module procedure fmin48
            end interface
        contains
            real(8) function fmax88(x, y) result (res)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(x, max(x, y), y /= y), x /= x)
            end function
            real(4) function fmax44(x, y) result (res)
                real(4), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(y, merge(x, max(x, y), y /= y), x /= x)
            end function
            real(8) function fmax84(x, y) result(res)
                real(8), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
            end function
            real(8) function fmax48(x, y) result(res)
                real(4), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
            end function
            real(8) function fmin88(x, y) result (res)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(x, min(x, y), y /= y), x /= x)
            end function
            real(4) function fmin44(x, y) result (res)
                real(4), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(y, merge(x, min(x, y), y /= y), x /= x)
            end function
            real(8) function fmin84(x, y) result(res)
                real(8), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
            end function
            real(8) function fmin48(x, y) result(res)
                real(4), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
            end function
        end module
        
        real(8) function code(a, b)
        use fmin_fmax_functions
            real(8), intent (in) :: a
            real(8), intent (in) :: b
            real(8) :: tmp
            if (a <= (-36.0d0)) then
                tmp = b / (1.0d0 + exp(a))
            else
                tmp = log((exp(a) + (1.0d0 + b)))
            end if
            code = tmp
        end function
        
        assert a < b;
        public static double code(double a, double b) {
        	double tmp;
        	if (a <= -36.0) {
        		tmp = b / (1.0 + Math.exp(a));
        	} else {
        		tmp = Math.log((Math.exp(a) + (1.0 + b)));
        	}
        	return tmp;
        }
        
        [a, b] = sort([a, b])
        def code(a, b):
        	tmp = 0
        	if a <= -36.0:
        		tmp = b / (1.0 + math.exp(a))
        	else:
        		tmp = math.log((math.exp(a) + (1.0 + b)))
        	return tmp
        
        a, b = sort([a, b])
        function code(a, b)
        	tmp = 0.0
        	if (a <= -36.0)
        		tmp = Float64(b / Float64(1.0 + exp(a)));
        	else
        		tmp = log(Float64(exp(a) + Float64(1.0 + b)));
        	end
        	return tmp
        end
        
        a, b = num2cell(sort([a, b])){:}
        function tmp_2 = code(a, b)
        	tmp = 0.0;
        	if (a <= -36.0)
        		tmp = b / (1.0 + exp(a));
        	else
        		tmp = log((exp(a) + (1.0 + b)));
        	end
        	tmp_2 = tmp;
        end
        
        NOTE: a and b should be sorted in increasing order before calling this function.
        code[a_, b_] := If[LessEqual[a, -36.0], N[(b / N[(1.0 + N[Exp[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(N[Exp[a], $MachinePrecision] + N[(1.0 + b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
        
        \begin{array}{l}
        [a, b] = \mathsf{sort}([a, b])\\
        \\
        \begin{array}{l}
        \mathbf{if}\;a \leq -36:\\
        \;\;\;\;\frac{b}{1 + e^{a}}\\
        
        \mathbf{else}:\\
        \;\;\;\;\log \left(e^{a} + \left(1 + b\right)\right)\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if a < -36

          1. Initial program 10.9%

            \[\log \left(e^{a} + e^{b}\right) \]
          2. Add Preprocessing
          3. Taylor expanded in b around 0

            \[\leadsto \color{blue}{\log \left(1 + e^{a}\right) + \frac{b}{1 + e^{a}}} \]
          4. Step-by-step derivation
            1. *-rgt-identityN/A

              \[\leadsto \log \left(1 + e^{a}\right) + \frac{\color{blue}{b \cdot 1}}{1 + e^{a}} \]
            2. associate-*r/N/A

              \[\leadsto \log \left(1 + e^{a}\right) + \color{blue}{b \cdot \frac{1}{1 + e^{a}}} \]
            3. +-commutativeN/A

              \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
            4. lower-+.f64N/A

              \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
            5. associate-*r/N/A

              \[\leadsto \color{blue}{\frac{b \cdot 1}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
            6. *-rgt-identityN/A

              \[\leadsto \frac{\color{blue}{b}}{1 + e^{a}} + \log \left(1 + e^{a}\right) \]
            7. lower-/.f64N/A

              \[\leadsto \color{blue}{\frac{b}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
            8. +-commutativeN/A

              \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
            9. lower-+.f64N/A

              \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
            10. lower-exp.f64N/A

              \[\leadsto \frac{b}{\color{blue}{e^{a}} + 1} + \log \left(1 + e^{a}\right) \]
            11. lower-log1p.f64N/A

              \[\leadsto \frac{b}{e^{a} + 1} + \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
            12. lower-exp.f64100.0

              \[\leadsto \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
          5. Applied rewrites100.0%

            \[\leadsto \color{blue}{\frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right)} \]
          6. Step-by-step derivation
            1. Applied rewrites59.2%

              \[\leadsto \frac{{\left(\frac{b}{1 + e^{a}}\right)}^{2} - {\left(\mathsf{log1p}\left(e^{a}\right)\right)}^{2}}{\color{blue}{\frac{b}{1 + e^{a}} - \mathsf{log1p}\left(e^{a}\right)}} \]
            2. Taylor expanded in b around inf

              \[\leadsto \frac{b}{\color{blue}{1 + e^{a}}} \]
            3. Step-by-step derivation
              1. Applied rewrites100.0%

                \[\leadsto \frac{b}{\color{blue}{1 + e^{a}}} \]

        3. if -36 < a

              1. Initial program 69.2%

                \[\log \left(e^{a} + e^{b}\right) \]
              2. Add Preprocessing
              3. Taylor expanded in b around 0

                \[\leadsto \log \left(e^{a} + \color{blue}{\left(1 + b\right)}\right) \]
              4. Step-by-step derivation
                1. lower-+.f6466.0

                  \[\leadsto \log \left(e^{a} + \color{blue}{\left(1 + b\right)}\right) \]
              5. Applied rewrites66.0%

                \[\leadsto \log \left(e^{a} + \color{blue}{\left(1 + b\right)}\right) \]
            4. Recombined 2 regimes into one program.
            5. Add Preprocessing

Alternative 4: 97.7% accurate, 1.5× speedup

            \[\begin{array}{l} [a, b] = \mathsf{sort}([a, b])\\ \\ \begin{array}{l} \mathbf{if}\;a \leq -250:\\ \;\;\;\;\frac{b}{1 + e^{a}}\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(e^{a}\right)\\ \end{array} \end{array} \]
            NOTE: a and b should be sorted in increasing order before calling this function.
            (FPCore (a b)
             :precision binary64
             (if (<= a -250.0) (/ b (+ 1.0 (exp a))) (log1p (exp a))))
            assert(a < b);
            double code(double a, double b) {
            	double tmp;
            	if (a <= -250.0) {
            		tmp = b / (1.0 + exp(a));
            	} else {
            		tmp = log1p(exp(a));
            	}
            	return tmp;
            }
            
            assert a < b;
            public static double code(double a, double b) {
            	double tmp;
            	if (a <= -250.0) {
            		tmp = b / (1.0 + Math.exp(a));
            	} else {
            		tmp = Math.log1p(Math.exp(a));
            	}
            	return tmp;
            }
            
            [a, b] = sort([a, b])
            def code(a, b):
            	tmp = 0
            	if a <= -250.0:
            		tmp = b / (1.0 + math.exp(a))
            	else:
            		tmp = math.log1p(math.exp(a))
            	return tmp
            
            a, b = sort([a, b])
            function code(a, b)
            	tmp = 0.0
            	if (a <= -250.0)
            		tmp = Float64(b / Float64(1.0 + exp(a)));
            	else
            		tmp = log1p(exp(a));
            	end
            	return tmp
            end
            
            NOTE: a and b should be sorted in increasing order before calling this function.
            code[a_, b_] := If[LessEqual[a, -250.0], N[(b / N[(1.0 + N[Exp[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[1 + N[Exp[a], $MachinePrecision]], $MachinePrecision]]
            
            \begin{array}{l}
            [a, b] = \mathsf{sort}([a, b])\\
            \\
            \begin{array}{l}
            \mathbf{if}\;a \leq -250:\\
            \;\;\;\;\frac{b}{1 + e^{a}}\\
            
            \mathbf{else}:\\
            \;\;\;\;\mathsf{log1p}\left(e^{a}\right)\\
            
            
            \end{array}
            \end{array}
            
            Derivation
            1. Split input into 2 regimes
            2. if a < -250

              1. Initial program 10.9%

                \[\log \left(e^{a} + e^{b}\right) \]
              2. Add Preprocessing
              3. Taylor expanded in b around 0

                \[\leadsto \color{blue}{\log \left(1 + e^{a}\right) + \frac{b}{1 + e^{a}}} \]
              4. Step-by-step derivation
                1. *-rgt-identityN/A

                  \[\leadsto \log \left(1 + e^{a}\right) + \frac{\color{blue}{b \cdot 1}}{1 + e^{a}} \]
                2. associate-*r/N/A

                  \[\leadsto \log \left(1 + e^{a}\right) + \color{blue}{b \cdot \frac{1}{1 + e^{a}}} \]
                3. +-commutativeN/A

                  \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
                4. lower-+.f64N/A

                  \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
                5. associate-*r/N/A

                  \[\leadsto \color{blue}{\frac{b \cdot 1}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
                6. *-rgt-identityN/A

                  \[\leadsto \frac{\color{blue}{b}}{1 + e^{a}} + \log \left(1 + e^{a}\right) \]
                7. lower-/.f64N/A

                  \[\leadsto \color{blue}{\frac{b}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
                8. +-commutativeN/A

                  \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
                9. lower-+.f64N/A

                  \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
                10. lower-exp.f64N/A

                  \[\leadsto \frac{b}{\color{blue}{e^{a}} + 1} + \log \left(1 + e^{a}\right) \]
                11. lower-log1p.f64N/A

                  \[\leadsto \frac{b}{e^{a} + 1} + \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                12. lower-exp.f64100.0

                  \[\leadsto \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
              5. Applied rewrites100.0%

                \[\leadsto \color{blue}{\frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right)} \]
              6. Step-by-step derivation
                1. Applied rewrites59.2%

                  \[\leadsto \frac{{\left(\frac{b}{1 + e^{a}}\right)}^{2} - {\left(\mathsf{log1p}\left(e^{a}\right)\right)}^{2}}{\color{blue}{\frac{b}{1 + e^{a}} - \mathsf{log1p}\left(e^{a}\right)}} \]
                2. Taylor expanded in b around inf

                  \[\leadsto \frac{b}{\color{blue}{1 + e^{a}}} \]
                3. Step-by-step derivation
                  1. Applied rewrites100.0%

                    \[\leadsto \frac{b}{\color{blue}{1 + e^{a}}} \]

              3. if -250 < a

                  1. Initial program 69.2%

                    \[\log \left(e^{a} + e^{b}\right) \]
                  2. Add Preprocessing
                  3. Taylor expanded in b around 0

                    \[\leadsto \color{blue}{\log \left(1 + e^{a}\right)} \]
                  4. Step-by-step derivation
                    1. lower-log1p.f64N/A

                      \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                    2. lower-exp.f6467.0

                      \[\leadsto \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
                  5. Applied rewrites67.0%

                    \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                4. Recombined 2 regimes into one program.
                5. Add Preprocessing

Alternative 5: 97.0% accurate, 2.5× speedup

                \[\begin{array}{l} [a, b] = \mathsf{sort}([a, b])\\ \\ \begin{array}{l} \mathbf{if}\;a \leq -29:\\ \;\;\;\;\frac{b}{1 + e^{a}}\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(0.5, b, \log 2\right)\\ \end{array} \end{array} \]
                NOTE: a and b should be sorted in increasing order before calling this function.
                (FPCore (a b)
                 :precision binary64
                 (if (<= a -29.0) (/ b (+ 1.0 (exp a))) (fma 0.5 b (log 2.0))))
                assert(a < b);
                double code(double a, double b) {
                	double tmp;
                	if (a <= -29.0) {
                		tmp = b / (1.0 + exp(a));
                	} else {
                		tmp = fma(0.5, b, log(2.0));
                	}
                	return tmp;
                }
                
                a, b = sort([a, b])
                function code(a, b)
                	tmp = 0.0
                	if (a <= -29.0)
                		tmp = Float64(b / Float64(1.0 + exp(a)));
                	else
                		tmp = fma(0.5, b, log(2.0));
                	end
                	return tmp
                end
                
                NOTE: a and b should be sorted in increasing order before calling this function.
                code[a_, b_] := If[LessEqual[a, -29.0], N[(b / N[(1.0 + N[Exp[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.5 * b + N[Log[2.0], $MachinePrecision]), $MachinePrecision]]
                
                \begin{array}{l}
                [a, b] = \mathsf{sort}([a, b])\\
                \\
                \begin{array}{l}
                \mathbf{if}\;a \leq -29:\\
                \;\;\;\;\frac{b}{1 + e^{a}}\\
                
                \mathbf{else}:\\
                \;\;\;\;\mathsf{fma}\left(0.5, b, \log 2\right)\\
                
                
                \end{array}
                \end{array}
                
                Derivation
                1. Split input into 2 regimes
                2. if a < -29

                  1. Initial program 10.9%

                    \[\log \left(e^{a} + e^{b}\right) \]
                  2. Add Preprocessing
                  3. Taylor expanded in b around 0

                    \[\leadsto \color{blue}{\log \left(1 + e^{a}\right) + \frac{b}{1 + e^{a}}} \]
                  4. Step-by-step derivation
                    1. *-rgt-identityN/A

                      \[\leadsto \log \left(1 + e^{a}\right) + \frac{\color{blue}{b \cdot 1}}{1 + e^{a}} \]
                    2. associate-*r/N/A

                      \[\leadsto \log \left(1 + e^{a}\right) + \color{blue}{b \cdot \frac{1}{1 + e^{a}}} \]
                    3. +-commutativeN/A

                      \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
                    4. lower-+.f64N/A

                      \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
                    5. associate-*r/N/A

                      \[\leadsto \color{blue}{\frac{b \cdot 1}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
                    6. *-rgt-identityN/A

                      \[\leadsto \frac{\color{blue}{b}}{1 + e^{a}} + \log \left(1 + e^{a}\right) \]
                    7. lower-/.f64N/A

                      \[\leadsto \color{blue}{\frac{b}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
                    8. +-commutativeN/A

                      \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
                    9. lower-+.f64N/A

                      \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
                    10. lower-exp.f64N/A

                      \[\leadsto \frac{b}{\color{blue}{e^{a}} + 1} + \log \left(1 + e^{a}\right) \]
                    11. lower-log1p.f64N/A

                      \[\leadsto \frac{b}{e^{a} + 1} + \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                    12. lower-exp.f64100.0

                      \[\leadsto \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
                  5. Applied rewrites100.0%

                    \[\leadsto \color{blue}{\frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right)} \]
                  6. Step-by-step derivation
                    1. Applied rewrites59.2%

                      \[\leadsto \frac{{\left(\frac{b}{1 + e^{a}}\right)}^{2} - {\left(\mathsf{log1p}\left(e^{a}\right)\right)}^{2}}{\color{blue}{\frac{b}{1 + e^{a}} - \mathsf{log1p}\left(e^{a}\right)}} \]
                    2. Taylor expanded in b around inf

                      \[\leadsto \frac{b}{\color{blue}{1 + e^{a}}} \]
                    3. Step-by-step derivation
                      1. Applied rewrites100.0%

                        \[\leadsto \frac{b}{\color{blue}{1 + e^{a}}} \]

                3. if -29 < a

                      1. Initial program 69.2%

                        \[\log \left(e^{a} + e^{b}\right) \]
                      2. Add Preprocessing
                      3. Taylor expanded in b around 0

                        \[\leadsto \color{blue}{\log \left(1 + e^{a}\right) + \frac{b}{1 + e^{a}}} \]
                      4. Step-by-step derivation
                        1. *-rgt-identityN/A

                          \[\leadsto \log \left(1 + e^{a}\right) + \frac{\color{blue}{b \cdot 1}}{1 + e^{a}} \]
                        2. associate-*r/N/A

                          \[\leadsto \log \left(1 + e^{a}\right) + \color{blue}{b \cdot \frac{1}{1 + e^{a}}} \]
                        3. +-commutativeN/A

                          \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
                        4. lower-+.f64N/A

                          \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
                        5. associate-*r/N/A

                          \[\leadsto \color{blue}{\frac{b \cdot 1}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
                        6. *-rgt-identityN/A

                          \[\leadsto \frac{\color{blue}{b}}{1 + e^{a}} + \log \left(1 + e^{a}\right) \]
                        7. lower-/.f64N/A

                          \[\leadsto \color{blue}{\frac{b}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
                        8. +-commutativeN/A

                          \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
                        9. lower-+.f64N/A

                          \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
                        10. lower-exp.f64N/A

                          \[\leadsto \frac{b}{\color{blue}{e^{a}} + 1} + \log \left(1 + e^{a}\right) \]
                        11. lower-log1p.f64N/A

                          \[\leadsto \frac{b}{e^{a} + 1} + \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                        12. lower-exp.f6467.1

                          \[\leadsto \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
                      5. Applied rewrites67.1%

                        \[\leadsto \color{blue}{\frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right)} \]
                      6. Taylor expanded in a around 0

                        \[\leadsto \log 2 + \color{blue}{\frac{1}{2} \cdot b} \]
                      7. Step-by-step derivation
                        1. Applied rewrites65.3%

                          \[\leadsto \mathsf{fma}\left(0.5, \color{blue}{b}, \log 2\right) \]
                4. Recombined 2 regimes into one program.
                5. Final simplification 73.8%

                        \[\leadsto \begin{array}{l} \mathbf{if}\;a \leq -29:\\ \;\;\;\;\frac{b}{1 + e^{a}}\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(0.5, b, \log 2\right)\\ \end{array} \]
                6. Add Preprocessing

Alternative 6: 49.1% accurate, 2.8× speedup

                      \[\begin{array}{l} [a, b] = \mathsf{sort}([a, b])\\ \\ \mathsf{fma}\left(0.5, b, \log 2\right) \end{array} \]
                      NOTE: a and b should be sorted in increasing order before calling this function.
                      (FPCore (a b) :precision binary64 (fma 0.5 b (log 2.0)))
                      assert(a < b);
                      double code(double a, double b) {
                      	return fma(0.5, b, log(2.0));
                      }
                      
                      a, b = sort([a, b])
                      function code(a, b)
                      	return fma(0.5, b, log(2.0))
                      end
                      
                      NOTE: a and b should be sorted in increasing order before calling this function.
                      code[a_, b_] := N[(0.5 * b + N[Log[2.0], $MachinePrecision]), $MachinePrecision]
                      
                      \begin{array}{l}
                      [a, b] = \mathsf{sort}([a, b])\\
                      \\
                      \mathsf{fma}\left(0.5, b, \log 2\right)
                      \end{array}
                      
                      Derivation
                      1. Initial program 54.8%

                        \[\log \left(e^{a} + e^{b}\right) \]
                      2. Add Preprocessing
                      3. Taylor expanded in b around 0

                        \[\leadsto \color{blue}{\log \left(1 + e^{a}\right) + \frac{b}{1 + e^{a}}} \]
                      4. Step-by-step derivation
                        1. *-rgt-identityN/A

                          \[\leadsto \log \left(1 + e^{a}\right) + \frac{\color{blue}{b \cdot 1}}{1 + e^{a}} \]
                        2. associate-*r/N/A

                          \[\leadsto \log \left(1 + e^{a}\right) + \color{blue}{b \cdot \frac{1}{1 + e^{a}}} \]
                        3. +-commutativeN/A

                          \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
                        4. lower-+.f64N/A

                          \[\leadsto \color{blue}{b \cdot \frac{1}{1 + e^{a}} + \log \left(1 + e^{a}\right)} \]
                        5. associate-*r/N/A

                          \[\leadsto \color{blue}{\frac{b \cdot 1}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
                        6. *-rgt-identityN/A

                          \[\leadsto \frac{\color{blue}{b}}{1 + e^{a}} + \log \left(1 + e^{a}\right) \]
                        7. lower-/.f64N/A

                          \[\leadsto \color{blue}{\frac{b}{1 + e^{a}}} + \log \left(1 + e^{a}\right) \]
                        8. +-commutativeN/A

                          \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
                        9. lower-+.f64N/A

                          \[\leadsto \frac{b}{\color{blue}{e^{a} + 1}} + \log \left(1 + e^{a}\right) \]
                        10. lower-exp.f64N/A

                          \[\leadsto \frac{b}{\color{blue}{e^{a}} + 1} + \log \left(1 + e^{a}\right) \]
                        11. lower-log1p.f64N/A

                          \[\leadsto \frac{b}{e^{a} + 1} + \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                        12. lower-exp.f6475.2

                          \[\leadsto \frac{b}{e^{a} + 1} + \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
                      5. Applied rewrites75.2%

                        \[\leadsto \color{blue}{\frac{b}{e^{a} + 1} + \mathsf{log1p}\left(e^{a}\right)} \]
                      6. Taylor expanded in a around 0

                        \[\leadsto \log 2 + \color{blue}{\frac{1}{2} \cdot b} \]
                      7. Step-by-step derivation
                        1. Applied rewrites50.3%

                          \[\leadsto \mathsf{fma}\left(0.5, \color{blue}{b}, \log 2\right) \]
                        2. Final simplification50.3%

                          \[\leadsto \mathsf{fma}\left(0.5, b, \log 2\right) \]
                        3. Add Preprocessing

Alternative 7: 48.8% accurate, 2.9× speedup

                        \[\begin{array}{l} [a, b] = \mathsf{sort}([a, b])\\ \\ \mathsf{log1p}\left(1 + b\right) \end{array} \]
                        NOTE: a and b should be sorted in increasing order before calling this function.
                        (FPCore (a b) :precision binary64 (log1p (+ 1.0 b)))
                        assert(a < b);
                        double code(double a, double b) {
                        	return log1p((1.0 + b));
                        }
                        
                        assert a < b;
                        public static double code(double a, double b) {
                        	return Math.log1p((1.0 + b));
                        }
                        
                        [a, b] = sort([a, b])
                        def code(a, b):
                        	return math.log1p((1.0 + b))
                        
                        a, b = sort([a, b])
                        function code(a, b)
                        	return log1p(Float64(1.0 + b))
                        end
                        
                        NOTE: a and b should be sorted in increasing order before calling this function.
                        code[a_, b_] := N[Log[1 + N[(1.0 + b), $MachinePrecision]], $MachinePrecision]
                        
                        \begin{array}{l}
                        [a, b] = \mathsf{sort}([a, b])\\
                        \\
                        \mathsf{log1p}\left(1 + b\right)
                        \end{array}
                        
                        Derivation
                        1. Initial program 54.8%

                          \[\log \left(e^{a} + e^{b}\right) \]
                        2. Add Preprocessing
                        3. Taylor expanded in a around 0

                          \[\leadsto \color{blue}{\log \left(1 + e^{b}\right)} \]
                        4. Step-by-step derivation
                          1. lower-log1p.f64N/A

                            \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{b}\right)} \]
                          2. lower-exp.f6451.4

                            \[\leadsto \mathsf{log1p}\left(\color{blue}{e^{b}}\right) \]
                        5. Applied rewrites51.4%

                          \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{b}\right)} \]
                        6. Taylor expanded in b around 0

                          \[\leadsto \mathsf{log1p}\left(1 + b\right) \]
                        7. Step-by-step derivation
                          1. Applied rewrites49.6%

                            \[\leadsto \mathsf{log1p}\left(1 + b\right) \]
                          2. Add Preprocessing

Alternative 8: 48.3% accurate, 3.0× speedup

                          \[\begin{array}{l} [a, b] = \mathsf{sort}([a, b])\\ \\ \mathsf{log1p}\left(1\right) \end{array} \]
                          NOTE: a and b should be sorted in increasing order before calling this function.
                          (FPCore (a b) :precision binary64 (log1p 1.0))
                          assert(a < b);
                          double code(double a, double b) {
                          	return log1p(1.0);
                          }
                          
                          assert a < b;
                          public static double code(double a, double b) {
                          	return Math.log1p(1.0);
                          }
                          
                          [a, b] = sort([a, b])
                          def code(a, b):
                          	return math.log1p(1.0)
                          
                          a, b = sort([a, b])
                          function code(a, b)
                          	return log1p(1.0)
                          end
                          
                          NOTE: a and b should be sorted in increasing order before calling this function.
                          code[a_, b_] := N[Log[1 + 1.0], $MachinePrecision]
                          
                          \begin{array}{l}
                          [a, b] = \mathsf{sort}([a, b])\\
                          \\
                          \mathsf{log1p}\left(1\right)
                          \end{array}
                          
                          Derivation
                          1. Initial program 54.8%

                            \[\log \left(e^{a} + e^{b}\right) \]
                          2. Add Preprocessing
                          3. Taylor expanded in b around 0

                            \[\leadsto \color{blue}{\log \left(1 + e^{a}\right)} \]
                          4. Step-by-step derivation
                            1. lower-log1p.f64N/A

                              \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                            2. lower-exp.f6451.8

                              \[\leadsto \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
                          5. Applied rewrites51.8%

                            \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                          6. Taylor expanded in a around 0

                            \[\leadsto \mathsf{log1p}\left(1\right) \]
                          7. Step-by-step derivation
                            1. Applied rewrites50.3%

                              \[\leadsto \mathsf{log1p}\left(1\right) \]
                            2. Add Preprocessing

Alternative 9: 2.6% accurate, 50.7× speedup

                            \[\begin{array}{l} [a, b] = \mathsf{sort}([a, b])\\ \\ 0.5 \cdot a \end{array} \]
                            NOTE: a and b should be sorted in increasing order before calling this function.
                            (FPCore (a b) :precision binary64 (* 0.5 a))
                            assert(a < b);
                            double code(double a, double b) {
                            	return 0.5 * a;
                            }
                            
                            NOTE: a and b should be sorted in increasing order before calling this function.
                            module fmin_fmax_functions
                                implicit none
                                private
                                public fmax
                                public fmin
                            
                                interface fmax
                                    module procedure fmax88
                                    module procedure fmax44
                                    module procedure fmax84
                                    module procedure fmax48
                                end interface
                                interface fmin
                                    module procedure fmin88
                                    module procedure fmin44
                                    module procedure fmin84
                                    module procedure fmin48
                                end interface
                            contains
                                real(8) function fmax88(x, y) result (res)
                                    real(8), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                end function
                                real(4) function fmax44(x, y) result (res)
                                    real(4), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                end function
                                real(8) function fmax84(x, y) result(res)
                                    real(8), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                                end function
                                real(8) function fmax48(x, y) result(res)
                                    real(4), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                                end function
                                real(8) function fmin88(x, y) result (res)
                                    real(8), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                end function
                                real(4) function fmin44(x, y) result (res)
                                    real(4), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                end function
                                real(8) function fmin84(x, y) result(res)
                                    real(8), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                                end function
                                real(8) function fmin48(x, y) result(res)
                                    real(4), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                                end function
                            end module
                            
                            real(8) function code(a, b)
                            use fmin_fmax_functions
                                real(8), intent (in) :: a
                                real(8), intent (in) :: b
                                code = 0.5d0 * a
                            end function
                            
                            assert a < b;
                            public static double code(double a, double b) {
                            	return 0.5 * a;
                            }
                            
                            [a, b] = sort([a, b])
                            def code(a, b):
                            	return 0.5 * a
                            
                            a, b = sort([a, b])
                            function code(a, b)
                            	return Float64(0.5 * a)
                            end
                            
                            a, b = num2cell(sort([a, b])){:}
                            function tmp = code(a, b)
                            	tmp = 0.5 * a;
                            end
                            
                            NOTE: a and b should be sorted in increasing order before calling this function.
                            code[a_, b_] := N[(0.5 * a), $MachinePrecision]
                            
                            \begin{array}{l}
                            [a, b] = \mathsf{sort}([a, b])\\
                            \\
                            0.5 \cdot a
                            \end{array}
                            
                            Derivation
                            1. Initial program 54.8%

                              \[\log \left(e^{a} + e^{b}\right) \]
                            2. Add Preprocessing
                            3. Taylor expanded in b around 0

                              \[\leadsto \color{blue}{\log \left(1 + e^{a}\right)} \]
                            4. Step-by-step derivation
                              1. lower-log1p.f64N/A

                                \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                              2. lower-exp.f6451.8

                                \[\leadsto \mathsf{log1p}\left(\color{blue}{e^{a}}\right) \]
                            5. Applied rewrites51.8%

                              \[\leadsto \color{blue}{\mathsf{log1p}\left(e^{a}\right)} \]
                            6. Taylor expanded in a around 0

                              \[\leadsto \log 2 + \color{blue}{\frac{1}{2} \cdot a} \]
                            7. Step-by-step derivation
                              1. Applied rewrites50.3%

                                \[\leadsto \mathsf{fma}\left(0.5, \color{blue}{a}, \log 2\right) \]
                              2. Taylor expanded in a around inf

                                \[\leadsto \frac{1}{2} \cdot a \]
                              3. Step-by-step derivation
                                1. Applied rewrites7.1%

                                  \[\leadsto 0.5 \cdot a \]
                                2. Add Preprocessing

Reproduce

herbie shell --seed 2024359
(FPCore (a b)
  :name "symmetry log of sum of exp"
  :precision binary64
  (log (+ (exp a) (exp b))))