Numeric.SpecFunctions:invIncompleteGamma from math-functions-0.1.5.2, B

Percentage Accurate: 72.8% → 99.9%
Time: 8.9s
Alternatives: 9
Speedup: 1.0×

Specification

\[ 1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
double code(double x, double y) {
	return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(x, y)
use fmin_fmax_functions
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
public static double code(double x, double y) {
	return 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
}
def code(x, y):
	return 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
function code(x, y)
	return Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
end
function tmp = code(x, y)
	tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}
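
The 72.8% accuracy figure reflects inputs on which the subtraction 1 - (x - y)/(1 - y) cancels catastrophically, for example when |y| is large. The sketch below is not part of the Herbie output and uses purely illustrative inputs; it compares the binary64 evaluation of the specification against a 50-digit reference computed with Python's standard decimal module.

import math
from decimal import Decimal, getcontext

def spec_f64(x, y):
    # the original program, evaluated in binary64
    return 1.0 - math.log(1.0 - (x - y) / (1.0 - y))

def spec_ref(x, y, digits=50):
    # high-precision reference for the same expression
    getcontext().prec = digits
    xd, yd = Decimal(x), Decimal(y)
    return 1 - (1 - (xd - yd) / (1 - yd)).ln()

x, y = 0.5, -1e15          # illustrative point in the large-|y| regime
print(spec_f64(x, y))      # about 36.13: cancellation leaves only a few correct digits
print(spec_ref(x, y))      # about 36.23: the reference value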

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs input variable

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable (the variable is chosen in the plot title); the vertical axis is accuracy, and higher is better. Red represents the original program, while blue represents Herbie's suggestion; the two can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 9 alternatives:

[Table of the 9 alternatives with their Accuracy and Speedup.]
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 72.8% accurate, 1.0× speedup

\[ 1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
double code(double x, double y) {
	return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(x, y)
use fmin_fmax_functions
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
public static double code(double x, double y) {
	return 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
}
def code(x, y):
	return 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
function code(x, y)
	return Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
end
function tmp = code(x, y)
	tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}

Alternative 1: 99.9% accurate, 0.5× speedup

\[\begin{array}{l} t_0 := 1 - \log \left(1 - \frac{x - y}{1 - y}\right)\\ \mathbf{if}\;t_0 \leq 5:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{\left(-1 + \frac{x - 1}{y}\right) + x}{y}\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y)))))))
   (if (<= t_0 5.0) t_0 (- 1.0 (log (/ (+ (+ -1.0 (/ (- x 1.0) y)) x) y))))))
double code(double x, double y) {
	double t_0 = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
	double tmp;
	if (t_0 <= 5.0) {
		tmp = t_0;
	} else {
		tmp = 1.0 - log((((-1.0 + ((x - 1.0) / y)) + x) / y));
	}
	return tmp;
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(x, y)
use fmin_fmax_functions
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: t_0
    real(8) :: tmp
    t_0 = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
    if (t_0 <= 5.0d0) then
        tmp = t_0
    else
        tmp = 1.0d0 - log(((((-1.0d0) + ((x - 1.0d0) / y)) + x) / y))
    end if
    code = tmp
end function
public static double code(double x, double y) {
	double t_0 = 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
	double tmp;
	if (t_0 <= 5.0) {
		tmp = t_0;
	} else {
		tmp = 1.0 - Math.log((((-1.0 + ((x - 1.0) / y)) + x) / y));
	}
	return tmp;
}
def code(x, y):
	t_0 = 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
	tmp = 0
	if t_0 <= 5.0:
		tmp = t_0
	else:
		tmp = 1.0 - math.log((((-1.0 + ((x - 1.0) / y)) + x) / y))
	return tmp
function code(x, y)
	t_0 = Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
	tmp = 0.0
	if (t_0 <= 5.0)
		tmp = t_0;
	else
		tmp = Float64(1.0 - log(Float64(Float64(Float64(-1.0 + Float64(Float64(x - 1.0) / y)) + x) / y)));
	end
	return tmp
end
function tmp_2 = code(x, y)
	t_0 = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
	tmp = 0.0;
	if (t_0 <= 5.0)
		tmp = t_0;
	else
		tmp = 1.0 - log((((-1.0 + ((x - 1.0) / y)) + x) / y));
	end
	tmp_2 = tmp;
end
code[x_, y_] := Block[{t$95$0 = N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, 5.0], t$95$0, N[(1.0 - N[Log[N[(N[(N[(-1.0 + N[(N[(x - 1.0), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision] / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := 1 - \log \left(1 - \frac{x - y}{1 - y}\right)\\
\mathbf{if}\;t_0 \leq 5:\\
\;\;\;\;t_0\\
\mathbf{else}:\\
\;\;\;\;1 - \log \left(\frac{\left(-1 + \frac{x - 1}{y}\right) + x}{y}\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 #s(literal 1 binary64) (log.f64 (-.f64 #s(literal 1 binary64) (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y))))) < 5

    1. Initial program 100.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Add Preprocessing

    if 5 < (-.f64 #s(literal 1 binary64) (log.f64 (-.f64 #s(literal 1 binary64) (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)))))

    1. Initial program 6.5%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in y around -inf

      \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{\left(1 + -1 \cdot \frac{x - 1}{y}\right) - x}{y}\right)} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto 1 - \log \color{blue}{\left(\mathsf{neg}\left(\frac{\left(1 + -1 \cdot \frac{x - 1}{y}\right) - x}{y}\right)\right)} \]
      2. distribute-neg-frac2N/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{\left(1 + -1 \cdot \frac{x - 1}{y}\right) - x}{\mathsf{neg}\left(y\right)}\right)} \]
      3. associate--l+N/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{1 + \left(-1 \cdot \frac{x - 1}{y} - x\right)}}{\mathsf{neg}\left(y\right)}\right) \]
      4. +-commutativeN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(-1 \cdot \frac{x - 1}{y} - x\right) + 1}}{\mathsf{neg}\left(y\right)}\right) \]
      5. *-lft-identityN/A

        \[\leadsto 1 - \log \left(\frac{\left(-1 \cdot \frac{x - 1}{y} - \color{blue}{1 \cdot x}\right) + 1}{\mathsf{neg}\left(y\right)}\right) \]
      6. metadata-evalN/A

        \[\leadsto 1 - \log \left(\frac{\left(-1 \cdot \frac{x - 1}{y} - \color{blue}{\left(\mathsf{neg}\left(-1\right)\right)} \cdot x\right) + 1}{\mathsf{neg}\left(y\right)}\right) \]
      7. fp-cancel-sign-sub-invN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(-1 \cdot \frac{x - 1}{y} + -1 \cdot x\right)} + 1}{\mathsf{neg}\left(y\right)}\right) \]
      8. +-commutativeN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(-1 \cdot x + -1 \cdot \frac{x - 1}{y}\right)} + 1}{\mathsf{neg}\left(y\right)}\right) \]
      9. +-commutativeN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{1 + \left(-1 \cdot x + -1 \cdot \frac{x - 1}{y}\right)}}{\mathsf{neg}\left(y\right)}\right) \]
      10. lower-/.f64N/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{1 + \left(-1 \cdot x + -1 \cdot \frac{x - 1}{y}\right)}{\mathsf{neg}\left(y\right)}\right)} \]
    5. Applied rewrites100.0%

      \[\leadsto 1 - \log \color{blue}{\left(\frac{\left(1 - \frac{x - 1}{y}\right) - x}{-y}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;1 - \log \left(1 - \frac{x - y}{1 - y}\right) \leq 5:\\ \;\;\;\;1 - \log \left(1 - \frac{x - y}{1 - y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{\left(-1 + \frac{x - 1}{y}\right) + x}{y}\right)\\ \end{array} \]
  5. Add Preprocessing
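
For the same illustrative inputs used in the sketch after the Specification, the rewritten else branch of Alternative 1 recovers the reference value; a minimal check, again not part of the Herbie output:

import math

x, y = 0.5, -1e15
orig = 1.0 - math.log(1.0 - (x - y) / (1.0 - y))            # original program
alt1 = 1.0 - math.log(((-1.0 + (x - 1.0) / y) + x) / y)     # Alternative 1, else branch
print(orig)   # about 36.13, noticeably off
print(alt1)   # about 36.23, agreeing with the 50-digit reference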

Alternative 2: 98.0% accurate, 0.3× speedup

\[\begin{array}{l} t_0 := 1 - \log \left(1 - \frac{x - y}{1 - y}\right)\\ \mathbf{if}\;t_0 \leq -2:\\ \;\;\;\;1 - \log \left(\frac{x}{-1 + y}\right)\\ \mathbf{elif}\;t_0 \leq 20:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{y}{1 - y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{-1 + x}{y}\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y)))))))
   (if (<= t_0 -2.0)
     (- 1.0 (log (/ x (+ -1.0 y))))
     (if (<= t_0 20.0)
       (- 1.0 (log1p (/ y (- 1.0 y))))
       (- 1.0 (log (/ (+ -1.0 x) y)))))))
double code(double x, double y) {
	double t_0 = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
	double tmp;
	if (t_0 <= -2.0) {
		tmp = 1.0 - log((x / (-1.0 + y)));
	} else if (t_0 <= 20.0) {
		tmp = 1.0 - log1p((y / (1.0 - y)));
	} else {
		tmp = 1.0 - log(((-1.0 + x) / y));
	}
	return tmp;
}
public static double code(double x, double y) {
	double t_0 = 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
	double tmp;
	if (t_0 <= -2.0) {
		tmp = 1.0 - Math.log((x / (-1.0 + y)));
	} else if (t_0 <= 20.0) {
		tmp = 1.0 - Math.log1p((y / (1.0 - y)));
	} else {
		tmp = 1.0 - Math.log(((-1.0 + x) / y));
	}
	return tmp;
}
def code(x, y):
	t_0 = 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
	tmp = 0
	if t_0 <= -2.0:
		tmp = 1.0 - math.log((x / (-1.0 + y)))
	elif t_0 <= 20.0:
		tmp = 1.0 - math.log1p((y / (1.0 - y)))
	else:
		tmp = 1.0 - math.log(((-1.0 + x) / y))
	return tmp
function code(x, y)
	t_0 = Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
	tmp = 0.0
	if (t_0 <= -2.0)
		tmp = Float64(1.0 - log(Float64(x / Float64(-1.0 + y))));
	elseif (t_0 <= 20.0)
		tmp = Float64(1.0 - log1p(Float64(y / Float64(1.0 - y))));
	else
		tmp = Float64(1.0 - log(Float64(Float64(-1.0 + x) / y)));
	end
	return tmp
end
code[x_, y_] := Block[{t$95$0 = N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2.0], N[(1.0 - N[Log[N[(x / N[(-1.0 + y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$0, 20.0], N[(1.0 - N[Log[1 + N[(y / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[N[(N[(-1.0 + x), $MachinePrecision] / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := 1 - \log \left(1 - \frac{x - y}{1 - y}\right)\\
\mathbf{if}\;t_0 \leq -2:\\
\;\;\;\;1 - \log \left(\frac{x}{-1 + y}\right)\\
\mathbf{elif}\;t_0 \leq 20:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{y}{1 - y}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \log \left(\frac{-1 + x}{y}\right)\\
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (-.f64 #s(literal 1 binary64) (log.f64 (-.f64 #s(literal 1 binary64) (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y))))) < -2

    1. Initial program 100.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{x}{1 - y}\right)} \]
    4. Step-by-step derivation
      1. associate-*r/N/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 \cdot x}{1 - y}\right)} \]
      2. lower-/.f64N/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 \cdot x}{1 - y}\right)} \]
      3. mul-1-negN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{\mathsf{neg}\left(x\right)}}{1 - y}\right) \]
      4. lower-neg.f64N/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{-x}}{1 - y}\right) \]
      5. lower--.f64100.0

        \[\leadsto 1 - \log \left(\frac{-x}{\color{blue}{1 - y}}\right) \]
    5. Applied rewrites100.0%

      \[\leadsto 1 - \log \color{blue}{\left(\frac{-x}{1 - y}\right)} \]

    if -2 < (-.f64 #s(literal 1 binary64) (log.f64 (-.f64 #s(literal 1 binary64) (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y))))) < 20

    1. Initial program 99.7%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto 1 - \color{blue}{\log \left(1 + \frac{y}{1 - y}\right)} \]
    4. Step-by-step derivation
      1. lower-log1p.f64N/A

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(\frac{y}{1 - y}\right)} \]
      2. lower-/.f64N/A

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{y}{1 - y}}\right) \]
      3. lower--.f6497.2

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1 - y}}\right) \]
    5. Applied rewrites97.2%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(\frac{y}{1 - y}\right)} \]

    if 20 < (-.f64 #s(literal 1 binary64) (log.f64 (-.f64 #s(literal 1 binary64) (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)))))

    1. Initial program 5.5%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in y around inf

      \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{1 + -1 \cdot x}{y}\right)} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto 1 - \log \color{blue}{\left(\mathsf{neg}\left(\frac{1 + -1 \cdot x}{y}\right)\right)} \]
      2. distribute-neg-fracN/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
      3. lower-/.f64N/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
      4. distribute-neg-inN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}}{y}\right) \]
      5. metadata-evalN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{-1} + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}{y}\right) \]
      6. mul-1-negN/A

        \[\leadsto 1 - \log \left(\frac{-1 + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)}\right)\right)}{y}\right) \]
      7. remove-double-negN/A

        \[\leadsto 1 - \log \left(\frac{-1 + \color{blue}{x}}{y}\right) \]
      8. lower-+.f6499.5

        \[\leadsto 1 - \log \left(\frac{\color{blue}{-1 + x}}{y}\right) \]
    5. Applied rewrites99.5%

      \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 + x}{y}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 98.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;1 - \log \left(1 - \frac{x - y}{1 - y}\right) \leq -2:\\ \;\;\;\;1 - \log \left(\frac{x}{-1 + y}\right)\\ \mathbf{elif}\;1 - \log \left(1 - \frac{x - y}{1 - y}\right) \leq 20:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{y}{1 - y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{-1 + x}{y}\right)\\ \end{array} \]
  5. Add Preprocessing
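
The middle regime of Alternative 2 replaces a log of a quantity of the form 1 + t with log1p, which evaluates log(1 + t) without first rounding 1 + t to a double. A minimal illustration, independent of this benchmark:

import math

t = 1e-20
print(math.log(1.0 + t))   # 0.0: the sum 1.0 + t rounds to exactly 1.0
print(math.log1p(t))       # about 1e-20: log1p keeps the tiny argument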

Alternative 3: 99.7% accurate, 0.5× speedup

\[\begin{array}{l} t_0 := 1 - \log \left(1 - \frac{x - y}{1 - y}\right)\\ \mathbf{if}\;t_0 \leq 20:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{-1 + x}{y}\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y)))))))
   (if (<= t_0 20.0) t_0 (- 1.0 (log (/ (+ -1.0 x) y))))))
double code(double x, double y) {
	double t_0 = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
	double tmp;
	if (t_0 <= 20.0) {
		tmp = t_0;
	} else {
		tmp = 1.0 - log(((-1.0 + x) / y));
	}
	return tmp;
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(x, y)
use fmin_fmax_functions
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: t_0
    real(8) :: tmp
    t_0 = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
    if (t_0 <= 20.0d0) then
        tmp = t_0
    else
        tmp = 1.0d0 - log((((-1.0d0) + x) / y))
    end if
    code = tmp
end function
public static double code(double x, double y) {
	double t_0 = 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
	double tmp;
	if (t_0 <= 20.0) {
		tmp = t_0;
	} else {
		tmp = 1.0 - Math.log(((-1.0 + x) / y));
	}
	return tmp;
}
def code(x, y):
	t_0 = 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
	tmp = 0
	if t_0 <= 20.0:
		tmp = t_0
	else:
		tmp = 1.0 - math.log(((-1.0 + x) / y))
	return tmp
function code(x, y)
	t_0 = Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
	tmp = 0.0
	if (t_0 <= 20.0)
		tmp = t_0;
	else
		tmp = Float64(1.0 - log(Float64(Float64(-1.0 + x) / y)));
	end
	return tmp
end
function tmp_2 = code(x, y)
	t_0 = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
	tmp = 0.0;
	if (t_0 <= 20.0)
		tmp = t_0;
	else
		tmp = 1.0 - log(((-1.0 + x) / y));
	end
	tmp_2 = tmp;
end
code[x_, y_] := Block[{t$95$0 = N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, 20.0], t$95$0, N[(1.0 - N[Log[N[(N[(-1.0 + x), $MachinePrecision] / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := 1 - \log \left(1 - \frac{x - y}{1 - y}\right)\\
\mathbf{if}\;t_0 \leq 20:\\
\;\;\;\;t_0\\
\mathbf{else}:\\
\;\;\;\;1 - \log \left(\frac{-1 + x}{y}\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 #s(literal 1 binary64) (log.f64 (-.f64 #s(literal 1 binary64) (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y))))) < 20

    1. Initial program 99.8%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Add Preprocessing

    if 20 < (-.f64 #s(literal 1 binary64) (log.f64 (-.f64 #s(literal 1 binary64) (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)))))

    1. Initial program 5.5%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in y around inf

      \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{1 + -1 \cdot x}{y}\right)} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto 1 - \log \color{blue}{\left(\mathsf{neg}\left(\frac{1 + -1 \cdot x}{y}\right)\right)} \]
      2. distribute-neg-fracN/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
      3. lower-/.f64N/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
      4. distribute-neg-inN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}}{y}\right) \]
      5. metadata-evalN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{-1} + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}{y}\right) \]
      6. mul-1-negN/A

        \[\leadsto 1 - \log \left(\frac{-1 + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)}\right)\right)}{y}\right) \]
      7. remove-double-negN/A

        \[\leadsto 1 - \log \left(\frac{-1 + \color{blue}{x}}{y}\right) \]
      8. lower-+.f6499.5

        \[\leadsto 1 - \log \left(\frac{\color{blue}{-1 + x}}{y}\right) \]
    5. Applied rewrites99.5%

      \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 + x}{y}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
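
The repeated "Taylor expanded in y around inf" step in Alternatives 1-3 can also be read off algebraically: combining the original subtraction into a single fraction and letting |y| grow gives

\[ 1 - \frac{x - y}{1 - y} = \frac{(1 - y) - (x - y)}{1 - y} = \frac{1 - x}{1 - y} = \frac{x - 1}{y - 1} \approx \frac{-1 + x}{y} \qquad (|y| \gg 1), \]

which is the argument of the log in the large-|y| branches above.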

Alternative 4: 80.4% accurate, 0.5× speedup

\[\begin{array}{l} \mathbf{if}\;1 - \log \left(1 - \frac{x - y}{1 - y}\right) \leq 2:\\ \;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))) 2.0)
   (- 1.0 (log1p (- x)))
   (- 1.0 (log (/ -1.0 y)))))
double code(double x, double y) {
	double tmp;
	if ((1.0 - log((1.0 - ((x - y) / (1.0 - y))))) <= 2.0) {
		tmp = 1.0 - log1p(-x);
	} else {
		tmp = 1.0 - log((-1.0 / y));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if ((1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))))) <= 2.0) {
		tmp = 1.0 - Math.log1p(-x);
	} else {
		tmp = 1.0 - Math.log((-1.0 / y));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if (1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))) <= 2.0:
		tmp = 1.0 - math.log1p(-x)
	else:
		tmp = 1.0 - math.log((-1.0 / y))
	return tmp
function code(x, y)
	tmp = 0.0
	if (Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y))))) <= 2.0)
		tmp = Float64(1.0 - log1p(Float64(-x)));
	else
		tmp = Float64(1.0 - log(Float64(-1.0 / y)));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], 2.0], N[(1.0 - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;1 - \log \left(1 - \frac{x - y}{1 - y}\right) \leq 2:\\
\;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 #s(literal 1 binary64) (log.f64 (-.f64 #s(literal 1 binary64) (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y))))) < 2

    1. Initial program 100.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto 1 - \color{blue}{\log \left(1 - x\right)} \]
    4. Step-by-step derivation
      1. *-lft-identityN/A

        \[\leadsto 1 - \log \left(1 - \color{blue}{1 \cdot x}\right) \]
      2. metadata-evalN/A

        \[\leadsto 1 - \log \left(1 - \color{blue}{\left(\mathsf{neg}\left(-1\right)\right)} \cdot x\right) \]
      3. fp-cancel-sign-sub-invN/A

        \[\leadsto 1 - \log \color{blue}{\left(1 + -1 \cdot x\right)} \]
      4. lower-log1p.f64N/A

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
      5. mul-1-negN/A

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left(x\right)}\right) \]
      6. lower-neg.f6489.4

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
    5. Applied rewrites89.4%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]

    if 2 < (-.f64 #s(literal 1 binary64) (log.f64 (-.f64 #s(literal 1 binary64) (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)))))

    1. Initial program 7.8%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in y around inf

      \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{1 + -1 \cdot x}{y}\right)} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto 1 - \log \color{blue}{\left(\mathsf{neg}\left(\frac{1 + -1 \cdot x}{y}\right)\right)} \]
      2. distribute-neg-fracN/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
      3. lower-/.f64N/A

        \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
      4. distribute-neg-inN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}}{y}\right) \]
      5. metadata-evalN/A

        \[\leadsto 1 - \log \left(\frac{\color{blue}{-1} + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}{y}\right) \]
      6. mul-1-negN/A

        \[\leadsto 1 - \log \left(\frac{-1 + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)}\right)\right)}{y}\right) \]
      7. remove-double-negN/A

        \[\leadsto 1 - \log \left(\frac{-1 + \color{blue}{x}}{y}\right) \]
      8. lower-+.f6497.9

        \[\leadsto 1 - \log \left(\frac{\color{blue}{-1 + x}}{y}\right) \]
    5. Applied rewrites97.9%

      \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 + x}{y}\right)} \]
    6. Taylor expanded in x around 0

      \[\leadsto 1 - \log \left(\frac{-1}{\color{blue}{y}}\right) \]
    7. Step-by-step derivation
      1. Applied rewrites58.0%

        \[\leadsto 1 - \log \left(\frac{-1}{\color{blue}{y}}\right) \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing
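
The small-y branch of Alternative 4 follows from the same combined fraction by dropping y entirely:

\[ 1 - \frac{x - y}{1 - y} = \frac{1 - x}{1 - y} \approx 1 - x \qquad (y \to 0), \]

so the log term becomes log(1 - x), i.e. log1p(-x), which is the form used in the if branch.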

Alternative 5: 98.8% accurate, 1.0× speedup

    \[\begin{array}{l} \mathbf{if}\;y \leq -0.84 \lor \neg \left(y \leq 1\right):\\ \;\;\;\;1 - \log \left(\frac{-1 + x}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\left(y + 1\right) \cdot \left(1 - x\right)\right)\\ \end{array} \]
    (FPCore (x y)
     :precision binary64
     (if (or (<= y -0.84) (not (<= y 1.0)))
       (- 1.0 (log (/ (+ -1.0 x) y)))
       (- 1.0 (log (* (+ y 1.0) (- 1.0 x))))))
    double code(double x, double y) {
    	double tmp;
    	if ((y <= -0.84) || !(y <= 1.0)) {
    		tmp = 1.0 - log(((-1.0 + x) / y));
    	} else {
    		tmp = 1.0 - log(((y + 1.0) * (1.0 - x)));
    	}
    	return tmp;
    }
    
    module fmin_fmax_functions
        implicit none
        private
        public fmax
        public fmin
    
        interface fmax
            module procedure fmax88
            module procedure fmax44
            module procedure fmax84
            module procedure fmax48
        end interface
        interface fmin
            module procedure fmin88
            module procedure fmin44
            module procedure fmin84
            module procedure fmin48
        end interface
    contains
        real(8) function fmax88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
        end function
        real(4) function fmax44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
        end function
        real(8) function fmax84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
        end function
        real(8) function fmax48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
        end function
        real(8) function fmin88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
        end function
        real(4) function fmin44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
        end function
        real(8) function fmin84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
        end function
        real(8) function fmin48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
        end function
    end module
    
    real(8) function code(x, y)
    use fmin_fmax_functions
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        real(8) :: tmp
        if ((y <= (-0.84d0)) .or. (.not. (y <= 1.0d0))) then
            tmp = 1.0d0 - log((((-1.0d0) + x) / y))
        else
            tmp = 1.0d0 - log(((y + 1.0d0) * (1.0d0 - x)))
        end if
        code = tmp
    end function
    
    public static double code(double x, double y) {
    	double tmp;
    	if ((y <= -0.84) || !(y <= 1.0)) {
    		tmp = 1.0 - Math.log(((-1.0 + x) / y));
    	} else {
    		tmp = 1.0 - Math.log(((y + 1.0) * (1.0 - x)));
    	}
    	return tmp;
    }
    
    def code(x, y):
    	tmp = 0
    	if (y <= -0.84) or not (y <= 1.0):
    		tmp = 1.0 - math.log(((-1.0 + x) / y))
    	else:
    		tmp = 1.0 - math.log(((y + 1.0) * (1.0 - x)))
    	return tmp
    
    function code(x, y)
    	tmp = 0.0
    	if ((y <= -0.84) || !(y <= 1.0))
    		tmp = Float64(1.0 - log(Float64(Float64(-1.0 + x) / y)));
    	else
    		tmp = Float64(1.0 - log(Float64(Float64(y + 1.0) * Float64(1.0 - x))));
    	end
    	return tmp
    end
    
    function tmp_2 = code(x, y)
    	tmp = 0.0;
    	if ((y <= -0.84) || ~((y <= 1.0)))
    		tmp = 1.0 - log(((-1.0 + x) / y));
    	else
    		tmp = 1.0 - log(((y + 1.0) * (1.0 - x)));
    	end
    	tmp_2 = tmp;
    end
    
    code[x_, y_] := If[Or[LessEqual[y, -0.84], N[Not[LessEqual[y, 1.0]], $MachinePrecision]], N[(1.0 - N[Log[N[(N[(-1.0 + x), $MachinePrecision] / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[N[(N[(y + 1.0), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
    
    \begin{array}{l}
    \mathbf{if}\;y \leq -0.84 \lor \neg \left(y \leq 1\right):\\
    \;\;\;\;1 - \log \left(\frac{-1 + x}{y}\right)\\
    \mathbf{else}:\\
    \;\;\;\;1 - \log \left(\left(y + 1\right) \cdot \left(1 - x\right)\right)\\
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if y < -0.839999999999999969 or 1 < y

      1. Initial program 26.7%

        \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
      2. Add Preprocessing
      3. Taylor expanded in y around inf

        \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{1 + -1 \cdot x}{y}\right)} \]
      4. Step-by-step derivation
        1. mul-1-negN/A

          \[\leadsto 1 - \log \color{blue}{\left(\mathsf{neg}\left(\frac{1 + -1 \cdot x}{y}\right)\right)} \]
        2. distribute-neg-fracN/A

          \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
        3. lower-/.f64N/A

          \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
        4. distribute-neg-inN/A

          \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}}{y}\right) \]
        5. metadata-evalN/A

          \[\leadsto 1 - \log \left(\frac{\color{blue}{-1} + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}{y}\right) \]
        6. mul-1-negN/A

          \[\leadsto 1 - \log \left(\frac{-1 + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)}\right)\right)}{y}\right) \]
        7. remove-double-negN/A

          \[\leadsto 1 - \log \left(\frac{-1 + \color{blue}{x}}{y}\right) \]
        8. lower-+.f6497.2

          \[\leadsto 1 - \log \left(\frac{\color{blue}{-1 + x}}{y}\right) \]
      5. Applied rewrites97.2%

        \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 + x}{y}\right)} \]

      if -0.839999999999999969 < y < 1

      1. Initial program 100.0%

        \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
      2. Add Preprocessing
      3. Taylor expanded in y around 0

        \[\leadsto 1 - \log \color{blue}{\left(\left(1 + y \cdot \left(1 + -1 \cdot x\right)\right) - x\right)} \]
      4. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto 1 - \log \left(\color{blue}{\left(y \cdot \left(1 + -1 \cdot x\right) + 1\right)} - x\right) \]
        2. associate--l+N/A

          \[\leadsto 1 - \log \color{blue}{\left(y \cdot \left(1 + -1 \cdot x\right) + \left(1 - x\right)\right)} \]
        3. *-lft-identityN/A

          \[\leadsto 1 - \log \left(y \cdot \left(1 + -1 \cdot x\right) + \left(1 - \color{blue}{1 \cdot x}\right)\right) \]
        4. metadata-evalN/A

          \[\leadsto 1 - \log \left(y \cdot \left(1 + -1 \cdot x\right) + \left(1 - \color{blue}{\left(\mathsf{neg}\left(-1\right)\right)} \cdot x\right)\right) \]
        5. fp-cancel-sign-sub-invN/A

          \[\leadsto 1 - \log \left(y \cdot \left(1 + -1 \cdot x\right) + \color{blue}{\left(1 + -1 \cdot x\right)}\right) \]
        6. +-commutativeN/A

          \[\leadsto 1 - \log \color{blue}{\left(\left(1 + -1 \cdot x\right) + y \cdot \left(1 + -1 \cdot x\right)\right)} \]
        7. distribute-rgt1-inN/A

          \[\leadsto 1 - \log \color{blue}{\left(\left(y + 1\right) \cdot \left(1 + -1 \cdot x\right)\right)} \]
        8. lower-*.f64N/A

          \[\leadsto 1 - \log \color{blue}{\left(\left(y + 1\right) \cdot \left(1 + -1 \cdot x\right)\right)} \]
        9. lower-+.f64N/A

          \[\leadsto 1 - \log \left(\color{blue}{\left(y + 1\right)} \cdot \left(1 + -1 \cdot x\right)\right) \]
        10. fp-cancel-sign-sub-invN/A

          \[\leadsto 1 - \log \left(\left(y + 1\right) \cdot \color{blue}{\left(1 - \left(\mathsf{neg}\left(-1\right)\right) \cdot x\right)}\right) \]
        11. metadata-evalN/A

          \[\leadsto 1 - \log \left(\left(y + 1\right) \cdot \left(1 - \color{blue}{1} \cdot x\right)\right) \]
        12. *-lft-identityN/A

          \[\leadsto 1 - \log \left(\left(y + 1\right) \cdot \left(1 - \color{blue}{x}\right)\right) \]
        13. lower--.f6498.5

          \[\leadsto 1 - \log \left(\left(y + 1\right) \cdot \color{blue}{\left(1 - x\right)}\right) \]
      5. Applied rewrites98.5%

        \[\leadsto 1 - \log \color{blue}{\left(\left(y + 1\right) \cdot \left(1 - x\right)\right)} \]
    3. Recombined 2 regimes into one program.
    4. Final simplification 98.1%

      \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -0.84 \lor \neg \left(y \leq 1\right):\\ \;\;\;\;1 - \log \left(\frac{-1 + x}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\left(y + 1\right) \cdot \left(1 - x\right)\right)\\ \end{array} \]
    5. Add Preprocessing
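
    Keeping the first-order term of the same small-y expansion explains the middle regime of Alternatives 5 and 6:

    \[ \frac{1 - x}{1 - y} = \left(1 - x\right) \cdot \frac{1}{1 - y} \approx \left(1 - x\right) \left(1 + y\right) \qquad (|y| \ll 1), \]

    which is the (y + 1) * (1 - x) product inside the log.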

Alternative 6: 89.9% accurate, 1.0× speedup

    \[\begin{array}{l} \mathbf{if}\;y \leq -1:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{elif}\;y \leq 1:\\ \;\;\;\;1 - \log \left(\left(y + 1\right) \cdot \left(1 - x\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{x}{y}\right)\\ \end{array} \]
    (FPCore (x y)
     :precision binary64
     (if (<= y -1.0)
       (- 1.0 (log (/ -1.0 y)))
       (if (<= y 1.0)
         (- 1.0 (log (* (+ y 1.0) (- 1.0 x))))
         (- 1.0 (log (/ x y))))))
    double code(double x, double y) {
    	double tmp;
    	if (y <= -1.0) {
    		tmp = 1.0 - log((-1.0 / y));
    	} else if (y <= 1.0) {
    		tmp = 1.0 - log(((y + 1.0) * (1.0 - x)));
    	} else {
    		tmp = 1.0 - log((x / y));
    	}
    	return tmp;
    }
    
    module fmin_fmax_functions
        implicit none
        private
        public fmax
        public fmin
    
        interface fmax
            module procedure fmax88
            module procedure fmax44
            module procedure fmax84
            module procedure fmax48
        end interface
        interface fmin
            module procedure fmin88
            module procedure fmin44
            module procedure fmin84
            module procedure fmin48
        end interface
    contains
        real(8) function fmax88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
        end function
        real(4) function fmax44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(y, merge(x, max(x, y), y /= y), x /= x)
        end function
        real(8) function fmax84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
        end function
        real(8) function fmax48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
        end function
        real(8) function fmin88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
        end function
        real(4) function fmin44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(y, merge(x, min(x, y), y /= y), x /= x)
        end function
        real(8) function fmin84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
        end function
        real(8) function fmin48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
        end function
    end module
    
    real(8) function code(x, y)
    use fmin_fmax_functions
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        real(8) :: tmp
        if (y <= (-1.0d0)) then
            tmp = 1.0d0 - log(((-1.0d0) / y))
        else if (y <= 1.0d0) then
            tmp = 1.0d0 - log(((y + 1.0d0) * (1.0d0 - x)))
        else
            tmp = 1.0d0 - log((x / y))
        end if
        code = tmp
    end function
    
    public static double code(double x, double y) {
    	double tmp;
    	if (y <= -1.0) {
    		tmp = 1.0 - Math.log((-1.0 / y));
    	} else if (y <= 1.0) {
    		tmp = 1.0 - Math.log(((y + 1.0) * (1.0 - x)));
    	} else {
    		tmp = 1.0 - Math.log((x / y));
    	}
    	return tmp;
    }
    
    def code(x, y):
    	tmp = 0
    	if y <= -1.0:
    		tmp = 1.0 - math.log((-1.0 / y))
    	elif y <= 1.0:
    		tmp = 1.0 - math.log(((y + 1.0) * (1.0 - x)))
    	else:
    		tmp = 1.0 - math.log((x / y))
    	return tmp
    
    function code(x, y)
    	tmp = 0.0
    	if (y <= -1.0)
    		tmp = Float64(1.0 - log(Float64(-1.0 / y)));
    	elseif (y <= 1.0)
    		tmp = Float64(1.0 - log(Float64(Float64(y + 1.0) * Float64(1.0 - x))));
    	else
    		tmp = Float64(1.0 - log(Float64(x / y)));
    	end
    	return tmp
    end
    
    function tmp_2 = code(x, y)
    	tmp = 0.0;
    	if (y <= -1.0)
    		tmp = 1.0 - log((-1.0 / y));
    	elseif (y <= 1.0)
    		tmp = 1.0 - log(((y + 1.0) * (1.0 - x)));
    	else
    		tmp = 1.0 - log((x / y));
    	end
    	tmp_2 = tmp;
    end
    
    code[x_, y_] := If[LessEqual[y, -1.0], N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 1.0], N[(1.0 - N[Log[N[(N[(y + 1.0), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]
    
    \begin{array}{l}
    \mathbf{if}\;y \leq -1:\\
    \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\
    \mathbf{elif}\;y \leq 1:\\
    \;\;\;\;1 - \log \left(\left(y + 1\right) \cdot \left(1 - x\right)\right)\\
    \mathbf{else}:\\
    \;\;\;\;1 - \log \left(\frac{x}{y}\right)\\
    \end{array}
    
    Derivation
    1. Split input into 3 regimes
    2. if y < -1

      1. Initial program 24.6%

        \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
      2. Add Preprocessing
      3. Taylor expanded in y around inf

        \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{1 + -1 \cdot x}{y}\right)} \]
      4. Step-by-step derivation
        1. mul-1-negN/A

          \[\leadsto 1 - \log \color{blue}{\left(\mathsf{neg}\left(\frac{1 + -1 \cdot x}{y}\right)\right)} \]
        2. distribute-neg-fracN/A

          \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
        3. lower-/.f64N/A

          \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
        4. distribute-neg-inN/A

          \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}}{y}\right) \]
        5. metadata-evalN/A

          \[\leadsto 1 - \log \left(\frac{\color{blue}{-1} + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}{y}\right) \]
        6. mul-1-negN/A

          \[\leadsto 1 - \log \left(\frac{-1 + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)}\right)\right)}{y}\right) \]
        7. remove-double-negN/A

          \[\leadsto 1 - \log \left(\frac{-1 + \color{blue}{x}}{y}\right) \]
        8. lower-+.f6497.3

          \[\leadsto 1 - \log \left(\frac{\color{blue}{-1 + x}}{y}\right) \]
      5. Applied rewrites97.3%

        \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 + x}{y}\right)} \]
      6. Taylor expanded in x around 0

        \[\leadsto 1 - \log \left(\frac{-1}{\color{blue}{y}}\right) \]
      7. Step-by-step derivation
        1. Applied rewrites62.7%

          \[\leadsto 1 - \log \left(\frac{-1}{\color{blue}{y}}\right) \]

        if -1 < y < 1

        1. Initial program 100.0%

          \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
        2. Add Preprocessing
        3. Taylor expanded in y around 0

          \[\leadsto 1 - \log \color{blue}{\left(\left(1 + y \cdot \left(1 + -1 \cdot x\right)\right) - x\right)} \]
        4. Step-by-step derivation
          1. +-commutativeN/A

            \[\leadsto 1 - \log \left(\color{blue}{\left(y \cdot \left(1 + -1 \cdot x\right) + 1\right)} - x\right) \]
          2. associate--l+N/A

            \[\leadsto 1 - \log \color{blue}{\left(y \cdot \left(1 + -1 \cdot x\right) + \left(1 - x\right)\right)} \]
          3. *-lft-identityN/A

            \[\leadsto 1 - \log \left(y \cdot \left(1 + -1 \cdot x\right) + \left(1 - \color{blue}{1 \cdot x}\right)\right) \]
          4. metadata-evalN/A

            \[\leadsto 1 - \log \left(y \cdot \left(1 + -1 \cdot x\right) + \left(1 - \color{blue}{\left(\mathsf{neg}\left(-1\right)\right)} \cdot x\right)\right) \]
          5. fp-cancel-sign-sub-invN/A

            \[\leadsto 1 - \log \left(y \cdot \left(1 + -1 \cdot x\right) + \color{blue}{\left(1 + -1 \cdot x\right)}\right) \]
          6. +-commutativeN/A

            \[\leadsto 1 - \log \color{blue}{\left(\left(1 + -1 \cdot x\right) + y \cdot \left(1 + -1 \cdot x\right)\right)} \]
          7. distribute-rgt1-inN/A

            \[\leadsto 1 - \log \color{blue}{\left(\left(y + 1\right) \cdot \left(1 + -1 \cdot x\right)\right)} \]
          8. lower-*.f64N/A

            \[\leadsto 1 - \log \color{blue}{\left(\left(y + 1\right) \cdot \left(1 + -1 \cdot x\right)\right)} \]
          9. lower-+.f64N/A

            \[\leadsto 1 - \log \left(\color{blue}{\left(y + 1\right)} \cdot \left(1 + -1 \cdot x\right)\right) \]
          10. fp-cancel-sign-sub-invN/A

            \[\leadsto 1 - \log \left(\left(y + 1\right) \cdot \color{blue}{\left(1 - \left(\mathsf{neg}\left(-1\right)\right) \cdot x\right)}\right) \]
          11. metadata-evalN/A

            \[\leadsto 1 - \log \left(\left(y + 1\right) \cdot \left(1 - \color{blue}{1} \cdot x\right)\right) \]
          12. *-lft-identityN/A

            \[\leadsto 1 - \log \left(\left(y + 1\right) \cdot \left(1 - \color{blue}{x}\right)\right) \]
          13. lower--.f6498.5

            \[\leadsto 1 - \log \left(\left(y + 1\right) \cdot \color{blue}{\left(1 - x\right)}\right) \]
        5. Applied rewrites98.5%

          \[\leadsto 1 - \log \color{blue}{\left(\left(y + 1\right) \cdot \left(1 - x\right)\right)} \]

        if 1 < y

        1. Initial program 32.6%

          \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
        2. Add Preprocessing
        3. Taylor expanded in y around inf

          \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{1 + -1 \cdot x}{y}\right)} \]
        4. Step-by-step derivation
          1. mul-1-negN/A

            \[\leadsto 1 - \log \color{blue}{\left(\mathsf{neg}\left(\frac{1 + -1 \cdot x}{y}\right)\right)} \]
          2. distribute-neg-fracN/A

            \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
          3. lower-/.f64N/A

            \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
          4. distribute-neg-inN/A

            \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}}{y}\right) \]
          5. metadata-evalN/A

            \[\leadsto 1 - \log \left(\frac{\color{blue}{-1} + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}{y}\right) \]
          6. mul-1-negN/A

            \[\leadsto 1 - \log \left(\frac{-1 + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)}\right)\right)}{y}\right) \]
          7. remove-double-negN/A

            \[\leadsto 1 - \log \left(\frac{-1 + \color{blue}{x}}{y}\right) \]
          8. lower-+.f6496.7

            \[\leadsto 1 - \log \left(\frac{\color{blue}{-1 + x}}{y}\right) \]
        5. Applied rewrites96.7%

          \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 + x}{y}\right)} \]
        6. Taylor expanded in x around inf

          \[\leadsto 1 - \log \left(\frac{x}{\color{blue}{y}}\right) \]
        7. Step-by-step derivation
          1. Applied rewrites93.1%

            \[\leadsto 1 - \log \left(\frac{x}{\color{blue}{y}}\right) \]
    3. Recombined 3 regimes into one program.
    4. Add Preprocessing

Alternative 7: 89.3% accurate, 1.0× speedup

        \[\begin{array}{l} \mathbf{if}\;y \leq -20000000:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{elif}\;y \leq 1:\\ \;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{x}{y}\right)\\ \end{array} \]
        (FPCore (x y)
         :precision binary64
         (if (<= y -20000000.0)
           (- 1.0 (log (/ -1.0 y)))
           (if (<= y 1.0) (- 1.0 (log1p (- x))) (- 1.0 (log (/ x y))))))
        double code(double x, double y) {
        	double tmp;
        	if (y <= -20000000.0) {
        		tmp = 1.0 - log((-1.0 / y));
        	} else if (y <= 1.0) {
        		tmp = 1.0 - log1p(-x);
        	} else {
        		tmp = 1.0 - log((x / y));
        	}
        	return tmp;
        }
        
        public static double code(double x, double y) {
        	double tmp;
        	if (y <= -20000000.0) {
        		tmp = 1.0 - Math.log((-1.0 / y));
        	} else if (y <= 1.0) {
        		tmp = 1.0 - Math.log1p(-x);
        	} else {
        		tmp = 1.0 - Math.log((x / y));
        	}
        	return tmp;
        }
        
        def code(x, y):
        	tmp = 0
        	if y <= -20000000.0:
        		tmp = 1.0 - math.log((-1.0 / y))
        	elif y <= 1.0:
        		tmp = 1.0 - math.log1p(-x)
        	else:
        		tmp = 1.0 - math.log((x / y))
        	return tmp
        
        function code(x, y)
        	tmp = 0.0
        	if (y <= -20000000.0)
        		tmp = Float64(1.0 - log(Float64(-1.0 / y)));
        	elseif (y <= 1.0)
        		tmp = Float64(1.0 - log1p(Float64(-x)));
        	else
        		tmp = Float64(1.0 - log(Float64(x / y)));
        	end
        	return tmp
        end
        
        code[x_, y_] := If[LessEqual[y, -20000000.0], N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 1.0], N[(1.0 - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]
        
        \begin{array}{l}
        \mathbf{if}\;y \leq -20000000:\\
        \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\
        \mathbf{elif}\;y \leq 1:\\
        \;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\
        \mathbf{else}:\\
        \;\;\;\;1 - \log \left(\frac{x}{y}\right)\\
        \end{array}
        
        Derivation
        1. Split input into 3 regimes
        2. if y < -2e7

          1. Initial program 22.2%

            \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
          2. Add Preprocessing
          3. Taylor expanded in y around inf

            \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{1 + -1 \cdot x}{y}\right)} \]
          4. Step-by-step derivation
            1. mul-1-neg N/A

              \[\leadsto 1 - \log \color{blue}{\left(\mathsf{neg}\left(\frac{1 + -1 \cdot x}{y}\right)\right)} \]
            2. distribute-neg-frac N/A

              \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
            3. lower-/.f64 N/A

              \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
            4. distribute-neg-in N/A

              \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}}{y}\right) \]
            5. metadata-eval N/A

              \[\leadsto 1 - \log \left(\frac{\color{blue}{-1} + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}{y}\right) \]
            6. mul-1-neg N/A

              \[\leadsto 1 - \log \left(\frac{-1 + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)}\right)\right)}{y}\right) \]
            7. remove-double-neg N/A

              \[\leadsto 1 - \log \left(\frac{-1 + \color{blue}{x}}{y}\right) \]
            8. lower-+.f64 98.9%

              \[\leadsto 1 - \log \left(\frac{\color{blue}{-1 + x}}{y}\right) \]
          5. Applied rewrites 98.9%

            \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 + x}{y}\right)} \]
          6. Taylor expanded in x around 0

            \[\leadsto 1 - \log \left(\frac{-1}{\color{blue}{y}}\right) \]
          7. Step-by-step derivation
            1. Applied rewrites 64.2%

              \[\leadsto 1 - \log \left(\frac{-1}{\color{blue}{y}}\right) \]

        3. if -2e7 < y < 1

            1. Initial program 100.0%

              \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
            2. Add Preprocessing
            3. Taylor expanded in y around 0

              \[\leadsto 1 - \color{blue}{\log \left(1 - x\right)} \]
            4. Step-by-step derivation
              1. *-lft-identity N/A

                \[\leadsto 1 - \log \left(1 - \color{blue}{1 \cdot x}\right) \]
              2. metadata-eval N/A

                \[\leadsto 1 - \log \left(1 - \color{blue}{\left(\mathsf{neg}\left(-1\right)\right)} \cdot x\right) \]
              3. fp-cancel-sign-sub-inv N/A

                \[\leadsto 1 - \log \color{blue}{\left(1 + -1 \cdot x\right)} \]
              4. lower-log1p.f64 N/A

                \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
              5. mul-1-neg N/A

                \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left(x\right)}\right) \]
              6. lower-neg.f64 96.8%

                \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
            5. Applied rewrites 96.8%

              \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]

        4. if 1 < y

            1. Initial program 32.6%

              \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
            2. Add Preprocessing
            3. Taylor expanded in y around inf

              \[\leadsto 1 - \log \color{blue}{\left(-1 \cdot \frac{1 + -1 \cdot x}{y}\right)} \]
            4. Step-by-step derivation
              1. mul-1-neg N/A

                \[\leadsto 1 - \log \color{blue}{\left(\mathsf{neg}\left(\frac{1 + -1 \cdot x}{y}\right)\right)} \]
              2. distribute-neg-frac N/A

                \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
              3. lower-/.f64 N/A

                \[\leadsto 1 - \log \color{blue}{\left(\frac{\mathsf{neg}\left(\left(1 + -1 \cdot x\right)\right)}{y}\right)} \]
              4. distribute-neg-in N/A

                \[\leadsto 1 - \log \left(\frac{\color{blue}{\left(\mathsf{neg}\left(1\right)\right) + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}}{y}\right) \]
              5. metadata-eval N/A

                \[\leadsto 1 - \log \left(\frac{\color{blue}{-1} + \left(\mathsf{neg}\left(-1 \cdot x\right)\right)}{y}\right) \]
              6. mul-1-neg N/A

                \[\leadsto 1 - \log \left(\frac{-1 + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)}\right)\right)}{y}\right) \]
              7. remove-double-neg N/A

                \[\leadsto 1 - \log \left(\frac{-1 + \color{blue}{x}}{y}\right) \]
              8. lower-+.f64 96.7%

                \[\leadsto 1 - \log \left(\frac{\color{blue}{-1 + x}}{y}\right) \]
            5. Applied rewrites 96.7%

              \[\leadsto 1 - \log \color{blue}{\left(\frac{-1 + x}{y}\right)} \]
            6. Taylor expanded in x around inf

              \[\leadsto 1 - \log \left(\frac{x}{\color{blue}{y}}\right) \]
            7. Step-by-step derivation
              1. Applied rewrites 93.1%

                \[\leadsto 1 - \log \left(\frac{x}{\color{blue}{y}}\right) \]
        5. Recombined 3 regimes into one program.
        6. Add Preprocessing
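
        A minimal Python sketch for spot-checking this three-regime program against the specification evaluated at high precision. It assumes mpmath is installed; the names alt7 and spec_reference are illustrative, not part of the Herbie output, and the regime thresholds are the ones chosen above.

        import math
        from mpmath import mp, mpf, log as mlog

        def alt7(x, y):
            # Alternative 7: the regime split suggested above.
            if y <= -20000000.0:
                return 1.0 - math.log(-1.0 / y)
            elif y <= 1.0:
                return 1.0 - math.log1p(-x)
            else:
                return 1.0 - math.log(x / y)

        def spec_reference(x, y, prec=200):
            # The specification 1 - log(1 - (x - y)/(1 - y)) at `prec` bits.
            mp.prec = prec
            x, y = mpf(x), mpf(y)
            return 1 - mlog(1 - (x - y) / (1 - y))

        # One sample point per regime; the printout shows how close each branch stays
        # to the high-precision reference.
        for x, y in [(1e-12, -3e7), (1e-12, 1e-9), (1e10, 1e20)]:
            print(x, y, alt7(x, y), float(spec_reference(x, y)))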

            Alternative 8: 63.4% accurate, 1.2× speedup?

            \[\begin{array}{l} \\ 1 - \mathsf{log1p}\left(-x\right) \end{array} \]
            (FPCore (x y) :precision binary64 (- 1.0 (log1p (- x))))
            double code(double x, double y) {
            	return 1.0 - log1p(-x);
            }
            
            public static double code(double x, double y) {
            	return 1.0 - Math.log1p(-x);
            }
            
            def code(x, y):
            	return 1.0 - math.log1p(-x)
            
            function code(x, y)
            	return Float64(1.0 - log1p(Float64(-x)))
            end
            
            code[x_, y_] := N[(1.0 - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]
            
            \begin{array}{l}
            
            \\
            1 - \mathsf{log1p}\left(-x\right)
            \end{array}
            
            Derivation
            1. Initial program 74.8%

              \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
            2. Add Preprocessing
            3. Taylor expanded in y around 0

              \[\leadsto 1 - \color{blue}{\log \left(1 - x\right)} \]
            4. Step-by-step derivation
              1. *-lft-identity N/A

                \[\leadsto 1 - \log \left(1 - \color{blue}{1 \cdot x}\right) \]
              2. metadata-eval N/A

                \[\leadsto 1 - \log \left(1 - \color{blue}{\left(\mathsf{neg}\left(-1\right)\right)} \cdot x\right) \]
              3. fp-cancel-sign-sub-inv N/A

                \[\leadsto 1 - \log \color{blue}{\left(1 + -1 \cdot x\right)} \]
              4. lower-log1p.f64 N/A

                \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
              5. mul-1-neg N/A

                \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left(x\right)}\right) \]
              6. lower-neg.f64 67.3%

                \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
            5. Applied rewrites 67.3%

              \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]
            6. Add Preprocessing
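
            A small illustration (not part of the Herbie output) of why the log1p rewrite survives tiny x where the original subtraction does not: for |x| below roughly 1e-16, 1.0 - x rounds to exactly 1.0 in binary64, so the log sees no trace of x.

            import math

            x = 1e-20
            print(math.log(1.0 - x))   # 0.0 -- the subtraction already discarded x
            print(math.log1p(-x))      # -1e-20, the correctly rounded leading term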

            Alternative 9: 44.1% accurate, 20.7× speedup?

            \[\begin{array}{l} \\ 1 - \left(-x\right) \end{array} \]
            (FPCore (x y) :precision binary64 (- 1.0 (- x)))
            double code(double x, double y) {
            	return 1.0 - -x;
            }
            
            module fmin_fmax_functions
                implicit none
                private
                public fmax
                public fmin
            
                interface fmax
                    module procedure fmax88
                    module procedure fmax44
                    module procedure fmax84
                    module procedure fmax48
                end interface
                interface fmin
                    module procedure fmin88
                    module procedure fmin44
                    module procedure fmin84
                    module procedure fmin48
                end interface
            contains
                real(8) function fmax88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                real(4) function fmax44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                real(8) function fmax84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                end function
                real(8) function fmax48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                end function
                real(8) function fmin88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                real(4) function fmin44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                real(8) function fmin84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                end function
                real(8) function fmin48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                end function
            end module
            
            real(8) function code(x, y)
            use fmin_fmax_functions
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                code = 1.0d0 - (-x)
            end function
            
            public static double code(double x, double y) {
            	return 1.0 - -x;
            }
            
            def code(x, y):
            	return 1.0 - -x
            
            function code(x, y)
            	return Float64(1.0 - Float64(-x))
            end
            
            function tmp = code(x, y)
            	tmp = 1.0 - -x;
            end
            
            code[x_, y_] := N[(1.0 - (-x)), $MachinePrecision]
            
            \begin{array}{l}
            
            \\
            1 - \left(-x\right)
            \end{array}
            
            Derivation
            1. Initial program 74.8%

              \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
            2. Add Preprocessing
            3. Taylor expanded in y around 0

              \[\leadsto 1 - \color{blue}{\log \left(1 - x\right)} \]
            4. Step-by-step derivation
              1. *-lft-identity N/A

                \[\leadsto 1 - \log \left(1 - \color{blue}{1 \cdot x}\right) \]
              2. metadata-eval N/A

                \[\leadsto 1 - \log \left(1 - \color{blue}{\left(\mathsf{neg}\left(-1\right)\right)} \cdot x\right) \]
              3. fp-cancel-sign-sub-inv N/A

                \[\leadsto 1 - \log \color{blue}{\left(1 + -1 \cdot x\right)} \]
              4. lower-log1p.f64 N/A

                \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
              5. mul-1-neg N/A

                \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\mathsf{neg}\left(x\right)}\right) \]
              6. lower-neg.f64 67.3%

                \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
            5. Applied rewrites 67.3%

              \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]
            6. Taylor expanded in x around 0

              \[\leadsto 1 - -1 \cdot \color{blue}{x} \]
            7. Step-by-step derivation
              1. Applied rewrites 48.9%

                \[\leadsto 1 - \left(-x\right) \]
            8. Add Preprocessing
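
              Alternative 9 keeps only the first Taylor term, log1p(-x) ≈ -x, which is where the large speedup and the drop in accuracy both come from. A quick Python comparison against the log1p form (illustrative, not part of the report):

              import math

              for x in (1e-12, 1e-3, 0.5):
                  print(x, 1.0 - (-x), 1.0 - math.log1p(-x))
              # x = 1e-12: the two agree to full double precision
              # x = 1e-3:  1.001 vs ~1.0010005 (relative error ~5e-7)
              # x = 0.5:   1.5 vs ~1.6931 (the linear term is badly off)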

              Developer Target 1: 99.8% accurate, 0.5× speedup?

              \[\begin{array}{l} \\ \begin{array}{l} t_0 := 1 - \log \left(\frac{x}{y \cdot y} - \left(\frac{1}{y} - \frac{x}{y}\right)\right)\\ \mathbf{if}\;y < -81284752.61947241:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;y < 3.0094271212461764 \cdot 10^{+25}:\\ \;\;\;\;\log \left(\frac{e^{1}}{1 - \frac{x - y}{1 - y}}\right)\\ \mathbf{else}:\\ \;\;\;\;t\_0\\ \end{array} \end{array} \]
              (FPCore (x y)
               :precision binary64
               (let* ((t_0 (- 1.0 (log (- (/ x (* y y)) (- (/ 1.0 y) (/ x y)))))))
                 (if (< y -81284752.61947241)
                   t_0
                   (if (< y 3.0094271212461764e+25)
                     (log (/ (exp 1.0) (- 1.0 (/ (- x y) (- 1.0 y)))))
                     t_0))))
              double code(double x, double y) {
              	double t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
              	double tmp;
              	if (y < -81284752.61947241) {
              		tmp = t_0;
              	} else if (y < 3.0094271212461764e+25) {
              		tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
              	} else {
              		tmp = t_0;
              	}
              	return tmp;
              }
              
              module fmin_fmax_functions
                  implicit none
                  private
                  public fmax
                  public fmin
              
                  interface fmax
                      module procedure fmax88
                      module procedure fmax44
                      module procedure fmax84
                      module procedure fmax48
                  end interface
                  interface fmin
                      module procedure fmin88
                      module procedure fmin44
                      module procedure fmin84
                      module procedure fmin48
                  end interface
              contains
                  real(8) function fmax88(x, y) result (res)
                      real(8), intent (in) :: x
                      real(8), intent (in) :: y
                      res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                  end function
                  real(4) function fmax44(x, y) result (res)
                      real(4), intent (in) :: x
                      real(4), intent (in) :: y
                      res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                  end function
                  real(8) function fmax84(x, y) result(res)
                      real(8), intent (in) :: x
                      real(4), intent (in) :: y
                      res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                  end function
                  real(8) function fmax48(x, y) result(res)
                      real(4), intent (in) :: x
                      real(8), intent (in) :: y
                      res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                  end function
                  real(8) function fmin88(x, y) result (res)
                      real(8), intent (in) :: x
                      real(8), intent (in) :: y
                      res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                  end function
                  real(4) function fmin44(x, y) result (res)
                      real(4), intent (in) :: x
                      real(4), intent (in) :: y
                      res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                  end function
                  real(8) function fmin84(x, y) result(res)
                      real(8), intent (in) :: x
                      real(4), intent (in) :: y
                      res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                  end function
                  real(8) function fmin48(x, y) result(res)
                      real(4), intent (in) :: x
                      real(8), intent (in) :: y
                      res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                  end function
              end module
              
              real(8) function code(x, y)
              use fmin_fmax_functions
                  real(8), intent (in) :: x
                  real(8), intent (in) :: y
                  real(8) :: t_0
                  real(8) :: tmp
                  t_0 = 1.0d0 - log(((x / (y * y)) - ((1.0d0 / y) - (x / y))))
                  if (y < (-81284752.61947241d0)) then
                      tmp = t_0
                  else if (y < 3.0094271212461764d+25) then
                      tmp = log((exp(1.0d0) / (1.0d0 - ((x - y) / (1.0d0 - y)))))
                  else
                      tmp = t_0
                  end if
                  code = tmp
              end function
              
              public static double code(double x, double y) {
              	double t_0 = 1.0 - Math.log(((x / (y * y)) - ((1.0 / y) - (x / y))));
              	double tmp;
              	if (y < -81284752.61947241) {
              		tmp = t_0;
              	} else if (y < 3.0094271212461764e+25) {
              		tmp = Math.log((Math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
              	} else {
              		tmp = t_0;
              	}
              	return tmp;
              }
              
              def code(x, y):
              	t_0 = 1.0 - math.log(((x / (y * y)) - ((1.0 / y) - (x / y))))
              	tmp = 0
              	if y < -81284752.61947241:
              		tmp = t_0
              	elif y < 3.0094271212461764e+25:
              		tmp = math.log((math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))))
              	else:
              		tmp = t_0
              	return tmp
              
              function code(x, y)
              	t_0 = Float64(1.0 - log(Float64(Float64(x / Float64(y * y)) - Float64(Float64(1.0 / y) - Float64(x / y)))))
              	tmp = 0.0
              	if (y < -81284752.61947241)
              		tmp = t_0;
              	elseif (y < 3.0094271212461764e+25)
              		tmp = log(Float64(exp(1.0) / Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))));
              	else
              		tmp = t_0;
              	end
              	return tmp
              end
              
              function tmp_2 = code(x, y)
              	t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
              	tmp = 0.0;
              	if (y < -81284752.61947241)
              		tmp = t_0;
              	elseif (y < 3.0094271212461764e+25)
              		tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
              	else
              		tmp = t_0;
              	end
              	tmp_2 = tmp;
              end
              
              code[x_, y_] := Block[{t$95$0 = N[(1.0 - N[Log[N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - N[(N[(1.0 / y), $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[Less[y, -81284752.61947241], t$95$0, If[Less[y, 3.0094271212461764e+25], N[Log[N[(N[Exp[1.0], $MachinePrecision] / N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], t$95$0]]]
              
              \begin{array}{l}
              
              \\
              \begin{array}{l}
              t_0 := 1 - \log \left(\frac{x}{y \cdot y} - \left(\frac{1}{y} - \frac{x}{y}\right)\right)\\
              \mathbf{if}\;y < -81284752.61947241:\\
              \;\;\;\;t\_0\\
              
              \mathbf{elif}\;y < 3.0094271212461764 \cdot 10^{+25}:\\
              \;\;\;\;\log \left(\frac{e^{1}}{1 - \frac{x - y}{1 - y}}\right)\\
              
              \mathbf{else}:\\
              \;\;\;\;t\_0\\
              
              
              \end{array}
              \end{array}
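
              For reference, the middle branch of this target is algebraically the same expression as the specification: writing the leading 1 as log e folds the subtraction into the logarithm, a standard identity added here to connect the two forms.

              \[ 1 - \log z = \log e - \log z = \log \left(\frac{e}{z}\right), \qquad z = 1 - \frac{x - y}{1 - y} \]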
              

              Reproduce

              ?
              herbie shell --seed 2024359 
              (FPCore (x y)
                :name "Numeric.SpecFunctions:invIncompleteGamma from math-functions-0.1.5.2, B"
                :precision binary64
              
                :alt
                (! :herbie-platform default (if (< y -8128475261947241/100000000) (- 1 (log (- (/ x (* y y)) (- (/ 1 y) (/ x y))))) (if (< y 30094271212461764000000000) (log (/ (exp 1) (- 1 (/ (- x y) (- 1 y))))) (- 1 (log (- (/ x (* y y)) (- (/ 1 y) (/ x y))))))))
              
                (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))