Numeric.SpecFunctions:invIncompleteGamma from math-functions-0.1.5.2, B

Percentage Accurate: 72.8% → 99.7%
Time: 11.6s
Alternatives: 11
Speedup: 1.0×

Specification

\[ 1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
double code(double x, double y) {
	return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
public static double code(double x, double y) {
	return 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
}
def code(x, y):
	return 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
function code(x, y)
	return Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
end
function tmp = code(x, y)
	tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}
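
Why the specification loses accuracy: when |y| is large, (x - y)/(1 - y) rounds to exactly 1, the subtraction inside the logarithm cancels to 0, and all information about x is lost. A minimal sketch in Python (the helper names naive and via_identity are mine; via_identity uses the exact identity 1 - (x - y)/(1 - y) = (1 - x)/(1 - y), and is not Herbie's output):

import math

def naive(x, y):
	# Direct transcription of the specification.
	return 1.0 - math.log(1.0 - (x - y) / (1.0 - y))

def via_identity(x, y):
	# In exact arithmetic 1 - (x - y)/(1 - y) = (1 - x)/(1 - y),
	# so the result equals 1 - log1p(-x) + log(1 - y).
	return 1.0 - math.log1p(-x) + math.log(1.0 - y)

try:
	print(naive(0.5, -1e20))
except ValueError as err:
	# (0.5 - -1e20) and (1 - -1e20) both round to 1e20, the quotient
	# is exactly 1.0, and log(0.0) raises a domain error.
	print("naive form fails:", err)
print(via_identity(0.5, -1e20))  # ~47.74, close to the true value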

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable, chosen in the title; the vertical axis shows accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line is an average, while the dots are individual samples.

Accuracy vs Speed

Herbie found 11 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, each blue circle shows an alternative, and the line shows the best available speed-accuracy tradeoffs.

Initial Program: 72.8% accurate, 1.0× speedup

\[ 1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
double code(double x, double y) {
	return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
public static double code(double x, double y) {
	return 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
}
def code(x, y):
	return 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
function code(x, y)
	return Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
end
function tmp = code(x, y)
	tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}

Alternative 1: 99.7% accurate, 0.5× speedup

\[\begin{array}{l} \mathbf{if}\;y \leq -1750:\\ \;\;\;\;\left(1 + \frac{\frac{-0.5 + \frac{-0.3333333333333333}{y}}{y} + -1}{y}\right) - \left(\mathsf{log1p}\left(-x\right) + \log \left(\frac{-1}{y}\right)\right)\\ \mathbf{elif}\;y \leq 2.75 \cdot 10^{+23}:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(\log \left(x + -1\right) - \log y\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -1750.0)
   (-
    (+ 1.0 (/ (+ (/ (+ -0.5 (/ -0.3333333333333333 y)) y) -1.0) y))
    (+ (log1p (- x)) (log (/ -1.0 y))))
   (if (<= y 2.75e+23)
     (- 1.0 (log1p (/ (- x y) (+ y -1.0))))
     (- 1.0 (- (log (+ x -1.0)) (log y))))))
double code(double x, double y) {
	double tmp;
	if (y <= -1750.0) {
		tmp = (1.0 + ((((-0.5 + (-0.3333333333333333 / y)) / y) + -1.0) / y)) - (log1p(-x) + log((-1.0 / y)));
	} else if (y <= 2.75e+23) {
		tmp = 1.0 - log1p(((x - y) / (y + -1.0)));
	} else {
		tmp = 1.0 - (log((x + -1.0)) - log(y));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -1750.0) {
		tmp = (1.0 + ((((-0.5 + (-0.3333333333333333 / y)) / y) + -1.0) / y)) - (Math.log1p(-x) + Math.log((-1.0 / y)));
	} else if (y <= 2.75e+23) {
		tmp = 1.0 - Math.log1p(((x - y) / (y + -1.0)));
	} else {
		tmp = 1.0 - (Math.log((x + -1.0)) - Math.log(y));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -1750.0:
		tmp = (1.0 + ((((-0.5 + (-0.3333333333333333 / y)) / y) + -1.0) / y)) - (math.log1p(-x) + math.log((-1.0 / y)))
	elif y <= 2.75e+23:
		tmp = 1.0 - math.log1p(((x - y) / (y + -1.0)))
	else:
		tmp = 1.0 - (math.log((x + -1.0)) - math.log(y))
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -1750.0)
		tmp = Float64(Float64(1.0 + Float64(Float64(Float64(Float64(-0.5 + Float64(-0.3333333333333333 / y)) / y) + -1.0) / y)) - Float64(log1p(Float64(-x)) + log(Float64(-1.0 / y))));
	elseif (y <= 2.75e+23)
		tmp = Float64(1.0 - log1p(Float64(Float64(x - y) / Float64(y + -1.0))));
	else
		tmp = Float64(1.0 - Float64(log(Float64(x + -1.0)) - log(y)));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -1750.0], N[(N[(1.0 + N[(N[(N[(N[(-0.5 + N[(-0.3333333333333333 / y), $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision] + -1.0), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision] - N[(N[Log[1 + (-x)], $MachinePrecision] + N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 2.75e+23], N[(1.0 - N[Log[1 + N[(N[(x - y), $MachinePrecision] / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[(N[Log[N[(x + -1.0), $MachinePrecision]], $MachinePrecision] - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\mathbf{if}\;y \leq -1750:\\
\;\;\;\;\left(1 + \frac{\frac{-0.5 + \frac{-0.3333333333333333}{y}}{y} + -1}{y}\right) - \left(\mathsf{log1p}\left(-x\right) + \log \left(\frac{-1}{y}\right)\right)\\
\mathbf{elif}\;y \leq 2.75 \cdot 10^{+23}:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \left(\log \left(x + -1\right) - \log y\right)\\
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < -1750

    1. Initial program 16.9%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg16.9%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define16.9%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac216.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub016.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified16.9%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around -inf 87.5%

      \[\leadsto \color{blue}{\left(1 + -1 \cdot \frac{\left(-1 \cdot \frac{0.16666666666666666 \cdot \frac{-6 \cdot \frac{1 - x}{x - 1} + \left(2 \cdot \frac{{\left(1 - x\right)}^{3}}{{\left(x - 1\right)}^{3}} + 6 \cdot \frac{1 - x}{x - 1}\right)}{y} - 0.5 \cdot \left(2 + -1 \cdot \frac{{\left(1 - x\right)}^{2}}{{\left(x - 1\right)}^{2}}\right)}{y} + \frac{x}{x - 1}\right) - \frac{1}{x - 1}}{y}\right) - \left(\log \left(-1 \cdot \left(x - 1\right)\right) + \log \left(\frac{-1}{y}\right)\right)} \]
    6. Simplified99.7%

      \[\leadsto \color{blue}{\left(1 - \frac{1 - \frac{-0.5 + \frac{-0.3333333333333333}{y}}{y}}{y}\right) - \left(\mathsf{log1p}\left(-x\right) + \log \left(\frac{-1}{y}\right)\right)} \]

    if -1750 < y < 2.75e23

    1. Initial program 100.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg100.0%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define100.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac2100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub0100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing

    if 2.75e23 < y

    1. Initial program 65.1%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg65.1%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define65.1%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac265.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub065.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-65.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval65.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative65.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified65.1%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 98.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec98.2%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg98.2%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg98.2%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval98.2%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified98.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -1750:\\ \;\;\;\;\left(1 + \frac{\frac{-0.5 + \frac{-0.3333333333333333}{y}}{y} + -1}{y}\right) - \left(\mathsf{log1p}\left(-x\right) + \log \left(\frac{-1}{y}\right)\right)\\ \mathbf{elif}\;y \leq 2.75 \cdot 10^{+23}:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(\log \left(x + -1\right) - \log y\right)\\ \end{array} \]
  5. Add Preprocessing
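
Where the first branch's polynomial comes from: in exact arithmetic the argument of the logarithm simplifies to (1 - x)/(1 - y), and the y → -∞ behaviour splits off cleanly. A reconstruction of steps 5-6 (my algebra, for x < 1 and y < 0, not Herbie's internal trace):

\[ 1 - \log \left(1 - \frac{x - y}{1 - y}\right) = 1 - \log \left(\frac{1 - x}{1 - y}\right) = 1 - \mathsf{log1p}\left(-x\right) - \log \left(\frac{-1}{y}\right) + \log \left(1 - \frac{1}{y}\right) \]

\[ \log \left(1 - \frac{1}{y}\right) = -\frac{1}{y} - \frac{1}{2y^2} - \frac{1}{3y^3} - \cdots \]

Truncating this series after three terms gives exactly the correction ((-0.5 + (-0.3333333333333333)/y)/y + -1)/y that appears in the first branch.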

Alternative 2: 99.6% accurate, 0.5× speedup

\[\begin{array}{l} \mathbf{if}\;y \leq -2300000000:\\ \;\;\;\;1 - \left(\mathsf{log1p}\left(-x\right) + \log \left(\frac{-1}{y}\right)\right)\\ \mathbf{elif}\;y \leq 12500000000000:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(\log \left(x + -1\right) - \log y\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -2300000000.0)
   (- 1.0 (+ (log1p (- x)) (log (/ -1.0 y))))
   (if (<= y 12500000000000.0)
     (- 1.0 (log1p (/ (- x y) (+ y -1.0))))
     (- 1.0 (- (log (+ x -1.0)) (log y))))))
double code(double x, double y) {
	double tmp;
	if (y <= -2300000000.0) {
		tmp = 1.0 - (log1p(-x) + log((-1.0 / y)));
	} else if (y <= 12500000000000.0) {
		tmp = 1.0 - log1p(((x - y) / (y + -1.0)));
	} else {
		tmp = 1.0 - (log((x + -1.0)) - log(y));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -2300000000.0) {
		tmp = 1.0 - (Math.log1p(-x) + Math.log((-1.0 / y)));
	} else if (y <= 12500000000000.0) {
		tmp = 1.0 - Math.log1p(((x - y) / (y + -1.0)));
	} else {
		tmp = 1.0 - (Math.log((x + -1.0)) - Math.log(y));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -2300000000.0:
		tmp = 1.0 - (math.log1p(-x) + math.log((-1.0 / y)))
	elif y <= 12500000000000.0:
		tmp = 1.0 - math.log1p(((x - y) / (y + -1.0)))
	else:
		tmp = 1.0 - (math.log((x + -1.0)) - math.log(y))
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -2300000000.0)
		tmp = Float64(1.0 - Float64(log1p(Float64(-x)) + log(Float64(-1.0 / y))));
	elseif (y <= 12500000000000.0)
		tmp = Float64(1.0 - log1p(Float64(Float64(x - y) / Float64(y + -1.0))));
	else
		tmp = Float64(1.0 - Float64(log(Float64(x + -1.0)) - log(y)));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -2300000000.0], N[(1.0 - N[(N[Log[1 + (-x)], $MachinePrecision] + N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 12500000000000.0], N[(1.0 - N[Log[1 + N[(N[(x - y), $MachinePrecision] / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[(N[Log[N[(x + -1.0), $MachinePrecision]], $MachinePrecision] - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\mathbf{if}\;y \leq -2300000000:\\
\;\;\;\;1 - \left(\mathsf{log1p}\left(-x\right) + \log \left(\frac{-1}{y}\right)\right)\\
\mathbf{elif}\;y \leq 12500000000000:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \left(\log \left(x + -1\right) - \log y\right)\\
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < -2.3e9

    1. Initial program 14.5%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg14.5%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define14.5%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac214.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub014.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-14.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval14.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative14.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified14.5%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around -inf 99.4%

      \[\leadsto 1 - \color{blue}{\left(\log \left(-1 \cdot \left(x - 1\right)\right) + \log \left(\frac{-1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. sub-neg99.4%

        \[\leadsto 1 - \left(\log \left(-1 \cdot \color{blue}{\left(x + \left(-1\right)\right)}\right) + \log \left(\frac{-1}{y}\right)\right) \]
      2. metadata-eval99.4%

        \[\leadsto 1 - \left(\log \left(-1 \cdot \left(x + \color{blue}{-1}\right)\right) + \log \left(\frac{-1}{y}\right)\right) \]
      3. distribute-lft-in99.4%

        \[\leadsto 1 - \left(\log \color{blue}{\left(-1 \cdot x + -1 \cdot -1\right)} + \log \left(\frac{-1}{y}\right)\right) \]
      4. metadata-eval99.4%

        \[\leadsto 1 - \left(\log \left(-1 \cdot x + \color{blue}{1}\right) + \log \left(\frac{-1}{y}\right)\right) \]
      5. +-commutative99.4%

        \[\leadsto 1 - \left(\log \color{blue}{\left(1 + -1 \cdot x\right)} + \log \left(\frac{-1}{y}\right)\right) \]
      6. log1p-define99.4%

        \[\leadsto 1 - \left(\color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} + \log \left(\frac{-1}{y}\right)\right) \]
      7. mul-1-neg99.4%

        \[\leadsto 1 - \left(\mathsf{log1p}\left(\color{blue}{-x}\right) + \log \left(\frac{-1}{y}\right)\right) \]
    7. Simplified99.4%

      \[\leadsto 1 - \color{blue}{\left(\mathsf{log1p}\left(-x\right) + \log \left(\frac{-1}{y}\right)\right)} \]

    if -2.3e9 < y < 1.25e13

    1. Initial program 99.6%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg99.6%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define99.6%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac299.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub099.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-99.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval99.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative99.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified99.6%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing

    if 1.25e13 < y

    1. Initial program 65.1%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg65.1%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define65.1%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac265.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub065.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-65.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval65.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative65.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified65.1%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 98.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec98.2%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg98.2%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg98.2%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval98.2%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified98.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -2300000000:\\ \;\;\;\;1 - \left(\mathsf{log1p}\left(-x\right) + \log \left(\frac{-1}{y}\right)\right)\\ \mathbf{elif}\;y \leq 12500000000000:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(\log \left(x + -1\right) - \log y\right)\\ \end{array} \]
  5. Add Preprocessing
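
Since the regime boundaries are chosen empirically, adjacent branches should agree near a split point. A quick Python check of Alternative 2's first two branches at its y = -2.3e9 boundary (the branch names are mine, for illustration):

import math

def branch_neg_y(x, y):
	# First branch: asymptotic form for very negative y.
	return 1.0 - (math.log1p(-x) + math.log(-1.0 / y))

def branch_middle(x, y):
	# Second branch: the log1p rewrite of the original program.
	return 1.0 - math.log1p((x - y) / (y + -1.0))

x, y = 0.5, -2.3e9
print(branch_neg_y(x, y))   # ~23.2493
print(branch_middle(x, y))  # nearly identical, so the seam is smooth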

Alternative 3: 99.7% accurate, 0.5× speedup

\[\begin{array}{l} \mathbf{if}\;y \leq -460000:\\ \;\;\;\;1 + \left(\left(\frac{-1}{y} - \log \left(\frac{-1}{y}\right)\right) - \mathsf{log1p}\left(-x\right)\right)\\ \mathbf{elif}\;y \leq 48000000000000:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(\log \left(x + -1\right) - \log y\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -460000.0)
   (+ 1.0 (- (- (/ -1.0 y) (log (/ -1.0 y))) (log1p (- x))))
   (if (<= y 48000000000000.0)
     (- 1.0 (log1p (/ (- x y) (+ y -1.0))))
     (- 1.0 (- (log (+ x -1.0)) (log y))))))
double code(double x, double y) {
	double tmp;
	if (y <= -460000.0) {
		tmp = 1.0 + (((-1.0 / y) - log((-1.0 / y))) - log1p(-x));
	} else if (y <= 48000000000000.0) {
		tmp = 1.0 - log1p(((x - y) / (y + -1.0)));
	} else {
		tmp = 1.0 - (log((x + -1.0)) - log(y));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -460000.0) {
		tmp = 1.0 + (((-1.0 / y) - Math.log((-1.0 / y))) - Math.log1p(-x));
	} else if (y <= 48000000000000.0) {
		tmp = 1.0 - Math.log1p(((x - y) / (y + -1.0)));
	} else {
		tmp = 1.0 - (Math.log((x + -1.0)) - Math.log(y));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -460000.0:
		tmp = 1.0 + (((-1.0 / y) - math.log((-1.0 / y))) - math.log1p(-x))
	elif y <= 48000000000000.0:
		tmp = 1.0 - math.log1p(((x - y) / (y + -1.0)))
	else:
		tmp = 1.0 - (math.log((x + -1.0)) - math.log(y))
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -460000.0)
		tmp = Float64(1.0 + Float64(Float64(Float64(-1.0 / y) - log(Float64(-1.0 / y))) - log1p(Float64(-x))));
	elseif (y <= 48000000000000.0)
		tmp = Float64(1.0 - log1p(Float64(Float64(x - y) / Float64(y + -1.0))));
	else
		tmp = Float64(1.0 - Float64(log(Float64(x + -1.0)) - log(y)));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -460000.0], N[(1.0 + N[(N[(N[(-1.0 / y), $MachinePrecision] - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 48000000000000.0], N[(1.0 - N[Log[1 + N[(N[(x - y), $MachinePrecision] / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[(N[Log[N[(x + -1.0), $MachinePrecision]], $MachinePrecision] - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\mathbf{if}\;y \leq -460000:\\
\;\;\;\;1 + \left(\left(\frac{-1}{y} - \log \left(\frac{-1}{y}\right)\right) - \mathsf{log1p}\left(-x\right)\right)\\
\mathbf{elif}\;y \leq 48000000000000:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \left(\log \left(x + -1\right) - \log y\right)\\
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < -4.6e5

    1. Initial program 16.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg16.0%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define16.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac216.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub016.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-16.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval16.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative16.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified16.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around -inf 99.8%

      \[\leadsto 1 - \color{blue}{\left(\log \left(-1 \cdot \left(x - 1\right)\right) + \left(\log \left(\frac{-1}{y}\right) + -1 \cdot \frac{\frac{1}{x - 1} - \frac{x}{x - 1}}{y}\right)\right)} \]
    6. Simplified99.8%

      \[\leadsto 1 - \color{blue}{\left(\mathsf{log1p}\left(-x\right) + \left(\frac{1}{y} + \log \left(\frac{-1}{y}\right)\right)\right)} \]

    if -4.6e5 < y < 4.8e13

    1. Initial program 99.9%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg99.9%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define100.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac2100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub0100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing

    if 4.8e13 < y

    1. Initial program 65.1%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg65.1%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define65.1%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac265.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub065.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-65.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval65.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative65.1%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified65.1%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 98.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec98.2%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg98.2%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg98.2%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval98.2%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified98.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -460000:\\ \;\;\;\;1 + \left(\left(\frac{-1}{y} - \log \left(\frac{-1}{y}\right)\right) - \mathsf{log1p}\left(-x\right)\right)\\ \mathbf{elif}\;y \leq 48000000000000:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(\log \left(x + -1\right) - \log y\right)\\ \end{array} \]
  5. Add Preprocessing
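
Alternative 3's negative-y branch is the same y → -∞ expansion cut one term earlier (my comparison, not stated in the report):

\[ 1 + \left(\left(\frac{-1}{y} - \log \left(\frac{-1}{y}\right)\right) - \mathsf{log1p}\left(-x\right)\right) = 1 - \mathsf{log1p}\left(-x\right) - \log \left(\frac{-1}{y}\right) - \frac{1}{y} \]

With only the first-order correction -1/y retained, the branch needs a larger |y| to reach the same accuracy, which is consistent with the threshold moving from y ≤ -1750 in Alternative 1 to y ≤ -460000 here.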

Alternative 4: 91.1% accurate, 0.9× speedup

\[\begin{array}{l} t_0 := \frac{x - y}{y + -1}\\ \mathbf{if}\;1 + t_0 \leq 1.66 \cdot 10^{-8}:\\ \;\;\;\;\left(1 - \log \left(\frac{-1}{y}\right)\right) + \frac{-1}{y}\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(t_0\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (/ (- x y) (+ y -1.0))))
   (if (<= (+ 1.0 t_0) 1.66e-8)
     (+ (- 1.0 (log (/ -1.0 y))) (/ -1.0 y))
     (- 1.0 (log1p t_0)))))
double code(double x, double y) {
	double t_0 = (x - y) / (y + -1.0);
	double tmp;
	if ((1.0 + t_0) <= 1.66e-8) {
		tmp = (1.0 - log((-1.0 / y))) + (-1.0 / y);
	} else {
		tmp = 1.0 - log1p(t_0);
	}
	return tmp;
}
public static double code(double x, double y) {
	double t_0 = (x - y) / (y + -1.0);
	double tmp;
	if ((1.0 + t_0) <= 1.66e-8) {
		tmp = (1.0 - Math.log((-1.0 / y))) + (-1.0 / y);
	} else {
		tmp = 1.0 - Math.log1p(t_0);
	}
	return tmp;
}
def code(x, y):
	t_0 = (x - y) / (y + -1.0)
	tmp = 0
	if (1.0 + t_0) <= 1.66e-8:
		tmp = (1.0 - math.log((-1.0 / y))) + (-1.0 / y)
	else:
		tmp = 1.0 - math.log1p(t_0)
	return tmp
function code(x, y)
	t_0 = Float64(Float64(x - y) / Float64(y + -1.0))
	tmp = 0.0
	if (Float64(1.0 + t_0) <= 1.66e-8)
		tmp = Float64(Float64(1.0 - log(Float64(-1.0 / y))) + Float64(-1.0 / y));
	else
		tmp = Float64(1.0 - log1p(t_0));
	end
	return tmp
end
code[x_, y_] := Block[{t$95$0 = N[(N[(x - y), $MachinePrecision] / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(1.0 + t$95$0), $MachinePrecision], 1.66e-8], N[(N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(-1.0 / y), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[1 + t$95$0], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := \frac{x - y}{y + -1}\\
\mathbf{if}\;1 + t_0 \leq 1.66 \cdot 10^{-8}:\\
\;\;\;\;\left(1 - \log \left(\frac{-1}{y}\right)\right) + \frac{-1}{y}\\
\mathbf{else}:\\
\;\;\;\;1 - \mathsf{log1p}\left(t_0\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if 1 - (x - y)/(1 - y) < 1.66e-8

    1. Initial program 6.6%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg6.6%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define6.6%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac26.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub06.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-6.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval6.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative6.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified6.6%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 6.6%

      \[\leadsto 1 - \color{blue}{\log \left(1 - \frac{y}{y - 1}\right)} \]
    6. Step-by-step derivation
      1. sub-neg6.6%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{y}{y - 1}\right)\right)} \]
      2. mul-1-neg6.6%

        \[\leadsto 1 - \log \left(1 + \color{blue}{-1 \cdot \frac{y}{y - 1}}\right) \]
      3. sub-neg6.6%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{\color{blue}{y + \left(-1\right)}}\right) \]
      4. metadata-eval6.6%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{y + \color{blue}{-1}}\right) \]
      5. neg-mul-16.6%

        \[\leadsto 1 - \log \left(1 + \color{blue}{\left(-\frac{y}{y + -1}\right)}\right) \]
      6. log1p-define6.6%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{y}{y + -1}\right)} \]
      7. distribute-neg-frac26.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{y}{-\left(y + -1\right)}}\right) \]
      8. +-commutative6.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{-\color{blue}{\left(-1 + y\right)}}\right) \]
      9. distribute-neg-in6.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{\left(--1\right) + \left(-y\right)}}\right) \]
      10. metadata-eval6.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1} + \left(-y\right)}\right) \]
      11. unsub-neg6.6%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1 - y}}\right) \]
    7. Simplified6.6%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(\frac{y}{1 - y}\right)} \]
    8. Taylor expanded in y around -inf 79.4%

      \[\leadsto \color{blue}{1 - \left(\log \left(\frac{-1}{y}\right) + \frac{1}{y}\right)} \]
    9. Step-by-step derivation
      1. associate--r+79.5%

        \[\leadsto \color{blue}{\left(1 - \log \left(\frac{-1}{y}\right)\right) - \frac{1}{y}} \]
    10. Simplified79.5%

      \[\leadsto \color{blue}{\left(1 - \log \left(\frac{-1}{y}\right)\right) - \frac{1}{y}} \]

    if 1.66e-8 < 1 - (x - y)/(1 - y)

    1. Initial program 99.8%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg99.8%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define99.8%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac299.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub099.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-99.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval99.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative99.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
  3. Recombined 2 regimes into one program.
  4. Final simplification93.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;1 + \frac{x - y}{y + -1} \leq 1.66 \cdot 10^{-8}:\\ \;\;\;\;\left(1 - \log \left(\frac{-1}{y}\right)\right) + \frac{-1}{y}\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \end{array} \]
  5. Add Preprocessing
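
Unlike the other alternatives, Alternative 4 branches on the computed quotient itself: once 1 + t_0 drops below about 1.66e-8, t_0 has rounded so close to -1 that log1p(t_0) would lose nearly all of its significant digits. A small Python illustration (inputs chosen by me):

import math

x, y = 0.5, -1e18
t_0 = (x - y) / (y + -1.0)
print(t_0, 1.0 + t_0)  # -1.0 0.0: every bit of x's contribution is gone
# The guard fires and the x-independent asymptotic branch is used instead:
print((1.0 - math.log(-1.0 / y)) + (-1.0 / y))  # ~42.45

Note that this fallback drops the log1p(-x) term entirely, which is plausibly why Alternative 4 tops out at 91.1% accuracy.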

Alternative 5: 86.6% accurate, 1.0× speedup

\[\begin{array}{l} \mathbf{if}\;y \leq -8400000000:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -8400000000.0)
   (- 1.0 (log (/ -1.0 y)))
   (- 1.0 (log1p (/ (- x y) (+ y -1.0))))))
double code(double x, double y) {
	double tmp;
	if (y <= -8400000000.0) {
		tmp = 1.0 - log((-1.0 / y));
	} else {
		tmp = 1.0 - log1p(((x - y) / (y + -1.0)));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -8400000000.0) {
		tmp = 1.0 - Math.log((-1.0 / y));
	} else {
		tmp = 1.0 - Math.log1p(((x - y) / (y + -1.0)));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -8400000000.0:
		tmp = 1.0 - math.log((-1.0 / y))
	else:
		tmp = 1.0 - math.log1p(((x - y) / (y + -1.0)))
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -8400000000.0)
		tmp = Float64(1.0 - log(Float64(-1.0 / y)));
	else
		tmp = Float64(1.0 - log1p(Float64(Float64(x - y) / Float64(y + -1.0))));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -8400000000.0], N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[1 + N[(N[(x - y), $MachinePrecision] / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;y \leq -8400000000:\\
\;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if y < -8.4e9

    1. Initial program 14.5%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg14.5%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define14.5%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac214.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub014.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-14.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval14.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative14.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified14.5%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 4.8%

      \[\leadsto 1 - \color{blue}{\log \left(1 - \frac{y}{y - 1}\right)} \]
    6. Step-by-step derivation
      1. sub-neg4.8%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{y}{y - 1}\right)\right)} \]
      2. mul-1-neg4.8%

        \[\leadsto 1 - \log \left(1 + \color{blue}{-1 \cdot \frac{y}{y - 1}}\right) \]
      3. sub-neg4.8%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{\color{blue}{y + \left(-1\right)}}\right) \]
      4. metadata-eval4.8%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{y + \color{blue}{-1}}\right) \]
      5. neg-mul-14.8%

        \[\leadsto 1 - \log \left(1 + \color{blue}{\left(-\frac{y}{y + -1}\right)}\right) \]
      6. log1p-define4.8%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{y}{y + -1}\right)} \]
      7. distribute-neg-frac24.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{y}{-\left(y + -1\right)}}\right) \]
      8. +-commutative4.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{-\color{blue}{\left(-1 + y\right)}}\right) \]
      9. distribute-neg-in4.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{\left(--1\right) + \left(-y\right)}}\right) \]
      10. metadata-eval4.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1} + \left(-y\right)}\right) \]
      11. unsub-neg4.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1 - y}}\right) \]
    7. Simplified4.8%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(\frac{y}{1 - y}\right)} \]
    8. Taylor expanded in y around -inf 79.9%

      \[\leadsto 1 - \color{blue}{\log \left(\frac{-1}{y}\right)} \]

    if -8.4e9 < y

    1. Initial program 94.7%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg94.7%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define94.7%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac294.7%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub094.7%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-94.7%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval94.7%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative94.7%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified94.7%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
  3. Recombined 2 regimes into one program.
  4. Final simplification90.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -8400000000:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \end{array} \]
  5. Add Preprocessing
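
Read against the same y → -∞ expansion, Alternative 5's first branch keeps only the leading term (my comparison, not stated in the report):

\[ 1 - \mathsf{log1p}\left(-x\right) - \log \left(\frac{-1}{y}\right) - \frac{1}{y} - \cdots \;\approx\; 1 - \log \left(\frac{-1}{y}\right) \]

Dropping both the log1p(-x) term and all 1/y corrections recovers the original speed (1.0×) at the cost of accuracy (86.6%).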

Alternative 6: 85.3% accurate, 1.0× speedup

\[\begin{array}{l} \mathbf{if}\;y \leq -800:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x}{y + -1}\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -800.0) (- 1.0 (log (/ -1.0 y))) (- 1.0 (log1p (/ x (+ y -1.0))))))
double code(double x, double y) {
	double tmp;
	if (y <= -800.0) {
		tmp = 1.0 - log((-1.0 / y));
	} else {
		tmp = 1.0 - log1p((x / (y + -1.0)));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -800.0) {
		tmp = 1.0 - Math.log((-1.0 / y));
	} else {
		tmp = 1.0 - Math.log1p((x / (y + -1.0)));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -800.0:
		tmp = 1.0 - math.log((-1.0 / y))
	else:
		tmp = 1.0 - math.log1p((x / (y + -1.0)))
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -800.0)
		tmp = Float64(1.0 - log(Float64(-1.0 / y)));
	else
		tmp = Float64(1.0 - log1p(Float64(x / Float64(y + -1.0))));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -800.0], N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[1 + N[(x / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;y \leq -800:\\
\;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x}{y + -1}\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if y < -800

    1. Initial program 16.9%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg16.9%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define16.9%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac216.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub016.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified16.9%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 7.5%

      \[\leadsto 1 - \color{blue}{\log \left(1 - \frac{y}{y - 1}\right)} \]
    6. Step-by-step derivation
      1. sub-neg7.5%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{y}{y - 1}\right)\right)} \]
      2. mul-1-neg7.5%

        \[\leadsto 1 - \log \left(1 + \color{blue}{-1 \cdot \frac{y}{y - 1}}\right) \]
      3. sub-neg7.5%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{\color{blue}{y + \left(-1\right)}}\right) \]
      4. metadata-eval7.5%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{y + \color{blue}{-1}}\right) \]
      5. neg-mul-17.5%

        \[\leadsto 1 - \log \left(1 + \color{blue}{\left(-\frac{y}{y + -1}\right)}\right) \]
      6. log1p-define7.5%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{y}{y + -1}\right)} \]
      7. distribute-neg-frac27.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{y}{-\left(y + -1\right)}}\right) \]
      8. +-commutative7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{-\color{blue}{\left(-1 + y\right)}}\right) \]
      9. distribute-neg-in7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{\left(--1\right) + \left(-y\right)}}\right) \]
      10. metadata-eval7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1} + \left(-y\right)}\right) \]
      11. unsub-neg7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1 - y}}\right) \]
    7. Simplified7.5%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(\frac{y}{1 - y}\right)} \]
    8. Taylor expanded in y around -inf 79.1%

      \[\leadsto 1 - \color{blue}{\log \left(\frac{-1}{y}\right)} \]

    if -800 < y

    1. Initial program 95.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg95.0%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define95.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac295.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub095.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified95.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf 93.2%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x}{y - 1}}\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification88.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -800:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x}{y + -1}\right)\\ \end{array} \]
  5. Add Preprocessing
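
Alternative 6's else branch replaces (x - y)/(y - 1) by x/(y - 1), which is only a good approximation when |x| dominates |y|. A quick Python comparison (inputs chosen by me):

import math

x, y = 1e12, 3.0
print(1.0 - math.log1p((x - y) / (y + -1.0)))  # full quotient, ~-25.938
print(1.0 - math.log1p(x / (y + -1.0)))        # Alternative 6's branch, nearly identical here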

Alternative 7: 80.4% accurate, 1.0× speedup

\[\begin{array}{l} \mathbf{if}\;y \leq -31:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -31.0) (- 1.0 (log (/ -1.0 y))) (- 1.0 (+ y (log1p (- x))))))
double code(double x, double y) {
	double tmp;
	if (y <= -31.0) {
		tmp = 1.0 - log((-1.0 / y));
	} else {
		tmp = 1.0 - (y + log1p(-x));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -31.0) {
		tmp = 1.0 - Math.log((-1.0 / y));
	} else {
		tmp = 1.0 - (y + Math.log1p(-x));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -31.0:
		tmp = 1.0 - math.log((-1.0 / y))
	else:
		tmp = 1.0 - (y + math.log1p(-x))
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -31.0)
		tmp = Float64(1.0 - log(Float64(-1.0 / y)));
	else
		tmp = Float64(1.0 - Float64(y + log1p(Float64(-x))));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -31.0], N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[(y + N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;y \leq -31:\\
\;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if y < -31

    1. Initial program 16.9%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg16.9%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define16.9%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac216.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub016.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified16.9%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 7.5%

      \[\leadsto 1 - \color{blue}{\log \left(1 - \frac{y}{y - 1}\right)} \]
    6. Step-by-step derivation
      1. sub-neg7.5%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{y}{y - 1}\right)\right)} \]
      2. mul-1-neg7.5%

        \[\leadsto 1 - \log \left(1 + \color{blue}{-1 \cdot \frac{y}{y - 1}}\right) \]
      3. sub-neg7.5%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{\color{blue}{y + \left(-1\right)}}\right) \]
      4. metadata-eval7.5%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{y + \color{blue}{-1}}\right) \]
      5. neg-mul-17.5%

        \[\leadsto 1 - \log \left(1 + \color{blue}{\left(-\frac{y}{y + -1}\right)}\right) \]
      6. log1p-define7.5%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{y}{y + -1}\right)} \]
      7. distribute-neg-frac27.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{y}{-\left(y + -1\right)}}\right) \]
      8. +-commutative7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{-\color{blue}{\left(-1 + y\right)}}\right) \]
      9. distribute-neg-in7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{\left(--1\right) + \left(-y\right)}}\right) \]
      10. metadata-eval7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1} + \left(-y\right)}\right) \]
      11. unsub-neg7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1 - y}}\right) \]
    7. Simplified7.5%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(\frac{y}{1 - y}\right)} \]
    8. Taylor expanded in y around -inf 79.1%

      \[\leadsto 1 - \color{blue}{\log \left(\frac{-1}{y}\right)} \]

    if -31 < y

    1. Initial program 95.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg95.0%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define95.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac295.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub095.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified95.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around 0 84.0%

      \[\leadsto 1 - \color{blue}{\left(\log \left(1 + -1 \cdot x\right) + y \cdot \left(\frac{1}{1 + -1 \cdot x} - \frac{x}{1 + -1 \cdot x}\right)\right)} \]
    6. Step-by-step derivation
      1. +-commutative84.0%

        \[\leadsto 1 - \color{blue}{\left(y \cdot \left(\frac{1}{1 + -1 \cdot x} - \frac{x}{1 + -1 \cdot x}\right) + \log \left(1 + -1 \cdot x\right)\right)} \]
      2. div-sub84.0%

        \[\leadsto 1 - \left(y \cdot \color{blue}{\frac{1 - x}{1 + -1 \cdot x}} + \log \left(1 + -1 \cdot x\right)\right) \]
      3. mul-1-neg84.0%

        \[\leadsto 1 - \left(y \cdot \frac{1 - x}{1 + \color{blue}{\left(-x\right)}} + \log \left(1 + -1 \cdot x\right)\right) \]
      4. sub-neg84.0%

        \[\leadsto 1 - \left(y \cdot \frac{1 - x}{\color{blue}{1 - x}} + \log \left(1 + -1 \cdot x\right)\right) \]
      5. *-inverses84.0%

        \[\leadsto 1 - \left(y \cdot \color{blue}{1} + \log \left(1 + -1 \cdot x\right)\right) \]
      6. *-rgt-identity84.0%

        \[\leadsto 1 - \left(\color{blue}{y} + \log \left(1 + -1 \cdot x\right)\right) \]
      7. log1p-define84.0%

        \[\leadsto 1 - \left(y + \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)}\right) \]
      8. mul-1-neg84.0%

        \[\leadsto 1 - \left(y + \mathsf{log1p}\left(\color{blue}{-x}\right)\right) \]
    7. Simplified84.0%

      \[\leadsto 1 - \color{blue}{\left(y + \mathsf{log1p}\left(-x\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification82.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -31:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\ \end{array} \]
  5. Add Preprocessing
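
The small-y branch follows from expanding the exact form around y = 0; a reconstruction of step 5 (my algebra, assuming exact arithmetic and x < 1):

\[ \log \left(1 - \frac{x - y}{1 - y}\right) = \log \left(\frac{1 - x}{1 - y}\right) = \mathsf{log1p}\left(-x\right) + y + \frac{y^2}{2} + \cdots \]

Truncating after the linear term and subtracting from 1 gives 1 - (y + log1p(-x)), exactly the else branch.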

Alternative 8: 79.8% accurate, 1.0× speedup

\[\begin{array}{l} \mathbf{if}\;y \leq -27:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\ \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -27.0) (- 1.0 (log (/ -1.0 y))) (- 1.0 (log1p (- x)))))
double code(double x, double y) {
	double tmp;
	if (y <= -27.0) {
		tmp = 1.0 - log((-1.0 / y));
	} else {
		tmp = 1.0 - log1p(-x);
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -27.0) {
		tmp = 1.0 - Math.log((-1.0 / y));
	} else {
		tmp = 1.0 - Math.log1p(-x);
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -27.0:
		tmp = 1.0 - math.log((-1.0 / y))
	else:
		tmp = 1.0 - math.log1p(-x)
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -27.0)
		tmp = Float64(1.0 - log(Float64(-1.0 / y)));
	else
		tmp = Float64(1.0 - log1p(Float64(-x)));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -27.0], N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;y \leq -27:\\
\;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if y < -27

    1. Initial program 16.9%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg16.9%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define16.9%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac216.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub016.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r- 16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval 16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative 16.9%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified 16.9%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 7.5%

      \[\leadsto 1 - \color{blue}{\log \left(1 - \frac{y}{y - 1}\right)} \]
    6. Step-by-step derivation
      1. sub-neg 7.5%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{y}{y - 1}\right)\right)} \]
      2. mul-1-neg 7.5%

        \[\leadsto 1 - \log \left(1 + \color{blue}{-1 \cdot \frac{y}{y - 1}}\right) \]
      3. sub-neg 7.5%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{\color{blue}{y + \left(-1\right)}}\right) \]
      4. metadata-eval 7.5%

        \[\leadsto 1 - \log \left(1 + -1 \cdot \frac{y}{y + \color{blue}{-1}}\right) \]
      5. neg-mul-1 7.5%

        \[\leadsto 1 - \log \left(1 + \color{blue}{\left(-\frac{y}{y + -1}\right)}\right) \]
      6. log1p-define 7.5%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{y}{y + -1}\right)} \]
      7. distribute-neg-frac2 7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{y}{-\left(y + -1\right)}}\right) \]
      8. +-commutative 7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{-\color{blue}{\left(-1 + y\right)}}\right) \]
      9. distribute-neg-in 7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{\left(-\left(-1\right)\right) + \left(-y\right)}}\right) \]
      10. metadata-eval 7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1} + \left(-y\right)}\right) \]
      11. unsub-neg 7.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{y}{\color{blue}{1 - y}}\right) \]
    7. Simplified 7.5%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(\frac{y}{1 - y}\right)} \]
    8. Taylor expanded in y around -inf 79.1%

      \[\leadsto 1 - \color{blue}{\log \left(\frac{-1}{y}\right)} \]

      (Since 1 - (x - y)/(1 - y) = (1 - x)/(1 - y), the logarithm's argument behaves like -1/y as y goes to -inf, which is exactly this regime.)

    if -27 < y

    1. Initial program 95.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg 95.0%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define 95.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac2 95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub0 95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r- 95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval 95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative 95.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified 95.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around 0 82.8%

      \[\leadsto 1 - \color{blue}{\log \left(1 + -1 \cdot x\right)} \]
    6. Step-by-step derivation
      1. log1p-define 82.8%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
      2. mul-1-neg 82.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
    7. Simplified 82.8%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 81.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -27:\\ \;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\ \end{array} \]
  5. Add Preprocessing
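
The log1p-define rewrite that appears throughout these derivations is what rescues inputs with x close to 0: computing 1 - x first rounds away any |x| below about 1.1e-16 before the logarithm ever sees it, while log1p keeps the small term. A minimal Python illustration (added here, not part of the report):

import math

x = 1e-18
print(math.log(1.0 - x))  # 0.0, because 1.0 - x already rounded to exactly 1.0
print(math.log1p(-x))     # -1e-18, correct to full double precision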

Alternative 9: 63.7% accurate, 1.1× speedup?

\[\begin{array}{l} \\ 1 - \mathsf{log1p}\left(-x\right) \end{array} \]
(FPCore (x y) :precision binary64 (- 1.0 (log1p (- x))))
double code(double x, double y) {
	return 1.0 - log1p(-x);
}
public static double code(double x, double y) {
	return 1.0 - Math.log1p(-x);
}
def code(x, y):
	return 1.0 - math.log1p(-x)
function code(x, y)
	return Float64(1.0 - log1p(Float64(-x)))
end
code[x_, y_] := N[(1.0 - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
1 - \mathsf{log1p}\left(-x\right)
\end{array}
Derivation
  1. Initial program 70.3%

    \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
  2. Step-by-step derivation
    1. sub-neg 70.3%

      \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
    2. log1p-define 70.3%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
    3. distribute-neg-frac2 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
    4. neg-sub0 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
    5. associate--r- 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
    6. metadata-eval 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
    7. +-commutative 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
  3. Simplified 70.3%

    \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
  4. Add Preprocessing
  5. Taylor expanded in y around 0 60.7%

    \[\leadsto 1 - \color{blue}{\log \left(1 + -1 \cdot x\right)} \]
  6. Step-by-step derivation
    1. log1p-define 60.7%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
    2. mul-1-neg 60.7%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
  7. Simplified 60.7%

    \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]
  8. Final simplification 60.7%

    \[\leadsto 1 - \mathsf{log1p}\left(-x\right) \]
  9. Add Preprocessing
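
Alternative 9 drops y entirely, so it is only trustworthy near y = 0, which is what its 63.7% overall accuracy reflects. A small Python comparison (an added sketch, assuming the mpmath package for the high-precision reference):

import math
import mpmath
from mpmath import mp, mpf

mp.dps = 50
x = 0.25
for y in [1e-12, 1e-3, 0.9, -100.0]:
    alt9 = 1.0 - math.log1p(-x)  # constant in y
    exact = float(1 - mpmath.log(1 - (mpf(x) - mpf(y)) / (1 - mpf(y))))
    print(y, alt9, exact)        # exact drifts away from alt9 as |y| grows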

Alternative 10: 43.9% accurate, 37.0× speedup?

\[\begin{array}{l} \\ 1 + x \end{array} \]
(FPCore (x y) :precision binary64 (+ 1.0 x))
double code(double x, double y) {
	return 1.0 + x;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0 + x
end function
public static double code(double x, double y) {
	return 1.0 + x;
}
def code(x, y):
	return 1.0 + x
function code(x, y)
	return Float64(1.0 + x)
end
function tmp = code(x, y)
	tmp = 1.0 + x;
end
code[x_, y_] := N[(1.0 + x), $MachinePrecision]
\begin{array}{l}

\\
1 + x
\end{array}
Derivation
  1. Initial program 70.3%

    \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
  2. Step-by-step derivation
    1. sub-neg 70.3%

      \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
    2. log1p-define 70.3%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
    3. distribute-neg-frac2 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
    4. neg-sub0 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
    5. associate--r- 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
    6. metadata-eval 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
    7. +-commutative 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
  3. Simplified 70.3%

    \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
  4. Add Preprocessing
  5. Taylor expanded in y around 0 60.7%

    \[\leadsto 1 - \color{blue}{\log \left(1 + -1 \cdot x\right)} \]
  6. Step-by-step derivation
    1. log1p-define 60.7%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
    2. mul-1-neg 60.7%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
  7. Simplified 60.7%

    \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]
  8. Taylor expanded in x around 0 42.8%

    \[\leadsto \color{blue}{1 + x} \]
  9. Final simplification 42.8%

    \[\leadsto 1 + x \]
  10. Add Preprocessing
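
The two Taylor steps behind this alternative are instances of one textbook series; for reference (an added identity, not part of the report):

\[1 - \mathsf{log1p}\left(-x\right) = 1 - \log \left(1 - x\right) = 1 + x + \frac{x^2}{2} + \frac{x^3}{3} + \cdots \]

Truncating after the linear term gives the 1 + x of step 8; truncating after the constant term alone gives Alternative 11 below.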

Alternative 11: 43.6% accurate, 111.0× speedup?

\[\begin{array}{l} \\ 1 \end{array} \]
(FPCore (x y) :precision binary64 1.0)
double code(double x, double y) {
	return 1.0;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0
end function
public static double code(double x, double y) {
	return 1.0;
}
def code(x, y):
	return 1.0
function code(x, y)
	return 1.0
end
function tmp = code(x, y)
	tmp = 1.0;
end
code[x_, y_] := 1.0
\begin{array}{l}

\\
1
\end{array}
Derivation
  1. Initial program 70.3%

    \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
  2. Step-by-step derivation
    1. sub-neg 70.3%

      \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
    2. log1p-define 70.3%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
    3. distribute-neg-frac2 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
    4. neg-sub0 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
    5. associate--r- 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
    6. metadata-eval 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
    7. +-commutative 70.3%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
  3. Simplified 70.3%

    \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
  4. Add Preprocessing
  5. Taylor expanded in y around 0 60.7%

    \[\leadsto 1 - \color{blue}{\log \left(1 + -1 \cdot x\right)} \]
  6. Step-by-step derivation
    1. log1p-define 60.7%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
    2. mul-1-neg 60.7%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
  7. Simplified 60.7%

    \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]
  8. Taylor expanded in x around 0 42.5%

    \[\leadsto \color{blue}{1} \]
  9. Final simplification 42.5%

    \[\leadsto 1 \]
  10. Add Preprocessing

Developer target: 99.8% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := 1 - \log \left(\frac{x}{y \cdot y} - \left(\frac{1}{y} - \frac{x}{y}\right)\right)\\ \mathbf{if}\;y < -81284752.61947241:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;y < 3.0094271212461764 \cdot 10^{+25}:\\ \;\;\;\;\log \left(\frac{e^{1}}{1 - \frac{x - y}{1 - y}}\right)\\ \mathbf{else}:\\ \;\;\;\;t\_0\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (- 1.0 (log (- (/ x (* y y)) (- (/ 1.0 y) (/ x y)))))))
   (if (< y -81284752.61947241)
     t_0
     (if (< y 3.0094271212461764e+25)
       (log (/ (exp 1.0) (- 1.0 (/ (- x y) (- 1.0 y)))))
       t_0))))
double code(double x, double y) {
	double t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
	double tmp;
	if (y < -81284752.61947241) {
		tmp = t_0;
	} else if (y < 3.0094271212461764e+25) {
		tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
	} else {
		tmp = t_0;
	}
	return tmp;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: t_0
    real(8) :: tmp
    t_0 = 1.0d0 - log(((x / (y * y)) - ((1.0d0 / y) - (x / y))))
    if (y < (-81284752.61947241d0)) then
        tmp = t_0
    else if (y < 3.0094271212461764d+25) then
        tmp = log((exp(1.0d0) / (1.0d0 - ((x - y) / (1.0d0 - y)))))
    else
        tmp = t_0
    end if
    code = tmp
end function
public static double code(double x, double y) {
	double t_0 = 1.0 - Math.log(((x / (y * y)) - ((1.0 / y) - (x / y))));
	double tmp;
	if (y < -81284752.61947241) {
		tmp = t_0;
	} else if (y < 3.0094271212461764e+25) {
		tmp = Math.log((Math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
	} else {
		tmp = t_0;
	}
	return tmp;
}
def code(x, y):
	t_0 = 1.0 - math.log(((x / (y * y)) - ((1.0 / y) - (x / y))))
	tmp = 0
	if y < -81284752.61947241:
		tmp = t_0
	elif y < 3.0094271212461764e+25:
		tmp = math.log((math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))))
	else:
		tmp = t_0
	return tmp
function code(x, y)
	t_0 = Float64(1.0 - log(Float64(Float64(x / Float64(y * y)) - Float64(Float64(1.0 / y) - Float64(x / y)))))
	tmp = 0.0
	if (y < -81284752.61947241)
		tmp = t_0;
	elseif (y < 3.0094271212461764e+25)
		tmp = log(Float64(exp(1.0) / Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))));
	else
		tmp = t_0;
	end
	return tmp
end
function tmp_2 = code(x, y)
	t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
	tmp = 0.0;
	if (y < -81284752.61947241)
		tmp = t_0;
	elseif (y < 3.0094271212461764e+25)
		tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
	else
		tmp = t_0;
	end
	tmp_2 = tmp;
end
code[x_, y_] := Block[{t$95$0 = N[(1.0 - N[Log[N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - N[(N[(1.0 / y), $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[Less[y, -81284752.61947241], t$95$0, If[Less[y, 3.0094271212461764e+25], N[Log[N[(N[Exp[1.0], $MachinePrecision] / N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], t$95$0]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := 1 - \log \left(\frac{x}{y \cdot y} - \left(\frac{1}{y} - \frac{x}{y}\right)\right)\\
\mathbf{if}\;y < -81284752.61947241:\\
\;\;\;\;t\_0\\

\mathbf{elif}\;y < 3.0094271212461764 \cdot 10^{+25}:\\
\;\;\;\;\log \left(\frac{e^{1}}{1 - \frac{x - y}{1 - y}}\right)\\

\mathbf{else}:\\
\;\;\;\;t\_0\\


\end{array}
\end{array}
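
Two details of the developer target are worth noting (an added observation, inferred only from the code above): the middle branch folds the leading constant into the logarithm using the exact identity

\[\log \left(\frac{e}{z}\right) = 1 - \log z \]

which trades the final subtraction for a division, and the outer branches appear to rewrite the logarithm's argument as an expansion in powers of 1/y, sidestepping the cancellation in 1 - (x - y)/(1 - y) when |y| is very large (where both 1 - y and x - y round to -y).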

Reproduce

herbie shell --seed 2024077 
(FPCore (x y)
  :name "Numeric.SpecFunctions:invIncompleteGamma from math-functions-0.1.5.2, B"
  :precision binary64

  :alt
  (if (< y -81284752.61947241) (- 1.0 (log (- (/ x (* y y)) (- (/ 1.0 y) (/ x y))))) (if (< y 3.0094271212461764e+25) (log (/ (exp 1.0) (- 1.0 (/ (- x y) (- 1.0 y))))) (- 1.0 (log (- (/ x (* y y)) (- (/ 1.0 y) (/ x y)))))))

  (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
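
Pasting the FPCore above into the shell started by this command reproduces the analysis with the same random seed. Alternatively (assuming a standard Herbie installation), the FPCore can be saved to a file and passed to herbie report to regenerate a full page like this one.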