Numeric.SpecFunctions:invIncompleteGamma from math-functions-0.1.5.2, B

Percentage Accurate: 72.2% → 99.8%
Time: 10.5s
Alternatives: 8
Speedup: 1.0×

Specification

\[\begin{array}{l} \\ 1 - \log \left(1 - \frac{x - y}{1 - y}\right) \end{array} \]
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
double code(double x, double y) {
	return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
public static double code(double x, double y) {
	return 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
}
def code(x, y):
	return 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
function code(x, y)
	return Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
end
function tmp = code(x, y)
	tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}
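
A note for readers, not part of the Herbie output: the argument of the logarithm simplifies exactly, which makes the alternatives below easier to check against the specification. For y ≠ 1,

\[1 - \frac{x - y}{1 - y} = \frac{\left(1 - y\right) - \left(x - y\right)}{1 - y} = \frac{1 - x}{1 - y}, \qquad \text{so the specification equals} \qquad 1 - \log \left(\frac{1 - x}{1 - y}\right). \]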

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs input value

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the plot title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 8 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 72.2% accurate, 1.0× speedup

\[\begin{array}{l} \\ 1 - \log \left(1 - \frac{x - y}{1 - y}\right) \end{array} \]
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
double code(double x, double y) {
	return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
public static double code(double x, double y) {
	return 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
}
def code(x, y):
	return 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
function code(x, y)
	return Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
end
function tmp = code(x, y)
	tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}

Alternative 1: 99.8% accurate, 0.3× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\frac{x - y}{1 - y} \leq 0.2:\\ \;\;\;\;1 - \mathsf{log1p}\left(\left(x - y\right) \cdot \frac{1}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{x + -1}{y} \cdot e^{\frac{\mathsf{fma}\left(0.5, \frac{2 - {\left(\frac{1 - x}{x + -1}\right)}^{2}}{y}, 1\right)}{y}}\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= (/ (- x y) (- 1.0 y)) 0.2)
   (- 1.0 (log1p (* (- x y) (/ 1.0 (+ y -1.0)))))
   (-
    1.0
    (log
     (*
      (/ (+ x -1.0) y)
      (exp
       (/
        (fma 0.5 (/ (- 2.0 (pow (/ (- 1.0 x) (+ x -1.0)) 2.0)) y) 1.0)
        y)))))))
double code(double x, double y) {
	double tmp;
	if (((x - y) / (1.0 - y)) <= 0.2) {
		tmp = 1.0 - log1p(((x - y) * (1.0 / (y + -1.0))));
	} else {
		tmp = 1.0 - log((((x + -1.0) / y) * exp((fma(0.5, ((2.0 - pow(((1.0 - x) / (x + -1.0)), 2.0)) / y), 1.0) / y))));
	}
	return tmp;
}
function code(x, y)
	tmp = 0.0
	if (Float64(Float64(x - y) / Float64(1.0 - y)) <= 0.2)
		tmp = Float64(1.0 - log1p(Float64(Float64(x - y) * Float64(1.0 / Float64(y + -1.0)))));
	else
		tmp = Float64(1.0 - log(Float64(Float64(Float64(x + -1.0) / y) * exp(Float64(fma(0.5, Float64(Float64(2.0 - (Float64(Float64(1.0 - x) / Float64(x + -1.0)) ^ 2.0)) / y), 1.0) / y)))));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision], 0.2], N[(1.0 - N[Log[1 + N[(N[(x - y), $MachinePrecision] * N[(1.0 / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[N[(N[(N[(x + -1.0), $MachinePrecision] / y), $MachinePrecision] * N[Exp[N[(N[(0.5 * N[(N[(2.0 - N[Power[N[(N[(1.0 - x), $MachinePrecision] / N[(x + -1.0), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision] + 1.0), $MachinePrecision] / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\frac{x - y}{1 - y} \leq 0.2:\\
\;\;\;\;1 - \mathsf{log1p}\left(\left(x - y\right) \cdot \frac{1}{y + -1}\right)\\

\mathbf{else}:\\
\;\;\;\;1 - \log \left(\frac{x + -1}{y} \cdot e^{\frac{\mathsf{fma}\left(0.5, \frac{2 - {\left(\frac{1 - x}{x + -1}\right)}^{2}}{y}, 1\right)}{y}}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)) < 0.20000000000000001

    1. Initial program 100.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg100.0%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define100.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac2100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub0100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Step-by-step derivation
      1. clear-num100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{1}{\frac{y + -1}{x - y}}}\right) \]
      2. associate-/r/100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{1}{y + -1} \cdot \left(x - y\right)}\right) \]
    6. Applied egg-rr100.0%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{1}{y + -1} \cdot \left(x - y\right)}\right) \]

    if 0.20000000000000001 < (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y))

    1. Initial program 7.2%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg7.2%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define7.2%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac27.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub07.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-7.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval7.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative7.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified7.2%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around -inf 80.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(-1 \cdot \left(x - 1\right)\right) + \left(\log \left(\frac{-1}{y}\right) + -1 \cdot \frac{\left(-0.5 \cdot \frac{2 + -1 \cdot \frac{{\left(1 - x\right)}^{2}}{{\left(x - 1\right)}^{2}}}{y} + \frac{1}{x - 1}\right) - \frac{x}{x - 1}}{y}\right)\right)} \]
    6. Simplified80.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(\frac{-1}{y}\right) + \left(\mathsf{log1p}\left(-x\right) + \frac{-1 \cdot \frac{-0.5 \cdot \left(2 - \frac{{\left(1 - x\right)}^{2}}{{\left(x + -1\right)}^{2}}\right)}{y} + 1}{y}\right)\right)} \]
    7. Applied egg-rr100.0%

      \[\leadsto 1 - \color{blue}{\log \left(\frac{-1}{y} \cdot \left(\left(1 - x\right) \cdot e^{\frac{\mathsf{fma}\left(-1, \frac{-0.5 \cdot \left(2 - {\left(\frac{1 - x}{x + -1}\right)}^{2}\right)}{y}, 1\right)}{y}}\right)\right)} \]
    8. Simplified100.0%

      \[\leadsto 1 - \color{blue}{\log \left(\frac{x + -1}{y} \cdot e^{\frac{\mathsf{fma}\left(0.5, \frac{2 - {\left(\frac{1 - x}{x + -1}\right)}^{2}}{y}, 1\right)}{y}}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{x - y}{1 - y} \leq 0.2:\\ \;\;\;\;1 - \mathsf{log1p}\left(\left(x - y\right) \cdot \frac{1}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \log \left(\frac{x + -1}{y} \cdot e^{\frac{\mathsf{fma}\left(0.5, \frac{2 - {\left(\frac{1 - x}{x + -1}\right)}^{2}}{y}, 1\right)}{y}}\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 99.7% accurate, 0.9× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\frac{x - y}{1 - y} \leq 0.999999:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(\frac{e}{\frac{x + -1}{y}}\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= (/ (- x y) (- 1.0 y)) 0.999999)
   (- 1.0 (log1p (/ (- x y) (+ y -1.0))))
   (log (/ E (/ (+ x -1.0) y)))))
double code(double x, double y) {
	double tmp;
	if (((x - y) / (1.0 - y)) <= 0.999999) {
		tmp = 1.0 - log1p(((x - y) / (y + -1.0)));
	} else {
		tmp = log((((double) M_E) / ((x + -1.0) / y)));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (((x - y) / (1.0 - y)) <= 0.999999) {
		tmp = 1.0 - Math.log1p(((x - y) / (y + -1.0)));
	} else {
		tmp = Math.log((Math.E / ((x + -1.0) / y)));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if ((x - y) / (1.0 - y)) <= 0.999999:
		tmp = 1.0 - math.log1p(((x - y) / (y + -1.0)))
	else:
		tmp = math.log((math.e / ((x + -1.0) / y)))
	return tmp
function code(x, y)
	tmp = 0.0
	if (Float64(Float64(x - y) / Float64(1.0 - y)) <= 0.999999)
		tmp = Float64(1.0 - log1p(Float64(Float64(x - y) / Float64(y + -1.0))));
	else
		tmp = log(Float64(exp(1) / Float64(Float64(x + -1.0) / y)));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision], 0.999999], N[(1.0 - N[Log[1 + N[(N[(x - y), $MachinePrecision] / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[Log[N[(E / N[(N[(x + -1.0), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\frac{x - y}{1 - y} \leq 0.999999:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(\frac{e}{\frac{x + -1}{y}}\right)\\


\end{array}
\end{array}
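
A quick sanity check, not part of the Herbie report: the sketch below (the sample input and the decimal-based reference are illustrative assumptions, not Herbie's sampled points) evaluates the original program and this alternative at a point where y dominates both subtractions, so x − y and 1 − y round to the same binary64 value, the quotient is exactly 1.0, and the original takes the log of 0. The else branch never forms 1 − (x − y)/(1 − y), which is why it survives this regime.

import math
from decimal import Decimal, getcontext

getcontext().prec = 50  # high-precision reference via the standard decimal module

def original(x, y):
    # specification: 1 - log(1 - (x - y)/(1 - y))
    return 1.0 - math.log(1.0 - (x - y) / (1.0 - y))

def alternative2(x, y):
    # Alternative 2, as listed above
    if (x - y) / (1.0 - y) <= 0.999999:
        return 1.0 - math.log1p((x - y) / (y + -1.0))
    return math.log(math.e / ((x + -1.0) / y))

def reference(x, y):
    # evaluate the specification in 50-digit decimal arithmetic
    xd, yd = Decimal(x), Decimal(y)
    return float(Decimal(1) - (Decimal(1) - (xd - yd) / (Decimal(1) - yd)).ln())

x, y = 0.5, -1e20           # x - y and 1 - y both round to exactly 1e20 in binary64
print(reference(x, y))      # ~47.74
print(alternative2(x, y))   # ~47.74, close to the reference
try:
    print(original(x, y))
except ValueError:
    print("original: math domain error (log of 0 after the quotient rounds to 1.0)")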
Derivation
  1. Split input into 2 regimes
  2. if (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)) < 0.999998999999999971

    1. Initial program 99.7%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg99.7%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define99.8%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac299.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub099.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-99.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval99.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative99.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing

    if 0.999998999999999971 < (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y))

    1. Initial program 5.5%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg5.5%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define5.5%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac25.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub05.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-5.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval5.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative5.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified5.5%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 18.5%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec18.5%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg18.5%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg18.5%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval18.5%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified18.5%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
    8. Step-by-step derivation
      1. add-log-exp18.5%

        \[\leadsto \color{blue}{\log \left(e^{1 - \left(\log \left(x + -1\right) - \log y\right)}\right)} \]
      2. exp-diff18.5%

        \[\leadsto \log \color{blue}{\left(\frac{e^{1}}{e^{\log \left(x + -1\right) - \log y}}\right)} \]
      3. diff-log99.6%

        \[\leadsto \log \left(\frac{e^{1}}{e^{\color{blue}{\log \left(\frac{x + -1}{y}\right)}}}\right) \]
      4. add-exp-log99.6%

        \[\leadsto \log \left(\frac{e^{1}}{\color{blue}{\frac{x + -1}{y}}}\right) \]
    9. Applied egg-rr99.6%

      \[\leadsto \color{blue}{\log \left(\frac{e^{1}}{\frac{x + -1}{y}}\right)} \]
    10. Step-by-step derivation
      1. exp-1-e99.6%

        \[\leadsto \log \left(\frac{\color{blue}{e}}{\frac{x + -1}{y}}\right) \]
      2. +-commutative99.6%

        \[\leadsto \log \left(\frac{e}{\frac{\color{blue}{-1 + x}}{y}}\right) \]
    11. Simplified99.6%

      \[\leadsto \color{blue}{\log \left(\frac{e}{\frac{-1 + x}{y}}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{x - y}{1 - y} \leq 0.999999:\\ \;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(\frac{e}{\frac{x + -1}{y}}\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 98.7% accurate, 0.9× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -1.7 \lor \neg \left(y \leq 1\right):\\ \;\;\;\;\log \left(\frac{e}{\frac{x + -1}{y}}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (or (<= y -1.7) (not (<= y 1.0)))
   (log (/ E (/ (+ x -1.0) y)))
   (- 1.0 (+ y (log1p (- x))))))
double code(double x, double y) {
	double tmp;
	if ((y <= -1.7) || !(y <= 1.0)) {
		tmp = log((((double) M_E) / ((x + -1.0) / y)));
	} else {
		tmp = 1.0 - (y + log1p(-x));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if ((y <= -1.7) || !(y <= 1.0)) {
		tmp = Math.log((Math.E / ((x + -1.0) / y)));
	} else {
		tmp = 1.0 - (y + Math.log1p(-x));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if (y <= -1.7) or not (y <= 1.0):
		tmp = math.log((math.e / ((x + -1.0) / y)))
	else:
		tmp = 1.0 - (y + math.log1p(-x))
	return tmp
function code(x, y)
	tmp = 0.0
	if ((y <= -1.7) || !(y <= 1.0))
		tmp = log(Float64(exp(1) / Float64(Float64(x + -1.0) / y)));
	else
		tmp = Float64(1.0 - Float64(y + log1p(Float64(-x))));
	end
	return tmp
end
code[x_, y_] := If[Or[LessEqual[y, -1.7], N[Not[LessEqual[y, 1.0]], $MachinePrecision]], N[Log[N[(E / N[(N[(x + -1.0), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[(1.0 - N[(y + N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -1.7 \lor \neg \left(y \leq 1\right):\\
\;\;\;\;\log \left(\frac{e}{\frac{x + -1}{y}}\right)\\

\mathbf{else}:\\
\;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if y < -1.69999999999999996 or 1 < y

    1. Initial program 32.2%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg32.2%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define32.2%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac232.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub032.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-32.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval32.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative32.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified32.2%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 29.8%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec29.8%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg29.8%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg29.8%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval29.8%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified29.8%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
    8. Step-by-step derivation
      1. add-log-exp29.8%

        \[\leadsto \color{blue}{\log \left(e^{1 - \left(\log \left(x + -1\right) - \log y\right)}\right)} \]
      2. exp-diff29.8%

        \[\leadsto \log \color{blue}{\left(\frac{e^{1}}{e^{\log \left(x + -1\right) - \log y}}\right)} \]
      3. diff-log98.9%

        \[\leadsto \log \left(\frac{e^{1}}{e^{\color{blue}{\log \left(\frac{x + -1}{y}\right)}}}\right) \]
      4. add-exp-log98.9%

        \[\leadsto \log \left(\frac{e^{1}}{\color{blue}{\frac{x + -1}{y}}}\right) \]
    9. Applied egg-rr98.9%

      \[\leadsto \color{blue}{\log \left(\frac{e^{1}}{\frac{x + -1}{y}}\right)} \]
    10. Step-by-step derivation
      1. exp-1-e98.9%

        \[\leadsto \log \left(\frac{\color{blue}{e}}{\frac{x + -1}{y}}\right) \]
      2. +-commutative98.9%

        \[\leadsto \log \left(\frac{e}{\frac{\color{blue}{-1 + x}}{y}}\right) \]
    11. Simplified98.9%

      \[\leadsto \color{blue}{\log \left(\frac{e}{\frac{-1 + x}{y}}\right)} \]

    if -1.69999999999999996 < y < 1

    1. Initial program 100.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg100.0%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define100.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac2100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub0100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around 0 98.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(1 + -1 \cdot x\right) + y \cdot \left(\frac{1}{1 + -1 \cdot x} - \frac{x}{1 + -1 \cdot x}\right)\right)} \]
    6. Step-by-step derivation
      1. +-commutative98.2%

        \[\leadsto 1 - \color{blue}{\left(y \cdot \left(\frac{1}{1 + -1 \cdot x} - \frac{x}{1 + -1 \cdot x}\right) + \log \left(1 + -1 \cdot x\right)\right)} \]
      2. div-sub98.2%

        \[\leadsto 1 - \left(y \cdot \color{blue}{\frac{1 - x}{1 + -1 \cdot x}} + \log \left(1 + -1 \cdot x\right)\right) \]
      3. mul-1-neg98.2%

        \[\leadsto 1 - \left(y \cdot \frac{1 - x}{1 + \color{blue}{\left(-x\right)}} + \log \left(1 + -1 \cdot x\right)\right) \]
      4. sub-neg98.2%

        \[\leadsto 1 - \left(y \cdot \frac{1 - x}{\color{blue}{1 - x}} + \log \left(1 + -1 \cdot x\right)\right) \]
      5. *-inverses98.2%

        \[\leadsto 1 - \left(y \cdot \color{blue}{1} + \log \left(1 + -1 \cdot x\right)\right) \]
      6. *-rgt-identity98.2%

        \[\leadsto 1 - \left(\color{blue}{y} + \log \left(1 + -1 \cdot x\right)\right) \]
      7. log1p-define98.2%

        \[\leadsto 1 - \left(y + \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)}\right) \]
      8. mul-1-neg98.2%

        \[\leadsto 1 - \left(y + \mathsf{log1p}\left(\color{blue}{-x}\right)\right) \]
    7. Simplified98.2%

      \[\leadsto 1 - \color{blue}{\left(y + \mathsf{log1p}\left(-x\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification98.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -1.7 \lor \neg \left(y \leq 1\right):\\ \;\;\;\;\log \left(\frac{e}{\frac{x + -1}{y}}\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\ \end{array} \]
  5. Add Preprocessing
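
An equivalent way to read the middle branch, added here for clarity and not part of the Herbie derivation: for x, y < 1, the simplified specification splits as

\[1 - \log \left(\frac{1 - x}{1 - y}\right) = 1 - \mathsf{log1p}\left(-x\right) + \mathsf{log1p}\left(-y\right) \approx 1 - \left(y + \mathsf{log1p}\left(-x\right)\right), \]

since log1p(−y) = −y + O(y²) for small |y|. This is exactly the −1.7 < y ≤ 1 branch above, and the dropped terms cost little accuracy in that regime (98.2% per the derivation above).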

Alternative 4: 89.7% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -12.6:\\ \;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\ \mathbf{elif}\;y \leq 1:\\ \;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(\frac{y \cdot e}{x}\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -12.6)
   (log (* y (- E)))
   (if (<= y 1.0) (- 1.0 (+ y (log1p (- x)))) (log (/ (* y E) x)))))
double code(double x, double y) {
	double tmp;
	if (y <= -12.6) {
		tmp = log((y * -((double) M_E)));
	} else if (y <= 1.0) {
		tmp = 1.0 - (y + log1p(-x));
	} else {
		tmp = log(((y * ((double) M_E)) / x));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -12.6) {
		tmp = Math.log((y * -Math.E));
	} else if (y <= 1.0) {
		tmp = 1.0 - (y + Math.log1p(-x));
	} else {
		tmp = Math.log(((y * Math.E) / x));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -12.6:
		tmp = math.log((y * -math.e))
	elif y <= 1.0:
		tmp = 1.0 - (y + math.log1p(-x))
	else:
		tmp = math.log(((y * math.e) / x))
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -12.6)
		tmp = log(Float64(y * Float64(-exp(1))));
	elseif (y <= 1.0)
		tmp = Float64(1.0 - Float64(y + log1p(Float64(-x))));
	else
		tmp = log(Float64(Float64(y * exp(1)) / x));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -12.6], N[Log[N[(y * (-E)), $MachinePrecision]], $MachinePrecision], If[LessEqual[y, 1.0], N[(1.0 - N[(y + N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(N[(y * E), $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -12.6:\\
\;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\

\mathbf{elif}\;y \leq 1:\\
\;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(\frac{y \cdot e}{x}\right)\\


\end{array}
\end{array}
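
The two extreme branches can be read the same way; this is a leading-order view added for clarity, not taken from the report. Wherever it is defined, the specification equals 1 + log((1 − y)/(1 − x)), and the branches keep only the leading behaviour in x and y, as the Taylor steps in the derivation below do:

\[\log \left(y \cdot \left(-e\right)\right) = 1 + \log \left(-y\right) \quad \left(y \leq -12.6\right), \qquad \log \left(\frac{y \cdot e}{x}\right) = 1 + \log \left(\frac{y}{x}\right) \quad \left(y > 1\right). \]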
Derivation
  1. Split input into 3 regimes
  2. if y < -12.5999999999999996

    1. Initial program 20.8%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg20.8%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define20.8%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac220.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub020.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified20.8%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 0.0%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec0.0%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg0.0%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg0.0%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval0.0%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified0.0%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
    8. Step-by-step derivation
      1. add-log-exp0.0%

        \[\leadsto \color{blue}{\log \left(e^{1 - \left(\log \left(x + -1\right) - \log y\right)}\right)} \]
      2. exp-diff0.0%

        \[\leadsto \log \color{blue}{\left(\frac{e^{1}}{e^{\log \left(x + -1\right) - \log y}}\right)} \]
      3. diff-log98.4%

        \[\leadsto \log \left(\frac{e^{1}}{e^{\color{blue}{\log \left(\frac{x + -1}{y}\right)}}}\right) \]
      4. add-exp-log98.5%

        \[\leadsto \log \left(\frac{e^{1}}{\color{blue}{\frac{x + -1}{y}}}\right) \]
    9. Applied egg-rr98.5%

      \[\leadsto \color{blue}{\log \left(\frac{e^{1}}{\frac{x + -1}{y}}\right)} \]
    10. Step-by-step derivation
      1. exp-1-e98.5%

        \[\leadsto \log \left(\frac{\color{blue}{e}}{\frac{x + -1}{y}}\right) \]
      2. +-commutative98.5%

        \[\leadsto \log \left(\frac{e}{\frac{\color{blue}{-1 + x}}{y}}\right) \]
    11. Simplified98.5%

      \[\leadsto \color{blue}{\log \left(\frac{e}{\frac{-1 + x}{y}}\right)} \]
    12. Taylor expanded in x around 0 68.4%

      \[\leadsto \color{blue}{\log \left(-1 \cdot \left(y \cdot e\right)\right)} \]
    13. Step-by-step derivation
      1. associate-*r*68.4%

        \[\leadsto \log \color{blue}{\left(\left(-1 \cdot y\right) \cdot e\right)} \]
      2. neg-mul-168.4%

        \[\leadsto \log \left(\color{blue}{\left(-y\right)} \cdot e\right) \]
    14. Simplified68.4%

      \[\leadsto \color{blue}{\log \left(\left(-y\right) \cdot e\right)} \]

    if -12.5999999999999996 < y < 1

    1. Initial program 100.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg100.0%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define100.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac2100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub0100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around 0 98.2%

      \[\leadsto 1 - \color{blue}{\left(\log \left(1 + -1 \cdot x\right) + y \cdot \left(\frac{1}{1 + -1 \cdot x} - \frac{x}{1 + -1 \cdot x}\right)\right)} \]
    6. Step-by-step derivation
      1. +-commutative98.2%

        \[\leadsto 1 - \color{blue}{\left(y \cdot \left(\frac{1}{1 + -1 \cdot x} - \frac{x}{1 + -1 \cdot x}\right) + \log \left(1 + -1 \cdot x\right)\right)} \]
      2. div-sub98.2%

        \[\leadsto 1 - \left(y \cdot \color{blue}{\frac{1 - x}{1 + -1 \cdot x}} + \log \left(1 + -1 \cdot x\right)\right) \]
      3. mul-1-neg98.2%

        \[\leadsto 1 - \left(y \cdot \frac{1 - x}{1 + \color{blue}{\left(-x\right)}} + \log \left(1 + -1 \cdot x\right)\right) \]
      4. sub-neg98.2%

        \[\leadsto 1 - \left(y \cdot \frac{1 - x}{\color{blue}{1 - x}} + \log \left(1 + -1 \cdot x\right)\right) \]
      5. *-inverses98.2%

        \[\leadsto 1 - \left(y \cdot \color{blue}{1} + \log \left(1 + -1 \cdot x\right)\right) \]
      6. *-rgt-identity98.2%

        \[\leadsto 1 - \left(\color{blue}{y} + \log \left(1 + -1 \cdot x\right)\right) \]
      7. log1p-define98.2%

        \[\leadsto 1 - \left(y + \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)}\right) \]
      8. mul-1-neg98.2%

        \[\leadsto 1 - \left(y + \mathsf{log1p}\left(\color{blue}{-x}\right)\right) \]
    7. Simplified98.2%

      \[\leadsto 1 - \color{blue}{\left(y + \mathsf{log1p}\left(-x\right)\right)} \]

    if 1 < y

    1. Initial program 58.3%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg58.3%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define58.3%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac258.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub058.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-58.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval58.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative58.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified58.3%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 98.5%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec98.5%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg98.5%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg98.5%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval98.5%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified98.5%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
    8. Step-by-step derivation
      1. add-log-exp98.5%

        \[\leadsto \color{blue}{\log \left(e^{1 - \left(\log \left(x + -1\right) - \log y\right)}\right)} \]
      2. exp-diff98.5%

        \[\leadsto \log \color{blue}{\left(\frac{e^{1}}{e^{\log \left(x + -1\right) - \log y}}\right)} \]
      3. diff-log99.9%

        \[\leadsto \log \left(\frac{e^{1}}{e^{\color{blue}{\log \left(\frac{x + -1}{y}\right)}}}\right) \]
      4. add-exp-log99.9%

        \[\leadsto \log \left(\frac{e^{1}}{\color{blue}{\frac{x + -1}{y}}}\right) \]
    9. Applied egg-rr99.9%

      \[\leadsto \color{blue}{\log \left(\frac{e^{1}}{\frac{x + -1}{y}}\right)} \]
    10. Step-by-step derivation
      1. exp-1-e99.9%

        \[\leadsto \log \left(\frac{\color{blue}{e}}{\frac{x + -1}{y}}\right) \]
      2. +-commutative99.9%

        \[\leadsto \log \left(\frac{e}{\frac{\color{blue}{-1 + x}}{y}}\right) \]
    11. Simplified99.9%

      \[\leadsto \color{blue}{\log \left(\frac{e}{\frac{-1 + x}{y}}\right)} \]
    12. Taylor expanded in x around inf 99.0%

      \[\leadsto \log \color{blue}{\left(\frac{y \cdot e}{x}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification88.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -12.6:\\ \;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\ \mathbf{elif}\;y \leq 1:\\ \;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(\frac{y \cdot e}{x}\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 89.1% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -240:\\ \;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\ \mathbf{elif}\;y \leq 1:\\ \;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(\frac{y \cdot e}{x}\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -240.0)
   (log (* y (- E)))
   (if (<= y 1.0) (- 1.0 (log1p (- x))) (log (/ (* y E) x)))))
double code(double x, double y) {
	double tmp;
	if (y <= -240.0) {
		tmp = log((y * -((double) M_E)));
	} else if (y <= 1.0) {
		tmp = 1.0 - log1p(-x);
	} else {
		tmp = log(((y * ((double) M_E)) / x));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -240.0) {
		tmp = Math.log((y * -Math.E));
	} else if (y <= 1.0) {
		tmp = 1.0 - Math.log1p(-x);
	} else {
		tmp = Math.log(((y * Math.E) / x));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -240.0:
		tmp = math.log((y * -math.e))
	elif y <= 1.0:
		tmp = 1.0 - math.log1p(-x)
	else:
		tmp = math.log(((y * math.e) / x))
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -240.0)
		tmp = log(Float64(y * Float64(-exp(1))));
	elseif (y <= 1.0)
		tmp = Float64(1.0 - log1p(Float64(-x)));
	else
		tmp = log(Float64(Float64(y * exp(1)) / x));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -240.0], N[Log[N[(y * (-E)), $MachinePrecision]], $MachinePrecision], If[LessEqual[y, 1.0], N[(1.0 - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision], N[Log[N[(N[(y * E), $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -240:\\
\;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\

\mathbf{elif}\;y \leq 1:\\
\;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(\frac{y \cdot e}{x}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < -240

    1. Initial program 20.8%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg20.8%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define20.8%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac220.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub020.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified20.8%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 0.0%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec0.0%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg0.0%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg0.0%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval0.0%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified0.0%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
    8. Step-by-step derivation
      1. add-log-exp0.0%

        \[\leadsto \color{blue}{\log \left(e^{1 - \left(\log \left(x + -1\right) - \log y\right)}\right)} \]
      2. exp-diff0.0%

        \[\leadsto \log \color{blue}{\left(\frac{e^{1}}{e^{\log \left(x + -1\right) - \log y}}\right)} \]
      3. diff-log98.4%

        \[\leadsto \log \left(\frac{e^{1}}{e^{\color{blue}{\log \left(\frac{x + -1}{y}\right)}}}\right) \]
      4. add-exp-log98.5%

        \[\leadsto \log \left(\frac{e^{1}}{\color{blue}{\frac{x + -1}{y}}}\right) \]
    9. Applied egg-rr98.5%

      \[\leadsto \color{blue}{\log \left(\frac{e^{1}}{\frac{x + -1}{y}}\right)} \]
    10. Step-by-step derivation
      1. exp-1-e98.5%

        \[\leadsto \log \left(\frac{\color{blue}{e}}{\frac{x + -1}{y}}\right) \]
      2. +-commutative98.5%

        \[\leadsto \log \left(\frac{e}{\frac{\color{blue}{-1 + x}}{y}}\right) \]
    11. Simplified98.5%

      \[\leadsto \color{blue}{\log \left(\frac{e}{\frac{-1 + x}{y}}\right)} \]
    12. Taylor expanded in x around 0 68.4%

      \[\leadsto \color{blue}{\log \left(-1 \cdot \left(y \cdot e\right)\right)} \]
    13. Step-by-step derivation
      1. associate-*r*68.4%

        \[\leadsto \log \color{blue}{\left(\left(-1 \cdot y\right) \cdot e\right)} \]
      2. neg-mul-168.4%

        \[\leadsto \log \left(\color{blue}{\left(-y\right)} \cdot e\right) \]
    14. Simplified68.4%

      \[\leadsto \color{blue}{\log \left(\left(-y\right) \cdot e\right)} \]

    if -240 < y < 1

    1. Initial program 100.0%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg100.0%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define100.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac2100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub0100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative100.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around 0 97.2%

      \[\leadsto 1 - \color{blue}{\log \left(1 + -1 \cdot x\right)} \]
    6. Step-by-step derivation
      1. log1p-define97.2%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
      2. mul-1-neg97.2%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
    7. Simplified97.2%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]

    if 1 < y

    1. Initial program 58.3%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg58.3%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define58.3%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac258.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub058.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-58.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval58.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative58.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified58.3%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 98.5%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec98.5%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg98.5%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg98.5%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval98.5%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified98.5%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
    8. Step-by-step derivation
      1. add-log-exp98.5%

        \[\leadsto \color{blue}{\log \left(e^{1 - \left(\log \left(x + -1\right) - \log y\right)}\right)} \]
      2. exp-diff98.5%

        \[\leadsto \log \color{blue}{\left(\frac{e^{1}}{e^{\log \left(x + -1\right) - \log y}}\right)} \]
      3. diff-log99.9%

        \[\leadsto \log \left(\frac{e^{1}}{e^{\color{blue}{\log \left(\frac{x + -1}{y}\right)}}}\right) \]
      4. add-exp-log99.9%

        \[\leadsto \log \left(\frac{e^{1}}{\color{blue}{\frac{x + -1}{y}}}\right) \]
    9. Applied egg-rr99.9%

      \[\leadsto \color{blue}{\log \left(\frac{e^{1}}{\frac{x + -1}{y}}\right)} \]
    10. Step-by-step derivation
      1. exp-1-e99.9%

        \[\leadsto \log \left(\frac{\color{blue}{e}}{\frac{x + -1}{y}}\right) \]
      2. +-commutative99.9%

        \[\leadsto \log \left(\frac{e}{\frac{\color{blue}{-1 + x}}{y}}\right) \]
    11. Simplified99.9%

      \[\leadsto \color{blue}{\log \left(\frac{e}{\frac{-1 + x}{y}}\right)} \]
    12. Taylor expanded in x around inf 99.0%

      \[\leadsto \log \color{blue}{\left(\frac{y \cdot e}{x}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification88.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -240:\\ \;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\ \mathbf{elif}\;y \leq 1:\\ \;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(\frac{y \cdot e}{x}\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 78.7% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -330:\\ \;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -330.0) (log (* y (- E))) (- 1.0 (log1p (- x)))))
double code(double x, double y) {
	double tmp;
	if (y <= -330.0) {
		tmp = log((y * -((double) M_E)));
	} else {
		tmp = 1.0 - log1p(-x);
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -330.0) {
		tmp = Math.log((y * -Math.E));
	} else {
		tmp = 1.0 - Math.log1p(-x);
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -330.0:
		tmp = math.log((y * -math.e))
	else:
		tmp = 1.0 - math.log1p(-x)
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -330.0)
		tmp = log(Float64(y * Float64(-exp(1))));
	else
		tmp = Float64(1.0 - log1p(Float64(-x)));
	end
	return tmp
end
code[x_, y_] := If[LessEqual[y, -330.0], N[Log[N[(y * (-E)), $MachinePrecision]], $MachinePrecision], N[(1.0 - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -330:\\
\;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\

\mathbf{else}:\\
\;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if y < -330

    1. Initial program 20.8%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg20.8%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define20.8%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac220.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub020.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified20.8%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 0.0%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec0.0%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg0.0%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg0.0%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval0.0%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified0.0%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
    8. Step-by-step derivation
      1. add-log-exp0.0%

        \[\leadsto \color{blue}{\log \left(e^{1 - \left(\log \left(x + -1\right) - \log y\right)}\right)} \]
      2. exp-diff0.0%

        \[\leadsto \log \color{blue}{\left(\frac{e^{1}}{e^{\log \left(x + -1\right) - \log y}}\right)} \]
      3. diff-log98.4%

        \[\leadsto \log \left(\frac{e^{1}}{e^{\color{blue}{\log \left(\frac{x + -1}{y}\right)}}}\right) \]
      4. add-exp-log98.5%

        \[\leadsto \log \left(\frac{e^{1}}{\color{blue}{\frac{x + -1}{y}}}\right) \]
    9. Applied egg-rr98.5%

      \[\leadsto \color{blue}{\log \left(\frac{e^{1}}{\frac{x + -1}{y}}\right)} \]
    10. Step-by-step derivation
      1. exp-1-e98.5%

        \[\leadsto \log \left(\frac{\color{blue}{e}}{\frac{x + -1}{y}}\right) \]
      2. +-commutative98.5%

        \[\leadsto \log \left(\frac{e}{\frac{\color{blue}{-1 + x}}{y}}\right) \]
    11. Simplified98.5%

      \[\leadsto \color{blue}{\log \left(\frac{e}{\frac{-1 + x}{y}}\right)} \]
    12. Taylor expanded in x around 0 68.4%

      \[\leadsto \color{blue}{\log \left(-1 \cdot \left(y \cdot e\right)\right)} \]
    13. Step-by-step derivation
      1. associate-*r*68.4%

        \[\leadsto \log \color{blue}{\left(\left(-1 \cdot y\right) \cdot e\right)} \]
      2. neg-mul-168.4%

        \[\leadsto \log \left(\color{blue}{\left(-y\right)} \cdot e\right) \]
    14. Simplified68.4%

      \[\leadsto \color{blue}{\log \left(\left(-y\right) \cdot e\right)} \]

    if -330 < y

    1. Initial program 91.3%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg91.3%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define91.3%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac291.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub091.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-91.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval91.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative91.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified91.3%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around 0 77.0%

      \[\leadsto 1 - \color{blue}{\log \left(1 + -1 \cdot x\right)} \]
    6. Step-by-step derivation
      1. log1p-define77.0%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-1 \cdot x\right)} \]
      2. mul-1-neg77.0%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-x}\right) \]
    7. Simplified77.0%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification74.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -330:\\ \;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 58.8% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -1.45:\\ \;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1 + y \cdot \left(-1 - y \cdot \left(0.5 + y \cdot \left(0.3333333333333333 + y \cdot 0.25\right)\right)\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= y -1.45)
   (log (* y (- E)))
   (+
    1.0
    (* y (- -1.0 (* y (+ 0.5 (* y (+ 0.3333333333333333 (* y 0.25))))))))))
double code(double x, double y) {
	double tmp;
	if (y <= -1.45) {
		tmp = log((y * -((double) M_E)));
	} else {
		tmp = 1.0 + (y * (-1.0 - (y * (0.5 + (y * (0.3333333333333333 + (y * 0.25)))))));
	}
	return tmp;
}
public static double code(double x, double y) {
	double tmp;
	if (y <= -1.45) {
		tmp = Math.log((y * -Math.E));
	} else {
		tmp = 1.0 + (y * (-1.0 - (y * (0.5 + (y * (0.3333333333333333 + (y * 0.25)))))));
	}
	return tmp;
}
def code(x, y):
	tmp = 0
	if y <= -1.45:
		tmp = math.log((y * -math.e))
	else:
		tmp = 1.0 + (y * (-1.0 - (y * (0.5 + (y * (0.3333333333333333 + (y * 0.25)))))))
	return tmp
function code(x, y)
	tmp = 0.0
	if (y <= -1.45)
		tmp = log(Float64(y * Float64(-exp(1))));
	else
		tmp = Float64(1.0 + Float64(y * Float64(-1.0 - Float64(y * Float64(0.5 + Float64(y * Float64(0.3333333333333333 + Float64(y * 0.25))))))));
	end
	return tmp
end
function tmp_2 = code(x, y)
	tmp = 0.0;
	if (y <= -1.45)
		tmp = log((y * -2.71828182845904523536));
	else
		tmp = 1.0 + (y * (-1.0 - (y * (0.5 + (y * (0.3333333333333333 + (y * 0.25)))))));
	end
	tmp_2 = tmp;
end
code[x_, y_] := If[LessEqual[y, -1.45], N[Log[N[(y * (-E)), $MachinePrecision]], $MachinePrecision], N[(1.0 + N[(y * N[(-1.0 - N[(y * N[(0.5 + N[(y * N[(0.3333333333333333 + N[(y * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -1.45:\\
\;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\

\mathbf{else}:\\
\;\;\;\;1 + y \cdot \left(-1 - y \cdot \left(0.5 + y \cdot \left(0.3333333333333333 + y \cdot 0.25\right)\right)\right)\\


\end{array}
\end{array}
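
One way to recognize the polynomial branch, noted here for clarity and not part of the report: it is a Horner evaluation of the degree-4 Maclaurin polynomial of 1 + log(1 − y), which is what the specification reduces to once the x-dependence is dropped, consistent with the Taylor steps in the derivation below:

\[1 + y \cdot \left(-1 - y \cdot \left(\tfrac{1}{2} + y \cdot \left(\tfrac{1}{3} + \tfrac{1}{4} y\right)\right)\right) = 1 - \left(y + \frac{y^{2}}{2} + \frac{y^{3}}{3} + \frac{y^{4}}{4}\right) \approx 1 + \log \left(1 - y\right). \]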
Derivation
  1. Split input into 2 regimes
  2. if y < -1.44999999999999996

    1. Initial program 20.8%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg20.8%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define20.8%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac220.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub020.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r-20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative20.8%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified20.8%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in y around inf 0.0%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) + \log \left(\frac{1}{y}\right)\right)} \]
    6. Step-by-step derivation
      1. log-rec0.0%

        \[\leadsto 1 - \left(\log \left(x - 1\right) + \color{blue}{\left(-\log y\right)}\right) \]
      2. unsub-neg0.0%

        \[\leadsto 1 - \color{blue}{\left(\log \left(x - 1\right) - \log y\right)} \]
      3. sub-neg0.0%

        \[\leadsto 1 - \left(\log \color{blue}{\left(x + \left(-1\right)\right)} - \log y\right) \]
      4. metadata-eval0.0%

        \[\leadsto 1 - \left(\log \left(x + \color{blue}{-1}\right) - \log y\right) \]
    7. Simplified0.0%

      \[\leadsto 1 - \color{blue}{\left(\log \left(x + -1\right) - \log y\right)} \]
    8. Step-by-step derivation
      1. add-log-exp0.0%

        \[\leadsto \color{blue}{\log \left(e^{1 - \left(\log \left(x + -1\right) - \log y\right)}\right)} \]
      2. exp-diff0.0%

        \[\leadsto \log \color{blue}{\left(\frac{e^{1}}{e^{\log \left(x + -1\right) - \log y}}\right)} \]
      3. diff-log 98.4%

        \[\leadsto \log \left(\frac{e^{1}}{e^{\color{blue}{\log \left(\frac{x + -1}{y}\right)}}}\right) \]
      4. add-exp-log 98.5%

        \[\leadsto \log \left(\frac{e^{1}}{\color{blue}{\frac{x + -1}{y}}}\right) \]
    9. Applied egg-rr 98.5%

      \[\leadsto \color{blue}{\log \left(\frac{e^{1}}{\frac{x + -1}{y}}\right)} \]
    10. Step-by-step derivation
      1. exp-1-e 98.5%

        \[\leadsto \log \left(\frac{\color{blue}{e}}{\frac{x + -1}{y}}\right) \]
      2. +-commutative 98.5%

        \[\leadsto \log \left(\frac{e}{\frac{\color{blue}{-1 + x}}{y}}\right) \]
    11. Simplified 98.5%

      \[\leadsto \color{blue}{\log \left(\frac{e}{\frac{-1 + x}{y}}\right)} \]
    12. Taylor expanded in x around 0 68.4%

      \[\leadsto \color{blue}{\log \left(-1 \cdot \left(y \cdot e\right)\right)} \]
    13. Step-by-step derivation
      1. associate-*r* 68.4%

        \[\leadsto \log \color{blue}{\left(\left(-1 \cdot y\right) \cdot e\right)} \]
      2. neg-mul-1 68.4%

        \[\leadsto \log \left(\color{blue}{\left(-y\right)} \cdot e\right) \]
    14. Simplified 68.4%

      \[\leadsto \color{blue}{\log \left(\left(-y\right) \cdot e\right)} \]

    if -1.44999999999999996 < y

    1. Initial program 91.3%

      \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
    2. Step-by-step derivation
      1. sub-neg 91.3%

        \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
      2. log1p-define 91.3%

        \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
      3. distribute-neg-frac2 91.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
      4. neg-sub0 91.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
      5. associate--r- 91.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
      6. metadata-eval 91.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
      7. +-commutative 91.3%

        \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
    3. Simplified 91.3%

      \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 50.5%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-1 \cdot \frac{y}{y - 1}}\right) \]
    6. Step-by-step derivation
      1. sub-neg 50.5%

        \[\leadsto 1 - \mathsf{log1p}\left(-1 \cdot \frac{y}{\color{blue}{y + \left(-1\right)}}\right) \]
      2. metadata-eval 50.5%

        \[\leadsto 1 - \mathsf{log1p}\left(-1 \cdot \frac{y}{y + \color{blue}{-1}}\right) \]
      3. neg-mul-1 50.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{-\frac{y}{y + -1}}\right) \]
      4. distribute-neg-frac 50.5%

        \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{-y}{y + -1}}\right) \]
    7. Simplified 50.5%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{-y}{y + -1}}\right) \]
    8. Taylor expanded in y around 0 50.3%

      \[\leadsto 1 - \color{blue}{y \cdot \left(1 + y \cdot \left(0.5 + y \cdot \left(0.3333333333333333 + 0.25 \cdot y\right)\right)\right)} \]
    9. Step-by-step derivation
      1. *-commutative 50.3%

        \[\leadsto 1 - y \cdot \left(1 + y \cdot \left(0.5 + y \cdot \left(0.3333333333333333 + \color{blue}{y \cdot 0.25}\right)\right)\right) \]
    10. Simplified 50.3%

      \[\leadsto 1 - \color{blue}{y \cdot \left(1 + y \cdot \left(0.5 + y \cdot \left(0.3333333333333333 + y \cdot 0.25\right)\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 56.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -1.45:\\ \;\;\;\;\log \left(y \cdot \left(-e\right)\right)\\ \mathbf{else}:\\ \;\;\;\;1 + y \cdot \left(-1 - y \cdot \left(0.5 + y \cdot \left(0.3333333333333333 + y \cdot 0.25\right)\right)\right)\\ \end{array} \]
  5. Add Preprocessing
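
Numerical spot check (added commentary, not generated by Herbie): the sketch below assumes Python 3 with mpmath installed and uses arbitrary sample points. It prints the two-regime rewrite next to a high-precision rendering of the original expression so each regime's behaviour can be inspected directly.

# Hedged spot check of the two-regime rewrite above; mpmath is an assumed dependency.
import math
import mpmath

def rewrite(x, y):
	# Regime 1: closed form for very negative y; regime 2: degree-4 polynomial in y.
	# Note that x drops out entirely, exactly as in the rewritten program above.
	if y <= -1.45:
		return math.log(y * -math.e)
	return 1.0 + (y * (-1.0 - (y * (0.5 + (y * (0.3333333333333333 + (y * 0.25)))))))

def reference(x, y):
	# High-precision evaluation of the original 1 - log(1 - (x - y)/(1 - y)).
	mpmath.mp.dps = 50
	x, y = mpmath.mpf(x), mpmath.mpf(y)
	return float(1 - mpmath.log(1 - (x - y) / (1 - y)))

# Arbitrary sample points: one deep in each regime plus a borderline case.
for x, y in [(1e-12, -1e6), (1e-9, -2.0), (1e-300, 0.01)]:
	print(x, y, rewrite(x, y), reference(x, y))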

Alternative 8: 42.2% accurate, 111.0× speedup?

\[\begin{array}{l} \\ 1 \end{array} \]
(FPCore (x y) :precision binary64 1.0)
double code(double x, double y) {
	return 1.0;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0
end function
public static double code(double x, double y) {
	return 1.0;
}
def code(x, y):
	return 1.0
function code(x, y)
	return 1.0
end
function tmp = code(x, y)
	tmp = 1.0;
end
code[x_, y_] := 1.0
\begin{array}{l}

\\
1
\end{array}
Derivation
  1. Initial program 68.4%

    \[1 - \log \left(1 - \frac{x - y}{1 - y}\right) \]
  2. Step-by-step derivation
    1. sub-neg 68.4%

      \[\leadsto 1 - \log \color{blue}{\left(1 + \left(-\frac{x - y}{1 - y}\right)\right)} \]
    2. log1p-define 68.5%

      \[\leadsto 1 - \color{blue}{\mathsf{log1p}\left(-\frac{x - y}{1 - y}\right)} \]
    3. distribute-neg-frac2 68.5%

      \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x - y}{-\left(1 - y\right)}}\right) \]
    4. neg-sub0 68.5%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{0 - \left(1 - y\right)}}\right) \]
    5. associate--r- 68.5%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{\left(0 - 1\right) + y}}\right) \]
    6. metadata-eval 68.5%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{-1} + y}\right) \]
    7. +-commutative 68.5%

      \[\leadsto 1 - \mathsf{log1p}\left(\frac{x - y}{\color{blue}{y + -1}}\right) \]
  3. Simplified 68.5%

    \[\leadsto \color{blue}{1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)} \]
  4. Add Preprocessing
  5. Taylor expanded in x around inf 68.9%

    \[\leadsto 1 - \mathsf{log1p}\left(\color{blue}{\frac{x}{y - 1}}\right) \]
  6. Taylor expanded in x around 0 37.5%

    \[\leadsto \color{blue}{1} \]
  7. Add Preprocessing
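
Added note (not part of the generated report): the constant is the zeroth-order approximation, so it only tracks the original expression when (x - y)/(1 - y) is small, which is presumably why it still scores 42.2% on the sampled inputs while being by far the fastest option. A tiny Python illustration:

import math

def original(x, y):
	return 1.0 - math.log(1.0 - (x - y) / (1.0 - y))

print(original(1e-9, 2e-9))  # ratio is about -1e-9, result is about 1.0: the constant is adequate
print(original(0.9, -0.5))   # ratio is about 0.93, result is about 3.7: the constant is a poor fit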

Developer Target 1: 99.8% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := 1 - \log \left(\frac{x}{y \cdot y} - \left(\frac{1}{y} - \frac{x}{y}\right)\right)\\ \mathbf{if}\;y < -81284752.61947241:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;y < 3.0094271212461764 \cdot 10^{+25}:\\ \;\;\;\;\log \left(\frac{e^{1}}{1 - \frac{x - y}{1 - y}}\right)\\ \mathbf{else}:\\ \;\;\;\;t\_0\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (- 1.0 (log (- (/ x (* y y)) (- (/ 1.0 y) (/ x y)))))))
   (if (< y -81284752.61947241)
     t_0
     (if (< y 3.0094271212461764e+25)
       (log (/ (exp 1.0) (- 1.0 (/ (- x y) (- 1.0 y)))))
       t_0))))
double code(double x, double y) {
	double t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
	double tmp;
	if (y < -81284752.61947241) {
		tmp = t_0;
	} else if (y < 3.0094271212461764e+25) {
		tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
	} else {
		tmp = t_0;
	}
	return tmp;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: t_0
    real(8) :: tmp
    t_0 = 1.0d0 - log(((x / (y * y)) - ((1.0d0 / y) - (x / y))))
    if (y < (-81284752.61947241d0)) then
        tmp = t_0
    else if (y < 3.0094271212461764d+25) then
        tmp = log((exp(1.0d0) / (1.0d0 - ((x - y) / (1.0d0 - y)))))
    else
        tmp = t_0
    end if
    code = tmp
end function
public static double code(double x, double y) {
	double t_0 = 1.0 - Math.log(((x / (y * y)) - ((1.0 / y) - (x / y))));
	double tmp;
	if (y < -81284752.61947241) {
		tmp = t_0;
	} else if (y < 3.0094271212461764e+25) {
		tmp = Math.log((Math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
	} else {
		tmp = t_0;
	}
	return tmp;
}
def code(x, y):
	t_0 = 1.0 - math.log(((x / (y * y)) - ((1.0 / y) - (x / y))))
	tmp = 0
	if y < -81284752.61947241:
		tmp = t_0
	elif y < 3.0094271212461764e+25:
		tmp = math.log((math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))))
	else:
		tmp = t_0
	return tmp
function code(x, y)
	t_0 = Float64(1.0 - log(Float64(Float64(x / Float64(y * y)) - Float64(Float64(1.0 / y) - Float64(x / y)))))
	tmp = 0.0
	if (y < -81284752.61947241)
		tmp = t_0;
	elseif (y < 3.0094271212461764e+25)
		tmp = log(Float64(exp(1.0) / Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))));
	else
		tmp = t_0;
	end
	return tmp
end
function tmp_2 = code(x, y)
	t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
	tmp = 0.0;
	if (y < -81284752.61947241)
		tmp = t_0;
	elseif (y < 3.0094271212461764e+25)
		tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
	else
		tmp = t_0;
	end
	tmp_2 = tmp;
end
code[x_, y_] := Block[{t$95$0 = N[(1.0 - N[Log[N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - N[(N[(1.0 / y), $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[Less[y, -81284752.61947241], t$95$0, If[Less[y, 3.0094271212461764e+25], N[Log[N[(N[Exp[1.0], $MachinePrecision] / N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], t$95$0]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := 1 - \log \left(\frac{x}{y \cdot y} - \left(\frac{1}{y} - \frac{x}{y}\right)\right)\\
\mathbf{if}\;y < -81284752.61947241:\\
\;\;\;\;t\_0\\

\mathbf{elif}\;y < 3.0094271212461764 \cdot 10^{+25}:\\
\;\;\;\;\log \left(\frac{e^{1}}{1 - \frac{x - y}{1 - y}}\right)\\

\mathbf{else}:\\
\;\;\;\;t\_0\\


\end{array}
\end{array}
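
An observation added here (not part of the generated report) on how the developer target works: inside the logarithm, the original argument simplifies exactly to (1 - x)/(1 - y), and the outer-regime substitute x/(y*y) - (1/y - x/y) agrees with it up to a term of order 1/y^2, so t_0 computes the small argument directly rather than as one minus a quantity close to one, sidestepping cancellation when |y| is very large. The middle regime instead folds the leading 1 into the logarithm via 1 - log(z) = log(e^1 / z), as the code shows.

\[\frac{1 - x}{1 - y} = \frac{x - 1}{y} \cdot \frac{1}{1 - 1/y} = \frac{x - 1}{y} + \frac{x - 1}{y^{2}} + O\left(y^{-3}\right), \qquad \frac{x}{y^{2}} - \left(\frac{1}{y} - \frac{x}{y}\right) = \frac{x - 1}{y} + \frac{x}{y^{2}} \]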

Reproduce

?
herbie shell --seed 2024135 
(FPCore (x y)
  :name "Numeric.SpecFunctions:invIncompleteGamma from math-functions-0.1.5.2, B"
  :precision binary64

  :alt
  (! :herbie-platform default (if (< y -8128475261947241/100000000) (- 1 (log (- (/ x (* y y)) (- (/ 1 y) (/ x y))))) (if (< y 30094271212461764000000000) (log (/ (exp 1) (- 1 (/ (- x y) (- 1 y))))) (- 1 (log (- (/ x (* y y)) (- (/ 1 y) (/ x y))))))))

  (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))