
; Initial program: 1 - log(1 - (x - y)/(1 - y)) evaluated in binary64.
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
double code(double x, double y) {
return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
! Computes 1 - log(1 - (x - y)/(1 - y)) in double precision.
! Direct translation of the FPCore expression; the report's alternatives
! below replace it with log1p-based forms for better accuracy.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
/** Returns 1 - ln(1 - (x - y)/(1 - y)) in binary64. */
public static double code(double x, double y) {
    final double ratio = (x - y) / (1.0 - y);
    return 1.0 - Math.log(1.0 - ratio);
}
def code(x, y):
    """Return 1 - log(1 - (x - y)/(1 - y))."""
    ratio = (x - y) / (1.0 - y)
    return 1.0 - math.log(1.0 - ratio)
# 1 - log(1 - (x - y)/(1 - y)); Float64(...) forces binary64 rounding after each operation.
function code(x, y) return Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y))))) end
% Computes 1 - log(1 - (x - y)/(1 - y)).
% Reformatted: the generated one-line form put the declaration and body on
% the same line, which is not valid MATLAB function-file syntax.
function tmp = code(x, y)
    tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
(* 1 - log(1 - (x - y)/(1 - y)); N[..., $MachinePrecision] rounds each subexpression to machine precision. *)
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Initial program (repeated): 1 - log(1 - (x - y)/(1 - y)) in binary64.
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
double code(double x, double y) {
return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
! Computes 1 - log(1 - (x - y)/(1 - y)) in double precision (initial program, repeated).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
/** Returns 1 - ln(1 - (x - y)/(1 - y)) (initial program, repeated). */
public static double code(double x, double y) {
    final double ratio = (x - y) / (1.0 - y);
    return 1.0 - Math.log(1.0 - ratio);
}
def code(x, y):
    """Return 1 - log(1 - (x - y)/(1 - y)) (initial program, repeated)."""
    ratio = (x - y) / (1.0 - y)
    return 1.0 - math.log(1.0 - ratio)
# 1 - log(1 - (x - y)/(1 - y)); Float64(...) forces binary64 rounding after each operation.
function code(x, y) return Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y))))) end
% Computes 1 - log(1 - (x - y)/(1 - y)) (initial program, repeated).
% Reformatted from the generated single-line form into valid MATLAB layout.
function tmp = code(x, y)
    tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
(* 1 - log(1 - (x - y)/(1 - y)); each subexpression rounded to machine precision. *)
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}
; Alternative 1: use log1p when (x-y)/(1-y) is small (avoids cancellation in 1 - ratio),
; otherwise rewrite the log argument as (x-1)/y.
(FPCore (x y) :precision binary64 (if (<= (/ (- x y) (- 1.0 y)) 0.0004) (- 1.0 (log1p (/ (- x y) (+ y -1.0)))) (- 1.0 (log (/ (+ x -1.0) y)))))
double code(double x, double y) {
double tmp;
if (((x - y) / (1.0 - y)) <= 0.0004) {
tmp = 1.0 - log1p(((x - y) / (y + -1.0)));
} else {
tmp = 1.0 - log(((x + -1.0) / y));
}
return tmp;
}
/**
 * Alternative 1: branch on the ratio (x - y)/(1 - y).
 * Small ratios use log1p to avoid cancellation; otherwise the log
 * argument is rewritten as (x - 1)/y.
 */
public static double code(double x, double y) {
    final double ratio = (x - y) / (1.0 - y);
    if (ratio <= 0.0004) {
        return 1.0 - Math.log1p((x - y) / (y + -1.0));
    }
    return 1.0 - Math.log((x + -1.0) / y);
}
def code(x, y):
    """Alternative 1: piecewise form of 1 - log(1 - (x - y)/(1 - y)).

    Uses log1p when the ratio is small to avoid cancellation.
    Reformatted: the generated code had all statements collapsed onto one
    line, which is not valid Python syntax.
    """
    tmp = 0
    if (x - y) / (1.0 - y) <= 0.0004:
        tmp = 1.0 - math.log1p((x - y) / (y + -1.0))
    else:
        tmp = 1.0 - math.log((x + -1.0) / y)
    return tmp
# Alternative 1: piecewise form; log1p branch avoids cancellation for small ratios.
# Reformatted: the generated one-liner juxtaposed statements without separators,
# which Julia does not parse.
function code(x, y)
    tmp = 0.0
    if (Float64(Float64(x - y) / Float64(1.0 - y)) <= 0.0004)
        tmp = Float64(1.0 - log1p(Float64(Float64(x - y) / Float64(y + -1.0))))
    else
        tmp = Float64(1.0 - log(Float64(Float64(x + -1.0) / y)))
    end
    return tmp
end
(* Alternative 1: log1p branch for small (x - y)/(1 - y), else log((x - 1)/y). *)
code[x_, y_] := If[LessEqual[N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision], 0.0004], N[(1.0 - N[Log[1 + N[(N[(x - y), $MachinePrecision] / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[N[(N[(x + -1.0), $MachinePrecision] / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{x - y}{1 - y} \leq 0.0004:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \log \left(\frac{x + -1}{y}\right)\\
\end{array}
\end{array}
if (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)) < 4.00000000000000019e-4:
Initial program 100.0%
sub-neg100.0%
log1p-define100.0%
distribute-neg-frac2100.0%
neg-sub0100.0%
associate--r-100.0%
metadata-eval100.0%
+-commutative100.0%
Simplified100.0%
if 4.00000000000000019e-4 < (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)):
Initial program 4.9%
sub-neg4.9%
log1p-define4.9%
distribute-neg-frac24.9%
neg-sub04.9%
associate--r-4.9%
metadata-eval4.9%
+-commutative4.9%
Simplified4.9%
Taylor expanded in y around -inf 82.0%
sub-neg82.0%
metadata-eval82.0%
distribute-lft-in82.0%
metadata-eval82.0%
+-commutative82.0%
log1p-define82.0%
mul-1-neg82.0%
Simplified82.0%
*-un-lft-identity82.0%
*-commutative82.0%
Applied egg-rr99.6%
*-rgt-identity99.6%
Simplified99.6%
Final simplification99.9%
; Alternative 2: for y <= -1.7 or y > 1 use log((x-1)/y); otherwise the
; Taylor-derived form 1 - (y + log1p(-x)).
(FPCore (x y) :precision binary64 (if (or (<= y -1.7) (not (<= y 1.0))) (- 1.0 (log (/ (+ x -1.0) y))) (- 1.0 (+ y (log1p (- x))))))
double code(double x, double y) {
double tmp;
if ((y <= -1.7) || !(y <= 1.0)) {
tmp = 1.0 - log(((x + -1.0) / y));
} else {
tmp = 1.0 - (y + log1p(-x));
}
return tmp;
}
/**
 * Alternative 2: for y <= -1.7 or y > 1 use log((x - 1)/y);
 * otherwise the Taylor-derived form 1 - (y + log1p(-x)).
 */
public static double code(double x, double y) {
    if (y <= -1.7 || !(y <= 1.0)) {
        return 1.0 - Math.log((x + -1.0) / y);
    }
    return 1.0 - (y + Math.log1p(-x));
}
def code(x, y):
    """Alternative 2: branch on y.

    For y <= -1.7 or y > 1 use log((x - 1)/y); otherwise the
    Taylor-derived form 1 - (y + log1p(-x)).
    Reformatted from the invalid collapsed one-line generated code.
    """
    tmp = 0
    if (y <= -1.7) or not (y <= 1.0):
        tmp = 1.0 - math.log((x + -1.0) / y)
    else:
        tmp = 1.0 - (y + math.log1p(-x))
    return tmp
# Alternative 2: branch on y (reformatted from invalid collapsed one-liner).
function code(x, y)
    tmp = 0.0
    if ((y <= -1.7) || !(y <= 1.0))
        tmp = Float64(1.0 - log(Float64(Float64(x + -1.0) / y)))
    else
        tmp = Float64(1.0 - Float64(y + log1p(Float64(-x))))
    end
    return tmp
end
(* Alternative 2: y <= -1.7 or y > 1 -> log((x - 1)/y); else 1 - (y + log1p(-x)). *)
code[x_, y_] := If[Or[LessEqual[y, -1.7], N[Not[LessEqual[y, 1.0]], $MachinePrecision]], N[(1.0 - N[Log[N[(N[(x + -1.0), $MachinePrecision] / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[(y + N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -1.7 \lor \neg \left(y \leq 1\right):\\
\;\;\;\;1 - \log \left(\frac{x + -1}{y}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\
\end{array}
\end{array}
if y < -1.69999999999999996 or 1 < y:
Initial program 32.1%
sub-neg32.1%
log1p-define32.1%
distribute-neg-frac232.1%
neg-sub032.1%
associate--r-32.1%
metadata-eval32.1%
+-commutative32.1%
Simplified32.1%
Taylor expanded in y around -inf 73.4%
sub-neg73.4%
metadata-eval73.4%
distribute-lft-in73.4%
metadata-eval73.4%
+-commutative73.4%
log1p-define73.4%
mul-1-neg73.4%
Simplified73.4%
*-un-lft-identity73.4%
*-commutative73.4%
Applied egg-rr99.5%
*-rgt-identity99.5%
Simplified99.5%
if -1.69999999999999996 < y < 1:
Initial program 100.0%
sub-neg100.0%
log1p-define100.0%
distribute-neg-frac2100.0%
neg-sub0100.0%
associate--r-100.0%
metadata-eval100.0%
+-commutative100.0%
Simplified100.0%
Taylor expanded in y around 0 99.6%
+-commutative99.6%
div-sub99.6%
fma-define99.6%
mul-1-neg99.6%
sub-neg99.6%
*-inverses99.6%
+-commutative99.6%
metadata-eval99.6%
distribute-lft-in99.6%
metadata-eval99.6%
sub-neg99.6%
fma-undefine99.6%
*-rgt-identity99.6%
sub-neg99.6%
metadata-eval99.6%
distribute-lft-in99.6%
metadata-eval99.6%
+-commutative99.6%
Simplified99.6%
Final simplification99.5%
; Alternative 3: for y <= -20 the ratio tends to 1, so use 1 - log(-1/y);
; otherwise the Taylor-derived form 1 - (y + log1p(-x)).
(FPCore (x y) :precision binary64 (if (<= y -20.0) (- 1.0 (log (/ -1.0 y))) (- 1.0 (+ y (log1p (- x))))))
double code(double x, double y) {
double tmp;
if (y <= -20.0) {
tmp = 1.0 - log((-1.0 / y));
} else {
tmp = 1.0 - (y + log1p(-x));
}
return tmp;
}
/**
 * Alternative 3: for y <= -20 use 1 - log(-1/y);
 * otherwise the Taylor-derived form 1 - (y + log1p(-x)).
 */
public static double code(double x, double y) {
    if (y <= -20.0) {
        return 1.0 - Math.log(-1.0 / y);
    }
    return 1.0 - (y + Math.log1p(-x));
}
def code(x, y):
    """Alternative 3: 1 - log(-1/y) for y <= -20, else 1 - (y + log1p(-x)).

    Reformatted from the invalid collapsed one-line generated code.
    """
    tmp = 0
    if y <= -20.0:
        tmp = 1.0 - math.log(-1.0 / y)
    else:
        tmp = 1.0 - (y + math.log1p(-x))
    return tmp
# Alternative 3 (reformatted from invalid collapsed one-liner).
function code(x, y)
    tmp = 0.0
    if (y <= -20.0)
        tmp = Float64(1.0 - log(Float64(-1.0 / y)))
    else
        tmp = Float64(1.0 - Float64(y + log1p(Float64(-x))))
    end
    return tmp
end
(* Alternative 3: y <= -20 -> 1 - log(-1/y); else 1 - (y + log1p(-x)). *)
code[x_, y_] := If[LessEqual[y, -20.0], N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[(y + N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -20:\\
\;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \left(y + \mathsf{log1p}\left(-x\right)\right)\\
\end{array}
\end{array}
if y < -20:
Initial program 24.7%
sub-neg24.7%
log1p-define24.7%
distribute-neg-frac224.7%
neg-sub024.7%
associate--r-24.7%
metadata-eval24.7%
+-commutative24.7%
Simplified24.7%
Taylor expanded in y around -inf 98.7%
sub-neg98.7%
metadata-eval98.7%
distribute-lft-in98.7%
metadata-eval98.7%
+-commutative98.7%
log1p-define98.7%
mul-1-neg98.7%
Simplified98.7%
Taylor expanded in x around 0 66.9%
if -20 < y:
Initial program 92.9%
sub-neg92.9%
log1p-define92.9%
distribute-neg-frac292.9%
neg-sub092.9%
associate--r-92.9%
metadata-eval92.9%
+-commutative92.9%
Simplified92.9%
Taylor expanded in y around 0 84.5%
+-commutative84.5%
div-sub84.5%
fma-define84.5%
mul-1-neg84.5%
sub-neg84.5%
*-inverses84.5%
+-commutative84.5%
metadata-eval84.5%
distribute-lft-in84.5%
metadata-eval84.5%
sub-neg84.5%
fma-undefine84.5%
*-rgt-identity84.5%
sub-neg84.5%
metadata-eval84.5%
distribute-lft-in84.5%
metadata-eval84.5%
+-commutative84.5%
Simplified84.5%
Final simplification79.1%
; Alternative 4: 1 - log(-1/y) for very negative y, else 1 - log1p(-x).
(FPCore (x y) :precision binary64 (if (<= y -65000000.0) (- 1.0 (log (/ -1.0 y))) (- 1.0 (log1p (- x)))))
double code(double x, double y) {
double tmp;
if (y <= -65000000.0) {
tmp = 1.0 - log((-1.0 / y));
} else {
tmp = 1.0 - log1p(-x);
}
return tmp;
}
/** Alternative 4: for y <= -6.5e7 use 1 - log(-1/y); otherwise 1 - log1p(-x). */
public static double code(double x, double y) {
    if (y <= -65000000.0) {
        return 1.0 - Math.log(-1.0 / y);
    }
    return 1.0 - Math.log1p(-x);
}
def code(x, y):
    """Alternative 4: 1 - log(-1/y) for y <= -6.5e7, else 1 - log1p(-x).

    Reformatted from the invalid collapsed one-line generated code.
    """
    tmp = 0
    if y <= -65000000.0:
        tmp = 1.0 - math.log(-1.0 / y)
    else:
        tmp = 1.0 - math.log1p(-x)
    return tmp
# Alternative 4 (reformatted from invalid collapsed one-liner).
function code(x, y)
    tmp = 0.0
    if (y <= -65000000.0)
        tmp = Float64(1.0 - log(Float64(-1.0 / y)))
    else
        tmp = Float64(1.0 - log1p(Float64(-x)))
    end
    return tmp
end
(* Alternative 4: y <= -6.5e7 -> 1 - log(-1/y); else 1 - log1p(-x). *)
code[x_, y_] := If[LessEqual[y, -65000000.0], N[(1.0 - N[Log[N[(-1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -65000000:\\
\;\;\;\;1 - \log \left(\frac{-1}{y}\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \mathsf{log1p}\left(-x\right)\\
\end{array}
\end{array}
if y < -6.5e7:
Initial program 23.8%
sub-neg23.8%
log1p-define23.8%
distribute-neg-frac223.8%
neg-sub023.8%
associate--r-23.8%
metadata-eval23.8%
+-commutative23.8%
Simplified23.8%
Taylor expanded in y around -inf 99.1%
sub-neg99.1%
metadata-eval99.1%
distribute-lft-in99.1%
metadata-eval99.1%
+-commutative99.1%
log1p-define99.1%
mul-1-neg99.1%
Simplified99.1%
Taylor expanded in x around 0 67.8%
if -6.5e7 < y:
Initial program 93.0%
sub-neg93.0%
log1p-define93.0%
distribute-neg-frac293.0%
neg-sub093.0%
associate--r-93.0%
metadata-eval93.0%
+-commutative93.0%
Simplified93.0%
Taylor expanded in y around 0 82.8%
log1p-define82.8%
mul-1-neg82.8%
Simplified82.8%
Final simplification78.3%
; Alternative 5: unconditional approximation 1 - log1p(-x); y is ignored.
(FPCore (x y) :precision binary64 (- 1.0 (log1p (- x))))
double code(double x, double y) {
return 1.0 - log1p(-x);
}
/** Alternative 5: approximation 1 - log1p(-x); the y parameter is unused. */
public static double code(double x, double y) {
    final double negX = -x;
    return 1.0 - Math.log1p(negX);
}
def code(x, y):
    """Alternative 5: approximation 1 - log1p(-x); ``y`` is unused."""
    neg_x = -x
    return 1.0 - math.log1p(neg_x)
# Alternative 5: approximation 1 - log1p(-x); y is unused.
function code(x, y) return Float64(1.0 - log1p(Float64(-x))) end
(* Alternative 5: approximation 1 - log1p(-x); y is unused. *)
code[x_, y_] := N[(1.0 - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \mathsf{log1p}\left(-x\right)
\end{array}
Initial program 72.1%
sub-neg72.1%
log1p-define72.1%
distribute-neg-frac272.1%
neg-sub072.1%
associate--r-72.1%
metadata-eval72.1%
+-commutative72.1%
Simplified72.1%
Taylor expanded in y around 0 61.8%
log1p-define61.8%
mul-1-neg61.8%
Simplified61.8%
Final simplification61.8%
; Alternative 6: degenerate constant 1 - log1p(-1) (diverges to +inf); inputs unused.
(FPCore (x y) :precision binary64 (- 1.0 (log1p -1.0)))
double code(double x, double y) {
return 1.0 - log1p(-1.0);
}
/**
 * Alternative 6: degenerate constant form 1 - log1p(-1); both inputs unused.
 * Math.log1p(-1.0) is negative infinity, so the result is positive infinity.
 */
public static double code(double x, double y) {
    final double c = Math.log1p(-1.0);
    return 1.0 - c;
}
def code(x, y):
    """Alternative 6: degenerate constant form 1 - log1p(-1); inputs unused.

    math.log1p(-1.0) is outside math.log1p's domain, so this raises on
    every call (unlike the C version, which returns infinity).
    """
    constant = math.log1p(-1.0)
    return 1.0 - constant
# Alternative 6: degenerate constant 1 - log1p(-1.0) = Inf; inputs unused.
function code(x, y) return Float64(1.0 - log1p(-1.0)) end
(* Alternative 6: degenerate constant 1 - log1p(-1); inputs unused. *)
code[x_, y_] := N[(1.0 - N[Log[1 + -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \mathsf{log1p}\left(-1\right)
\end{array}
Initial program 72.1%
sub-neg72.1%
log1p-define72.1%
distribute-neg-frac272.1%
neg-sub072.1%
associate--r-72.1%
metadata-eval72.1%
+-commutative72.1%
Simplified72.1%
Taylor expanded in y around inf 2.4%
Final simplification2.4%
; Final alternative: expanded log form t_0 for extreme y, e/(1 - ratio) rewrite otherwise.
(FPCore (x y)
:precision binary64
(let* ((t_0 (- 1.0 (log (- (/ x (* y y)) (- (/ 1.0 y) (/ x y)))))))
(if (< y -81284752.61947241)
t_0
(if (< y 3.0094271212461764e+25)
(log (/ (exp 1.0) (- 1.0 (/ (- x y) (- 1.0 y)))))
t_0))))
double code(double x, double y) {
double t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
double tmp;
if (y < -81284752.61947241) {
tmp = t_0;
} else if (y < 3.0094271212461764e+25) {
tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
} else {
tmp = t_0;
}
return tmp;
}
! Final alternative: t_0 is the expanded log form, used for extreme y;
! the middle range rewrites 1 - log(z) as log(e / z).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: t_0
real(8) :: tmp
! t_0 is computed unconditionally, as in the generated FPCore let-binding.
t_0 = 1.0d0 - log(((x / (y * y)) - ((1.0d0 / y) - (x / y))))
if (y < (-81284752.61947241d0)) then
tmp = t_0
else if (y < 3.0094271212461764d+25) then
tmp = log((exp(1.0d0) / (1.0d0 - ((x - y) / (1.0d0 - y)))))
else
tmp = t_0
end if
code = tmp
end function
/**
 * Final alternative: t_0 is the expanded log form, used for extreme y;
 * the middle range rewrites 1 - log(z) as log(e / z).
 * t_0 is computed unconditionally, matching the generated code.
 */
public static double code(double x, double y) {
    final double t_0 = 1.0 - Math.log((x / (y * y)) - ((1.0 / y) - (x / y)));
    if (y < -81284752.61947241) {
        return t_0;
    }
    if (y < 3.0094271212461764e+25) {
        return Math.log(Math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y))));
    }
    return t_0;
}
def code(x, y):
    """Final alternative: expanded log form with a three-way branch on y.

    t_0 is computed unconditionally (as in the generated code), so inputs
    that make its log argument non-positive raise ValueError even when the
    middle branch would be taken.
    Reformatted from the invalid collapsed one-line generated code.
    """
    t_0 = 1.0 - math.log((x / (y * y)) - ((1.0 / y) - (x / y)))
    tmp = 0
    if y < -81284752.61947241:
        tmp = t_0
    elif y < 3.0094271212461764e+25:
        tmp = math.log(math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y))))
    else:
        tmp = t_0
    return tmp
# Final alternative (reformatted from invalid collapsed one-liner).
# t_0 is computed unconditionally, matching the generated code.
function code(x, y)
    t_0 = Float64(1.0 - log(Float64(Float64(x / Float64(y * y)) - Float64(Float64(1.0 / y) - Float64(x / y)))))
    tmp = 0.0
    if (y < -81284752.61947241)
        tmp = t_0
    elseif (y < 3.0094271212461764e+25)
        tmp = log(Float64(exp(1.0) / Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
    else
        tmp = t_0
    end
    return tmp
end
% Final alternative: t_0 is the expanded log form, used for extreme y;
% the middle range rewrites 1 - log(z) as log(e / z).
% Reformatted from the generated single-line form into valid MATLAB layout.
function tmp_2 = code(x, y)
    t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
    tmp = 0.0;
    if (y < -81284752.61947241)
        tmp = t_0;
    elseif (y < 3.0094271212461764e+25)
        tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
    else
        tmp = t_0;
    end
    tmp_2 = tmp;
end
(* Final alternative: t$95$0 is the expanded log form for extreme y; middle range uses Log[E / (1 - ratio)]. *)
code[x_, y_] := Block[{t$95$0 = N[(1.0 - N[Log[N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - N[(N[(1.0 / y), $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[Less[y, -81284752.61947241], t$95$0, If[Less[y, 3.0094271212461764e+25], N[Log[N[(N[Exp[1.0], $MachinePrecision] / N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 - \log \left(\frac{x}{y \cdot y} - \left(\frac{1}{y} - \frac{x}{y}\right)\right)\\
\mathbf{if}\;y < -81284752.61947241:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;y < 3.0094271212461764 \cdot 10^{+25}:\\
\;\;\;\;\log \left(\frac{e^{1}}{1 - \frac{x - y}{1 - y}}\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
herbie shell --seed 2024082
; Original problem statement with the chosen alternative attached as :alt.
(FPCore (x y)
:name "Numeric.SpecFunctions:invIncompleteGamma from math-functions-0.1.5.2, B"
:precision binary64
:alt
(if (< y -81284752.61947241) (- 1.0 (log (- (/ x (* y y)) (- (/ 1.0 y) (/ x y))))) (if (< y 3.0094271212461764e+25) (log (/ (exp 1.0) (- 1.0 (/ (- x y) (- 1.0 y))))) (- 1.0 (log (- (/ x (* y y)) (- (/ 1.0 y) (/ x y)))))))
(- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))