
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
double code(double x, double y) {
return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
! Computes 1 - log(1 - (x - y)/(1 - y)) in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
/** Computes 1 - log(1 - (x - y)/(1 - y)) in binary64. */
public static double code(double x, double y) {
    final double ratio = (x - y) / (1.0 - y);
    return 1.0 - Math.log(1.0 - ratio);
}
def code(x, y):
    """Compute 1 - log(1 - (x - y)/(1 - y)) in binary64."""
    ratio = (x - y) / (1.0 - y)
    return 1.0 - math.log(1.0 - ratio)
# Computes 1 - log(1 - (x - y)/(1 - y)) in binary64.
function code(x, y)
    ratio = Float64(Float64(x - y) / Float64(1.0 - y))
    return Float64(1.0 - log(Float64(1.0 - ratio)))
end
% Computes 1 - log(1 - (x - y)/(1 - y)) in binary64.
% Reformatted: the function declaration and body were collapsed onto one
% line, which MATLAB does not accept.
function tmp = code(x, y)
    tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))
/* Computes 1 - log(1 - (x - y)/(1 - y)) in binary64. */
double code(double x, double y) {
return 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
}
! Computes 1 - log(1 - (x - y)/(1 - y)) in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 - log((1.0d0 - ((x - y) / (1.0d0 - y))))
end function
/** Computes 1 - log(1 - (x - y)/(1 - y)) in binary64. */
public static double code(double x, double y) {
return 1.0 - Math.log((1.0 - ((x - y) / (1.0 - y))));
}
# Computes 1 - log(1 - (x - y)/(1 - y)) in binary64.
def code(x, y): return 1.0 - math.log((1.0 - ((x - y) / (1.0 - y))))
# Computes 1 - log(1 - (x - y)/(1 - y)) in binary64.
function code(x, y) return Float64(1.0 - log(Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y))))) end
% Computes 1 - log(1 - (x - y)/(1 - y)) in binary64.
% Reformatted from an invalid collapsed one-line function definition.
function tmp = code(x, y)
    tmp = 1.0 - log((1.0 - ((x - y) / (1.0 - y))));
end
code[x_, y_] := N[(1.0 - N[Log[N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \log \left(1 - \frac{x - y}{1 - y}\right)
\end{array}
(FPCore (x y) :precision binary64 (if (<= (/ (- x y) (- 1.0 y)) 0.95) (- 1.0 (log1p (/ (- x y) (+ y -1.0)))) (+ 1.0 (log (/ (* y -2.0) (+ 2.0 (* x -2.0)))))))
double code(double x, double y) {
double tmp;
if (((x - y) / (1.0 - y)) <= 0.95) {
tmp = 1.0 - log1p(((x - y) / (y + -1.0)));
} else {
tmp = 1.0 + log(((y * -2.0) / (2.0 + (x * -2.0))));
}
return tmp;
}
/** Herbie alternative 1: log1p form below the 0.95 threshold, else a
 *  rearranged plain log. */
public static double code(double x, double y) {
    final double ratio = (x - y) / (1.0 - y);
    return (ratio <= 0.95)
            ? 1.0 - Math.log1p((x - y) / (y + -1.0))
            : 1.0 + Math.log((y * -2.0) / (2.0 + (x * -2.0)));
}
def code(x, y):
    """Herbie alternative 1: 1 - log1p((x-y)/(y-1)) below the 0.95
    threshold, otherwise 1 + log(-2y/(2-2x)).

    Reformatted: the original had every statement jammed onto one line,
    which is not valid Python. Semantics are unchanged.
    """
    tmp = 0
    if ((x - y) / (1.0 - y)) <= 0.95:
        tmp = 1.0 - math.log1p(((x - y) / (y + -1.0)))
    else:
        tmp = 1.0 + math.log(((y * -2.0) / (2.0 + (x * -2.0))))
    return tmp
# Herbie alternative 1: log1p form below the 0.95 threshold, else a
# rearranged plain log. Reformatted from a collapsed single line that
# Julia cannot parse; semantics unchanged.
function code(x, y)
    tmp = 0.0
    if (Float64(Float64(x - y) / Float64(1.0 - y)) <= 0.95)
        tmp = Float64(1.0 - log1p(Float64(Float64(x - y) / Float64(y + -1.0))))
    else
        tmp = Float64(1.0 + log(Float64(Float64(y * -2.0) / Float64(2.0 + Float64(x * -2.0)))))
    end
    return tmp
end
code[x_, y_] := If[LessEqual[N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision], 0.95], N[(1.0 - N[Log[1 + N[(N[(x - y), $MachinePrecision] / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 + N[Log[N[(N[(y * -2.0), $MachinePrecision] / N[(2.0 + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{x - y}{1 - y} \leq 0.95:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\
\mathbf{else}:\\
\;\;\;\;1 + \log \left(\frac{y \cdot -2}{2 + x \cdot -2}\right)\\
\end{array}
\end{array}
if (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)) < 0.94999999999999996
Initial program 99.9%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64100.0%
Simplified100.0%
if 0.94999999999999996 < (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y))
Initial program 4.3%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f644.3%
Simplified4.3%
clear-numN/A
associate-/r/N/A
flip-+N/A
associate-/r/N/A
associate-*l*N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
metadata-evalN/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
--lowering--.f642.9%
Applied egg-rr2.9%
flip-+N/A
clear-numN/A
log-recN/A
neg-lowering-neg.f64N/A
log-lowering-log.f64N/A
Applied egg-rr3.9%
Taylor expanded in y around inf
associate-*r/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64100.0%
Simplified100.0%
Final simplification100.0%
(FPCore (x y) :precision binary64 (if (<= (/ (- x y) (- 1.0 y)) 0.95) (- 1.0 (log1p (/ (- x y) (+ y -1.0)))) (+ 1.0 (log (/ (+ y -1.0) x)))))
double code(double x, double y) {
double tmp;
if (((x - y) / (1.0 - y)) <= 0.95) {
tmp = 1.0 - log1p(((x - y) / (y + -1.0)));
} else {
tmp = 1.0 + log(((y + -1.0) / x));
}
return tmp;
}
/** Herbie alternative 2: log1p form below the 0.95 threshold, else
 *  1 + log((y - 1)/x). */
public static double code(double x, double y) {
    final double ratio = (x - y) / (1.0 - y);
    return (ratio <= 0.95)
            ? 1.0 - Math.log1p((x - y) / (y + -1.0))
            : 1.0 + Math.log((y + -1.0) / x);
}
def code(x, y):
    """Herbie alternative 2: 1 - log1p((x-y)/(y-1)) below the 0.95
    threshold, otherwise 1 + log((y-1)/x).

    Reformatted from an invalid collapsed one-liner; semantics unchanged.
    """
    tmp = 0
    if ((x - y) / (1.0 - y)) <= 0.95:
        tmp = 1.0 - math.log1p(((x - y) / (y + -1.0)))
    else:
        tmp = 1.0 + math.log(((y + -1.0) / x))
    return tmp
# Herbie alternative 2: log1p form below the 0.95 threshold, else
# 1 + log((y - 1)/x). Reformatted from an unparsable collapsed line.
function code(x, y)
    tmp = 0.0
    if (Float64(Float64(x - y) / Float64(1.0 - y)) <= 0.95)
        tmp = Float64(1.0 - log1p(Float64(Float64(x - y) / Float64(y + -1.0))))
    else
        tmp = Float64(1.0 + log(Float64(Float64(y + -1.0) / x)))
    end
    return tmp
end
code[x_, y_] := If[LessEqual[N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision], 0.95], N[(1.0 - N[Log[1 + N[(N[(x - y), $MachinePrecision] / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 + N[Log[N[(N[(y + -1.0), $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{x - y}{1 - y} \leq 0.95:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x - y}{y + -1}\right)\\
\mathbf{else}:\\
\;\;\;\;1 + \log \left(\frac{y + -1}{x}\right)\\
\end{array}
\end{array}
if (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)) < 0.94999999999999996Initial program 99.9%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64100.0%
Simplified100.0%
if 0.94999999999999996 < (/.f64 (-.f64 x y) (-.f64 #s(literal 1 binary64) y)) Initial program 4.3%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f644.3%
Simplified4.3%
clear-numN/A
associate-/r/N/A
flip-+N/A
associate-/r/N/A
associate-*l*N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
metadata-evalN/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
--lowering--.f642.9%
Applied egg-rr2.9%
flip-+N/A
clear-numN/A
log-recN/A
neg-lowering-neg.f64N/A
log-lowering-log.f64N/A
Applied egg-rr3.9%
Taylor expanded in x around inf
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6441.1%
Simplified41.1%
Final simplification80.9%
(FPCore (x y)
:precision binary64
(let* ((t_0 (+ 1.0 (log (/ (+ y -1.0) x)))))
(if (<= x -115000000.0)
t_0
(if (<= x 1.0) (- 1.0 (log1p (/ x (+ y -1.0)))) t_0))))
double code(double x, double y) {
double t_0 = 1.0 + log(((y + -1.0) / x));
double tmp;
if (x <= -115000000.0) {
tmp = t_0;
} else if (x <= 1.0) {
tmp = 1.0 - log1p((x / (y + -1.0)));
} else {
tmp = t_0;
}
return tmp;
}
/** Herbie alternative 3: log1p form for -1.15e8 < x <= 1, otherwise the
 *  shared 1 + log((y - 1)/x) fallback. */
public static double code(double x, double y) {
    final double t_0 = 1.0 + Math.log((y + -1.0) / x);
    if (x > -115000000.0 && x <= 1.0) {
        return 1.0 - Math.log1p(x / (y + -1.0));
    }
    return t_0;
}
def code(x, y):
    """Herbie alternative 3: log1p form for -1.15e8 < x <= 1, otherwise
    the fallback 1 + log((y-1)/x) (computed eagerly, as originally).

    Reformatted from an invalid collapsed one-liner; semantics unchanged.
    """
    t_0 = 1.0 + math.log(((y + -1.0) / x))
    tmp = 0
    if x <= -115000000.0:
        tmp = t_0
    elif x <= 1.0:
        tmp = 1.0 - math.log1p((x / (y + -1.0)))
    else:
        tmp = t_0
    return tmp
# Herbie alternative 3: log1p form for -1.15e8 < x <= 1, otherwise the
# eager fallback t_0. Reformatted from an unparsable collapsed line.
function code(x, y)
    t_0 = Float64(1.0 + log(Float64(Float64(y + -1.0) / x)))
    tmp = 0.0
    if (x <= -115000000.0)
        tmp = t_0
    elseif (x <= 1.0)
        tmp = Float64(1.0 - log1p(Float64(x / Float64(y + -1.0))))
    else
        tmp = t_0
    end
    return tmp
end
code[x_, y_] := Block[{t$95$0 = N[(1.0 + N[Log[N[(N[(y + -1.0), $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -115000000.0], t$95$0, If[LessEqual[x, 1.0], N[(1.0 - N[Log[1 + N[(x / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 + \log \left(\frac{y + -1}{x}\right)\\
\mathbf{if}\;x \leq -115000000:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;x \leq 1:\\
\;\;\;\;1 - \mathsf{log1p}\left(\frac{x}{y + -1}\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if x < -1.15e8 or 1 < x Initial program 70.3%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f6470.3%
Simplified70.3%
clear-numN/A
associate-/r/N/A
flip-+N/A
associate-/r/N/A
associate-*l*N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
metadata-evalN/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
--lowering--.f6457.8%
Applied egg-rr57.8%
flip-+N/A
clear-numN/A
log-recN/A
neg-lowering-neg.f64N/A
log-lowering-log.f64N/A
Applied egg-rr32.9%
Taylor expanded in x around inf
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6499.2%
Simplified99.2%
if -1.15e8 < x < 1Initial program 68.1%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f6468.1%
Simplified68.1%
Taylor expanded in x around inf
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6470.4%
Simplified70.4%
Final simplification81.7%
(FPCore (x y) :precision binary64 (let* ((t_0 (- 1.0 (log1p (/ x y))))) (if (<= y -1.0) t_0 (if (<= y 1.9e-9) (- 1.0 (log1p (- 0.0 x))) t_0))))
double code(double x, double y) {
double t_0 = 1.0 - log1p((x / y));
double tmp;
if (y <= -1.0) {
tmp = t_0;
} else if (y <= 1.9e-9) {
tmp = 1.0 - log1p((0.0 - x));
} else {
tmp = t_0;
}
return tmp;
}
/** Herbie alternative 4: 1 - log1p(-x) in the band -1 < y <= 1.9e-9,
 *  otherwise the shared 1 - log1p(x/y) fallback. */
public static double code(double x, double y) {
    final double t_0 = 1.0 - Math.log1p(x / y);
    if (y > -1.0 && y <= 1.9e-9) {
        return 1.0 - Math.log1p(0.0 - x);
    }
    return t_0;
}
def code(x, y):
    """Herbie alternative 4: 1 - log1p(-x) in the band -1 < y <= 1.9e-9,
    otherwise the fallback 1 - log1p(x/y) (computed eagerly).

    Reformatted from an invalid collapsed one-liner; semantics unchanged.
    """
    t_0 = 1.0 - math.log1p((x / y))
    tmp = 0
    if y <= -1.0:
        tmp = t_0
    elif y <= 1.9e-9:
        tmp = 1.0 - math.log1p((0.0 - x))
    else:
        tmp = t_0
    return tmp
# Herbie alternative 4: 1 - log1p(-x) in the band -1 < y <= 1.9e-9,
# otherwise eager fallback t_0. Reformatted from an unparsable line.
function code(x, y)
    t_0 = Float64(1.0 - log1p(Float64(x / y)))
    tmp = 0.0
    if (y <= -1.0)
        tmp = t_0
    elseif (y <= 1.9e-9)
        tmp = Float64(1.0 - log1p(Float64(0.0 - x)))
    else
        tmp = t_0
    end
    return tmp
end
code[x_, y_] := Block[{t$95$0 = N[(1.0 - N[Log[1 + N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[y, -1.0], t$95$0, If[LessEqual[y, 1.9e-9], N[(1.0 - N[Log[1 + N[(0.0 - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 - \mathsf{log1p}\left(\frac{x}{y}\right)\\
\mathbf{if}\;y \leq -1:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;y \leq 1.9 \cdot 10^{-9}:\\
\;\;\;\;1 - \mathsf{log1p}\left(0 - x\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if y < -1 or 1.90000000000000006e-9 < y Initial program 29.0%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f6429.1%
Simplified29.1%
Taylor expanded in x around inf
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6433.8%
Simplified33.8%
Taylor expanded in y around inf
/-lowering-/.f6433.5%
Simplified33.5%
if -1 < y < 1.90000000000000006e-9Initial program 100.0%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64100.0%
Simplified100.0%
div-invN/A
flip--N/A
associate-*l/N/A
/-lowering-/.f64N/A
un-div-invN/A
/-lowering-/.f64N/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
+-lowering-+.f6487.9%
Applied egg-rr87.9%
Taylor expanded in x around inf
*-lowering-*.f64N/A
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64100.0%
Simplified100.0%
clear-numN/A
associate-/r/N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64100.0%
Applied egg-rr100.0%
Taylor expanded in y around 0
mul-1-negN/A
neg-lowering-neg.f6499.9%
Simplified99.9%
Final simplification70.8%
(FPCore (x y) :precision binary64 (- 1.0 (log1p (/ x (+ y -1.0)))))
double code(double x, double y) {
return 1.0 - log1p((x / (y + -1.0)));
}
/** Herbie alternative 5: 1 - log1p(x/(y - 1)). */
public static double code(double x, double y) {
    final double q = x / (y + -1.0);
    return 1.0 - Math.log1p(q);
}
def code(x, y):
    """Herbie alternative 5: 1 - log1p(x / (y - 1))."""
    q = x / (y + -1.0)
    return 1.0 - math.log1p(q)
# Herbie alternative 5: 1 - log1p(x/(y - 1)).
function code(x, y)
    q = Float64(x / Float64(y + -1.0))
    return Float64(1.0 - log1p(q))
end
code[x_, y_] := N[(1.0 - N[Log[1 + N[(x / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \mathsf{log1p}\left(\frac{x}{y + -1}\right)
\end{array}
Initial program 68.9%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f6469.0%
Simplified69.0%
Taylor expanded in x around inf
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6471.0%
Simplified71.0%
(FPCore (x y) :precision binary64 (- 1.0 (log1p (- 0.0 x))))
double code(double x, double y) {
return 1.0 - log1p((0.0 - x));
}
/** Herbie alternative 6: 1 - log1p(-x); y is unused. */
public static double code(double x, double y) {
    final double negX = 0.0 - x;
    return 1.0 - Math.log1p(negX);
}
def code(x, y):
    """Herbie alternative 6: 1 - log1p(-x); y is unused."""
    neg_x = 0.0 - x
    return 1.0 - math.log1p(neg_x)
# Herbie alternative 6: 1 - log1p(-x); y is unused.
function code(x, y)
    neg_x = Float64(0.0 - x)
    return Float64(1.0 - log1p(neg_x))
end
code[x_, y_] := N[(1.0 - N[Log[1 + N[(0.0 - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \mathsf{log1p}\left(0 - x\right)
\end{array}
Initial program 68.9%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f6469.0%
Simplified69.0%
div-invN/A
flip--N/A
associate-*l/N/A
/-lowering-/.f64N/A
un-div-invN/A
/-lowering-/.f64N/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
+-lowering-+.f6453.9%
Applied egg-rr53.9%
Taylor expanded in x around inf
*-lowering-*.f64N/A
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6469.9%
Simplified69.9%
clear-numN/A
associate-/r/N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f6470.2%
Applied egg-rr70.2%
Taylor expanded in y around 0
mul-1-negN/A
neg-lowering-neg.f6460.2%
Simplified60.2%
Final simplification60.2%
(FPCore (x y) :precision binary64 (- 1.0 (/ x (+ y -1.0))))
/* Herbie alternative 7: linearized form 1 - x/(y - 1). */
double code(double x, double y) {
    const double quotient = x / (y + -1.0);
    return 1.0 - quotient;
}
! Herbie alternative 7: linearized form 1 - x/(y - 1), double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 - (x / (y + (-1.0d0)))
end function
/** Herbie alternative 7: linearized form 1 - x/(y - 1). */
public static double code(double x, double y) {
    final double quotient = x / (y + -1.0);
    return 1.0 - quotient;
}
def code(x, y):
    """Herbie alternative 7: linearized form 1 - x/(y - 1)."""
    quotient = x / (y + -1.0)
    return 1.0 - quotient
# Herbie alternative 7: linearized form 1 - x/(y - 1).
function code(x, y)
    quotient = Float64(x / Float64(y + -1.0))
    return Float64(1.0 - quotient)
end
% Herbie alternative 7: linearized form 1 - x/(y - 1).
% Reformatted from an invalid collapsed one-line function definition.
function tmp = code(x, y)
    tmp = 1.0 - (x / (y + -1.0));
end
code[x_, y_] := N[(1.0 - N[(x / N[(y + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \frac{x}{y + -1}
\end{array}
Initial program 68.9%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f6469.0%
Simplified69.0%
Taylor expanded in x around inf
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6471.0%
Simplified71.0%
Taylor expanded in x around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6444.9%
Simplified44.9%
(FPCore (x y) :precision binary64 1.0)
/* Constant-folded alternative: always returns 1.0 (inputs unused). */
double code(double x, double y) {
    (void) x; /* unused */
    (void) y; /* unused */
    return 1.0;
}
! Constant-folded alternative: always returns 1 (x and y unused).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0
end function
/** Constant-folded alternative: always returns 1.0 (inputs unused). */
public static double code(double x, double y) {
    return 1.0;
}
def code(x, y):
    """Constant-folded alternative: always 1.0 (x and y unused)."""
    return 1.0
# Constant-folded alternative: always 1.0 (x and y unused).
function code(x, y)
    return 1.0
end
% Constant-folded alternative: always returns 1 (inputs unused).
% Reformatted from an invalid collapsed one-line function definition.
function tmp = code(x, y)
    tmp = 1.0;
end
code[x_, y_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 68.9%
--lowering--.f64N/A
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
neg-sub0N/A
associate--r-N/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f6469.0%
Simplified69.0%
Taylor expanded in x around inf
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6471.0%
Simplified71.0%
Taylor expanded in x around 0
Simplified43.3%
(FPCore (x y)
:precision binary64
(let* ((t_0 (- 1.0 (log (- (/ x (* y y)) (- (/ 1.0 y) (/ x y)))))))
(if (< y -81284752.61947241)
t_0
(if (< y 3.0094271212461764e+25)
(log (/ (exp 1.0) (- 1.0 (/ (- x y) (- 1.0 y)))))
t_0))))
double code(double x, double y) {
double t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
double tmp;
if (y < -81284752.61947241) {
tmp = t_0;
} else if (y < 3.0094271212461764e+25) {
tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
} else {
tmp = t_0;
}
return tmp;
}
! Herbie alternative 8: log(e/(1 - (x-y)/(1-y))) for moderate y,
! otherwise a rearranged log fallback on both extreme-y tails.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: t_0
real(8) :: tmp
! Tail fallback; computed unconditionally.
t_0 = 1.0d0 - log(((x / (y * y)) - ((1.0d0 / y) - (x / y))))
if (y < (-81284752.61947241d0)) then
tmp = t_0
else if (y < 3.0094271212461764d+25) then
tmp = log((exp(1.0d0) / (1.0d0 - ((x - y) / (1.0d0 - y)))))
else
tmp = t_0
end if
code = tmp
end function
/** Herbie alternative 8: log(e/(1 - (x-y)/(1-y))) for moderate y,
 *  otherwise a rearranged log fallback on both extreme-y tails. */
public static double code(double x, double y) {
    final double t_0 = 1.0 - Math.log((x / (y * y)) - ((1.0 / y) - (x / y)));
    if (y >= -81284752.61947241 && y < 3.0094271212461764e+25) {
        return Math.log(Math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y))));
    }
    return t_0;
}
def code(x, y):
    """Herbie alternative 8: log(e/(1 - (x-y)/(1-y))) for moderate y,
    otherwise a rearranged log fallback (computed eagerly).

    Reformatted from an invalid collapsed one-liner; semantics unchanged.
    """
    t_0 = 1.0 - math.log(((x / (y * y)) - ((1.0 / y) - (x / y))))
    tmp = 0
    if y < -81284752.61947241:
        tmp = t_0
    elif y < 3.0094271212461764e+25:
        tmp = math.log((math.exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))))
    else:
        tmp = t_0
    return tmp
# Herbie alternative 8: log(e/(1 - (x-y)/(1-y))) for moderate y,
# otherwise the eager fallback t_0. Reformatted from an unparsable line.
function code(x, y)
    t_0 = Float64(1.0 - log(Float64(Float64(x / Float64(y * y)) - Float64(Float64(1.0 / y) - Float64(x / y)))))
    tmp = 0.0
    if (y < -81284752.61947241)
        tmp = t_0
    elseif (y < 3.0094271212461764e+25)
        tmp = log(Float64(exp(1.0) / Float64(1.0 - Float64(Float64(x - y) / Float64(1.0 - y)))))
    else
        tmp = t_0
    end
    return tmp
end
% Herbie alternative 8: log(e/(1 - (x-y)/(1-y))) for moderate y,
% otherwise the eager fallback t_0.
% Reformatted: statements were collapsed after the if/elseif conditions,
% which MATLAB does not accept without separators.
function tmp_2 = code(x, y)
    t_0 = 1.0 - log(((x / (y * y)) - ((1.0 / y) - (x / y))));
    tmp = 0.0;
    if (y < -81284752.61947241)
        tmp = t_0;
    elseif (y < 3.0094271212461764e+25)
        tmp = log((exp(1.0) / (1.0 - ((x - y) / (1.0 - y)))));
    else
        tmp = t_0;
    end
    tmp_2 = tmp;
end
code[x_, y_] := Block[{t$95$0 = N[(1.0 - N[Log[N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - N[(N[(1.0 / y), $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[Less[y, -81284752.61947241], t$95$0, If[Less[y, 3.0094271212461764e+25], N[Log[N[(N[Exp[1.0], $MachinePrecision] / N[(1.0 - N[(N[(x - y), $MachinePrecision] / N[(1.0 - y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 - \log \left(\frac{x}{y \cdot y} - \left(\frac{1}{y} - \frac{x}{y}\right)\right)\\
\mathbf{if}\;y < -81284752.61947241:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;y < 3.0094271212461764 \cdot 10^{+25}:\\
\;\;\;\;\log \left(\frac{e^{1}}{1 - \frac{x - y}{1 - y}}\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
herbie shell --seed 2024138
(FPCore (x y)
:name "Numeric.SpecFunctions:invIncompleteGamma from math-functions-0.1.5.2, B"
:precision binary64
:alt
(! :herbie-platform default (if (< y -8128475261947241/100000000) (- 1 (log (- (/ x (* y y)) (- (/ 1 y) (/ x y))))) (if (< y 30094271212461764000000000) (log (/ (exp 1) (- 1 (/ (- x y) (- 1 y))))) (- 1 (log (- (/ x (* y y)) (- (/ 1 y) (/ x y))))))))
(- 1.0 (log (- 1.0 (/ (- x y) (- 1.0 y))))))