
Initial program:
(FPCore (x) :precision binary64 (/ (log (- 1.0 x)) (log (+ 1.0 x))))
double code(double x) {
return log((1.0 - x)) / log((1.0 + x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((1.0d0 - x)) / log((1.0d0 + x))
end function
public static double code(double x) {
return Math.log((1.0 - x)) / Math.log((1.0 + x));
}
def code(x): return math.log((1.0 - x)) / math.log((1.0 + x))
function code(x) return Float64(log(Float64(1.0 - x)) / log(Float64(1.0 + x))) end
function tmp = code(x) tmp = log((1.0 - x)) / log((1.0 + x)); end
code[x_] := N[(N[Log[N[(1.0 - x), $MachinePrecision]], $MachinePrecision] / N[Log[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
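The initial program is accurate on only 4.3% of sampled inputs (see the derivations below). The cause: for small x, computing 1.0 - x and 1.0 + x incurs an absolute rounding error of up to about 1e-16, which dwarfs the roughly x-sized outputs of the logarithms. A minimal check in Python, not part of the Herbie output (math.log1p is the standard-library function the alternatives below build on):

import math

x = 1e-12
naive = math.log(1.0 - x) / math.log(1.0 + x)  # 1.0 - x rounds; that ~1e-16 error swamps the ~1e-12 log value
better = math.log1p(-x) / math.log1p(x)        # log1p never forms 1.0 - x, so it stays accurate to ~1 ulp
print(naive, better)                           # naive is typically wrong after a few digits;
                                               # the true ratio here is about -1.000000000001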
Alternative 1:
(FPCore (x) :precision binary64 (+ (/ (log1p (* x (- x))) (log1p x)) -1.0))
double code(double x) {
return (log1p((x * -x)) / log1p(x)) + -1.0;
}
public static double code(double x) {
return (Math.log1p((x * -x)) / Math.log1p(x)) + -1.0;
}
def code(x): return (math.log1p((x * -x)) / math.log1p(x)) + -1.0
function code(x) return Float64(Float64(log1p(Float64(x * Float64(-x))) / log1p(x)) + -1.0) end
code[x_] := N[(N[(N[Log[1 + N[(x * (-x)), $MachinePrecision]], $MachinePrecision] / N[Log[1 + x], $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\frac{\mathsf{log1p}\left(x \cdot \left(-x\right)\right)}{\mathsf{log1p}\left(x\right)} + -1
\end{array}
Derivation:
Initial program 4.3%
Applied rewrites 100.0%
lift-log1p.f64 N/A
lift-log1p.f64 N/A
*-inverses 100.0%
Applied rewrites 100.0%
Final simplification 100.0%
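The *-inverses step carries the whole improvement. One way to see the rewrite, using the identity 1 - x = (1 - x^2)/(1 + x) and the fact that x·(-x) = -x^2:

\begin{array}{l}
\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)} = \frac{\log \left(1 - x^2\right) - \log \left(1 + x\right)}{\log \left(1 + x\right)} = \frac{\mathsf{log1p}\left(x \cdot \left(-x\right)\right)}{\mathsf{log1p}\left(x\right)} - 1
\end{array}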
Alternative 2:
(FPCore (x) :precision binary64 (fma (fma x (fma x (fma x -0.2916666666666667 -0.4166666666666667) -0.5) -1.0) x -1.0))
double code(double x) {
return fma(fma(x, fma(x, fma(x, -0.2916666666666667, -0.4166666666666667), -0.5), -1.0), x, -1.0);
}
function code(x) return fma(fma(x, fma(x, fma(x, -0.2916666666666667, -0.4166666666666667), -0.5), -1.0), x, -1.0) end
code[x_] := N[(N[(x * N[(x * N[(x * -0.2916666666666667 + -0.4166666666666667), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision] * x + -1.0), $MachinePrecision]
\begin{array}{l}
\mathsf{fma}\left(\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.2916666666666667, -0.4166666666666667\right), -0.5\right), -1\right), x, -1\right)
\end{array}
Derivation:
Initial program 4.3%
Applied rewrites 100.0%
lift-log1p.f64 N/A
lift-log1p.f64 N/A
*-inverses 100.0%
Applied rewrites 100.0%
Taylor expanded in x around 0
lower-*.f64 N/A
sub-neg N/A
metadata-eval N/A
lower-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
lower-fma.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 100.0%
Applied rewrites 100.0%
lift-fma.f64 N/A
lift-fma.f64 N/A
lift-fma.f64 N/A
lift-*.f64 N/A
sub-neg N/A
lift-*.f64 N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 100.0%
Applied rewrites 100.0%
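The fma constants are recognizable Taylor coefficients: -0.2916666666666667 is -7/24 and -0.4166666666666667 is -5/12. Expanding the original ratio around x = 0 gives

\begin{array}{l}
\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)} = -1 - x - \frac{1}{2} x^2 - \frac{5}{12} x^3 - \frac{7}{24} x^4 + O\left(x^5\right)
\end{array}

and the fma nest above is exactly this degree-4 polynomial evaluated in Horner form.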
Alternative 3:
(FPCore (x) :precision binary64 (fma x (fma x (fma x -0.4166666666666667 -0.5) -1.0) -1.0))
double code(double x) {
return fma(x, fma(x, fma(x, -0.4166666666666667, -0.5), -1.0), -1.0);
}
function code(x) return fma(x, fma(x, fma(x, -0.4166666666666667, -0.5), -1.0), -1.0) end
code[x_] := N[(x * N[(x * N[(x * -0.4166666666666667 + -0.5), $MachinePrecision] + -1.0), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.4166666666666667, -0.5\right), -1\right), -1\right)
\end{array}
Derivation:
Initial program 4.3%
Taylor expanded in x around 0
sub-neg N/A
metadata-eval N/A
lower-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
lower-fma.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 99.9%
Applied rewrites 99.9%
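Unfolding the fma nest shows this is the same Taylor series truncated at degree 3:

\begin{array}{l}
\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -\frac{5}{12}, -\frac{1}{2}\right), -1\right), -1\right) = -\frac{5}{12} x^3 - \frac{1}{2} x^2 - x - 1
\end{array}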
Alternative 4:
(FPCore (x) :precision binary64 (- (fma (* x x) -0.5 -1.0) x))
double code(double x) {
return fma((x * x), -0.5, -1.0) - x;
}
function code(x) return Float64(fma(Float64(x * x), -0.5, -1.0) - x) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * -0.5 + -1.0), $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\mathsf{fma}\left(x \cdot x, -0.5, -1\right) - x
\end{array}
Derivation:
Initial program 4.3%
Taylor expanded in x around 0
sub-neg N/A
metadata-eval N/A
lower-fma.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 99.8%
Applied rewrites 99.8%
distribute-lft-in N/A
associate-+l+ N/A
associate-*r* N/A
lift-*.f64 N/A
*-commutative N/A
neg-mul-1 N/A
lower-fma.f64 N/A
lower-+.f64 N/A
lower-neg.f64 99.8%
Applied rewrites 99.8%
lift-*.f64 N/A
lift-neg.f64 N/A
+-commutative N/A
lift-neg.f64 N/A
sub-neg N/A
associate-+r- N/A
lower--.f64 N/A
lower-fma.f64 99.8%
Applied rewrites 99.8%
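This one is the degree-2 truncation of the series, with the square computed separately and fed into the fma:

\begin{array}{l}
\mathsf{fma}\left(x \cdot x, -0.5, -1\right) - x = -\frac{1}{2} x^2 - x - 1
\end{array}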
Alternative 5:
(FPCore (x) :precision binary64 (fma x (fma x -0.5 -1.0) -1.0))
double code(double x) {
return fma(x, fma(x, -0.5, -1.0), -1.0);
}
function code(x) return fma(x, fma(x, -0.5, -1.0), -1.0) end
code[x_] := N[(x * N[(x * -0.5 + -1.0), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.5, -1\right), -1\right)
\end{array}
Derivation:
Initial program 4.3%
Taylor expanded in x around 0
sub-neg N/A
metadata-eval N/A
lower-fma.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 99.8%
Applied rewrites 99.8%
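The same degree-2 polynomial as the previous alternative, re-associated into pure Horner form (two fma operations instead of a multiply, an fma, and a subtraction):

\begin{array}{l}
\mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.5, -1\right), -1\right) = -\frac{1}{2} x^2 - x - 1
\end{array}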
Alternative 6:
(FPCore (x) :precision binary64 (- -1.0 x))
double code(double x) {
return -1.0 - x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) - x
end function
public static double code(double x) {
return -1.0 - x;
}
def code(x): return -1.0 - x
function code(x) return Float64(-1.0 - x) end
function tmp = code(x) tmp = -1.0 - x; end
code[x_] := N[(-1.0 - x), $MachinePrecision]
\begin{array}{l}
-1 - x
\end{array}
Derivation:
Initial program 4.3%
Taylor expanded in x around 0
sub-neg N/A
metadata-eval N/A
+-commutative N/A
mul-1-neg N/A
unsub-neg N/A
lower--.f64 99.3%
Applied rewrites 99.3%
Alternative 7:
(FPCore (x) :precision binary64 -1.0)
double code(double x) {
return -1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double x) {
return -1.0;
}
def code(x): return -1.0
function code(x) return -1.0 end
function tmp = code(x) tmp = -1.0; end
code[x_] := -1.0
\begin{array}{l}
-1
\end{array}
Derivation:
Initial program 4.3%
Taylor expanded in x around 0
Applied rewrites 98.3%
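The last two alternatives are the degree-1 and degree-0 truncations of the same series, trading further accuracy for simplicity:

\begin{array}{l}
\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)} = -1 - x + O\left(x^2\right) = -1 + O\left(x\right)
\end{array}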
Developer target (the :alt expression from the specification below):
(FPCore (x) :precision binary64 (/ (log1p (- x)) (log1p x)))
double code(double x) {
return log1p(-x) / log1p(x);
}
public static double code(double x) {
return Math.log1p(-x) / Math.log1p(x);
}
def code(x): return math.log1p(-x) / math.log1p(x)
function code(x) return Float64(log1p(Float64(-x)) / log1p(x)) end
code[x_] := N[(N[Log[1 + (-x)], $MachinePrecision] / N[Log[1 + x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{\mathsf{log1p}\left(-x\right)}{\mathsf{log1p}\left(x\right)}
\end{array}
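The developer target is mathematically equal to Herbie's first alternative, since log1p(x·(-x)) = log(1 - x^2) = log1p(-x) + log1p(x). A quick numeric spot-check in Python (not from the report):

import math

for x in (1e-12, 0.5, 0.999):
    target = math.log1p(-x) / math.log1p(x)
    alt1 = math.log1p(x * -x) / math.log1p(x) - 1.0
    print(x, target, alt1)  # the two values agree to within a few ulps at each point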
Reproduce:
herbie shell --seed 2024219
(FPCore (x)
:name "qlog (example 3.10)"
:precision binary64
:pre (<= (fabs x) 1.0)
:alt
(! :herbie-platform default (/ (log1p (- x)) (log1p x)))
(/ (log (- 1.0 x)) (log (+ 1.0 x))))
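To rerun the analysis, start the shell with the seed above and paste in the FPCore. Saving the specification to a file and generating a full report should work as well; the second command below is a sketch assuming a standard Herbie installation, and qlog.fpcore is a file name chosen here:

herbie shell --seed 2024219                     # paste the FPCore at the prompt
herbie report --seed 2024219 qlog.fpcore qlog-report/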