
Initial program (18.3% accurate)
(FPCore (x) :precision binary64 (/ (log (- 1.0 x)) (log (+ 1.0 x))))
double code(double x) {
return log((1.0 - x)) / log((1.0 + x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((1.0d0 - x)) / log((1.0d0 + x))
end function
public static double code(double x) {
return Math.log((1.0 - x)) / Math.log((1.0 + x));
}
def code(x): return math.log((1.0 - x)) / math.log((1.0 + x))
function code(x) return Float64(log(Float64(1.0 - x)) / log(Float64(1.0 + x))) end
function tmp = code(x) tmp = log((1.0 - x)) / log((1.0 + x)); end
code[x_] := N[(N[Log[N[(1.0 - x), $MachinePrecision]], $MachinePrecision] / N[Log[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)}
\end{array}
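The danger zone is x near 0: there both logarithms vanish, and the explicit sums 1.0 - x and 1.0 + x round away most of x's bits before log ever sees them. A minimal Python illustration (the sample inputs are mine, not from the report):

import math

def naive(x):
    return math.log(1.0 - x) / math.log(1.0 + x)

# The exact value tends to -1 as x -> 0, but the naive form degrades:
print(naive(1e-12))   # about -0.99989: already wrong in the 4th decimal
try:
    naive(1e-17)      # 1.0 - 1e-17 and 1.0 + 1e-17 both round to exactly 1.0,
except ZeroDivisionError:
    print("0.0 / 0.0")  # so both logs are exactly 0.0 and the quotient is 0/0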
Sampling outcomes in binary64 precision: [chart omitted]
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| 1 | 99.8% | |
| 2 | 97.7% | |
| 3 | 96.9% | |
| 4 | 96.9% | |
| 5 | 95.2% | |
| 6 | 90.7% | |
Alternative 1 (99.8% accurate)
(FPCore (x) :precision binary64 (/ (log1p (- x)) (log1p x)))
double code(double x) {
return log1p(-x) / log1p(x);
}
public static double code(double x) {
return Math.log1p(-x) / Math.log1p(x);
}
def code(x): return math.log1p(-x) / math.log1p(x)
function code(x) return Float64(log1p(Float64(-x)) / log1p(x)) end
code[x_] := N[(N[Log[1 + (-x)], $MachinePrecision] / N[Log[1 + x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{log1p}\left(-x\right)}{\mathsf{log1p}\left(x\right)}
\end{array}
Derivation:
Initial program 18.3%
sub-neg 18.3%
log1p-def 15.6%
log1p-def 99.8%
Simplified 99.8%
Final simplification 99.8%
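Because log1p(t) evaluates log(1 + t) without first rounding 1 + t, this rewrite keeps every bit of x. Revisiting the inputs that broke the naive form (again a sketch, not part of the report):

import math

def rewritten(x):
    return math.log1p(-x) / math.log1p(x)

print(rewritten(1e-12))   # -1.000000000001, correct to full precision
print(rewritten(1e-17))   # -1.0: no more 0/0 at tiny x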
Alternative 2 (97.7% accurate)
(FPCore (x) :precision binary64 (+ (- (+ (* -0.5 (* x x)) (* -0.4166666666666667 (pow x 3.0))) x) -1.0))
double code(double x) {
return (((-0.5 * (x * x)) + (-0.4166666666666667 * pow(x, 3.0))) - x) + -1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((((-0.5d0) * (x * x)) + ((-0.4166666666666667d0) * (x ** 3.0d0))) - x) + (-1.0d0)
end function
public static double code(double x) {
return (((-0.5 * (x * x)) + (-0.4166666666666667 * Math.pow(x, 3.0))) - x) + -1.0;
}
def code(x): return (((-0.5 * (x * x)) + (-0.4166666666666667 * math.pow(x, 3.0))) - x) + -1.0
function code(x) return Float64(Float64(Float64(Float64(-0.5 * Float64(x * x)) + Float64(-0.4166666666666667 * (x ^ 3.0))) - x) + -1.0) end
function tmp = code(x) tmp = (((-0.5 * (x * x)) + (-0.4166666666666667 * (x ^ 3.0))) - x) + -1.0; end
code[x_] := N[(N[(N[(N[(-0.5 * N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(-0.4166666666666667 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(-0.5 \cdot \left(x \cdot x\right) + -0.4166666666666667 \cdot {x}^{3}\right) - x\right) + -1
\end{array}
Derivation:
Initial program 18.3%
Taylor expanded in x around 0 97.7%
unpow2 96.9%
Applied egg-rr 97.7%
Final simplification 97.7%
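The polynomial's constants come from the Taylor expansion of the quotient around x = 0. Expanding numerator and denominator separately and dividing (a quick check of the coefficients; the report itself does not spell this step out):

\begin{array}{l}
\frac{\log\left(1 - x\right)}{\log\left(1 + x\right)} = \frac{-x - \frac{x^{2}}{2} - \frac{x^{3}}{3} - \cdots}{x - \frac{x^{2}}{2} + \frac{x^{3}}{3} - \cdots} = -1 - x - \frac{1}{2} x^{2} - \frac{5}{12} x^{3} - \cdots
\end{array}

The x^2 coefficient gives the -0.5, and -5/12 = -0.41666... matches the -0.4166666666666667 above.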
Alternative 3 (96.9% accurate)
(FPCore (x) :precision binary64 (+ (- (* -0.5 (+ 1.0 (+ (* x x) -1.0))) x) -1.0))
double code(double x) {
return ((-0.5 * (1.0 + ((x * x) + -1.0))) - x) + -1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (((-0.5d0) * (1.0d0 + ((x * x) + (-1.0d0)))) - x) + (-1.0d0)
end function
public static double code(double x) {
return ((-0.5 * (1.0 + ((x * x) + -1.0))) - x) + -1.0;
}
def code(x): return ((-0.5 * (1.0 + ((x * x) + -1.0))) - x) + -1.0
function code(x) return Float64(Float64(Float64(-0.5 * Float64(1.0 + Float64(Float64(x * x) + -1.0))) - x) + -1.0) end
function tmp = code(x) tmp = ((-0.5 * (1.0 + ((x * x) + -1.0))) - x) + -1.0; end
code[x_] := N[(N[(N[(-0.5 * N[(1.0 + N[(N[(x * x), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(-0.5 \cdot \left(1 + \left(x \cdot x + -1\right)\right) - x\right) + -1
\end{array}
Derivation:
Initial program 18.3%
Taylor expanded in x around 0 96.9%
expm1-log1p-u 96.9%
expm1-udef 96.9%
Applied egg-rr 96.9%
associate--l+ 96.9%
Simplified 96.9%
unpow2 96.9%
Applied egg-rr 96.9%
Final simplification 96.9%
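Note that 1 + ((x * x) + -1) is algebraically just x^2, so this alternative evaluates the same quadratic polynomial as the next one; both derivations land at 96.9%, and the extra add/subtract pair changes only the intermediate rounding.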
Alternative 4 (96.9% accurate)
(FPCore (x) :precision binary64 (+ (- (* -0.5 (* x x)) x) -1.0))
double code(double x) {
return ((-0.5 * (x * x)) - x) + -1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (((-0.5d0) * (x * x)) - x) + (-1.0d0)
end function
public static double code(double x) {
return ((-0.5 * (x * x)) - x) + -1.0;
}
def code(x): return ((-0.5 * (x * x)) - x) + -1.0
function code(x) return Float64(Float64(Float64(-0.5 * Float64(x * x)) - x) + -1.0) end
function tmp = code(x) tmp = ((-0.5 * (x * x)) - x) + -1.0; end
code[x_] := N[(N[(N[(-0.5 * N[(x * x), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(-0.5 \cdot \left(x \cdot x\right) - x\right) + -1
\end{array}
Derivation:
Initial program 18.3%
Taylor expanded in x around 0 96.9%
unpow2 96.9%
Applied egg-rr 96.9%
Final simplification 96.9%
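This is alternative 2 with the cubic term dropped, and the cost shows up only at the larger values of |x|: at x = 0.5, for instance, the true value is log(0.5)/log(1.5), about -1.7095; the cubic polynomial gives about -1.6771, while this quadratic gives -1.625.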
Alternative 5 (95.2% accurate)
(FPCore (x) :precision binary64 (- -1.0 x))
double code(double x) {
return -1.0 - x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) - x
end function
public static double code(double x) {
return -1.0 - x;
}
def code(x): return -1.0 - x
function code(x) return Float64(-1.0 - x) end
function tmp = code(x) tmp = -1.0 - x; end
code[x_] := N[(-1.0 - x), $MachinePrecision]
\begin{array}{l}
\\
-1 - x
\end{array}
Derivation:
Initial program 18.3%
Taylor expanded in x around 0 95.2%
sub-neg 95.2%
metadata-eval 95.2%
+-commutative 95.2%
mul-1-neg 95.2%
unsub-neg 95.2%
Simplified 95.2%
Final simplification 95.2%
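Truncating one order further leaves the first-order series -1 - x. The accuracy drop is modest (95.2% versus 96.9%) because the discarded quadratic and cubic terms only matter when |x| is not small.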
Alternative 6 (90.7% accurate)
(FPCore (x) :precision binary64 -1.0)
double code(double x) {
return -1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double x) {
return -1.0;
}
def code(x): return -1.0
function code(x) return -1.0 end
function tmp = code(x) tmp = -1.0; end
code[x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Derivation:
Initial program 18.3%
Taylor expanded in x around 0 90.7%
Final simplification 90.7%
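A quick way to see the accuracy ordering across these polynomial alternatives is to measure each against the log1p rewrite at a few sample points. A minimal sketch (the function names and sample points are mine):

import math

def reference(x):   # alternative 1: the accurate log1p form
    return math.log1p(-x) / math.log1p(x)

def cubic(x):       # alternative 2
    return ((-0.5 * (x * x)) + (-0.4166666666666667 * x ** 3.0)) - x + -1.0

def quadratic(x):   # alternative 4
    return ((-0.5 * (x * x)) - x) + -1.0

def linear(x):      # alternative 5
    return -1.0 - x

def constant(x):    # alternative 6
    return -1.0

for x in (1e-3, 1e-1, 0.5):
    ref = reference(x)
    for f in (cubic, quadratic, linear, constant):
        rel = abs(f(x) - ref) / abs(ref)
        print(f"{f.__name__:9s} x={x:<6g} rel. error {rel:.2e}")

# The relative error grows as terms are dropped, matching the
# 97.7% > 96.9% > 95.2% > 90.7% accuracy ordering reported above.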
Reproduce:
herbie shell --seed 2024024
(FPCore (x)
:name "qlog (example 3.10)"
:precision binary64
:pre (<= (fabs x) 1.0)
:herbie-target
(/ (log1p (- x)) (log1p x))
(/ (log (- 1.0 x)) (log (+ 1.0 x))))
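To reproduce this result, start the shell with the seed shown and paste in the FPCore block above. The :herbie-target annotation supplies a hand-written reference rewrite (the log1p form of alternative 1) that Herbie's own output can be compared against.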