qlog (example 3.10)

Specification:
(FPCore (x) :precision binary64 (/ (log (- 1.0 x)) (log (+ 1.0 x))))
double code(double x) {
return log((1.0 - x)) / log((1.0 + x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((1.0d0 - x)) / log((1.0d0 + x))
end function
public static double code(double x) {
return Math.log((1.0 - x)) / Math.log((1.0 + x));
}
import math
def code(x): return math.log((1.0 - x)) / math.log((1.0 + x))
function code(x) return Float64(log(Float64(1.0 - x)) / log(Float64(1.0 + x))) end
function tmp = code(x) tmp = log((1.0 - x)) / log((1.0 + x)); end
code[x_] := N[(N[Log[N[(1.0 - x), $MachinePrecision]], $MachinePrecision] / N[Log[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)}
\end{array}
[Accuracy chart omitted: sampling outcomes in binary64 precision]
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| Alternative 1 (log1p rewrite) | 89.9% | |
| Alternative 2 (cubic polynomial) | 84.9% | |
| Alternative 3 (quadratic polynomial) | 82.7% | |
| Alternative 4 (linear polynomial) | 78.7% | |
| Alternative 5 (constant) | 66.9% | |
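The initial program is inaccurate near x = 0 because 1.0 - x and 1.0 + x round away the low-order bits of x before the logarithm is taken, and log then turns that small absolute rounding error into a large relative error, since log(1 ± x) ≈ ±x is itself tiny. A minimal Python sketch of the effect (mine, not part of the Herbie output), using math.log1p as the well-conditioned reference:

import math

x = 1e-12
naive = math.log(1.0 - x)   # 1.0 - x rounds away low bits of x before log runs
accurate = math.log1p(-x)   # computes log(1 - x) without forming 1 - x
# The two printed values typically diverge after the first few digits:
# log1p keeps full double precision, the naive form loses many digits here.
print(naive, accurate)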
Initial program (45.7% accurate):

(FPCore (x) :precision binary64 (/ (log (- 1.0 x)) (log (+ 1.0 x))))
double code(double x) {
return log((1.0 - x)) / log((1.0 + x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((1.0d0 - x)) / log((1.0d0 + x))
end function
public static double code(double x) {
return Math.log((1.0 - x)) / Math.log((1.0 + x));
}
import math
def code(x): return math.log((1.0 - x)) / math.log((1.0 + x))
function code(x) return Float64(log(Float64(1.0 - x)) / log(Float64(1.0 + x))) end
function tmp = code(x) tmp = log((1.0 - x)) / log((1.0 + x)); end
code[x_] := N[(N[Log[N[(1.0 - x), $MachinePrecision]], $MachinePrecision] / N[Log[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)}
\end{array}
Alternative 1 (89.9% accurate):

(FPCore (x) :precision binary64 (/ (log1p (- x)) (log1p x)))
double code(double x) {
return log1p(-x) / log1p(x);
}
public static double code(double x) {
return Math.log1p(-x) / Math.log1p(x);
}
import math
def code(x): return math.log1p(-x) / math.log1p(x)
function code(x) return Float64(log1p(Float64(-x)) / log1p(x)) end
code[x_] := N[(N[Log[1 + (-x)], $MachinePrecision] / N[Log[1 + x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{\mathsf{log1p}\left(-x\right)}{\mathsf{log1p}\left(x\right)}
\end{array}
Derivation:
Initial program 45.7%
sub-neg 45.7%
log1p-def 36.6%
log1p-def 89.9%
Simplified 89.9%
Final simplification 89.9%
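In Alternative 1 Herbie rewrites each log(1 ± x) as log1p(±x) (the sub-neg and log1p-def steps above), so 1 ± x is never formed explicitly. A quick comparison sketch in Python, mine rather than Herbie's, at a small input where the true ratio is approximately -(1 + x):

import math

def naive(x):
    return math.log(1.0 - x) / math.log(1.0 + x)

def alternative1(x):
    return math.log1p(-x) / math.log1p(x)

x = 1e-8
print(naive(x))         # typically wrong after roughly the 8th significant digit
print(alternative1(x))  # agrees with -(1 + x) to within an ulp or two
print(-(1.0 + x))       # first-order reference value for small x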
Alternative 2 (84.9% accurate):

(FPCore (x) :precision binary64 (+ (* x (* x (- (* x -0.4166666666666667) 0.5))) (- -1.0 x)))
double code(double x) {
return (x * (x * ((x * -0.4166666666666667) - 0.5))) + (-1.0 - x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x * (x * ((x * (-0.4166666666666667d0)) - 0.5d0))) + ((-1.0d0) - x)
end function
public static double code(double x) {
return (x * (x * ((x * -0.4166666666666667) - 0.5))) + (-1.0 - x);
}
def code(x): return (x * (x * ((x * -0.4166666666666667) - 0.5))) + (-1.0 - x)
function code(x) return Float64(Float64(x * Float64(x * Float64(Float64(x * -0.4166666666666667) - 0.5))) + Float64(-1.0 - x)) end
function tmp = code(x) tmp = (x * (x * ((x * -0.4166666666666667) - 0.5))) + (-1.0 - x); end
code[x_] := N[(N[(x * N[(x * N[(N[(x * -0.4166666666666667), $MachinePrecision] - 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x \cdot \left(x \cdot \left(x \cdot -0.4166666666666667 - 0.5\right)\right) + \left(-1 - x\right)
\end{array}
Derivation:
Initial program 45.7%
Taylor expanded in x around 0 84.9%
+-commutative 84.9%
associate--l+ 84.9%
*-commutative 84.9%
*-commutative 84.9%
unpow3 84.9%
unpow2 84.9%
associate-*l* 84.9%
distribute-lft-out 84.9%
sub-neg 84.9%
metadata-eval 84.9%
+-commutative 84.9%
mul-1-neg 84.9%
unsub-neg 84.9%
Simplified 84.9%
expm1-log1p-u 84.9%
expm1-udef 84.9%
+-commutative 84.9%
fma-def 84.9%
Applied egg-rr 84.9%
expm1-def 84.9%
expm1-log1p-u 84.9%
*-commutative 84.9%
unpow2 84.9%
associate-*r* 84.9%
Applied egg-rr 84.9%
Taylor expanded in x around 0 84.9%
Final simplification 84.9%
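The constant -0.4166666666666667 is -5/12, so Alternative 2 is the degree-3 Taylor polynomial of the ratio about x = 0, namely log(1-x)/log(1+x) = -(1 + x + x^2/2 + 5x^3/12) + O(x^4), evaluated in Horner form. A small numeric check of that reading, a sketch of mine using the log1p form as reference:

import math

def alternative1(x):  # accurate log1p form, used as the reference here
    return math.log1p(-x) / math.log1p(x)

def alternative2(x):  # Herbie's Horner-form cubic: -(1 + x + x**2/2 + 5*x**3/12)
    return (x * (x * ((x * -0.4166666666666667) - 0.5))) + (-1.0 - x)

for x in (1e-4, 1e-2, 0.5):
    # Truncation error of the cubic grows like x**4, so agreement is
    # excellent near 0 and degrades toward the ends of [-1, 1].
    print(x, alternative1(x), alternative2(x))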
Alternative 3 (82.7% accurate):

(FPCore (x) :precision binary64 (+ (- -1.0 x) (* x (* x -0.5))))
double code(double x) {
return (-1.0 - x) + (x * (x * -0.5));
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((-1.0d0) - x) + (x * (x * (-0.5d0)))
end function
public static double code(double x) {
return (-1.0 - x) + (x * (x * -0.5));
}
def code(x): return (-1.0 - x) + (x * (x * -0.5))
function code(x) return Float64(Float64(-1.0 - x) + Float64(x * Float64(x * -0.5))) end
function tmp = code(x) tmp = (-1.0 - x) + (x * (x * -0.5)); end
code[x_] := N[(N[(-1.0 - x), $MachinePrecision] + N[(x * N[(x * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(-1 - x\right) + x \cdot \left(x \cdot -0.5\right)
\end{array}
Derivation:
Initial program 45.7%
Taylor expanded in x around 0 84.9%
+-commutative 84.9%
associate--l+ 84.9%
*-commutative 84.9%
*-commutative 84.9%
unpow3 84.9%
unpow2 84.9%
associate-*l* 84.9%
distribute-lft-out 84.9%
sub-neg 84.9%
metadata-eval 84.9%
+-commutative 84.9%
mul-1-neg 84.9%
unsub-neg 84.9%
Simplified 84.9%
expm1-log1p-u 84.9%
expm1-udef 84.9%
+-commutative 84.9%
fma-def 84.9%
Applied egg-rr 84.9%
expm1-def 84.9%
expm1-log1p-u 84.9%
*-commutative 84.9%
unpow2 84.9%
associate-*r* 84.9%
Applied egg-rr 84.9%
Taylor expanded in x around 0 82.7%
Final simplification 82.7%
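Alternative 3 is the same series truncated one term earlier: it drops 5x^3/12 to save a multiply and an add, at the cost of the small accuracy drop visible above (84.9% to 82.7%). A hedged sketch of that trade-off, again measured against the log1p form:

import math

def reference(x):  # Alternative 1
    return math.log1p(-x) / math.log1p(x)

def cubic(x):      # Alternative 2
    return (x * (x * ((x * -0.4166666666666667) - 0.5))) + (-1.0 - x)

def quadratic(x):  # Alternative 3
    return (-1.0 - x) + (x * (x * -0.5))

for x in (1e-3, 1e-2, 1e-1):
    # The quadratic's error grows like (5/12)*x**3 versus x**4 for the
    # cubic, so the gap between the two widens as |x| grows.
    print(x, abs(cubic(x) - reference(x)), abs(quadratic(x) - reference(x)))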
Alternative 4 (78.7% accurate):

(FPCore (x) :precision binary64 (- -1.0 x))
double code(double x) {
return -1.0 - x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) - x
end function
public static double code(double x) {
return -1.0 - x;
}
def code(x): return -1.0 - x
function code(x) return Float64(-1.0 - x) end
function tmp = code(x) tmp = -1.0 - x; end
code[x_] := N[(-1.0 - x), $MachinePrecision]
\begin{array}{l}
-1 - x
\end{array}
Derivation:
Initial program 45.7%
Taylor expanded in x around 0 78.7%
sub-neg 78.7%
metadata-eval 78.7%
+-commutative 78.7%
mul-1-neg 78.7%
unsub-neg 78.7%
Simplified 78.7%
Final simplification 78.7%
Alternative 5 (66.9% accurate):

(FPCore (x) :precision binary64 -1.0)
double code(double x) {
return -1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double x) {
return -1.0;
}
def code(x): return -1.0
function code(x) return -1.0 end
function tmp = code(x) tmp = -1.0; end
code[x_] := -1.0
\begin{array}{l}
-1
\end{array}
Derivation:
Initial program 45.7%
Taylor expanded in x around 0 66.9%
Final simplification 66.9%
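Alternatives 2 through 5 are successive truncations of the same Taylor series (degrees 3, 2, 1, and 0), which is why each step down the table trades a slice of accuracy for a cheaper expression. A final sketch of mine evaluating the whole ladder at one point:

import math

x = 0.1
ladder = [
    ("Alternative 1 (log1p)",     math.log1p(-x) / math.log1p(x)),
    ("Alternative 2 (cubic)",     (x * (x * ((x * -0.4166666666666667) - 0.5))) + (-1.0 - x)),
    ("Alternative 3 (quadratic)", (-1.0 - x) + (x * (x * -0.5))),
    ("Alternative 4 (linear)",    -1.0 - x),
    ("Alternative 5 (constant)",  -1.0),
]
# Each truncation agrees with the log1p form to one less order in x.
for name, value in ladder:
    print(name, value)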
Target program (the :herbie-target from the reproduction spec below):

(FPCore (x) :precision binary64 (/ (log1p (- x)) (log1p x)))
double code(double x) {
return log1p(-x) / log1p(x);
}
public static double code(double x) {
return Math.log1p(-x) / Math.log1p(x);
}
import math
def code(x): return math.log1p(-x) / math.log1p(x)
function code(x) return Float64(log1p(Float64(-x)) / log1p(x)) end
code[x_] := N[(N[Log[1 + (-x)], $MachinePrecision] / N[Log[1 + x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{\mathsf{log1p}\left(-x\right)}{\mathsf{log1p}\left(x\right)}
\end{array}
Reproduce:

herbie shell --seed 2024031
(FPCore (x)
:name "qlog (example 3.10)"
:precision binary64
:pre (<= (fabs x) 1.0)
:herbie-target
(/ (log1p (- x)) (log1p x))
(/ (log (- 1.0 x)) (log (+ 1.0 x))))