
Initial program (4.9% accurate):
(FPCore (x) :precision binary64 (/ (log (- 1.0 x)) (log (+ 1.0 x))))
double code(double x) {
return log((1.0 - x)) / log((1.0 + x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((1.0d0 - x)) / log((1.0d0 + x))
end function
public static double code(double x) {
return Math.log((1.0 - x)) / Math.log((1.0 + x));
}
def code(x): return math.log((1.0 - x)) / math.log((1.0 + x))
function code(x) return Float64(log(Float64(1.0 - x)) / log(Float64(1.0 + x))) end
function tmp = code(x) tmp = log((1.0 - x)) / log((1.0 + x)); end
code[x_] := N[(N[Log[N[(1.0 - x), $MachinePrecision]], $MachinePrecision] / N[Log[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)}
\end{array}
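For x near zero this formulation is inaccurate: 1.0 - x must be rounded to 53 bits, which discards the low-order bits of x, and since log(1 - x) ≈ -x that small absolute error becomes a large relative error in the quotient. A minimal C sketch of the effect (the sample input is arbitrary):

#include <math.h>
#include <stdio.h>

int main(void) {
    double x = 1.2345678901234567e-9;
    double y = 1.0 - x;  /* rounded to the binary64 grid near 1.0 */
    /* 1.0 - y is exact here (Sterbenz lemma), so it reveals how much of x
       actually survived the rounding: only the leading ~7 digits. */
    printf("x             = %.17g\n", x);
    printf("1.0-(1.0-x)   = %.17g\n", 1.0 - y);
    printf("log(1.0 - x)  = %.17g\n", log(y));
    return 0;
}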
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| 1 | 100.0% | |
| 2 | 99.6% | |
| 3 | 99.6% | |
| 4 | 99.6% | |
| 5 | 99.5% | |
| 6 | 99.3% | |
| 7 | 98.9% | |
| 8 | 97.4% | |
Alternative 1:
(FPCore (x) :precision binary64 (/ (log1p (- 0.0 x)) (log1p x)))
double code(double x) {
return log1p((0.0 - x)) / log1p(x);
}
public static double code(double x) {
return Math.log1p((0.0 - x)) / Math.log1p(x);
}
def code(x): return math.log1p((0.0 - x)) / math.log1p(x)
function code(x) return Float64(log1p(Float64(0.0 - x)) / log1p(x)) end
code[x_] := N[(N[Log[1 + N[(0.0 - x), $MachinePrecision]], $MachinePrecision] / N[Log[1 + x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{log1p}\left(0 - x\right)}{\mathsf{log1p}\left(x\right)}
\end{array}
Initial program 4.9%
sub-neg N/A
accelerator-lowering-log1p.f64 N/A
neg-sub0 N/A
--lowering--.f64 5.7%
Applied egg-rr 5.7%
accelerator-lowering-log1p.f64 100.0%
Applied egg-rr 100.0%
sub0-neg N/A
neg-lowering-neg.f64 100.0%
Applied egg-rr 100.0%
Final simplification 100.0%
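Alternative 1 routes the unrounded x straight into the logarithm via log1p, which computes log(1 + t) accurately even for tiny t. A short C comparison against the initial program, assuming a C99 libm (the input value is arbitrary):

#include <math.h>
#include <stdio.h>

int main(void) {
    double x = 1e-8;
    double naive  = log(1.0 - x) / log(1.0 + x); /* initial program */
    double stable = log1p(0.0 - x) / log1p(x);   /* Alternative 1 */
    printf("naive  = %.17g\n", naive);
    printf("stable = %.17g\n", stable);
    /* The exact value is -1.00000001000000005...; the log1p form is
       correct to nearly all printed digits, the naive form to only ~8. */
    return 0;
}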
Alternative 2:
(FPCore (x) :precision binary64 (* (fma x (fma x (fma x -0.25 -0.3333333333333333) -0.5) -1.0) (/ x (* x (fma x (fma x (fma x -0.25 0.3333333333333333) -0.5) 1.0)))))
double code(double x) {
return fma(x, fma(x, fma(x, -0.25, -0.3333333333333333), -0.5), -1.0) * (x / (x * fma(x, fma(x, fma(x, -0.25, 0.3333333333333333), -0.5), 1.0)));
}
function code(x) return Float64(fma(x, fma(x, fma(x, -0.25, -0.3333333333333333), -0.5), -1.0) * Float64(x / Float64(x * fma(x, fma(x, fma(x, -0.25, 0.3333333333333333), -0.5), 1.0)))) end
code[x_] := N[(N[(x * N[(x * N[(x * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision] * N[(x / N[(x * N[(x * N[(x * N[(x * -0.25 + 0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.25, -0.3333333333333333\right), -0.5\right), -1\right) \cdot \frac{x}{x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.25, 0.3333333333333333\right), -0.5\right), 1\right)}
\end{array}
Initial program 4.9%
Taylor expanded in x around 0
*-lowering-*.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
accelerator-lowering-fma.f64 5.3%
Simplified 5.3%
Taylor expanded in x around 0
*-lowering-*.f64 N/A
+-commutative N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
+-commutative N/A
*-commutative N/A
accelerator-lowering-fma.f64 99.6%
Simplified 99.6%
*-commutative N/A
associate-/l* N/A
*-lowering-*.f64 N/A
accelerator-lowering-fma.f64 N/A
metadata-eval N/A
sub-neg N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
/-lowering-/.f64 N/A
*-lowering-*.f64 N/A
accelerator-lowering-fma.f64 N/A
Applied egg-rr 99.6%
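Alternative 2 replaces both logarithms with cubic Taylor truncations: the two inner fma chains are Horner forms of log(1 - x)/x ≈ -(1 + x/2 + x²/3 + x³/4) and log(1 + x)/x ≈ 1 - x/2 + x²/3 - x³/4, and the common factor x is kept explicit as x/(x·q(x)). A C sketch checking the polynomial form against the log1p form (the sample input is arbitrary):

#include <math.h>
#include <stdio.h>

int main(void) {
    double x = 1e-3;
    /* Horner cubic for log(1 - x)/x: -(1 + x/2 + x^2/3 + x^3/4) */
    double p = fma(x, fma(x, fma(x, -0.25, -0.3333333333333333), -0.5), -1.0);
    /* Horner cubic for log(1 + x)/x: 1 - x/2 + x^2/3 - x^3/4 */
    double q = fma(x, fma(x, fma(x, -0.25, 0.3333333333333333), -0.5), 1.0);
    double alt2 = p * (x / (x * q));    /* Alternative 2 */
    double ref  = log1p(-x) / log1p(x); /* log1p reference */
    printf("alt2 = %.17g\n", alt2);
    printf("ref  = %.17g\n", ref);
    /* For |x| << 1 the two agree to many digits; the truncation error
       grows like x^4, so the polynomial drifts as |x| approaches 1. */
    return 0;
}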
Alternative 3:
(FPCore (x) :precision binary64 (* (fma x (fma x (fma x -0.25 -0.3333333333333333) -0.5) -1.0) (/ 1.0 (fma x (fma x (fma x -0.25 0.3333333333333333) -0.5) 1.0))))
double code(double x) {
return fma(x, fma(x, fma(x, -0.25, -0.3333333333333333), -0.5), -1.0) * (1.0 / fma(x, fma(x, fma(x, -0.25, 0.3333333333333333), -0.5), 1.0));
}
function code(x) return Float64(fma(x, fma(x, fma(x, -0.25, -0.3333333333333333), -0.5), -1.0) * Float64(1.0 / fma(x, fma(x, fma(x, -0.25, 0.3333333333333333), -0.5), 1.0))) end
code[x_] := N[(N[(x * N[(x * N[(x * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + -1.0), $MachinePrecision] * N[(1.0 / N[(x * N[(x * N[(x * -0.25 + 0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.25, -0.3333333333333333\right), -0.5\right), -1\right) \cdot \frac{1}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.25, 0.3333333333333333\right), -0.5\right), 1\right)}
\end{array}
Initial program 4.9%
Taylor expanded in x around 0
*-lowering-*.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
accelerator-lowering-fma.f64 5.3%
Simplified 5.3%
Taylor expanded in x around 0
*-lowering-*.f64 N/A
+-commutative N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
+-commutative N/A
*-commutative N/A
accelerator-lowering-fma.f64 99.6%
Simplified 99.6%
associate-/r* N/A
/-lowering-/.f64 N/A
Applied egg-rr 99.6%
associate-/l* N/A
*-inverses N/A
associate-/r* N/A
*-lowering-*.f64 N/A
accelerator-lowering-fma.f64 N/A
metadata-eval N/A
sub-neg N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
associate-/r* N/A
*-inverses N/A
Applied egg-rr 99.6%
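Alternative 3 applies the *-inverses rewrite to cancel x/(x·q(x)) down to 1/q(x). Besides saving a multiply, this removes the spurious 0/0 at x = 0: Alternative 2 evaluates to NaN there, while Alternative 3 returns the true limit -1. A C check, assuming IEEE 754 semantics for 0.0/0.0:

#include <math.h>
#include <stdio.h>

int main(void) {
    double x = 0.0;
    double p = fma(x, fma(x, fma(x, -0.25, -0.3333333333333333), -0.5), -1.0);
    double q = fma(x, fma(x, fma(x, -0.25, 0.3333333333333333), -0.5), 1.0);
    double alt2 = p * (x / (x * q)); /* 0.0/0.0 -> NaN at x = 0 */
    double alt3 = p * (1.0 / q);     /* -1.0 * (1.0/1.0) = -1.0  */
    printf("alt2 at x=0: %g\n", alt2);
    printf("alt3 at x=0: %g\n", alt3);
    return 0;
}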
Alternative 4:
(FPCore (x) :precision binary64 (/ (fma (fma x (fma x -0.25 -0.3333333333333333) -0.5) x -1.0) (fma x (fma x (fma x -0.25 0.3333333333333333) -0.5) 1.0)))
double code(double x) {
return fma(fma(x, fma(x, -0.25, -0.3333333333333333), -0.5), x, -1.0) / fma(x, fma(x, fma(x, -0.25, 0.3333333333333333), -0.5), 1.0);
}
function code(x) return Float64(fma(fma(x, fma(x, -0.25, -0.3333333333333333), -0.5), x, -1.0) / fma(x, fma(x, fma(x, -0.25, 0.3333333333333333), -0.5), 1.0)) end
code[x_] := N[(N[(N[(x * N[(x * -0.25 + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] * x + -1.0), $MachinePrecision] / N[(x * N[(x * N[(x * -0.25 + 0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.25, -0.3333333333333333\right), -0.5\right), x, -1\right)}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.25, 0.3333333333333333\right), -0.5\right), 1\right)}
\end{array}
Initial program 4.9%
Taylor expanded in x around 0
*-lowering-*.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
accelerator-lowering-fma.f64 5.3%
Simplified 5.3%
Taylor expanded in x around 0
*-lowering-*.f64 N/A
+-commutative N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
+-commutative N/A
*-commutative N/A
accelerator-lowering-fma.f64 99.6%
Simplified 99.6%
associate-/r* N/A
/-lowering-/.f64 N/A
Applied egg-rr 99.6%
*-rgt-identity N/A
*-commutative N/A
accelerator-lowering-fma.f64 N/A
metadata-eval N/A
sub-neg N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 99.6%
Applied egg-rr 99.6%
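Alternative 4 expresses the same rational approximation as a single division p(x)/q(x) rather than p(x)·(1/q(x)); the numerator's outer fma is the same Horner cubic with its first two factors commuted. One division rounds once where reciprocal-then-multiply rounds twice, so the two forms can differ in the last ulp. A C sketch comparing them (the input is arbitrary):

#include <math.h>
#include <stdio.h>

int main(void) {
    double x = 0.125;
    double p = fma(fma(x, fma(x, -0.25, -0.3333333333333333), -0.5), x, -1.0);
    double q = fma(x, fma(x, fma(x, -0.25, 0.3333333333333333), -0.5), 1.0);
    double alt4 = p / q;         /* one rounding for the division */
    double alt3 = p * (1.0 / q); /* two roundings: reciprocal, multiply */
    printf("alt4 = %.17g\n", alt4);
    printf("alt3 = %.17g\n", alt3);
    return 0;
}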
Alternative 5:
(FPCore (x) :precision binary64 (fma x (fma x (fma x -0.4166666666666667 -0.5) -1.0) -1.0))
double code(double x) {
return fma(x, fma(x, fma(x, -0.4166666666666667, -0.5), -1.0), -1.0);
}
function code(x) return fma(x, fma(x, fma(x, -0.4166666666666667, -0.5), -1.0), -1.0) end
code[x_] := N[(x * N[(x * N[(x * -0.4166666666666667 + -0.5), $MachinePrecision] + -1.0), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.4166666666666667, -0.5\right), -1\right), -1\right)
\end{array}
Initial program 4.9%
Taylor expanded in x around 0
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
accelerator-lowering-fma.f64 99.5%
Simplified 99.5%
Alternative 6:
(FPCore (x) :precision binary64 (fma x (fma x -0.5 -1.0) -1.0))
double code(double x) {
return fma(x, fma(x, -0.5, -1.0), -1.0);
}
function code(x) return fma(x, fma(x, -0.5, -1.0), -1.0) end
code[x_] := N[(x * N[(x * -0.5 + -1.0), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.5, -1\right), -1\right)
\end{array}
Initial program 4.9%
Taylor expanded in x around 0
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
accelerator-lowering-fma.f64 99.3%
Simplified 99.3%
Alternative 7:
(FPCore (x) :precision binary64 (- -1.0 x))
double code(double x) {
return -1.0 - x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-1.0d0) - x
end function
public static double code(double x) {
return -1.0 - x;
}
def code(x): return -1.0 - x
function code(x) return Float64(-1.0 - x) end
function tmp = code(x) tmp = -1.0 - x; end
code[x_] := N[(-1.0 - x), $MachinePrecision]
\begin{array}{l}
\\
-1 - x
\end{array}
Initial program 4.9%
Taylor expanded in x around 0
sub-neg N/A
metadata-eval N/A
+-commutative N/A
mul-1-neg N/A
unsub-neg N/A
--lowering--.f64 98.9%
Simplified 98.9%
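Alternatives 5 through 7 drop the rational form and keep successively shorter Taylor truncations of the quotient itself, log(1 - x)/log(1 + x) = -(1 + x + x²/2 + 5x³/12 + ...): Alternative 5 keeps the cubic (0.4166... is 5/12), Alternative 6 the quadratic, Alternative 7 just -1 - x, and Alternative 8 below only the constant term -1. A C sketch comparing the truncations against the log1p form (the sample inputs are arbitrary):

#include <math.h>
#include <stdio.h>

int main(void) {
    for (double x = 1e-4; x <= 0.5; x *= 10.0) {
        double ref  = log1p(-x) / log1p(x);
        double alt5 = fma(x, fma(x, fma(x, -0.4166666666666667, -0.5), -1.0), -1.0);
        double alt6 = fma(x, fma(x, -0.5, -1.0), -1.0);
        double alt7 = -1.0 - x;
        printf("x=%g ref=%.17g cubic=%.17g quadratic=%.17g linear=%.17g\n",
               x, ref, alt5, alt6, alt7);
    }
    /* The truncations match the reference near 0 and degrade as |x|
       grows: truncation error ~ x^4, x^3, and x^2 respectively. */
    return 0;
}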
Alternative 8:
(FPCore (x) :precision binary64 -1.0)
double code(double x) {
return -1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double x) {
return -1.0;
}
def code(x): return -1.0
function code(x) return -1.0 end
function tmp = code(x) tmp = -1.0; end
code[x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 4.9%
Taylor expanded in x around 0
Simplified 97.4%
Developer target (supplied via the :alt annotation in the input; it matches Alternative 1 up to the spelling of the negation):
(FPCore (x) :precision binary64 (/ (log1p (- x)) (log1p x)))
double code(double x) {
return log1p(-x) / log1p(x);
}
public static double code(double x) {
return Math.log1p(-x) / Math.log1p(x);
}
def code(x): return math.log1p(-x) / math.log1p(x)
function code(x) return Float64(log1p(Float64(-x)) / log1p(x)) end
code[x_] := N[(N[Log[1 + (-x)], $MachinePrecision] / N[Log[1 + x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{log1p}\left(-x\right)}{\mathsf{log1p}\left(x\right)}
\end{array}
Reproduce:
herbie shell --seed 2024193
(FPCore (x)
:name "qlog (example 3.10)"
:precision binary64
:pre (<= (fabs x) 1.0)
:alt
(! :herbie-platform default (/ (log1p (- x)) (log1p x)))
(/ (log (- 1.0 x)) (log (+ 1.0 x))))