
Initial program:

```
(FPCore (eps) :precision binary64 (log (/ (- 1.0 eps) (+ 1.0 eps))))
```

C:
```c
#include <math.h>

double code(double eps) {
    return log((1.0 - eps) / (1.0 + eps));
}
```

Fortran:
```fortran
real(8) function code(eps)
    real(8), intent (in) :: eps
    code = log((1.0d0 - eps) / (1.0d0 + eps))
end function
```

Java:
```java
public static double code(double eps) {
    return Math.log((1.0 - eps) / (1.0 + eps));
}
```

Python:
```python
import math

def code(eps):
    return math.log((1.0 - eps) / (1.0 + eps))
```

Julia:
```julia
function code(eps)
    return log(Float64(Float64(1.0 - eps) / Float64(1.0 + eps)))
end
```

MATLAB:
```matlab
function tmp = code(eps)
    tmp = log((1.0 - eps) / (1.0 + eps));
end
```

Wolfram:
```wolfram
code[eps_] := N[Log[N[(N[(1.0 - eps), $MachinePrecision] / N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
```

TeX:
```tex
\log \left(\frac{1 - \varepsilon}{1 + \varepsilon}\right)
```
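The initial program loses accuracy for small `eps`: the quotient `(1.0 - eps) / (1.0 + eps)` lands very close to 1, where `log` turns the half-ulp rounding error of its argument into a large relative error, and once `|eps|` drops below about 2^-54 both `1.0 - eps` and `1.0 + eps` round to exactly 1.0, so the program returns 0 instead of roughly `-2*eps`. A minimal sketch of that failure, using mpmath as a high-precision reference (our harness, not part of the report):

```python
import math
from mpmath import mp, mpf

mp.dps = 50  # 50-digit reference

def naive(eps):
    return math.log((1.0 - eps) / (1.0 + eps))

def reference(eps):
    e = mpf(eps)
    return float(mp.log((1 - e) / (1 + e)))

for eps in (1e-4, 1e-8, 1e-12, 1e-17):
    print(f"eps={eps:g}  naive={naive(eps):.17g}  true={reference(eps):.17g}")
    # digits degrade as eps shrinks; at eps=1e-17 the naive form returns 0.0
```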
Sampling outcomes were measured in binary64 precision. Herbie found 5 alternatives, each scored for accuracy and speedup:
One alternative replaces the expression with its degree-5 Taylor polynomial, evaluated with fused multiply-adds:

```
(FPCore (eps) :precision binary64 (fma (fma eps (* eps -0.4) -0.6666666666666666) (* eps (* eps eps)) (* eps -2.0)))
```

C:
```c
#include <math.h>

double code(double eps) {
    return fma(fma(eps, (eps * -0.4), -0.6666666666666666), (eps * (eps * eps)), (eps * -2.0));
}
```

Julia:
```julia
function code(eps)
    return fma(fma(eps, Float64(eps * -0.4), -0.6666666666666666), Float64(eps * Float64(eps * eps)), Float64(eps * -2.0))
end
```

Wolfram:
```wolfram
code[eps_] := N[(N[(eps * N[(eps * -0.4), $MachinePrecision] + -0.6666666666666666), $MachinePrecision] * N[(eps * N[(eps * eps), $MachinePrecision]), $MachinePrecision] + N[(eps * -2.0), $MachinePrecision]), $MachinePrecision]
```

TeX:
```tex
\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.4, -0.6666666666666666\right), \varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right), \varepsilon \cdot -2\right)
```
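As a quick sanity check (ours, not part of the report; the function names are ours), the polynomial can be compared with the original expression at a moderate input, where both are still accurate; the truncation error of the degree-5 series is on the order of eps^7, so the two agree to several digits:

```python
import math

def naive(eps):
    return math.log((1.0 - eps) / (1.0 + eps))

def poly(eps):
    # -2*eps - (2/3)*eps**3 - 0.4*eps**5, in the same nesting Herbie emits
    # (plain multiply-add here; a true fused fma only changes the last bits)
    return (eps * (eps * -0.4) + -0.6666666666666666) * (eps * (eps * eps)) + (eps * -2.0)

eps = 0.1
print(naive(eps))  # -0.2006706954621511...
print(poly(eps))   # agrees to about 7 digits; truncation error is roughly (2/7)*eps**7 ~ 3e-8
```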
Derivation:

1. Initial program: 8.4% accuracy
2. Taylor expanded in `eps` around 0
3. Applied rewrites: 100.0% accuracy
   (rules, in order: `lower-*.f64`, `sub-neg`, `unpow2`, `associate-*l*`, `*-commutative`, `metadata-eval`, `lower-fma.f64`, `*-commutative`, `lower-*.f64`, `sub-neg`, `unpow2`, `associate-*r*`, `*-commutative`, `metadata-eval`, `lower-fma.f64`, `*-commutative`, `lower-*.f64`)
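The "Taylor expanded" step uses the standard series for the natural log, valid on the precondition's domain |eps| < 1:

```tex
\log \frac{1 - \varepsilon}{1 + \varepsilon}
  = \log(1 - \varepsilon) - \log(1 + \varepsilon)
  = -2 \left( \varepsilon + \frac{\varepsilon^{3}}{3} + \frac{\varepsilon^{5}}{5} + \cdots \right)
```

Truncating after the fifth-order term gives -2*eps - (2/3)*eps^3 - (2/5)*eps^5, which is exactly the polynomial the fma nesting above evaluates (coefficients -2.0, -0.6666666666666666, and -0.4).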
Another alternative factors the leading `eps` out of the same polynomial (a full Horner form):

```
(FPCore (eps) :precision binary64 (* eps (fma eps (* eps (fma eps (* eps -0.4) -0.6666666666666666)) -2.0)))
```

C:
```c
#include <math.h>

double code(double eps) {
    return eps * fma(eps, (eps * fma(eps, (eps * -0.4), -0.6666666666666666)), -2.0);
}
```

Julia:
```julia
function code(eps)
    return Float64(eps * fma(eps, Float64(eps * fma(eps, Float64(eps * -0.4), -0.6666666666666666)), -2.0))
end
```

Wolfram:
```wolfram
code[eps_] := N[(eps * N[(eps * N[(eps * N[(eps * N[(eps * -0.4), $MachinePrecision] + -0.6666666666666666), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision]), $MachinePrecision]
```

TeX:
```tex
\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.4, -0.6666666666666666\right), -2\right)
```
Derivation:

1. Initial program: 8.3% accuracy
2. Taylor expanded in `eps` around 0
3. Applied rewrites: 99.7% accuracy
   (rules, in order: `lower-*.f64`, `sub-neg`, `unpow2`, `associate-*l*`, `*-commutative`, `metadata-eval`, `lower-fma.f64`, `*-commutative`, `lower-*.f64`, `sub-neg`, `unpow2`, `associate-*r*`, `*-commutative`, `metadata-eval`, `lower-fma.f64`, `*-commutative`, `lower-*.f64`)
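The two polynomial alternatives compute the same degree-5 polynomial; they differ only in operation order, so each rounds slightly differently, which is why Herbie scores them 100.0% and 99.7%. A small sketch comparing them (function names are ours; `math.fma` requires Python 3.13 or newer):

```python
import math  # math.fma is available from Python 3.13

def alt_fma(eps):
    # fma(fma(eps, eps * -0.4, -2/3), eps^3, -2 * eps)
    return math.fma(math.fma(eps, eps * -0.4, -0.6666666666666666),
                    eps * (eps * eps), eps * -2.0)

def alt_horner(eps):
    # eps * fma(eps, eps * fma(eps, eps * -0.4, -2/3), -2.0)
    return eps * math.fma(eps, eps * math.fma(eps, eps * -0.4, -0.6666666666666666), -2.0)

for eps in (1e-8, 0.25, 0.5):
    print(eps, alt_fma(eps), alt_horner(eps))  # results differ, if at all, in the last bits
```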
A further alternative rewrites the expression in terms of log1p:

```
(FPCore (eps) :precision binary64 (- (log1p (- eps)) (log1p eps)))
```

C:
```c
#include <math.h>

double code(double eps) {
    return log1p(-eps) - log1p(eps);
}
```

Java:
```java
public static double code(double eps) {
    return Math.log1p(-eps) - Math.log1p(eps);
}
```

Python:
```python
import math

def code(eps):
    return math.log1p(-eps) - math.log1p(eps)
```

Julia:
```julia
function code(eps)
    return Float64(log1p(Float64(-eps)) - log1p(eps))
end
```

Wolfram:
```wolfram
code[eps_] := N[(N[Log[1 + (-eps)], $MachinePrecision] - N[Log[1 + eps], $MachinePrecision]), $MachinePrecision]
```

TeX:
```tex
\mathsf{log1p}\left(-\varepsilon\right) - \mathsf{log1p}\left(\varepsilon\right)
```
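This form stays accurate even for tiny inputs: `log1p(x)` computes `log(1 + x)` without first rounding `1 + x` to a double, and since `log1p(-eps)` and `log1p(eps)` have opposite signs, the subtraction adds their magnitudes rather than cancelling. A two-line check (ours, not from the report):

```python
import math

eps = 1e-20
print(math.log((1.0 - eps) / (1.0 + eps)))  # 0.0: both 1.0 - eps and 1.0 + eps round to 1.0
print(math.log1p(-eps) - math.log1p(eps))   # -2e-20, correct to double precision
```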
To reproduce this report, run Herbie's shell with the same seed:

```bash
herbie shell --seed 2024219
```

and enter the original problem:

```
(FPCore (eps)
  :name "logq (problem 3.4.3)"
  :precision binary64
  :pre (< (fabs eps) 1.0)
  :alt
  (! :herbie-platform default (- (log1p (- eps)) (log1p eps)))
  (log (/ (- 1.0 eps) (+ 1.0 eps))))
```
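The accuracy percentages above come from Herbie sampling double-precision inputs that satisfy the precondition and averaging bits of error against a high-precision reference. A rough imitation of that scoring, useful for spot-checking the alternatives (our sketch, assuming mpmath is installed; Herbie's exact sampler and error metric differ in detail):

```python
import math, random, struct
from mpmath import mp, mpf

mp.dps = 50  # 50-digit reference values

def sample_eps():
    # sample uniformly over double bit patterns, keeping |eps| < 1 (rejects NaN too)
    while True:
        bits = random.getrandbits(64)
        eps = struct.unpack('<d', struct.pack('<Q', bits))[0]
        if abs(eps) < 1.0:
            return eps

def ordered(x):
    # map doubles onto the integer line so adjacent doubles differ by 1
    u = struct.unpack('<Q', struct.pack('<d', x))[0]
    return u if u < 2**63 else 2**63 - u

def score(fn, trials=2000):
    bits = 0.0
    for _ in range(trials):
        eps = sample_eps()
        true = float(mp.log((1 - mpf(eps)) / (1 + mpf(eps))))
        bits += math.log2(1 + abs(ordered(fn(eps)) - ordered(true)))
    return 100.0 * (1.0 - bits / (64.0 * trials))  # crude percent accuracy

print(score(lambda e: math.log((1.0 - e) / (1.0 + e))))  # initial program: low
print(score(lambda e: math.log1p(-e) - math.log1p(e)))   # log1p alternative: near 100
```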