
; Reference expression: log((1 - eps) / (1 + eps)) evaluated in binary64.
(FPCore (eps) :precision binary64 (log (/ (- 1.0 eps) (+ 1.0 eps))))
double code(double eps) {
return log(((1.0 - eps) / (1.0 + eps)));
}
! Computes log((1 - eps) / (1 + eps)) in double precision (real(8)).
real(8) function code(eps)
real(8), intent (in) :: eps
code = log(((1.0d0 - eps) / (1.0d0 + eps)))
end function
/** Computes log((1 - eps) / (1 + eps)) directly in double precision. */
public static double code(double eps) {
    final double numerator = 1.0 - eps;
    final double denominator = 1.0 + eps;
    return Math.log(numerator / denominator);
}
def code(eps):
    """Return log((1 - eps) / (1 + eps)) evaluated directly in binary64."""
    numerator = 1.0 - eps
    denominator = 1.0 + eps
    return math.log(numerator / denominator)
# Computes log((1 - eps) / (1 + eps)), rounding each intermediate to Float64.
function code(eps)
    num = Float64(1.0 - eps)
    den = Float64(1.0 + eps)
    return log(Float64(num / den))
end
% Computes log((1.0 - eps) / (1.0 + eps)) directly.
function tmp = code(eps) tmp = log(((1.0 - eps) / (1.0 + eps))); end
(* log((1 - eps)/(1 + eps)) with each intermediate rounded to $MachinePrecision. *)
code[eps_] := N[Log[N[(N[(1.0 - eps), $MachinePrecision] / N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1 - \varepsilon}{1 + \varepsilon}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; First listed alternative: identical to the input expression (no rewrite applied).
(FPCore (eps) :precision binary64 (log (/ (- 1.0 eps) (+ 1.0 eps))))
/* Computes log((1.0 - eps) / (1.0 + eps)) directly in double precision. */
double code(double eps) {
return log(((1.0 - eps) / (1.0 + eps)));
}
! Computes log((1 - eps) / (1 + eps)) in double precision (real(8)).
real(8) function code(eps)
real(8), intent (in) :: eps
code = log(((1.0d0 - eps) / (1.0d0 + eps)))
end function
/** Computes log((1.0 - eps) / (1.0 + eps)) using Math.log. */
public static double code(double eps) {
return Math.log(((1.0 - eps) / (1.0 + eps)));
}
# Computes log((1.0 - eps) / (1.0 + eps)) via math.log.
def code(eps): return math.log(((1.0 - eps) / (1.0 + eps)))
# Computes log((1 - eps) / (1 + eps)), rounding each intermediate to Float64.
function code(eps) return log(Float64(Float64(1.0 - eps) / Float64(1.0 + eps))) end
% Computes log((1.0 - eps) / (1.0 + eps)) directly.
function tmp = code(eps) tmp = log(((1.0 - eps) / (1.0 + eps))); end
(* log((1 - eps)/(1 + eps)) with each intermediate rounded to $MachinePrecision. *)
code[eps_] := N[Log[N[(N[(1.0 - eps), $MachinePrecision] / N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1 - \varepsilon}{1 + \varepsilon}\right)
\end{array}
; Degree-5 Taylor polynomial of log((1 - eps)/(1 + eps)) around eps = 0:
; -2*eps - (2/3)*eps^3 - (2/5)*eps^5, evaluated with fused multiply-adds.
(FPCore (eps) :precision binary64 (fma (* (* (fma (* eps eps) -0.4 -0.6666666666666666) eps) eps) eps (* -2.0 eps)))
/* Degree-5 Taylor approximation of log((1 - eps)/(1 + eps)) around eps = 0:
   -2*eps - (2/3)*eps^3 - (2/5)*eps^5, evaluated with fused multiply-adds. */
double code(double eps) {
return fma(((fma((eps * eps), -0.4, -0.6666666666666666) * eps) * eps), eps, (-2.0 * eps));
}
# Degree-5 Taylor approximation of log((1 - eps)/(1 + eps)) around eps = 0,
# evaluated with fused multiply-adds and explicit Float64 rounding.
function code(eps) return fma(Float64(Float64(fma(Float64(eps * eps), -0.4, -0.6666666666666666) * eps) * eps), eps, Float64(-2.0 * eps)) end
(* Degree-5 Taylor approximation of log((1 - eps)/(1 + eps)) around eps = 0,
   with each intermediate rounded to $MachinePrecision. *)
code[eps_] := N[(N[(N[(N[(N[(eps * eps), $MachinePrecision] * -0.4 + -0.6666666666666666), $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision] * eps + N[(-2.0 * eps), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\mathsf{fma}\left(\varepsilon \cdot \varepsilon, -0.4, -0.6666666666666666\right) \cdot \varepsilon\right) \cdot \varepsilon, \varepsilon, -2 \cdot \varepsilon\right)
\end{array}
Initial program 8.1%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
unpow2 N/A
associate-*r* N/A
metadata-eval N/A
lower-fma.f64 N/A
lower-*.f64 N/A
sub-neg N/A
unpow2 N/A
associate-*r* N/A
metadata-eval N/A
lower-fma.f64 N/A
lower-*.f64 99.7
Applied rewrites 99.7%
Applied rewrites 99.7%
; Same degree-5 Taylor polynomial of log((1 - eps)/(1 + eps)) around eps = 0,
; fully factored Horner form: ((((-0.4*eps)*eps - 2/3)*eps)*eps - 2) * eps.
(FPCore (eps) :precision binary64 (* (fma (* (fma (* -0.4 eps) eps -0.6666666666666666) eps) eps -2.0) eps))
/* Degree-5 Taylor approximation of log((1 - eps)/(1 + eps)) around eps = 0,
   in fully factored Horner form with fused multiply-adds. */
double code(double eps) {
return fma((fma((-0.4 * eps), eps, -0.6666666666666666) * eps), eps, -2.0) * eps;
}
# Degree-5 Taylor approximation of log((1 - eps)/(1 + eps)) around eps = 0,
# in Horner form with fused multiply-adds and explicit Float64 rounding.
function code(eps) return Float64(fma(Float64(fma(Float64(-0.4 * eps), eps, -0.6666666666666666) * eps), eps, -2.0) * eps) end
(* Degree-5 Taylor approximation of log((1 - eps)/(1 + eps)) around eps = 0,
   in Horner form, with each intermediate rounded to $MachinePrecision. *)
code[eps_] := N[(N[(N[(N[(N[(-0.4 * eps), $MachinePrecision] * eps + -0.6666666666666666), $MachinePrecision] * eps), $MachinePrecision] * eps + -2.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(-0.4 \cdot \varepsilon, \varepsilon, -0.6666666666666666\right) \cdot \varepsilon, \varepsilon, -2\right) \cdot \varepsilon
\end{array}
Initial program 8.5%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
unpow2 N/A
associate-*r* N/A
metadata-eval N/A
lower-fma.f64 N/A
lower-*.f64 N/A
sub-neg N/A
unpow2 N/A
associate-*r* N/A
metadata-eval N/A
lower-fma.f64 N/A
lower-*.f64 99.7
Applied rewrites 99.7%
; Rewrite of log((1 - eps)/(1 + eps)) as log(1 - eps) - log(1 + eps),
; expressed with log1p: log1p(-eps) - log1p(eps).
(FPCore (eps) :precision binary64 (- (log1p (- eps)) (log1p eps)))
double code(double eps) {
return log1p(-eps) - log1p(eps);
}
/** Computes log((1 - eps)/(1 + eps)) as Math.log1p(-eps) - Math.log1p(eps). */
public static double code(double eps) {
return Math.log1p(-eps) - Math.log1p(eps);
}
def code(eps):
    """Return log((1 - eps) / (1 + eps)) computed as log1p(-eps) - log1p(eps)."""
    left = math.log1p(-eps)
    right = math.log1p(eps)
    return left - right
# Computes log((1 - eps)/(1 + eps)) as log1p(-eps) - log1p(eps).
function code(eps) return Float64(log1p(Float64(-eps)) - log1p(eps)) end
(* Computes log((1 - eps)/(1 + eps)) as Log[1 - eps] - Log[1 + eps],
   with each term rounded to $MachinePrecision. *)
code[eps_] := N[(N[Log[1 + (-eps)], $MachinePrecision] - N[Log[1 + eps], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(-\varepsilon\right) - \mathsf{log1p}\left(\varepsilon\right)
\end{array}
herbie shell --seed 2024234
; Herbie job specification: input expression log((1 - eps)/(1 + eps)),
; precondition |eps| < 1, and the chosen log1p alternative recorded as :alt.
(FPCore (eps)
 :name "logq (problem 3.4.3)"
 :precision binary64
 :pre (< (fabs eps) 1.0)
 :alt
 (! :herbie-platform default (- (log1p (- eps)) (log1p eps)))
 (log (/ (- 1.0 eps) (+ 1.0 eps))))