
(FPCore (eps) :precision binary64 (log (/ (- 1.0 eps) (+ 1.0 eps))))
double code(double eps) {
return log(((1.0 - eps) / (1.0 + eps)));
}
! Natural log of (1 - eps) / (1 + eps) in double precision.
real(8) function code(eps)
real(8), intent (in) :: eps
code = log(((1.0d0 - eps) / (1.0d0 + eps)))
end function
public static double code(double eps) {
    // Natural log of (1 - eps) / (1 + eps), all in binary64.
    final double num = 1.0 - eps;
    final double den = 1.0 + eps;
    return Math.log(num / den);
}
def code(eps):
    """Natural log of (1 - eps) / (1 + eps), evaluated in binary64."""
    num = 1.0 - eps
    den = 1.0 + eps
    return math.log(num / den)
# Natural log of (1 - eps) / (1 + eps); each step explicitly rounded to Float64.
function code(eps) return log(Float64(Float64(1.0 - eps) / Float64(1.0 + eps))) end
% Natural log of (1 - eps) / (1 + eps).
function tmp = code(eps) tmp = log(((1.0 - eps) / (1.0 + eps))); end
(* Natural log of (1 - eps) / (1 + eps), each subexpression rounded to $MachinePrecision. *)
code[eps_] := N[Log[N[(N[(1.0 - eps), $MachinePrecision] / N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1 - \varepsilon}{1 + \varepsilon}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (eps) :precision binary64 (log (/ (- 1.0 eps) (+ 1.0 eps))))
double code(double eps) {
return log(((1.0 - eps) / (1.0 + eps)));
}
! Natural log of (1 - eps) / (1 + eps) in double precision.
real(8) function code(eps)
real(8), intent (in) :: eps
code = log(((1.0d0 - eps) / (1.0d0 + eps)))
end function
public static double code(double eps) {
    // Natural log of (1 - eps) / (1 + eps), all in binary64.
    final double num = 1.0 - eps;
    final double den = 1.0 + eps;
    return Math.log(num / den);
}
def code(eps):
    """Natural log of (1 - eps) / (1 + eps), evaluated in binary64."""
    num = 1.0 - eps
    den = 1.0 + eps
    return math.log(num / den)
# Natural log of (1 - eps) / (1 + eps); each step explicitly rounded to Float64.
function code(eps) return log(Float64(Float64(1.0 - eps) / Float64(1.0 + eps))) end
% Natural log of (1 - eps) / (1 + eps).
function tmp = code(eps) tmp = log(((1.0 - eps) / (1.0 + eps))); end
(* Natural log of (1 - eps) / (1 + eps), each subexpression rounded to $MachinePrecision. *)
code[eps_] := N[Log[N[(N[(1.0 - eps), $MachinePrecision] / N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1 - \varepsilon}{1 + \varepsilon}\right)
\end{array}
(FPCore (eps) :precision binary64 (+ (* -2.0 eps) (* -0.6666666666666666 (pow eps 3.0))))
double code(double eps) {
return (-2.0 * eps) + (-0.6666666666666666 * pow(eps, 3.0));
}
! Degree-3 polynomial: -2*eps - (2/3)*eps**3, in double precision.
real(8) function code(eps)
real(8), intent (in) :: eps
code = ((-2.0d0) * eps) + ((-0.6666666666666666d0) * (eps ** 3.0d0))
end function
public static double code(double eps) {
    // Degree-3 odd polynomial: -2*eps - (2/3)*eps^3.
    final double linear = -2.0 * eps;
    final double cubic = -0.6666666666666666 * Math.pow(eps, 3.0);
    return linear + cubic;
}
def code(eps):
    """Degree-3 odd polynomial: -2*eps - (2/3)*eps**3, in binary64."""
    linear = -2.0 * eps
    cubic = -0.6666666666666666 * math.pow(eps, 3.0)
    return linear + cubic
# Degree-3 odd polynomial: -2*eps - (2/3)*eps^3; each step rounded to Float64.
function code(eps) return Float64(Float64(-2.0 * eps) + Float64(-0.6666666666666666 * (eps ^ 3.0))) end
% Degree-3 odd polynomial: -2*eps - (2/3)*eps^3.
function tmp = code(eps) tmp = (-2.0 * eps) + (-0.6666666666666666 * (eps ^ 3.0)); end
(* Degree-3 odd polynomial: -2*eps - (2/3)*eps^3, rounded at each step to $MachinePrecision. *)
code[eps_] := N[(N[(-2.0 * eps), $MachinePrecision] + N[(-0.6666666666666666 * N[Power[eps, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \varepsilon + -0.6666666666666666 \cdot {\varepsilon}^{3}
\end{array}
Initial program 7.2%
Taylor expanded in eps around 0 100.0%
Final simplification 100.0%
(FPCore (eps) :precision binary64 (* -2.0 eps))
double code(double eps) {
    /* Linear term only: -2*eps. */
    return eps * -2.0;
}
! Linear term only: -2*eps, in double precision.
real(8) function code(eps)
real(8), intent (in) :: eps
code = (-2.0d0) * eps
end function
public static double code(double eps) {
    // Linear term only: -2*eps.
    final double scaled = eps * -2.0;
    return scaled;
}
def code(eps):
    """Linear term only: -2*eps."""
    return eps * -2.0
# Linear term only: -2*eps, rounded to Float64.
function code(eps) return Float64(-2.0 * eps) end
% Linear term only: -2*eps.
function tmp = code(eps) tmp = -2.0 * eps; end
(* Linear term only: -2*eps, rounded to $MachinePrecision. *)
code[eps_] := N[(-2.0 * eps), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \varepsilon
\end{array}
Initial program 7.2%
Taylor expanded in eps around 0 99.7%
Final simplification 99.7%
(FPCore (eps) :precision binary64 0.0)
double code(double eps) {
    /* Constant approximation: always 0; the argument is intentionally unused. */
    (void) eps;
    return 0.0;
}
! Constant approximation: always 0; eps is intentionally unused.
real(8) function code(eps)
real(8), intent (in) :: eps
code = 0.0d0
end function
public static double code(double eps) {
    // Constant approximation: always 0; the argument is intentionally unused.
    final double zero = 0.0;
    return zero;
}
def code(eps):
    """Constant approximation: always 0.0; eps is intentionally unused."""
    return 0.0
# Constant approximation: always 0.0; eps is intentionally unused.
function code(eps) return 0.0 end
% Constant approximation: always 0.0; eps is intentionally unused.
function tmp = code(eps) tmp = 0.0; end
(* Constant approximation: always 0.0; eps is intentionally unused. *)
code[eps_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 7.2%
div-inv 7.2%
*-un-lft-identity 7.2%
sub-neg 7.2%
*-un-lft-identity 7.2%
add-sqr-sqrt 3.7%
sqrt-unprod 6.6%
sqr-neg 6.6%
sqrt-unprod 2.9%
add-sqr-sqrt 5.5%
sum-log 5.7%
log1p-udef 4.2%
neg-log 4.1%
log1p-udef 5.5%
Applied egg-rr 5.5%
sub-neg 5.5%
+-inverses 5.5%
Simplified 5.5%
Final simplification 5.5%
(FPCore (eps) :precision binary64 (- (log1p (- eps)) (log1p eps)))
double code(double eps) {
return log1p(-eps) - log1p(eps);
}
public static double code(double eps) {
    // log((1 - eps)/(1 + eps)) rewritten as log1p(-eps) - log1p(eps).
    final double left = Math.log1p(-eps);
    final double right = Math.log1p(eps);
    return left - right;
}
def code(eps):
    """log((1 - eps)/(1 + eps)) rewritten as log1p(-eps) - log1p(eps)."""
    left = math.log1p(-eps)
    right = math.log1p(eps)
    return left - right
# log((1 - eps)/(1 + eps)) rewritten as log1p(-eps) - log1p(eps); difference rounded to Float64.
function code(eps) return Float64(log1p(Float64(-eps)) - log1p(eps)) end
(* log((1 - eps)/(1 + eps)) computed as Log[1 + (-eps)] - Log[1 + eps], each term rounded to $MachinePrecision. *)
code[eps_] := N[(N[Log[1 + (-eps)], $MachinePrecision] - N[Log[1 + eps], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(-\varepsilon\right) - \mathsf{log1p}\left(\varepsilon\right)
\end{array}
herbie shell --seed 2024040
(FPCore (eps)
:name "logq (problem 3.4.3)"
:precision binary64
:pre (< (fabs eps) 1.0)
:herbie-target
(- (log1p (- eps)) (log1p eps))
(log (/ (- 1.0 eps) (+ 1.0 eps))))