
(FPCore (eps) :precision binary64 (log (/ (- 1.0 eps) (+ 1.0 eps))))
// Direct binary64 evaluation of ln((1 - eps) / (1 + eps)).
// NOTE(review): the report rates this form only ~8.8% accurate over the sampled inputs.
double code(double eps) {
return log(((1.0 - eps) / (1.0 + eps)));
}
! Direct double-precision evaluation of log((1 - eps) / (1 + eps)).
! NOTE(review): rated ~8.8% accurate in the report; see the alternatives below.
real(8) function code(eps)
real(8), intent (in) :: eps
code = log(((1.0d0 - eps) / (1.0d0 + eps)))
end function
// Direct double evaluation of ln((1 - eps) / (1 + eps)); rated ~8.8% accurate in the report.
public static double code(double eps) {
return Math.log(((1.0 - eps) / (1.0 + eps)));
}
def code(eps):
    """Directly evaluate log((1.0 - eps) / (1.0 + eps)) in binary64.

    Operation order matches the original one-liner exactly; the report
    rates this direct form only ~8.8% accurate over the sampled inputs.
    """
    numerator = 1.0 - eps
    denominator = 1.0 + eps
    return math.log(numerator / denominator)
# Direct evaluation of log((1 - eps) / (1 + eps)); the Float64(...) wrappers make each intermediate rounding explicit.
function code(eps) return log(Float64(Float64(1.0 - eps) / Float64(1.0 + eps))) end
% Direct evaluation of log((1 - eps) / (1 + eps)); rated ~8.8% accurate in the report.
function tmp = code(eps) tmp = log(((1.0 - eps) / (1.0 + eps))); end
(* Direct evaluation of Log[(1 - eps)/(1 + eps)]; each N[..., $MachinePrecision] models one machine-precision rounding. *)
code[eps_] := N[Log[N[(N[(1.0 - eps), $MachinePrecision] / N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1 - \varepsilon}{1 + \varepsilon}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (eps) :precision binary64 (log (/ (- 1.0 eps) (+ 1.0 eps))))
// Initial program restated in the alternatives listing: direct ln((1 - eps) / (1 + eps)).
double code(double eps) {
return log(((1.0 - eps) / (1.0 + eps)));
}
! Initial program restated: direct log((1 - eps) / (1 + eps)) in double precision.
real(8) function code(eps)
real(8), intent (in) :: eps
code = log(((1.0d0 - eps) / (1.0d0 + eps)))
end function
// Initial program restated: direct ln((1 - eps) / (1 + eps)).
public static double code(double eps) {
return Math.log(((1.0 - eps) / (1.0 + eps)));
}
def code(eps):
    """Initial program restated: log((1.0 - eps) / (1.0 + eps)), evaluated directly."""
    ratio = (1.0 - eps) / (1.0 + eps)
    return math.log(ratio)
# Initial program restated: direct log((1 - eps) / (1 + eps)) with explicit Float64 roundings.
function code(eps) return log(Float64(Float64(1.0 - eps) / Float64(1.0 + eps))) end
% Initial program restated: direct log((1 - eps) / (1 + eps)).
function tmp = code(eps) tmp = log(((1.0 - eps) / (1.0 + eps))); end
(* Initial program restated: Log[(1 - eps)/(1 + eps)] with per-operation machine-precision rounding. *)
code[eps_] := N[Log[N[(N[(1.0 - eps), $MachinePrecision] / N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1 - \varepsilon}{1 + \varepsilon}\right)
\end{array}
(FPCore (eps) :precision binary64 (- (log1p (- eps)) (log1p eps)))
// Alternative: ln((1 - eps)/(1 + eps)) == log1p(-eps) - log1p(eps).
// log1p(x) computes ln(1 + x) accurately for x near 0, avoiding the rounding of 1 +/- eps.
double code(double eps) {
return log1p(-eps) - log1p(eps);
}
// Alternative: Math.log1p(-eps) - Math.log1p(eps), equivalent to ln((1 - eps)/(1 + eps)).
public static double code(double eps) {
return Math.log1p(-eps) - Math.log1p(eps);
}
def code(eps):
    """Evaluate log((1 - eps)/(1 + eps)) as log1p(-eps) - log1p(eps).

    math.log1p(x) is ln(1 + x) computed accurately for x near zero,
    so this form avoids rounding 1.0 +/- eps before taking the log.
    """
    left = math.log1p(-eps)
    right = math.log1p(eps)
    return left - right
# Alternative: log1p(-eps) - log1p(eps), equivalent to log((1 - eps)/(1 + eps)).
function code(eps) return Float64(log1p(Float64(-eps)) - log1p(eps)) end
(* Alternative: Log[1 - eps] - Log[1 + eps], written via Log[1 + x] to mirror log1p. *)
code[eps_] := N[(N[Log[1 + (-eps)], $MachinePrecision] - N[Log[1 + eps], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(-\varepsilon\right) - \mathsf{log1p}\left(\varepsilon\right)
\end{array}
Initial program 8.8%
log-div 8.7%
sub-neg 8.7%
log1p-def 21.4%
log1p-def 100.0%
Simplified 100.0%
Final simplification 100.0%
(FPCore (eps) :precision binary64 (+ (* eps -2.0) (* -0.6666666666666666 (pow eps 3.0))))
// Alternative: degree-3 Taylor polynomial of ln((1 - eps)/(1 + eps)) about eps = 0:
//   -2*eps - (2/3)*eps^3   (0.6666666666666666 is the double nearest 2/3).
double code(double eps) {
return (eps * -2.0) + (-0.6666666666666666 * pow(eps, 3.0));
}
! Alternative: degree-3 Taylor polynomial about eps = 0: -2*eps - (2/3)*eps**3.
real(8) function code(eps)
real(8), intent (in) :: eps
code = (eps * (-2.0d0)) + ((-0.6666666666666666d0) * (eps ** 3.0d0))
end function
// Alternative: degree-3 Taylor polynomial about eps = 0: -2*eps - (2/3)*eps^3.
public static double code(double eps) {
return (eps * -2.0) + (-0.6666666666666666 * Math.pow(eps, 3.0));
}
def code(eps):
    """Degree-3 Taylor polynomial of log((1 - eps)/(1 + eps)) about eps = 0.

    Evaluates -2*eps - (2/3)*eps**3 with the same operations as the original:
    the constant -0.6666666666666666 is the double nearest -2/3.
    """
    linear = eps * -2.0
    cubic = -0.6666666666666666 * math.pow(eps, 3.0)
    return linear + cubic
# Alternative: degree-3 Taylor polynomial about eps = 0: -2*eps - (2/3)*eps^3.
function code(eps) return Float64(Float64(eps * -2.0) + Float64(-0.6666666666666666 * (eps ^ 3.0))) end
% Alternative: degree-3 Taylor polynomial about eps = 0: -2*eps - (2/3)*eps^3.
function tmp = code(eps) tmp = (eps * -2.0) + (-0.6666666666666666 * (eps ^ 3.0)); end
(* Alternative: degree-3 Taylor polynomial -2 eps - (2/3) eps^3, with per-operation rounding. *)
code[eps_] := N[(N[(eps * -2.0), $MachinePrecision] + N[(-0.6666666666666666 * N[Power[eps, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot -2 + -0.6666666666666666 \cdot {\varepsilon}^{3}
\end{array}
Initial program 8.8%
Taylor expanded in eps around 0 99.4%
Final simplification 99.4%
(FPCore (eps) :precision binary64 (/ 1.0 (- (* eps 0.16666666666666666) (/ 0.5 eps))))
// Alternative: 1 / (eps/6 - 1/(2*eps)) — a reciprocal rewrite derived in the log below.
// NOTE(review): divides by eps, so eps == 0 yields a division by zero (inf/NaN in IEEE).
double code(double eps) {
return 1.0 / ((eps * 0.16666666666666666) - (0.5 / eps));
}
! Alternative: 1 / (eps/6 - 1/(2*eps)); note the division by eps (eps == 0 divides by zero).
real(8) function code(eps)
real(8), intent (in) :: eps
code = 1.0d0 / ((eps * 0.16666666666666666d0) - (0.5d0 / eps))
end function
// Alternative: 1 / (eps/6 - 1/(2*eps)); eps == 0 produces an IEEE division by zero.
public static double code(double eps) {
return 1.0 / ((eps * 0.16666666666666666) - (0.5 / eps));
}
def code(eps):
    """Reciprocal rewrite: 1 / (eps/6 - 1/(2*eps)).

    Same operations as the original one-liner; 0.16666666666666666 is the
    double nearest 1/6. Raises ZeroDivisionError when eps == 0 (as the
    original does, since it divides 0.5 by eps).
    """
    term = eps * 0.16666666666666666
    correction = 0.5 / eps
    return 1.0 / (term - correction)
# Alternative: 1 / (eps/6 - 1/(2*eps)); eps == 0 divides by zero.
function code(eps) return Float64(1.0 / Float64(Float64(eps * 0.16666666666666666) - Float64(0.5 / eps))) end
% Alternative: 1 / (eps/6 - 1/(2*eps)); eps == 0 divides by zero.
function tmp = code(eps) tmp = 1.0 / ((eps * 0.16666666666666666) - (0.5 / eps)); end
(* Alternative: 1 / (eps/6 - 1/(2 eps)) with per-operation machine-precision rounding. *)
code[eps_] := N[(1.0 / N[(N[(eps * 0.16666666666666666), $MachinePrecision] - N[(0.5 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\varepsilon \cdot 0.16666666666666666 - \frac{0.5}{\varepsilon}}
\end{array}
Initial program 8.8%
div-sub 8.8%
sub-neg 8.8%
Applied egg-rr 8.8%
sub-neg 8.8%
+-commutative 8.8%
+-commutative 8.8%
Simplified 8.8%
sub-div 8.8%
clear-num 8.8%
+-commutative 8.8%
Applied egg-rr 8.8%
clear-num 8.8%
diff-log 8.7%
log1p-udef 21.4%
flip-- 16.2%
clear-num 16.2%
*-un-lft-identity 16.2%
associate-/l* 16.2%
flip-- 21.4%
sub-neg 21.4%
log1p-def 99.7%
Applied egg-rr 99.7%
Taylor expanded in eps around 0 99.2%
*-commutative 99.2%
associate-*r/ 99.2%
metadata-eval 99.2%
Simplified 99.2%
Final simplification 99.2%
(FPCore (eps) :precision binary64 (* eps -2.0))
// Alternative: first-order approximation -2*eps (Taylor expansion about eps = 0).
double code(double eps) {
return eps * -2.0;
}
! Alternative: first-order approximation -2*eps.
real(8) function code(eps)
real(8), intent (in) :: eps
code = eps * (-2.0d0)
end function
// Alternative: first-order approximation -2*eps.
public static double code(double eps) {
return eps * -2.0;
}
def code(eps):
    """First-order approximation of log((1 - eps)/(1 + eps)): -2*eps."""
    scaled = eps * -2.0
    return scaled
# Alternative: first-order approximation -2*eps.
function code(eps) return Float64(eps * -2.0) end
% Alternative: first-order approximation -2*eps.
function tmp = code(eps) tmp = eps * -2.0; end
(* Alternative: first-order approximation -2 eps. *)
code[eps_] := N[(eps * -2.0), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot -2
\end{array}
Initial program 8.8%
Taylor expanded in eps around 0 98.8%
Final simplification 98.8%
(FPCore (eps) :precision binary64 0.0)
// Degenerate alternative: constant 0.0, ignoring eps (the derivation log below ends at 5.3%).
double code(double eps) {
return 0.0;
}
! Degenerate alternative: constant 0, ignoring eps.
real(8) function code(eps)
real(8), intent (in) :: eps
code = 0.0d0
end function
// Degenerate alternative: constant 0.0, ignoring eps.
public static double code(double eps) {
return 0.0;
}
def code(eps):
    """Degenerate alternative: always 0.0, independent of eps."""
    return 0.0
# Degenerate alternative: constant 0.0, ignoring eps.
function code(eps) return 0.0 end
% Degenerate alternative: constant 0.0, ignoring eps.
function tmp = code(eps) tmp = 0.0; end
(* Degenerate alternative: constant 0.0, ignoring eps. *)
code[eps_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 8.8%
log-div 8.7%
sub-neg 8.7%
log1p-udef 21.4%
log1p-udef 100.0%
sub-neg 100.0%
add-sqr-sqrt 50.6%
sqrt-unprod 36.7%
sqr-neg 36.7%
sqrt-unprod 3.1%
add-sqr-sqrt 5.3%
Applied egg-rr 5.3%
sub-neg 5.3%
+-inverses 5.3%
Simplified 5.3%
Final simplification 5.3%
(FPCore (eps) :precision binary64 (* -2.0 (+ (+ eps (/ (pow eps 3.0) 3.0)) (/ (pow eps 5.0) 5.0))))
// Reference target: fifth-order series -2*(eps + eps^3/3 + eps^5/5)
// for ln((1 - eps)/(1 + eps)) about eps = 0.
double code(double eps) {
return -2.0 * ((eps + (pow(eps, 3.0) / 3.0)) + (pow(eps, 5.0) / 5.0));
}
! Reference target: fifth-order series -2*(eps + eps**3/3 + eps**5/5).
real(8) function code(eps)
real(8), intent (in) :: eps
code = (-2.0d0) * ((eps + ((eps ** 3.0d0) / 3.0d0)) + ((eps ** 5.0d0) / 5.0d0))
end function
// Reference target: fifth-order series -2*(eps + eps^3/3 + eps^5/5).
public static double code(double eps) {
return -2.0 * ((eps + (Math.pow(eps, 3.0) / 3.0)) + (Math.pow(eps, 5.0) / 5.0));
}
def code(eps):
    """Reference target: fifth-order series for log((1 - eps)/(1 + eps)).

    Evaluates -2*(eps + eps**3/3 + eps**5/5) with the same operation
    order as the original one-liner.
    """
    cubic = math.pow(eps, 3.0) / 3.0
    quintic = math.pow(eps, 5.0) / 5.0
    return -2.0 * ((eps + cubic) + quintic)
# Reference target: fifth-order series -2*(eps + eps^3/3 + eps^5/5).
function code(eps) return Float64(-2.0 * Float64(Float64(eps + Float64((eps ^ 3.0) / 3.0)) + Float64((eps ^ 5.0) / 5.0))) end
% Reference target: fifth-order series -2*(eps + eps^3/3 + eps^5/5).
function tmp = code(eps) tmp = -2.0 * ((eps + ((eps ^ 3.0) / 3.0)) + ((eps ^ 5.0) / 5.0)); end
(* Reference target: fifth-order series -2 (eps + eps^3/3 + eps^5/5), with per-operation rounding. *)
code[eps_] := N[(-2.0 * N[(N[(eps + N[(N[Power[eps, 3.0], $MachinePrecision] / 3.0), $MachinePrecision]), $MachinePrecision] + N[(N[Power[eps, 5.0], $MachinePrecision] / 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \left(\left(\varepsilon + \frac{{\varepsilon}^{3}}{3}\right) + \frac{{\varepsilon}^{5}}{5}\right)
\end{array}
herbie shell --seed 2023336
(FPCore (eps)
:name "logq (problem 3.4.3)"
:precision binary64
:herbie-target
(* -2.0 (+ (+ eps (/ (pow eps 3.0) 3.0)) (/ (pow eps 5.0) 5.0)))
(log (/ (- 1.0 eps) (+ 1.0 eps))))