
(FPCore (eps) :precision binary64 (log (/ (- 1.0 eps) (+ 1.0 eps))))
double code(double eps) {
return log(((1.0 - eps) / (1.0 + eps)));
}
real(8) function code(eps)
! Computes log((1 - eps) / (1 + eps)) in double precision.
real(8), intent (in) :: eps
code = log(((1.0d0 - eps) / (1.0d0 + eps)))
end function
public static double code(double eps) {
    // log((1 - eps) / (1 + eps)), evaluated directly.
    double ratio = (1.0 - eps) / (1.0 + eps);
    return Math.log(ratio);
}
def code(eps):
    """Return log((1 - eps) / (1 + eps)); cancels badly for tiny eps."""
    numerator = 1.0 - eps
    denominator = 1.0 + eps
    return math.log(numerator / denominator)
# log((1 - eps) / (1 + eps)) with explicit Float64 rounding at each step.
function code(eps) return log(Float64(Float64(1.0 - eps) / Float64(1.0 + eps))) end
% Direct evaluation of log((1 - eps) / (1 + eps)).
function tmp = code(eps) tmp = log(((1.0 - eps) / (1.0 + eps))); end
(* Direct evaluation of Log[(1 - eps)/(1 + eps)], rounded to machine precision at each step. *)
code[eps_] := N[Log[N[(N[(1.0 - eps), $MachinePrecision] / N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1 - \varepsilon}{1 + \varepsilon}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (eps) :precision binary64 (log (/ (- 1.0 eps) (+ 1.0 eps))))
double code(double eps) {
return log(((1.0 - eps) / (1.0 + eps)));
}
real(8) function code(eps)
! Computes log((1 - eps) / (1 + eps)) in double precision.
real(8), intent (in) :: eps
code = log(((1.0d0 - eps) / (1.0d0 + eps)))
end function
public static double code(double eps) {
    // Natural log of the ratio (1 - eps) over (1 + eps).
    double numerator = 1.0 - eps;
    double denominator = 1.0 + eps;
    return Math.log(numerator / denominator);
}
def code(eps):
    """Reference form: natural log of (1 - eps) over (1 + eps)."""
    ratio = (1.0 - eps) / (1.0 + eps)
    return math.log(ratio)
# log((1 - eps) / (1 + eps)) with explicit Float64 rounding at each step.
function code(eps) return log(Float64(Float64(1.0 - eps) / Float64(1.0 + eps))) end
% Direct evaluation of log((1 - eps) / (1 + eps)).
function tmp = code(eps) tmp = log(((1.0 - eps) / (1.0 + eps))); end
(* Direct evaluation of Log[(1 - eps)/(1 + eps)], rounded to machine precision at each step. *)
code[eps_] := N[Log[N[(N[(1.0 - eps), $MachinePrecision] / N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1 - \varepsilon}{1 + \varepsilon}\right)
\end{array}
(FPCore (eps)
:precision binary64
(*
(fma
(*
(fma
(fma 0.22857142857142856 (* eps eps) 0.16)
(* (* (* eps eps) eps) eps)
-0.4444444444444444)
(* eps eps))
(/
1.0
(fma
(fma (* eps eps) -0.2857142857142857 -0.4)
(* eps eps)
0.6666666666666666))
-2.0)
eps))
double code(double eps) {
return fma((fma(fma(0.22857142857142856, (eps * eps), 0.16), (((eps * eps) * eps) * eps), -0.4444444444444444) * (eps * eps)), (1.0 / fma(fma((eps * eps), -0.2857142857142857, -0.4), (eps * eps), 0.6666666666666666)), -2.0) * eps;
}
# Rational (Pade-style) approximation of log((1 - eps)/(1 + eps)) using fused multiply-add.
function code(eps) return Float64(fma(Float64(fma(fma(0.22857142857142856, Float64(eps * eps), 0.16), Float64(Float64(Float64(eps * eps) * eps) * eps), -0.4444444444444444) * Float64(eps * eps)), Float64(1.0 / fma(fma(Float64(eps * eps), -0.2857142857142857, -0.4), Float64(eps * eps), 0.6666666666666666)), -2.0) * eps) end
(* Rational (Pade-style) approximation of Log[(1 - eps)/(1 + eps)]; each operation rounded to machine precision. *)
code[eps_] := N[(N[(N[(N[(N[(0.22857142857142856 * N[(eps * eps), $MachinePrecision] + 0.16), $MachinePrecision] * N[(N[(N[(eps * eps), $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision] + -0.4444444444444444), $MachinePrecision] * N[(eps * eps), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(N[(N[(eps * eps), $MachinePrecision] * -0.2857142857142857 + -0.4), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + 0.6666666666666666), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.22857142857142856, \varepsilon \cdot \varepsilon, 0.16\right), \left(\left(\varepsilon \cdot \varepsilon\right) \cdot \varepsilon\right) \cdot \varepsilon, -0.4444444444444444\right) \cdot \left(\varepsilon \cdot \varepsilon\right), \frac{1}{\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon \cdot \varepsilon, -0.2857142857142857, -0.4\right), \varepsilon \cdot \varepsilon, 0.6666666666666666\right)}, -2\right) \cdot \varepsilon
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
Applied rewrites 99.5%
Applied rewrites 99.5%
Taylor expanded in eps around 0
Applied rewrites 99.5%
Taylor expanded in eps around 0
Applied rewrites 99.5%
(FPCore (eps)
:precision binary64
(*
(fma
(*
(*
(fma
(fma 0.22857142857142856 (* eps eps) 0.16)
(* (* (* eps eps) eps) eps)
-0.4444444444444444)
eps)
eps)
(/
1.0
(fma
(fma (* eps eps) -0.2857142857142857 -0.4)
(* eps eps)
0.6666666666666666))
-2.0)
eps))
double code(double eps) {
return fma(((fma(fma(0.22857142857142856, (eps * eps), 0.16), (((eps * eps) * eps) * eps), -0.4444444444444444) * eps) * eps), (1.0 / fma(fma((eps * eps), -0.2857142857142857, -0.4), (eps * eps), 0.6666666666666666)), -2.0) * eps;
}
# Rational approximation of log((1 - eps)/(1 + eps)); numerator multiplied by eps twice.
function code(eps) return Float64(fma(Float64(Float64(fma(fma(0.22857142857142856, Float64(eps * eps), 0.16), Float64(Float64(Float64(eps * eps) * eps) * eps), -0.4444444444444444) * eps) * eps), Float64(1.0 / fma(fma(Float64(eps * eps), -0.2857142857142857, -0.4), Float64(eps * eps), 0.6666666666666666)), -2.0) * eps) end
(* Rational approximation of Log[(1 - eps)/(1 + eps)]; numerator multiplied by eps twice. *)
code[eps_] := N[(N[(N[(N[(N[(N[(0.22857142857142856 * N[(eps * eps), $MachinePrecision] + 0.16), $MachinePrecision] * N[(N[(N[(eps * eps), $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision] + -0.4444444444444444), $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision] * N[(1.0 / N[(N[(N[(eps * eps), $MachinePrecision] * -0.2857142857142857 + -0.4), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + 0.6666666666666666), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(0.22857142857142856, \varepsilon \cdot \varepsilon, 0.16\right), \left(\left(\varepsilon \cdot \varepsilon\right) \cdot \varepsilon\right) \cdot \varepsilon, -0.4444444444444444\right) \cdot \varepsilon\right) \cdot \varepsilon, \frac{1}{\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon \cdot \varepsilon, -0.2857142857142857, -0.4\right), \varepsilon \cdot \varepsilon, 0.6666666666666666\right)}, -2\right) \cdot \varepsilon
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
Applied rewrites 99.5%
Applied rewrites 99.5%
Taylor expanded in eps around 0
Applied rewrites 99.5%
(FPCore (eps)
:precision binary64
(*
(+
(/
(*
(*
(fma
(fma 0.22857142857142856 (* eps eps) 0.16)
(* (* (* eps eps) eps) eps)
-0.4444444444444444)
eps)
eps)
(fma
(fma (* eps eps) -0.2857142857142857 -0.4)
(* eps eps)
0.6666666666666666))
-2.0)
eps))
double code(double eps) {
return ((((fma(fma(0.22857142857142856, (eps * eps), 0.16), (((eps * eps) * eps) * eps), -0.4444444444444444) * eps) * eps) / fma(fma((eps * eps), -0.2857142857142857, -0.4), (eps * eps), 0.6666666666666666)) + -2.0) * eps;
}
# Rational approximation of log((1 - eps)/(1 + eps)) using plain division instead of a reciprocal.
function code(eps) return Float64(Float64(Float64(Float64(Float64(fma(fma(0.22857142857142856, Float64(eps * eps), 0.16), Float64(Float64(Float64(eps * eps) * eps) * eps), -0.4444444444444444) * eps) * eps) / fma(fma(Float64(eps * eps), -0.2857142857142857, -0.4), Float64(eps * eps), 0.6666666666666666)) + -2.0) * eps) end
(* Rational approximation of Log[(1 - eps)/(1 + eps)] using plain division. *)
code[eps_] := N[(N[(N[(N[(N[(N[(N[(0.22857142857142856 * N[(eps * eps), $MachinePrecision] + 0.16), $MachinePrecision] * N[(N[(N[(eps * eps), $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision] + -0.4444444444444444), $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision] / N[(N[(N[(eps * eps), $MachinePrecision] * -0.2857142857142857 + -0.4), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + 0.6666666666666666), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.22857142857142856, \varepsilon \cdot \varepsilon, 0.16\right), \left(\left(\varepsilon \cdot \varepsilon\right) \cdot \varepsilon\right) \cdot \varepsilon, -0.4444444444444444\right) \cdot \varepsilon\right) \cdot \varepsilon}{\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon \cdot \varepsilon, -0.2857142857142857, -0.4\right), \varepsilon \cdot \varepsilon, 0.6666666666666666\right)} + -2\right) \cdot \varepsilon
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
Applied rewrites 99.5%
Applied rewrites 99.5%
Taylor expanded in eps around 0
Applied rewrites 99.5%
Applied rewrites 99.5%
(FPCore (eps)
:precision binary64
(*
(+
(/
(* (* (fma 0.16 (* (* (* eps eps) eps) eps) -0.4444444444444444) eps) eps)
(fma
(fma (* eps eps) -0.2857142857142857 -0.4)
(* eps eps)
0.6666666666666666))
-2.0)
eps))
double code(double eps) {
return ((((fma(0.16, (((eps * eps) * eps) * eps), -0.4444444444444444) * eps) * eps) / fma(fma((eps * eps), -0.2857142857142857, -0.4), (eps * eps), 0.6666666666666666)) + -2.0) * eps;
}
# Reduced rational approximation of log((1 - eps)/(1 + eps)).
function code(eps) return Float64(Float64(Float64(Float64(Float64(fma(0.16, Float64(Float64(Float64(eps * eps) * eps) * eps), -0.4444444444444444) * eps) * eps) / fma(fma(Float64(eps * eps), -0.2857142857142857, -0.4), Float64(eps * eps), 0.6666666666666666)) + -2.0) * eps) end
(* Reduced rational approximation of Log[(1 - eps)/(1 + eps)]. *)
code[eps_] := N[(N[(N[(N[(N[(N[(0.16 * N[(N[(N[(eps * eps), $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision] + -0.4444444444444444), $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision] / N[(N[(N[(eps * eps), $MachinePrecision] * -0.2857142857142857 + -0.4), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + 0.6666666666666666), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{\left(\mathsf{fma}\left(0.16, \left(\left(\varepsilon \cdot \varepsilon\right) \cdot \varepsilon\right) \cdot \varepsilon, -0.4444444444444444\right) \cdot \varepsilon\right) \cdot \varepsilon}{\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon \cdot \varepsilon, -0.2857142857142857, -0.4\right), \varepsilon \cdot \varepsilon, 0.6666666666666666\right)} + -2\right) \cdot \varepsilon
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
Applied rewrites 99.5%
Applied rewrites 99.5%
Taylor expanded in eps around 0
Applied rewrites 99.5%
Applied rewrites 99.5%
(FPCore (eps)
:precision binary64
(*
(fma
(fma
(fma -0.2857142857142857 (* eps eps) -0.4)
(* eps eps)
-0.6666666666666666)
(* eps eps)
-2.0)
eps))
double code(double eps) {
return fma(fma(fma(-0.2857142857142857, (eps * eps), -0.4), (eps * eps), -0.6666666666666666), (eps * eps), -2.0) * eps;
}
# Degree-7 odd Taylor polynomial of log((1 - eps)/(1 + eps)) in Horner/fma form.
function code(eps) return Float64(fma(fma(fma(-0.2857142857142857, Float64(eps * eps), -0.4), Float64(eps * eps), -0.6666666666666666), Float64(eps * eps), -2.0) * eps) end
(* Degree-7 odd Taylor polynomial of Log[(1 - eps)/(1 + eps)] in Horner form. *)
code[eps_] := N[(N[(N[(N[(-0.2857142857142857 * N[(eps * eps), $MachinePrecision] + -0.4), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + -0.6666666666666666), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + -2.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.2857142857142857, \varepsilon \cdot \varepsilon, -0.4\right), \varepsilon \cdot \varepsilon, -0.6666666666666666\right), \varepsilon \cdot \varepsilon, -2\right) \cdot \varepsilon
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
Applied rewrites 99.5%
(FPCore (eps) :precision binary64 (fma (* (* (fma -0.4 (* eps eps) -0.6666666666666666) eps) eps) eps (* -2.0 eps)))
double code(double eps) {
return fma(((fma(-0.4, (eps * eps), -0.6666666666666666) * eps) * eps), eps, (-2.0 * eps));
}
# Degree-5 odd polynomial approximation; final fma folds in the -2*eps term.
function code(eps) return fma(Float64(Float64(fma(-0.4, Float64(eps * eps), -0.6666666666666666) * eps) * eps), eps, Float64(-2.0 * eps)) end
(* Degree-5 odd polynomial approximation of Log[(1 - eps)/(1 + eps)]. *)
code[eps_] := N[(N[(N[(N[(-0.4 * N[(eps * eps), $MachinePrecision] + -0.6666666666666666), $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision] * eps + N[(-2.0 * eps), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\mathsf{fma}\left(-0.4, \varepsilon \cdot \varepsilon, -0.6666666666666666\right) \cdot \varepsilon\right) \cdot \varepsilon, \varepsilon, -2 \cdot \varepsilon\right)
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.4
Applied rewrites 99.4%
Applied rewrites 99.4%
(FPCore (eps) :precision binary64 (* (fma (fma -0.4 (* eps eps) -0.6666666666666666) (* eps eps) -2.0) eps))
double code(double eps) {
return fma(fma(-0.4, (eps * eps), -0.6666666666666666), (eps * eps), -2.0) * eps;
}
# Degree-5 odd polynomial approximation in nested Horner/fma form.
function code(eps) return Float64(fma(fma(-0.4, Float64(eps * eps), -0.6666666666666666), Float64(eps * eps), -2.0) * eps) end
(* Degree-5 odd polynomial approximation in Horner form. *)
code[eps_] := N[(N[(N[(-0.4 * N[(eps * eps), $MachinePrecision] + -0.6666666666666666), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + -2.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(-0.4, \varepsilon \cdot \varepsilon, -0.6666666666666666\right), \varepsilon \cdot \varepsilon, -2\right) \cdot \varepsilon
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 N/A
sub-neg N/A
metadata-eval N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.4
Applied rewrites 99.4%
(FPCore (eps) :precision binary64 (fma (* -0.6666666666666666 (* eps eps)) eps (* -2.0 eps)))
double code(double eps) {
return fma((-0.6666666666666666 * (eps * eps)), eps, (-2.0 * eps));
}
# Cubic approximation: -2*eps - (2/3)*eps^3, via one fma.
function code(eps) return fma(Float64(-0.6666666666666666 * Float64(eps * eps)), eps, Float64(-2.0 * eps)) end
(* Cubic approximation: -2*eps - (2/3)*eps^3. *)
code[eps_] := N[(N[(-0.6666666666666666 * N[(eps * eps), $MachinePrecision]), $MachinePrecision] * eps + N[(-2.0 * eps), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.6666666666666666 \cdot \left(\varepsilon \cdot \varepsilon\right), \varepsilon, -2 \cdot \varepsilon\right)
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.0
Applied rewrites 99.0%
Applied rewrites 99.0%
(FPCore (eps) :precision binary64 (* (fma (* eps eps) -0.6666666666666666 -2.0) eps))
double code(double eps) {
return fma((eps * eps), -0.6666666666666666, -2.0) * eps;
}
# Cubic approximation in factored form: (-2 - (2/3)*eps^2) * eps.
function code(eps) return Float64(fma(Float64(eps * eps), -0.6666666666666666, -2.0) * eps) end
(* Cubic approximation in factored form: (-2 - (2/3)*eps^2) * eps. *)
code[eps_] := N[(N[(N[(eps * eps), $MachinePrecision] * -0.6666666666666666 + -2.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon \cdot \varepsilon, -0.6666666666666666, -2\right) \cdot \varepsilon
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.0
Applied rewrites 99.0%
(FPCore (eps) :precision binary64 (* -2.0 eps))
double code(double eps) {
    /* First-order approximation: log((1 - eps)/(1 + eps)) ~ -2*eps. */
    return eps * -2.0;
}
real(8) function code(eps)
! First-order approximation of log((1 - eps)/(1 + eps)): -2*eps.
real(8), intent (in) :: eps
code = (-2.0d0) * eps
end function
public static double code(double eps) {
    // First-order approximation: log((1 - eps)/(1 + eps)) ~ -2*eps.
    return eps * -2.0;
}
def code(eps):
    """First-order approximation of log((1 - eps)/(1 + eps)): -2*eps."""
    return eps * -2.0
# First-order approximation: -2*eps.
function code(eps) return Float64(-2.0 * eps) end
% First-order approximation: -2*eps.
function tmp = code(eps) tmp = -2.0 * eps; end
(* First-order approximation: -2*eps. *)
code[eps_] := N[(-2.0 * eps), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \varepsilon
\end{array}
Initial program 10.3%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 98.2
Applied rewrites 98.2%
Final simplification 98.2%
(FPCore (eps) :precision binary64 0.0)
double code(double eps) {
    /* Degenerate constant approximation: always 0. */
    const double result = 0.0;
    return result;
}
real(8) function code(eps)
! Degenerate constant approximation: always returns 0.
real(8), intent (in) :: eps
code = 0.0d0
end function
public static double code(double eps) {
    // Degenerate constant approximation: always 0.
    double result = 0.0;
    return result;
}
def code(eps):
    """Degenerate constant approximation: always returns 0.0."""
    result = 0.0
    return result
# Degenerate constant approximation: always returns 0.0.
function code(eps) return 0.0 end
% Degenerate constant approximation: always returns 0.0.
function tmp = code(eps) tmp = 0.0; end
(* Degenerate constant approximation: always returns 0. *)
code[eps_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 10.3%
Applied rewrites 5.4%
(FPCore (eps) :precision binary64 (- (log1p (- eps)) (log1p eps)))
double code(double eps) {
return log1p(-eps) - log1p(eps);
}
public static double code(double eps) {
    // Cancellation-free rewrite: log1p(-eps) - log1p(eps).
    double left = Math.log1p(-eps);
    double right = Math.log1p(eps);
    return left - right;
}
def code(eps):
    """Cancellation-free rewrite: log1p(-eps) - log1p(eps)."""
    left = math.log1p(-eps)
    right = math.log1p(eps)
    return left - right
# Cancellation-free rewrite: log1p(-eps) - log1p(eps).
function code(eps) return Float64(log1p(Float64(-eps)) - log1p(eps)) end
code[eps_] := N[(N[Log[1 + (-eps)], $MachinePrecision] - N[Log[1 + eps], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(-\varepsilon\right) - \mathsf{log1p}\left(\varepsilon\right)
\end{array}
herbie shell --seed 2024235
(FPCore (eps)
:name "logq (problem 3.4.3)"
:precision binary64
:pre (< (fabs eps) 1.0)
:alt
(! :herbie-platform default (- (log1p (- eps)) (log1p eps)))
(log (/ (- 1.0 eps) (+ 1.0 eps))))