
; Naive form: (x + eps)^2 - x^2 in binary64 — cancels badly when |eps| << |x|.
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 2.0) (pow x 2.0)))
double code(double x, double eps) {
return pow((x + eps), 2.0) - pow(x, 2.0);
}
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
! Naive difference of squares: (x + eps)**2 - x**2.
! Use an INTEGER exponent: raising a negative real base to a REAL power
! (x ** 2.0d0) is non-conforming Fortran, and x may be negative here.
! For x >= 0 the result is identical; x**2 is computed as an exact product.
code = ((x + eps) ** 2) - (x ** 2)
end function
public static double code(double x, double eps) {
    // Naive difference of squares: (x + eps)^2 - x^2.
    final double shifted = Math.pow(x + eps, 2.0);
    final double base = Math.pow(x, 2.0);
    return shifted - base;
}
def code(x, eps):
    """Naive difference of squares: (x + eps)**2 - x**2 via math.pow."""
    shifted = math.pow(x + eps, 2.0)
    base = math.pow(x, 2.0)
    return shifted - base
function code(x, eps)
    # Naive difference of squares: (x + eps)^2 - x^2, binary64 throughout.
    lhs = Float64(x + eps) ^ 2.0
    rhs = x ^ 2.0
    return Float64(lhs - rhs)
end
function tmp = code(x, eps)
  % Naive difference of squares: (x + eps)^2 - x^2.
  shifted = x + eps;
  tmp = (shifted ^ 2.0) - (x ^ 2.0);
end
(* Naive difference of squares (x+eps)^2 - x^2 at $MachinePrecision. *)
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(x + \varepsilon\right)}^{2} - {x}^{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Original (naive) program: (x + eps)^2 - x^2 in binary64.
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 2.0) (pow x 2.0)))
double code(double x, double eps) {
return pow((x + eps), 2.0) - pow(x, 2.0);
}
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
! Naive difference of squares: (x + eps)**2 - x**2.
! Integer exponent instead of 2.0d0: a negative real base raised to a real
! power is non-conforming Fortran, and x may be negative in this benchmark.
code = ((x + eps) ** 2) - (x ** 2)
end function
public static double code(double x, double eps) {
    // (x + eps)^2 - x^2, evaluated directly (naive form).
    final double sum = x + eps;
    return Math.pow(sum, 2.0) - Math.pow(x, 2.0);
}
def code(x, eps):
    """(x + eps)**2 - x**2, evaluated directly (naive form)."""
    total = x + eps
    return math.pow(total, 2.0) - math.pow(x, 2.0)
function code(x, eps)
    # (x + eps)^2 - x^2, evaluated directly (naive form).
    total = Float64(x + eps)
    return Float64(total ^ 2.0 - x ^ 2.0)
end
function tmp = code(x, eps)
  % (x + eps)^2 - x^2, evaluated directly (naive form).
  total = x + eps;
  tmp = (total ^ 2.0) - (x ^ 2.0);
end
(* Naive difference of squares (x+eps)^2 - x^2 at $MachinePrecision. *)
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(x + \varepsilon\right)}^{2} - {x}^{2}
\end{array}
; Rewrite: eps^2 + 2*x*eps computed with a fused multiply-add (no cancellation).
(FPCore (x eps) :precision binary64 (fma eps eps (* eps (* x 2.0))))
double code(double x, double eps) {
return fma(eps, eps, (eps * (x * 2.0)));
}
function code(x, eps)
    # eps^2 + eps*(2x) via fused multiply-add — rewrite of (x+eps)^2 - x^2.
    doubled = Float64(x * 2.0)
    scaled = Float64(eps * doubled)
    return fma(eps, eps, scaled)
end
(* eps^2 + eps*(2x) — cancellation-free rewrite of (x+eps)^2 - x^2. *)
code[x_, eps_] := N[(eps * eps + N[(eps * N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon, \varepsilon, \varepsilon \cdot \left(x \cdot 2\right)\right)
\end{array}
Initial program 76.9%
+-commutative 76.9%
unpow2 76.9%
unpow2 76.9%
difference-of-squares 77.0%
*-commutative 77.0%
associate--l+ 95.8%
+-inverses 95.8%
+-rgt-identity 95.8%
associate-+l+ 95.8%
*-lft-identity 95.8%
metadata-eval 95.8%
distribute-rgt-out 95.8%
distribute-lft-out 95.8%
metadata-eval 95.8%
metadata-eval 95.8%
metadata-eval 95.8%
Simplified 95.8%
distribute-lft-in 95.8%
fma-define 95.8%
Applied egg-rr 95.8%
Final simplification 95.8%
; Rewrite: eps*(eps + x) + eps*x == eps^2 + 2*x*eps (no cancellation).
(FPCore (x eps) :precision binary64 (+ (* eps (+ eps x)) (* eps x)))
double code(double x, double eps) {
	/* eps*(eps + x) + eps*x — cancellation-free rewrite of (x+eps)^2 - x^2. */
	const double left = eps * (eps + x);
	const double right = eps * x;
	return left + right;
}
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: left, right
! eps*(eps + x) + eps*x — cancellation-free rewrite of (x+eps)**2 - x**2.
left = eps * (eps + x)
right = eps * x
code = left + right
end function
public static double code(double x, double eps) {
    // eps*(eps + x) + eps*x — cancellation-free rewrite of (x+eps)^2 - x^2.
    final double left = eps * (eps + x);
    final double right = eps * x;
    return left + right;
}
def code(x, eps):
    """eps*(eps + x) + eps*x — cancellation-free rewrite of (x+eps)**2 - x**2."""
    left = eps * (eps + x)
    right = eps * x
    return left + right
function code(x, eps)
    # eps*(eps + x) + eps*x — cancellation-free rewrite of (x+eps)^2 - x^2.
    left = Float64(eps * Float64(eps + x))
    right = Float64(eps * x)
    return Float64(left + right)
end
function tmp = code(x, eps)
  % eps*(eps + x) + eps*x — cancellation-free rewrite of (x+eps)^2 - x^2.
  left = eps * (eps + x);
  tmp = left + (eps * x);
end
(* eps*(eps + x) + eps*x — cancellation-free rewrite of (x+eps)^2 - x^2. *)
code[x_, eps_] := N[(N[(eps * N[(eps + x), $MachinePrecision]), $MachinePrecision] + N[(eps * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon + x\right) + \varepsilon \cdot x
\end{array}
Initial program 76.9%
unpow2 76.9%
unpow2 76.9%
difference-of-squares 77.0%
+-commutative 77.0%
+-commutative 77.0%
Applied egg-rr 77.0%
Taylor expanded in eps around 0 95.8%
*-commutative 95.8%
distribute-lft-in 95.8%
Applied egg-rr 95.8%
Final simplification 95.8%
; Rewrite: eps * (eps + 2*x) — factored form, no cancellation.
(FPCore (x eps) :precision binary64 (* eps (+ eps (* x 2.0))))
double code(double x, double eps) {
	/* eps * (eps + 2x) — factored rewrite of (x+eps)^2 - x^2. */
	const double inner = eps + (x * 2.0);
	return eps * inner;
}
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: inner
! eps * (eps + 2*x) — factored rewrite of (x+eps)**2 - x**2.
inner = eps + (x * 2.0d0)
code = eps * inner
end function
public static double code(double x, double eps) {
    // eps * (eps + 2x) — factored rewrite of (x+eps)^2 - x^2.
    final double inner = eps + (x * 2.0);
    return eps * inner;
}
def code(x, eps):
    """eps * (eps + 2*x) — factored rewrite of (x+eps)**2 - x**2."""
    inner = eps + (x * 2.0)
    return eps * inner
function code(x, eps)
    # eps * (eps + 2x) — factored rewrite of (x+eps)^2 - x^2.
    inner = Float64(eps + Float64(x * 2.0))
    return Float64(eps * inner)
end
function tmp = code(x, eps)
  % eps * (eps + 2*x) — factored rewrite of (x+eps)^2 - x^2.
  inner = eps + (x * 2.0);
  tmp = eps * inner;
end
(* eps * (eps + 2x) — factored rewrite of (x+eps)^2 - x^2. *)
code[x_, eps_] := N[(eps * N[(eps + N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon + x \cdot 2\right)
\end{array}
Initial program 76.9%
+-commutative 76.9%
unpow2 76.9%
unpow2 76.9%
difference-of-squares 77.0%
*-commutative 77.0%
associate--l+ 95.8%
+-inverses 95.8%
+-rgt-identity 95.8%
associate-+l+ 95.8%
*-lft-identity 95.8%
metadata-eval 95.8%
distribute-rgt-out 95.8%
distribute-lft-out 95.8%
metadata-eval 95.8%
metadata-eval 95.8%
metadata-eval 95.8%
Simplified 95.8%
Final simplification 95.8%
; First-order Taylor approximation in eps: 2*eps*x (drops the eps^2 term).
(FPCore (x eps) :precision binary64 (* 2.0 (* eps x)))
double code(double x, double eps) {
	/* 2*eps*x — first-order Taylor term of (x+eps)^2 - x^2 in eps. */
	const double product = eps * x;
	return 2.0 * product;
}
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: product
! 2*eps*x — first-order Taylor term of (x+eps)**2 - x**2 in eps.
product = eps * x
code = 2.0d0 * product
end function
public static double code(double x, double eps) {
    // 2*eps*x — first-order Taylor term of (x+eps)^2 - x^2 in eps.
    final double product = eps * x;
    return 2.0 * product;
}
def code(x, eps):
    """2*eps*x — first-order Taylor term of (x+eps)**2 - x**2 in eps."""
    product = eps * x
    return 2.0 * product
function code(x, eps)
    # 2*eps*x — first-order Taylor term of (x+eps)^2 - x^2 in eps.
    product = Float64(eps * x)
    return Float64(2.0 * product)
end
function tmp = code(x, eps)
  % 2*eps*x — first-order Taylor term of (x+eps)^2 - x^2 in eps.
  product = eps * x;
  tmp = 2.0 * product;
end
(* 2*eps*x — first-order Taylor term of (x+eps)^2 - x^2 in eps. *)
code[x_, eps_] := N[(2.0 * N[(eps * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \left(\varepsilon \cdot x\right)
\end{array}
Initial program 76.9%
+-commutative 76.9%
unpow2 76.9%
unpow2 76.9%
difference-of-squares 77.0%
*-commutative 77.0%
associate--l+ 95.8%
+-inverses 95.8%
+-rgt-identity 95.8%
associate-+l+ 95.8%
*-lft-identity 95.8%
metadata-eval 95.8%
distribute-rgt-out 95.8%
distribute-lft-out 95.8%
metadata-eval 95.8%
metadata-eval 95.8%
metadata-eval 95.8%
Simplified 95.8%
Taylor expanded in eps around 0 56.8%
Final simplification 56.8%
; First-order Taylor approximation, reassociated: x * (2*eps).
(FPCore (x eps) :precision binary64 (* x (* eps 2.0)))
double code(double x, double eps) {
	/* x * (2*eps) — reassociated first-order Taylor term in eps. */
	const double doubled_eps = eps * 2.0;
	return x * doubled_eps;
}
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: doubled_eps
! x * (2*eps) — reassociated first-order Taylor term in eps.
doubled_eps = eps * 2.0d0
code = x * doubled_eps
end function
public static double code(double x, double eps) {
    // x * (2*eps) — reassociated first-order Taylor term in eps.
    final double doubledEps = eps * 2.0;
    return x * doubledEps;
}
def code(x, eps):
    """x * (2*eps) — reassociated first-order Taylor term in eps."""
    doubled_eps = eps * 2.0
    return x * doubled_eps
function code(x, eps)
    # x * (2*eps) — reassociated first-order Taylor term in eps.
    doubled_eps = Float64(eps * 2.0)
    return Float64(x * doubled_eps)
end
function tmp = code(x, eps)
  % x * (2*eps) — reassociated first-order Taylor term in eps.
  doubled_eps = eps * 2.0;
  tmp = x * doubled_eps;
end
(* x * (2*eps) — reassociated first-order Taylor term in eps. *)
code[x_, eps_] := N[(x * N[(eps * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(\varepsilon \cdot 2\right)
\end{array}
Initial program 76.9%
+-commutative 76.9%
unpow2 76.9%
unpow2 76.9%
difference-of-squares 77.0%
*-commutative 77.0%
associate--l+ 95.8%
+-inverses 95.8%
+-rgt-identity 95.8%
associate-+l+ 95.8%
*-lft-identity 95.8%
metadata-eval 95.8%
distribute-rgt-out 95.8%
distribute-lft-out 95.8%
metadata-eval 95.8%
metadata-eval 95.8%
metadata-eval 95.8%
Simplified 95.8%
Taylor expanded in eps around 0 56.8%
associate-*r* 56.8%
Simplified 56.8%
Final simplification 56.8%
herbie shell --seed 2024062
; Full benchmark spec: f(x, eps) = (x + eps)^2 - x^2 in binary64,
; with |x| <= 1e9 and |eps| <= 1 per the precondition below.
(FPCore (x eps)
:name "ENA, Section 1.4, Exercise 4b, n=2"
:precision binary64
:pre (and (and (<= -1000000000.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))
(- (pow (+ x eps) 2.0) (pow x 2.0)))