
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 2.0) (pow x 2.0)))
double code(double x, double eps) {
return pow((x + eps), 2.0) - pow(x, 2.0);
}
! (x + eps)**2 - x**2, evaluated literally; cancels badly for small eps.
real(8) function code(x, eps)
  implicit none
  real(8), intent(in) :: x
  real(8), intent(in) :: eps
  real(8) :: shifted_sq, base_sq
  shifted_sq = (x + eps)**2.0d0
  base_sq = x**2.0d0
  code = shifted_sq - base_sq
end function code
public static double code(double x, double eps) {
    // (x + eps)^2 - x^2 computed directly; cancels when |eps| << |x|.
    double shiftedSq = Math.pow(x + eps, 2.0);
    double baseSq = Math.pow(x, 2.0);
    return shiftedSq - baseSq;
}
def code(x, eps):
    """Return (x + eps)**2 - x**2, evaluated literally via math.pow.

    Loses precision to cancellation when abs(eps) is much smaller than abs(x).
    """
    shifted_sq = math.pow(x + eps, 2.0)
    base_sq = math.pow(x, 2.0)
    return shifted_sq - base_sq
# (x + eps)^2 - x^2 evaluated literally in Float64; cancels for small eps.
function code(x, eps)
    shifted_sq = Float64(x + eps)^2.0
    base_sq = x^2.0
    return Float64(shifted_sq - base_sq)
end
% (x + eps)^2 - x^2 evaluated literally; cancels for small eps.
function tmp = code(x, eps)
  shifted_sq = (x + eps) ^ 2.0;
  base_sq = x ^ 2.0;
  tmp = shifted_sq - base_sq;
end
(* (x + eps)^2 - x^2 at $MachinePrecision; cancels badly when |eps| << |x|. *)
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(x + \varepsilon\right)}^{2} - {x}^{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 2.0) (pow x 2.0)))
double code(double x, double eps) {
return pow((x + eps), 2.0) - pow(x, 2.0);
}
! (x + eps)**2 - x**2, evaluated literally; cancels badly for small eps.
real(8) function code(x, eps)
  implicit none
  real(8), intent(in) :: x
  real(8), intent(in) :: eps
  real(8) :: shifted_sq, base_sq
  shifted_sq = (x + eps)**2.0d0
  base_sq = x**2.0d0
  code = shifted_sq - base_sq
end function code
public static double code(double x, double eps) {
    // (x + eps)^2 - x^2 computed directly; cancels when |eps| << |x|.
    double shiftedSq = Math.pow(x + eps, 2.0);
    double baseSq = Math.pow(x, 2.0);
    return shiftedSq - baseSq;
}
def code(x, eps):
    """Return (x + eps)**2 - x**2, evaluated literally via math.pow.

    Loses precision to cancellation when abs(eps) is much smaller than abs(x).
    """
    shifted_sq = math.pow(x + eps, 2.0)
    base_sq = math.pow(x, 2.0)
    return shifted_sq - base_sq
# (x + eps)^2 - x^2 evaluated literally in Float64; cancels for small eps.
function code(x, eps)
    shifted_sq = Float64(x + eps)^2.0
    base_sq = x^2.0
    return Float64(shifted_sq - base_sq)
end
% (x + eps)^2 - x^2 evaluated literally; cancels for small eps.
function tmp = code(x, eps)
  shifted_sq = (x + eps) ^ 2.0;
  base_sq = x ^ 2.0;
  tmp = shifted_sq - base_sq;
end
(* (x + eps)^2 - x^2 at $MachinePrecision; cancels badly when |eps| << |x|. *)
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(x + \varepsilon\right)}^{2} - {x}^{2}
\end{array}
(FPCore (x eps) :precision binary64 (* eps (+ eps (* x 2.0))))
double code(double x, double eps) {
    /* eps * (eps + 2x): algebraic rewrite of (x+eps)^2 - x^2 that
     * avoids the subtractive cancellation of the direct form. */
    double twice_x = x * 2.0;
    return eps * (eps + twice_x);
}
! eps * (eps + 2x): cancellation-free rewrite of (x + eps)**2 - x**2.
real(8) function code(x, eps)
  implicit none
  real(8), intent(in) :: x
  real(8), intent(in) :: eps
  real(8) :: twice_x
  twice_x = x * 2.0d0
  code = eps * (eps + twice_x)
end function code
public static double code(double x, double eps) {
    // eps * (eps + 2x): cancellation-free rewrite of (x+eps)^2 - x^2.
    double twiceX = x * 2.0;
    return eps * (eps + twiceX);
}
def code(x, eps):
    """Return eps * (eps + 2*x), a cancellation-free form of (x+eps)**2 - x**2."""
    twice_x = x * 2.0
    return eps * (eps + twice_x)
# eps * (eps + 2x): cancellation-free rewrite of (x + eps)^2 - x^2.
function code(x, eps)
    twice_x = Float64(x * 2.0)
    inner = Float64(eps + twice_x)
    return Float64(eps * inner)
end
% eps * (eps + 2x): cancellation-free rewrite of (x + eps)^2 - x^2.
function tmp = code(x, eps)
  twice_x = x * 2.0;
  tmp = eps * (eps + twice_x);
end
(* eps*(eps + 2 x): cancellation-free rewrite of (x + eps)^2 - x^2. *)
code[x_, eps_] := N[(eps * N[(eps + N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon + x \cdot 2\right)
\end{array}
Initial program 68.8%
+-commutative 68.8%
unpow2 68.8%
unpow2 68.8%
difference-of-squares 68.8%
sub-neg 68.8%
distribute-lft-in 68.8%
+-commutative 68.8%
distribute-lft-in 68.8%
+-commutative 68.8%
sub-neg 68.8%
associate--l+ 100.0%
+-inverses 100.0%
+-rgt-identity 100.0%
*-commutative 100.0%
associate-+l+ 100.0%
count-2 100.0%
*-commutative 100.0%
Simplified 100.0%
Final simplification 100.0%
(FPCore (x eps) :precision binary64 (* 2.0 (* eps x)))
double code(double x, double eps) {
    /* 2 * (eps * x): first-order Taylor form in eps (drops the eps^2 term). */
    double prod = eps * x;
    return 2.0 * prod;
}
! 2 * (eps * x): first-order Taylor form in eps (drops the eps**2 term).
real(8) function code(x, eps)
  implicit none
  real(8), intent(in) :: x
  real(8), intent(in) :: eps
  real(8) :: prod
  prod = eps * x
  code = 2.0d0 * prod
end function code
public static double code(double x, double eps) {
    // 2 * (eps * x): first-order Taylor form in eps (drops the eps^2 term).
    double prod = eps * x;
    return 2.0 * prod;
}
def code(x, eps):
    """Return 2.0 * (eps * x), the first-order Taylor form in eps."""
    prod = eps * x
    return 2.0 * prod
# 2 * (eps * x): first-order Taylor form in eps.
function code(x, eps)
    prod = Float64(eps * x)
    return Float64(2.0 * prod)
end
% 2 * (eps * x): first-order Taylor form in eps.
function tmp = code(x, eps)
  prod = eps * x;
  tmp = 2.0 * prod;
end
(* 2*(eps*x): first-order Taylor approximation in eps of (x + eps)^2 - x^2. *)
code[x_, eps_] := N[(2.0 * N[(eps * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \left(\varepsilon \cdot x\right)
\end{array}
Initial program 68.8%
+-commutative 68.8%
unpow2 68.8%
unpow2 68.8%
difference-of-squares 68.8%
sub-neg 68.8%
distribute-lft-in 68.8%
+-commutative 68.8%
distribute-lft-in 68.8%
+-commutative 68.8%
sub-neg 68.8%
associate--l+ 100.0%
+-inverses 100.0%
+-rgt-identity 100.0%
*-commutative 100.0%
associate-+l+ 100.0%
count-2 100.0%
*-commutative 100.0%
Simplified 100.0%
Taylor expanded in eps around 0 67.8%
Final simplification 67.8%
(FPCore (x eps) :precision binary64 (* eps (* x 2.0)))
double code(double x, double eps) {
    /* eps * (2x): first-order Taylor form in eps (drops the eps^2 term). */
    double twice_x = x * 2.0;
    return eps * twice_x;
}
! eps * (2x): first-order Taylor form in eps (drops the eps**2 term).
real(8) function code(x, eps)
  implicit none
  real(8), intent(in) :: x
  real(8), intent(in) :: eps
  real(8) :: twice_x
  twice_x = x * 2.0d0
  code = eps * twice_x
end function code
public static double code(double x, double eps) {
    // eps * (2x): first-order Taylor form in eps (drops the eps^2 term).
    double twiceX = x * 2.0;
    return eps * twiceX;
}
def code(x, eps):
    """Return eps * (x * 2.0), the first-order Taylor form in eps."""
    twice_x = x * 2.0
    return eps * twice_x
# eps * (2x): first-order Taylor form in eps.
function code(x, eps)
    twice_x = Float64(x * 2.0)
    return Float64(eps * twice_x)
end
% eps * (2x): first-order Taylor form in eps.
function tmp = code(x, eps)
  twice_x = x * 2.0;
  tmp = eps * twice_x;
end
(* eps*(x*2): first-order Taylor approximation in eps of (x + eps)^2 - x^2. *)
code[x_, eps_] := N[(eps * N[(x * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(x \cdot 2\right)
\end{array}
Initial program 68.8%
+-commutative 68.8%
unpow2 68.8%
unpow2 68.8%
difference-of-squares 68.8%
sub-neg 68.8%
distribute-lft-in 68.8%
+-commutative 68.8%
distribute-lft-in 68.8%
+-commutative 68.8%
sub-neg 68.8%
associate--l+ 100.0%
+-inverses 100.0%
+-rgt-identity 100.0%
*-commutative 100.0%
associate-+l+ 100.0%
count-2 100.0%
*-commutative 100.0%
Simplified 100.0%
Taylor expanded in eps around 0 67.8%
*-commutative 67.8%
associate-*r* 67.8%
*-commutative 67.8%
Simplified 67.8%
Final simplification 67.8%
herbie shell --seed 2024079
(FPCore (x eps)
:name "ENA, Section 1.4, Exercise 4b, n=2"
:precision binary64
:pre (and (and (<= -1000000000.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))
(- (pow (+ x eps) 2.0) (pow x 2.0)))