
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 2.0) (pow x 2.0)))
double code(double x, double eps) {
return pow((x + eps), 2.0) - pow(x, 2.0);
}
real(8) function code(x, eps)
! Naive form: (x + eps)**2 - x**2 in double precision.
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: shifted
shifted = (x + eps) ** 2.0d0
code = shifted - (x ** 2.0d0)
end function
public static double code(double x, double eps) {
    // Naive form: (x + eps)^2 - x^2, evaluated directly in binary64.
    final double shifted = Math.pow(x + eps, 2.0);
    final double base = Math.pow(x, 2.0);
    return shifted - base;
}
def code(x, eps):
    """Naive form: (x + eps)**2 - x**2 via math.pow."""
    shifted = math.pow(x + eps, 2.0)
    base = math.pow(x, 2.0)
    return shifted - base
function code(x, eps)
    # Naive form: (x + eps)^2 - x^2, evaluated in binary64.
    shifted = Float64(x + eps)
    return Float64(shifted ^ 2.0 - x ^ 2.0)
end
function tmp = code(x, eps)
    % Naive form: (x + eps)^2 - x^2.
    shifted = (x + eps) ^ 2.0;
    tmp = shifted - x ^ 2.0;
end
(* Naive form: (x + eps)^2 - x^2, each intermediate rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(x + \varepsilon\right)}^{2} - {x}^{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 2.0) (pow x 2.0)))
double code(double x, double eps) {
return pow((x + eps), 2.0) - pow(x, 2.0);
}
real(8) function code(x, eps)
! Naive form: (x + eps)**2 - x**2 in double precision.
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: shifted
shifted = (x + eps) ** 2.0d0
code = shifted - (x ** 2.0d0)
end function
public static double code(double x, double eps) {
    // Naive form: (x + eps)^2 - x^2, evaluated directly in binary64.
    final double shifted = Math.pow(x + eps, 2.0);
    final double base = Math.pow(x, 2.0);
    return shifted - base;
}
def code(x, eps):
    """Naive form: (x + eps)**2 - x**2 via math.pow."""
    shifted = math.pow(x + eps, 2.0)
    base = math.pow(x, 2.0)
    return shifted - base
function code(x, eps)
    # Naive form: (x + eps)^2 - x^2, evaluated in binary64.
    shifted = Float64(x + eps)
    return Float64(shifted ^ 2.0 - x ^ 2.0)
end
function tmp = code(x, eps)
    % Naive form: (x + eps)^2 - x^2.
    shifted = (x + eps) ^ 2.0;
    tmp = shifted - x ^ 2.0;
end
(* Naive form: (x + eps)^2 - x^2, each intermediate rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(x + \varepsilon\right)}^{2} - {x}^{2}
\end{array}
(FPCore (x eps) :precision binary64 (* eps (+ x (+ x eps))))
double code(double x, double eps) {
    /* Rearranged form of (x+eps)^2 - x^2: eps * (x + (x + eps)),
     * which avoids the catastrophic cancellation of the naive difference. */
    const double total = x + (x + eps);
    return eps * total;
}
real(8) function code(x, eps)
! Rearranged form of (x+eps)**2 - x**2: eps * (x + (x + eps)).
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: total
total = x + (x + eps)
code = eps * total
end function
public static double code(double x, double eps) {
    // Rearranged form of (x+eps)^2 - x^2: eps * (x + (x + eps)).
    final double total = x + (x + eps);
    return eps * total;
}
def code(x, eps):
    """Rearranged form of (x+eps)**2 - x**2: eps * (x + (x + eps))."""
    total = x + (x + eps)
    return eps * total
function code(x, eps)
    # Rearranged form of (x+eps)^2 - x^2: eps * (x + (x + eps)) in binary64.
    inner = Float64(x + eps)
    total = Float64(x + inner)
    return Float64(eps * total)
end
function tmp = code(x, eps)
    % Rearranged form of (x+eps)^2 - x^2: eps * (x + (x + eps)).
    total = x + (x + eps);
    tmp = eps * total;
end
(* Rearranged form of (x+eps)^2 - x^2: eps * (x + (x + eps)), rounded to $MachinePrecision. *)
code[x_, eps_] := N[(eps * N[(x + N[(x + eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(x + \left(x + \varepsilon\right)\right)
\end{array}
Initial program 79.1%
Taylor expanded in x around 0
associate-*r* N/A
*-commutative N/A
*-rgt-identity N/A
*-inverses N/A
associate-/l* N/A
associate-*r* N/A
*-commutative N/A
associate-*r* N/A
unpow2 N/A
associate-*l/ N/A
associate-*r/ N/A
*-commutative N/A
*-rgt-identity N/A
distribute-lft-in N/A
+-commutative N/A
unpow2 N/A
associate-*l* N/A
lower-*.f64 N/A
+-commutative N/A
distribute-rgt-in N/A
Applied rewrites100.0%
Applied rewrites100.0%
Final simplification100.0%
(FPCore (x eps) :precision binary64 (if (<= (- (pow (+ x eps) 2.0) (pow x 2.0)) 0.0) (* eps (+ x x)) (* eps eps)))
double code(double x, double eps) {
double tmp;
if ((pow((x + eps), 2.0) - pow(x, 2.0)) <= 0.0) {
tmp = eps * (x + x);
} else {
tmp = eps * eps;
}
return tmp;
}
real(8) function code(x, eps)
! Branch on the sign of the naive difference (x+eps)**2 - x**2.
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: naive
naive = ((x + eps) ** 2.0d0) - (x ** 2.0d0)
if (naive <= 0.0d0) then
    code = eps * (x + x)
else
    code = eps * eps
end if
end function
public static double code(double x, double eps) {
    // Branch on the sign of the naive difference (x+eps)^2 - x^2:
    // non-positive -> eps * 2x approximation, positive -> eps^2 term.
    final double naive = Math.pow(x + eps, 2.0) - Math.pow(x, 2.0);
    return (naive <= 0.0) ? eps * (x + x) : eps * eps;
}
def code(x, eps):
    """Branch on the sign of the naive (x+eps)**2 - x**2.

    Non-positive -> eps * 2x approximation; positive -> eps*eps term.
    """
    # The original report collapsed this if/else onto one line,
    # which is a Python SyntaxError; reformatted to valid code.
    tmp = 0
    if (math.pow((x + eps), 2.0) - math.pow(x, 2.0)) <= 0.0:
        tmp = eps * (x + x)
    else:
        tmp = eps * eps
    return tmp
function code(x, eps)
    # Branch on the sign of the naive difference (x+eps)^2 - x^2.
    # Reformatted: the report collapsed the statements onto one line
    # without separators, which does not parse.
    tmp = 0.0
    if (Float64((Float64(x + eps) ^ 2.0) - (x ^ 2.0)) <= 0.0)
        tmp = Float64(eps * Float64(x + x))
    else
        tmp = Float64(eps * eps)
    end
    return tmp
end
function tmp_2 = code(x, eps)
    % Branch on the sign of the naive difference (x+eps)^2 - x^2.
    % Reformatted: the report fused the declaration and body on one line.
    tmp = 0.0;
    if (((x + eps) ^ 2.0) - (x ^ 2.0)) <= 0.0
        tmp = eps * (x + x);
    else
        tmp = eps * eps;
    end
    tmp_2 = tmp;
end
(* Branch on the sign of the naive (x+eps)^2 - x^2: non-positive -> eps*(x+x), positive -> eps*eps. *)
code[x_, eps_] := If[LessEqual[N[(N[Power[N[(x + eps), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision], 0.0], N[(eps * N[(x + x), $MachinePrecision]), $MachinePrecision], N[(eps * eps), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;{\left(x + \varepsilon\right)}^{2} - {x}^{2} \leq 0:\\
\;\;\;\;\varepsilon \cdot \left(x + x\right)\\
\mathbf{else}:\\
\;\;\;\;\varepsilon \cdot \varepsilon\\
\end{array}
\end{array}
if (-.f64 (pow.f64 (+.f64 x eps) #s(literal 2 binary64)) (pow.f64 x #s(literal 2 binary64))) < 0.0 — Initial program 67.6%
Taylor expanded in x around inf
associate-*r* N/A
*-commutative N/A
lower-*.f64 N/A
lower-*.f64 98.7
Applied rewrites98.7%
Applied rewrites98.7%
if 0.0 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 2 binary64)) (pow.f64 x #s(literal 2 binary64))) Initial program 97.8%
Taylor expanded in x around 0
unpow2 N/A
lower-*.f64 93.3
Applied rewrites93.3%
Final simplification96.6%
(FPCore (x eps) :precision binary64 (* eps (fma x 2.0 eps)))
double code(double x, double eps) {
return eps * fma(x, 2.0, eps);
}
function code(x, eps)
    # eps * fma(x, 2, eps): 2*x + eps with a single rounding, in binary64.
    inner = fma(x, 2.0, eps)
    return Float64(eps * inner)
end
(* eps * (x*2 + eps), the fma-based alternative, rounded to $MachinePrecision. *)
code[x_, eps_] := N[(eps * N[(x * 2.0 + eps), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(x, 2, \varepsilon\right)
\end{array}
Initial program 79.1%
Taylor expanded in x around 0
associate-*r* N/A
*-commutative N/A
*-rgt-identity N/A
*-inverses N/A
associate-/l* N/A
associate-*r* N/A
*-commutative N/A
associate-*r* N/A
unpow2 N/A
associate-*l/ N/A
associate-*r/ N/A
*-commutative N/A
*-rgt-identity N/A
distribute-lft-in N/A
+-commutative N/A
unpow2 N/A
associate-*l* N/A
lower-*.f64 N/A
+-commutative N/A
distribute-rgt-in N/A
Applied rewrites100.0%
(FPCore (x eps) :precision binary64 (* eps eps))
double code(double x, double eps) {
    /* Series approximation keeping only the eps^2 term (x is unused). */
    const double square = eps * eps;
    return square;
}
real(8) function code(x, eps)
! Series approximation keeping only the eps**2 term (x is unused).
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: square
square = eps * eps
code = square
end function
public static double code(double x, double eps) {
    // Series approximation keeping only the eps^2 term (x is unused).
    final double square = eps * eps;
    return square;
}
def code(x, eps):
    """Series approximation keeping only the eps**2 term (x is unused)."""
    square = eps * eps
    return square
function code(x, eps)
    # Series approximation keeping only the eps^2 term (x is unused).
    square = Float64(eps * eps)
    return square
end
function tmp = code(x, eps)
    % Series approximation keeping only the eps^2 term (x is unused).
    square = eps * eps;
    tmp = square;
end
(* Series approximation keeping only the eps^2 term (x is unused). *)
code[x_, eps_] := N[(eps * eps), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \varepsilon
\end{array}
Initial program 79.1%
Taylor expanded in x around 0
unpow2 N/A
lower-*.f64 76.5
Applied rewrites76.5%
herbie shell --seed 2024233
(FPCore (x eps)
:name "ENA, Section 1.4, Exercise 4b, n=2"
:precision binary64
:pre (and (and (<= -1000000000.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))
(- (pow (+ x eps) 2.0) (pow x 2.0)))