?

Average Accuracy: 88.0% → 98.2%
Time: 13.6s
Precision: binary64
Cost: 39880

?

\[\left(-1000000000 \leq x \land x \leq 1000000000\right) \land \left(-1 \leq \varepsilon \land \varepsilon \leq 1\right)\]
\[{\left(x + \varepsilon\right)}^{5} - {x}^{5} \]
\[\begin{array}{l} t_0 := {\left(x + \varepsilon\right)}^{5} - {x}^{5}\\ \mathbf{if}\;t_0 \leq -2 \cdot 10^{-280}:\\ \;\;\;\;\left({\varepsilon}^{5} + {\varepsilon}^{4} \cdot \left(x \cdot 5\right)\right) + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot 10\right)\\ \mathbf{elif}\;t_0 \leq 0:\\ \;\;\;\;\varepsilon \cdot \left(5 \cdot {x}^{4}\right)\\ \mathbf{else}:\\ \;\;\;\;t_0\\ \end{array} \]
; Direct binary64 evaluation of (x + eps)^5 - x^5; cancellation-prone when eps is small relative to x.
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 5.0) (pow x 5.0)))
; Rewritten form of (x + eps)^5 - x^5: branches on the direct difference t_0 and
; substitutes a truncated binomial series (large negative t_0) or the leading
; derivative term 5*x^4*eps (tiny non-positive t_0) where the subtraction cancels.
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (- (pow (+ x eps) 5.0) (pow x 5.0))))
   (if (<= t_0 -2e-280)
     (+
      (+ (pow eps 5.0) (* (pow eps 4.0) (* x 5.0)))
      (* (pow eps 3.0) (* (* x x) 10.0)))
     (if (<= t_0 0.0) (* eps (* 5.0 (pow x 4.0))) t_0))))
double code(double x, double eps) {
	return pow((x + eps), 5.0) - pow(x, 5.0);
}
double code(double x, double eps) {
	double t_0 = pow((x + eps), 5.0) - pow(x, 5.0);
	double tmp;
	if (t_0 <= -2e-280) {
		tmp = (pow(eps, 5.0) + (pow(eps, 4.0) * (x * 5.0))) + (pow(eps, 3.0) * ((x * x) * 10.0));
	} else if (t_0 <= 0.0) {
		tmp = eps * (5.0 * pow(x, 4.0));
	} else {
		tmp = t_0;
	}
	return tmp;
}
! Direct evaluation of (x + eps)**5 - x**5 (cancellation-prone for small eps).
! Uses INTEGER exponents: a negative real base raised to a REAL exponent is
! invalid Fortran (it is evaluated via exp(y*log(x))), and the stated input
! domain allows x and x+eps to be negative.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = ((x + eps) ** 5) - (x ** 5)
end function
! Accurate evaluation of (x + eps)**5 - x**5, switching to a truncated
! binomial series / leading derivative term when the direct form cancels.
! Uses INTEGER exponents throughout: a negative real base raised to a REAL
! exponent is invalid Fortran, and both x and eps may be negative in the
! stated input domain.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    t_0 = ((x + eps) ** 5) - (x ** 5)
    if (t_0 <= (-2d-280)) then
        ! Cancellation region: truncated binomial expansion in eps.
        tmp = ((eps ** 5) + ((eps ** 4) * (x * 5.0d0))) + ((eps ** 3) * ((x * x) * 10.0d0))
    else if (t_0 <= 0.0d0) then
        ! Tiny magnitude: leading derivative term 5*x**4*eps.
        tmp = eps * (5.0d0 * (x ** 4))
    else
        tmp = t_0
    end if
    code = tmp
end function
/**
 * Direct evaluation of (x + eps)^5 - x^5.
 * Subject to catastrophic cancellation when eps is small relative to x.
 */
public static double code(double x, double eps) {
	final double shifted = Math.pow(x + eps, 5.0);
	final double base = Math.pow(x, 5.0);
	return shifted - base;
}
/**
 * Accurate evaluation of (x + eps)^5 - x^5.
 * Falls back to a truncated binomial series or the leading derivative
 * term when the direct subtraction cancels (non-positive result).
 */
public static double code(double x, double eps) {
	final double direct = Math.pow(x + eps, 5.0) - Math.pow(x, 5.0);
	if (direct <= -2e-280) {
		// Cancellation region: truncated binomial expansion in eps.
		return (Math.pow(eps, 5.0) + Math.pow(eps, 4.0) * (x * 5.0))
				+ Math.pow(eps, 3.0) * ((x * x) * 10.0);
	}
	if (direct <= 0.0) {
		// Tiny magnitude: leading derivative term 5*x^4*eps.
		return eps * (5.0 * Math.pow(x, 4.0));
	}
	return direct;
}
def code(x, eps):
	"""Directly evaluate (x + eps)**5 - x**5.

	Cancellation-prone when eps is small relative to x.
	"""
	shifted = math.pow(x + eps, 5.0)
	base = math.pow(x, 5.0)
	return shifted - base
def code(x, eps):
	"""Accurately evaluate (x + eps)**5 - x**5.

	Uses the direct form when it is positive, the leading derivative
	term (5*x**4*eps) when it is tiny and non-positive, and a truncated
	binomial series when the subtraction cancels badly.
	"""
	direct = math.pow(x + eps, 5.0) - math.pow(x, 5.0)
	if direct <= -2e-280:
		# Cancellation region: truncated binomial expansion in eps.
		return (math.pow(eps, 5.0) + (math.pow(eps, 4.0) * (x * 5.0))) + (math.pow(eps, 3.0) * ((x * x) * 10.0))
	if direct <= 0.0:
		# Tiny magnitude: leading derivative term.
		return eps * (5.0 * math.pow(x, 4.0))
	return direct
# Direct evaluation of (x + eps)^5 - x^5 (cancellation-prone for small eps).
function code(x, eps)
	shifted = Float64(x + eps) ^ 5.0
	base = x ^ 5.0
	return Float64(shifted - base)
end
# Accurate evaluation of (x + eps)^5 - x^5, with fallbacks for the
# cancellation-prone (non-positive) side.
function code(x, eps)
	direct = Float64((Float64(x + eps) ^ 5.0) - (x ^ 5.0))
	if direct <= -2e-280
		# Cancellation region: truncated binomial expansion in eps.
		return Float64(Float64((eps ^ 5.0) + Float64((eps ^ 4.0) * Float64(x * 5.0))) + Float64((eps ^ 3.0) * Float64(Float64(x * x) * 10.0)))
	elseif direct <= 0.0
		# Tiny magnitude: leading derivative term 5*x^4*eps.
		return Float64(eps * Float64(5.0 * (x ^ 4.0)))
	end
	return direct
end
% Direct evaluation of (x + eps)^5 - x^5 (cancellation-prone for small eps).
function out = code(x, eps)
	shifted = (x + eps) ^ 5.0;
	base = x ^ 5.0;
	out = shifted - base;
end
% Accurate evaluation of (x + eps)^5 - x^5, with fallbacks for the
% cancellation-prone (non-positive) side.
function result = code(x, eps)
	direct = ((x + eps) ^ 5.0) - (x ^ 5.0);
	if (direct <= -2e-280)
		% Cancellation region: truncated binomial expansion in eps.
		result = ((eps ^ 5.0) + ((eps ^ 4.0) * (x * 5.0))) + ((eps ^ 3.0) * ((x * x) * 10.0));
	elseif (direct <= 0.0)
		% Tiny magnitude: leading derivative term 5*x^4*eps.
		result = eps * (5.0 * (x ^ 4.0));
	else
		result = direct;
	end
end
(* Direct machine-precision evaluation of (x + eps)^5 - x^5; cancellation-prone when eps is small relative to x. *)
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]
(* Branching rewrite of (x + eps)^5 - x^5: truncated binomial series for the
   cancellation region, leading derivative term 5 x^4 eps for tiny non-positive
   results, direct form otherwise.
   Fix: the threshold was written "-2e-280", which Wolfram Language parses as
   -2*e - 280 with a symbolic "e", so the LessEqual never evaluated numerically;
   the correct machine-number literal is -2*^-280. *)
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2*^-280], N[(N[(N[Power[eps, 5.0], $MachinePrecision] + N[(N[Power[eps, 4.0], $MachinePrecision] * N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[Power[eps, 3.0], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 10.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$0, 0.0], N[(eps * N[(5.0 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$0]]]
{\left(x + \varepsilon\right)}^{5} - {x}^{5}
\begin{array}{l}
t_0 := {\left(x + \varepsilon\right)}^{5} - {x}^{5}\\
\mathbf{if}\;t_0 \leq -2 \cdot 10^{-280}:\\
\;\;\;\;\left({\varepsilon}^{5} + {\varepsilon}^{4} \cdot \left(x \cdot 5\right)\right) + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot 10\right)\\

\mathbf{elif}\;t_0 \leq 0:\\
\;\;\;\;\varepsilon \cdot \left(5 \cdot {x}^{4}\right)\\

\mathbf{else}:\\
\;\;\;\;t_0\\


\end{array}

Error?

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation?

  1. Split input into 3 regimes
  2. if (-.f64 (pow.f64 (+.f64 x eps) 5) (pow.f64 x 5)) < -1.9999999999999999e-280

    1. Initial program 96.9%

      \[{\left(x + \varepsilon\right)}^{5} - {x}^{5} \]
    2. Taylor expanded in eps around inf 92.3%

      \[\leadsto \color{blue}{{\varepsilon}^{4} \cdot \left(4 \cdot x + x\right) + \left({\varepsilon}^{5} + {\varepsilon}^{3} \cdot \left(2 \cdot {x}^{2} + 8 \cdot {x}^{2}\right)\right)} \]
    3. Simplified 92.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left({\varepsilon}^{4}, 5 \cdot x, {\varepsilon}^{5}\right) + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot 10\right)} \]
      Proof

      [Start]92.3

      \[ {\varepsilon}^{4} \cdot \left(4 \cdot x + x\right) + \left({\varepsilon}^{5} + {\varepsilon}^{3} \cdot \left(2 \cdot {x}^{2} + 8 \cdot {x}^{2}\right)\right) \]

      associate-+r+ [=>]92.3

      \[ \color{blue}{\left({\varepsilon}^{4} \cdot \left(4 \cdot x + x\right) + {\varepsilon}^{5}\right) + {\varepsilon}^{3} \cdot \left(2 \cdot {x}^{2} + 8 \cdot {x}^{2}\right)} \]

      fma-def [=>]92.3

      \[ \color{blue}{\mathsf{fma}\left({\varepsilon}^{4}, 4 \cdot x + x, {\varepsilon}^{5}\right)} + {\varepsilon}^{3} \cdot \left(2 \cdot {x}^{2} + 8 \cdot {x}^{2}\right) \]

      distribute-lft1-in [=>]92.3

      \[ \mathsf{fma}\left({\varepsilon}^{4}, \color{blue}{\left(4 + 1\right) \cdot x}, {\varepsilon}^{5}\right) + {\varepsilon}^{3} \cdot \left(2 \cdot {x}^{2} + 8 \cdot {x}^{2}\right) \]

      metadata-eval [=>]92.3

      \[ \mathsf{fma}\left({\varepsilon}^{4}, \color{blue}{5} \cdot x, {\varepsilon}^{5}\right) + {\varepsilon}^{3} \cdot \left(2 \cdot {x}^{2} + 8 \cdot {x}^{2}\right) \]

      distribute-rgt-out [=>]92.3

      \[ \mathsf{fma}\left({\varepsilon}^{4}, 5 \cdot x, {\varepsilon}^{5}\right) + {\varepsilon}^{3} \cdot \color{blue}{\left({x}^{2} \cdot \left(2 + 8\right)\right)} \]

      unpow2 [=>]92.3

      \[ \mathsf{fma}\left({\varepsilon}^{4}, 5 \cdot x, {\varepsilon}^{5}\right) + {\varepsilon}^{3} \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \left(2 + 8\right)\right) \]

      metadata-eval [=>]92.3

      \[ \mathsf{fma}\left({\varepsilon}^{4}, 5 \cdot x, {\varepsilon}^{5}\right) + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{10}\right) \]
    4. Applied egg-rr 92.3%

      \[\leadsto \color{blue}{\left({\varepsilon}^{5} + {\varepsilon}^{4} \cdot \left(x \cdot 5\right)\right)} + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot 10\right) \]
      Proof

      [Start]92.3

      \[ \mathsf{fma}\left({\varepsilon}^{4}, 5 \cdot x, {\varepsilon}^{5}\right) + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot 10\right) \]

      fma-udef [=>]92.3

      \[ \color{blue}{\left({\varepsilon}^{4} \cdot \left(5 \cdot x\right) + {\varepsilon}^{5}\right)} + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot 10\right) \]

      +-commutative [=>]92.3

      \[ \color{blue}{\left({\varepsilon}^{5} + {\varepsilon}^{4} \cdot \left(5 \cdot x\right)\right)} + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot 10\right) \]

      *-commutative [=>]92.3

      \[ \left({\varepsilon}^{5} + {\varepsilon}^{4} \cdot \color{blue}{\left(x \cdot 5\right)}\right) + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot 10\right) \]

    if -1.9999999999999999e-280 < (-.f64 (pow.f64 (+.f64 x eps) 5) (pow.f64 x 5)) < 0.0

    1. Initial program 86.1%

      \[{\left(x + \varepsilon\right)}^{5} - {x}^{5} \]
    2. Taylor expanded in x around inf 98.9%

      \[\leadsto \color{blue}{\left(4 \cdot \varepsilon + \varepsilon\right) \cdot {x}^{4} + \left(2 \cdot {\varepsilon}^{2} + 8 \cdot {\varepsilon}^{2}\right) \cdot {x}^{3}} \]
    3. Simplified 98.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon \cdot 5, {x}^{4}, \left(\varepsilon \cdot \left(\varepsilon \cdot 10\right)\right) \cdot {x}^{3}\right)} \]
      Proof

      [Start]98.9

      \[ \left(4 \cdot \varepsilon + \varepsilon\right) \cdot {x}^{4} + \left(2 \cdot {\varepsilon}^{2} + 8 \cdot {\varepsilon}^{2}\right) \cdot {x}^{3} \]

      fma-def [=>]98.9

      \[ \color{blue}{\mathsf{fma}\left(4 \cdot \varepsilon + \varepsilon, {x}^{4}, \left(2 \cdot {\varepsilon}^{2} + 8 \cdot {\varepsilon}^{2}\right) \cdot {x}^{3}\right)} \]

      distribute-lft1-in [=>]98.9

      \[ \mathsf{fma}\left(\color{blue}{\left(4 + 1\right) \cdot \varepsilon}, {x}^{4}, \left(2 \cdot {\varepsilon}^{2} + 8 \cdot {\varepsilon}^{2}\right) \cdot {x}^{3}\right) \]

      metadata-eval [=>]98.9

      \[ \mathsf{fma}\left(\color{blue}{5} \cdot \varepsilon, {x}^{4}, \left(2 \cdot {\varepsilon}^{2} + 8 \cdot {\varepsilon}^{2}\right) \cdot {x}^{3}\right) \]

      *-commutative [=>]98.9

      \[ \mathsf{fma}\left(\color{blue}{\varepsilon \cdot 5}, {x}^{4}, \left(2 \cdot {\varepsilon}^{2} + 8 \cdot {\varepsilon}^{2}\right) \cdot {x}^{3}\right) \]

      distribute-rgt-out [=>]98.9

      \[ \mathsf{fma}\left(\varepsilon \cdot 5, {x}^{4}, \color{blue}{\left({\varepsilon}^{2} \cdot \left(2 + 8\right)\right)} \cdot {x}^{3}\right) \]

      unpow2 [=>]98.9

      \[ \mathsf{fma}\left(\varepsilon \cdot 5, {x}^{4}, \left(\color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot \left(2 + 8\right)\right) \cdot {x}^{3}\right) \]

      metadata-eval [=>]98.9

      \[ \mathsf{fma}\left(\varepsilon \cdot 5, {x}^{4}, \left(\left(\varepsilon \cdot \varepsilon\right) \cdot \color{blue}{10}\right) \cdot {x}^{3}\right) \]

      associate-*l* [=>]98.9

      \[ \mathsf{fma}\left(\varepsilon \cdot 5, {x}^{4}, \color{blue}{\left(\varepsilon \cdot \left(\varepsilon \cdot 10\right)\right)} \cdot {x}^{3}\right) \]
    4. Taylor expanded in eps around 0 98.9%

      \[\leadsto \color{blue}{5 \cdot \left(\varepsilon \cdot {x}^{4}\right)} \]
    5. Simplified 98.9%

      \[\leadsto \color{blue}{\varepsilon \cdot \left(5 \cdot {x}^{4}\right)} \]
      Proof

      [Start]98.9

      \[ 5 \cdot \left(\varepsilon \cdot {x}^{4}\right) \]

      associate-*r* [=>]98.9

      \[ \color{blue}{\left(5 \cdot \varepsilon\right) \cdot {x}^{4}} \]

      *-commutative [<=]98.9

      \[ \color{blue}{\left(\varepsilon \cdot 5\right)} \cdot {x}^{4} \]

      associate-*r* [<=]98.9

      \[ \color{blue}{\varepsilon \cdot \left(5 \cdot {x}^{4}\right)} \]

    if 0.0 < (-.f64 (pow.f64 (+.f64 x eps) 5) (pow.f64 x 5))

    1. Initial program 97.0%

      \[{\left(x + \varepsilon\right)}^{5} - {x}^{5} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 98.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;{\left(x + \varepsilon\right)}^{5} - {x}^{5} \leq -2 \cdot 10^{-280}:\\ \;\;\;\;\left({\varepsilon}^{5} + {\varepsilon}^{4} \cdot \left(x \cdot 5\right)\right) + {\varepsilon}^{3} \cdot \left(\left(x \cdot x\right) \cdot 10\right)\\ \mathbf{elif}\;{\left(x + \varepsilon\right)}^{5} - {x}^{5} \leq 0:\\ \;\;\;\;\varepsilon \cdot \left(5 \cdot {x}^{4}\right)\\ \mathbf{else}:\\ \;\;\;\;{\left(x + \varepsilon\right)}^{5} - {x}^{5}\\ \end{array} \]

Alternatives

Alternative 1
Accuracy: 98.6%
Cost: 39881
\[\begin{array}{l} t_0 := {\left(x + \varepsilon\right)}^{5} - {x}^{5}\\ \mathbf{if}\;t_0 \leq -2 \cdot 10^{-280} \lor \neg \left(t_0 \leq 0\right):\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;\varepsilon \cdot \left(5 \cdot {x}^{4}\right)\\ \end{array} \]
Alternative 2
Accuracy: 96.4%
Cost: 7048
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -7 \cdot 10^{-61}:\\ \;\;\;\;{\varepsilon}^{5}\\ \mathbf{elif}\;\varepsilon \leq 1.45 \cdot 10^{-70}:\\ \;\;\;\;\varepsilon \cdot \left(5 \cdot {x}^{4}\right)\\ \mathbf{else}:\\ \;\;\;\;{\varepsilon}^{5}\\ \end{array} \]
Alternative 3
Accuracy: 96.3%
Cost: 6792
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -7 \cdot 10^{-61}:\\ \;\;\;\;{\varepsilon}^{5}\\ \mathbf{elif}\;\varepsilon \leq 1.45 \cdot 10^{-70}:\\ \;\;\;\;\varepsilon \cdot \left(5 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;{\varepsilon}^{5}\\ \end{array} \]
Alternative 4
Accuracy: 83.3%
Cost: 1216
\[\left(x \cdot x\right) \cdot \left(10 \cdot \left(x \cdot \left(\varepsilon \cdot \varepsilon\right)\right) + \left(x \cdot x\right) \cdot \left(\varepsilon \cdot 5\right)\right) \]
Alternative 5
Accuracy: 83.3%
Cost: 1216
\[\left(x \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 10\right)\right) + \varepsilon \cdot \left(x \cdot \left(x \cdot 5\right)\right)\right) \]
Alternative 6
Accuracy: 83.3%
Cost: 1216
\[\frac{x \cdot x}{\frac{1}{\varepsilon \cdot \left(5 \cdot \left(x \cdot x\right) + \varepsilon \cdot \left(x \cdot 10\right)\right)}} \]
Alternative 7
Accuracy: 83.0%
Cost: 704
\[5 \cdot \left(\varepsilon \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) \]
Alternative 8
Accuracy: 83.0%
Cost: 704
\[\varepsilon \cdot \left(5 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) \]

Error

Reproduce?

herbie shell --seed 2023126 
; Input specification fed to Herbie: (x + eps)^5 - x^5 in binary64,
; with x in [-1e9, 1e9] and eps in [-1, 1].
(FPCore (x eps)
  :name "ENA, Section 1.4, Exercise 4b, n=5"
  :precision binary64
  :pre (and (and (<= -1000000000.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))
  (- (pow (+ x eps) 5.0) (pow x 5.0)))