Expression 1, p15

Percentage Accurate: 99.4% → 99.6%
Time: 9.4s
Alternatives: 8
Speedup: 1.0×

Specification

?
\[\left(\left(\left(\left(\left(\left(\left(\left(1 \leq a \land a \leq 2\right) \land 2 \leq b\right) \land b \leq 4\right) \land 4 \leq c\right) \land c \leq 8\right) \land 8 \leq d\right) \land d \leq 16\right) \land 16 \leq e\right) \land e \leq 32\]
\[\begin{array}{l} \\ \left(\left(\left(e + d\right) + c\right) + b\right) + a \end{array} \]
(FPCore (a b c d e) :precision binary64 (+ (+ (+ (+ e d) c) b) a))
double code(double a, double b, double c, double d, double e) {
	return (((e + d) + c) + b) + a;
}
real(8) function code(a, b, c, d, e)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8), intent (in) :: e
    code = (((e + d) + c) + b) + a
end function
public static double code(double a, double b, double c, double d, double e) {
	return (((e + d) + c) + b) + a;
}
def code(a, b, c, d, e):
	return (((e + d) + c) + b) + a
function code(a, b, c, d, e)
	return Float64(Float64(Float64(Float64(e + d) + c) + b) + a)
end
function tmp = code(a, b, c, d, e)
	tmp = (((e + d) + c) + b) + a;
end
code[a_, b_, c_, d_, e_] := N[(N[(N[(N[(e + d), $MachinePrecision] + c), $MachinePrecision] + b), $MachinePrecision] + a), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(e + d\right) + c\right) + b\right) + a
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternatives. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative.The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(e + d\right) + c\right) + b\right) + a \end{array} \]
(FPCore (a b c d e) :precision binary64 (+ (+ (+ (+ e d) c) b) a))
double code(double a, double b, double c, double d, double e) {
	return (((e + d) + c) + b) + a;
}
real(8) function code(a, b, c, d, e)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8), intent (in) :: e
    code = (((e + d) + c) + b) + a
end function
public static double code(double a, double b, double c, double d, double e) {
	return (((e + d) + c) + b) + a;
}
def code(a, b, c, d, e):
	return (((e + d) + c) + b) + a
function code(a, b, c, d, e)
	return Float64(Float64(Float64(Float64(e + d) + c) + b) + a)
end
function tmp = code(a, b, c, d, e)
	tmp = (((e + d) + c) + b) + a;
end
code[a_, b_, c_, d_, e_] := N[(N[(N[(N[(e + d), $MachinePrecision] + c), $MachinePrecision] + b), $MachinePrecision] + a), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(e + d\right) + c\right) + b\right) + a
\end{array}

Alternative 1: 99.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(e + d\right) + \left(\left(c + b\right) + a\right) \end{array} \]
(FPCore (a b c d e) :precision binary64 (+ (+ e d) (+ (+ c b) a)))
double code(double a, double b, double c, double d, double e) {
	return (e + d) + ((c + b) + a);
}
real(8) function code(a, b, c, d, e)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8), intent (in) :: e
    code = (e + d) + ((c + b) + a)
end function
public static double code(double a, double b, double c, double d, double e) {
	return (e + d) + ((c + b) + a);
}
def code(a, b, c, d, e):
	return (e + d) + ((c + b) + a)
function code(a, b, c, d, e)
	return Float64(Float64(e + d) + Float64(Float64(c + b) + a))
end
function tmp = code(a, b, c, d, e)
	tmp = (e + d) + ((c + b) + a);
end
code[a_, b_, c_, d_, e_] := N[(N[(e + d), $MachinePrecision] + N[(N[(c + b), $MachinePrecision] + a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(e + d\right) + \left(\left(c + b\right) + a\right)
\end{array}
Derivation
  1. Initial program 99.4%

    \[\left(\left(\left(e + d\right) + c\right) + b\right) + a \]
  2. Step-by-step derivation
    1. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(\left(e + d\right) + c\right) + b\right), \color{blue}{a}\right) \]
    2. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(e + d\right) + \left(c + b\right)\right), a\right) \]
    3. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(c + b\right) + \left(e + d\right)\right), a\right) \]
    4. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(c + \left(b + \left(e + d\right)\right)\right), a\right) \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(b + \left(e + d\right)\right)\right), a\right) \]
    6. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(\left(e + d\right) + b\right)\right), a\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\left(e + d\right), b\right)\right), a\right) \]
    8. +-lowering-+.f6499.4%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), b\right)\right), a\right) \]
  3. Simplified99.4%

    \[\leadsto \color{blue}{\left(c + \left(\left(e + d\right) + b\right)\right) + a} \]
  4. Add Preprocessing
  5. Step-by-step derivation
    1. associate-+r+N/A

      \[\leadsto \left(\left(c + \left(e + d\right)\right) + b\right) + a \]
    2. +-commutativeN/A

      \[\leadsto \left(\left(\left(e + d\right) + c\right) + b\right) + a \]
    3. associate-+l+N/A

      \[\leadsto \left(\left(e + d\right) + \left(c + b\right)\right) + a \]
    4. associate-+l+N/A

      \[\leadsto \left(e + d\right) + \color{blue}{\left(\left(c + b\right) + a\right)} \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(e + d\right), \color{blue}{\left(\left(c + b\right) + a\right)}\right) \]
    6. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), \left(\color{blue}{\left(c + b\right)} + a\right)\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), \mathsf{+.f64}\left(\left(c + b\right), \color{blue}{a}\right)\right) \]
    8. +-lowering-+.f6499.7%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, b\right), a\right)\right) \]
  6. Applied egg-rr99.7%

    \[\leadsto \color{blue}{\left(e + d\right) + \left(\left(c + b\right) + a\right)} \]
  7. Add Preprocessing

Alternative 2: 99.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(c + \left(e + b\right)\right) + \left(d + a\right) \end{array} \]
(FPCore (a b c d e) :precision binary64 (+ (+ c (+ e b)) (+ d a)))
double code(double a, double b, double c, double d, double e) {
	return (c + (e + b)) + (d + a);
}
real(8) function code(a, b, c, d, e)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8), intent (in) :: e
    code = (c + (e + b)) + (d + a)
end function
public static double code(double a, double b, double c, double d, double e) {
	return (c + (e + b)) + (d + a);
}
def code(a, b, c, d, e):
	return (c + (e + b)) + (d + a)
function code(a, b, c, d, e)
	return Float64(Float64(c + Float64(e + b)) + Float64(d + a))
end
function tmp = code(a, b, c, d, e)
	tmp = (c + (e + b)) + (d + a);
end
code[a_, b_, c_, d_, e_] := N[(N[(c + N[(e + b), $MachinePrecision]), $MachinePrecision] + N[(d + a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(c + \left(e + b\right)\right) + \left(d + a\right)
\end{array}
Derivation
  1. Initial program 99.4%

    \[\left(\left(\left(e + d\right) + c\right) + b\right) + a \]
  2. Step-by-step derivation
    1. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(\left(e + d\right) + c\right) + b\right), \color{blue}{a}\right) \]
    2. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(e + d\right) + \left(c + b\right)\right), a\right) \]
    3. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(c + b\right) + \left(e + d\right)\right), a\right) \]
    4. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(c + \left(b + \left(e + d\right)\right)\right), a\right) \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(b + \left(e + d\right)\right)\right), a\right) \]
    6. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(\left(e + d\right) + b\right)\right), a\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\left(e + d\right), b\right)\right), a\right) \]
    8. +-lowering-+.f6499.4%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), b\right)\right), a\right) \]
  3. Simplified99.4%

    \[\leadsto \color{blue}{\left(c + \left(\left(e + d\right) + b\right)\right) + a} \]
  4. Add Preprocessing
  5. Step-by-step derivation
    1. associate-+r+N/A

      \[\leadsto \left(\left(c + \left(e + d\right)\right) + b\right) + a \]
    2. +-commutativeN/A

      \[\leadsto \left(\left(\left(e + d\right) + c\right) + b\right) + a \]
    3. associate-+l+N/A

      \[\leadsto \left(\left(e + d\right) + \left(c + b\right)\right) + a \]
    4. associate-+l+N/A

      \[\leadsto \left(e + d\right) + \color{blue}{\left(\left(c + b\right) + a\right)} \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(e + d\right), \color{blue}{\left(\left(c + b\right) + a\right)}\right) \]
    6. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), \left(\color{blue}{\left(c + b\right)} + a\right)\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), \mathsf{+.f64}\left(\left(c + b\right), \color{blue}{a}\right)\right) \]
    8. +-lowering-+.f6499.7%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, b\right), a\right)\right) \]
  6. Applied egg-rr99.7%

    \[\leadsto \color{blue}{\left(e + d\right) + \left(\left(c + b\right) + a\right)} \]
  7. Step-by-step derivation
    1. associate-+r+N/A

      \[\leadsto \left(\left(e + d\right) + \left(c + b\right)\right) + \color{blue}{a} \]
    2. +-commutativeN/A

      \[\leadsto \left(\left(e + d\right) + \left(b + c\right)\right) + a \]
    3. +-commutativeN/A

      \[\leadsto \left(\left(d + e\right) + \left(b + c\right)\right) + a \]
    4. +-commutativeN/A

      \[\leadsto \left(\left(d + e\right) + \left(c + b\right)\right) + a \]
    5. associate-+l+N/A

      \[\leadsto \left(d + \left(e + \left(c + b\right)\right)\right) + a \]
    6. associate-+l+N/A

      \[\leadsto \left(d + \left(\left(e + c\right) + b\right)\right) + a \]
    7. +-commutativeN/A

      \[\leadsto \left(d + \left(b + \left(e + c\right)\right)\right) + a \]
    8. +-commutativeN/A

      \[\leadsto \left(\left(b + \left(e + c\right)\right) + d\right) + a \]
    9. associate-+l+N/A

      \[\leadsto \left(b + \left(e + c\right)\right) + \color{blue}{\left(d + a\right)} \]
    10. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(b + \left(e + c\right)\right), \color{blue}{\left(d + a\right)}\right) \]
    11. associate-+r+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(b + e\right) + c\right), \left(\color{blue}{d} + a\right)\right) \]
    12. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\left(c + \left(b + e\right)\right), \left(\color{blue}{d} + a\right)\right) \]
    13. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(b + e\right)\right), \left(\color{blue}{d} + a\right)\right) \]
    14. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(e + b\right)\right), \left(d + a\right)\right) \]
    15. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(e, b\right)\right), \left(d + a\right)\right) \]
    16. +-lowering-+.f6499.6%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(e, b\right)\right), \mathsf{+.f64}\left(d, \color{blue}{a}\right)\right) \]
  8. Applied egg-rr99.6%

    \[\leadsto \color{blue}{\left(c + \left(e + b\right)\right) + \left(d + a\right)} \]
  9. Add Preprocessing

Alternative 3: 99.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ e + \left(\left(d + c\right) + \left(b + a\right)\right) \end{array} \]
(FPCore (a b c d e) :precision binary64 (+ e (+ (+ d c) (+ b a))))
double code(double a, double b, double c, double d, double e) {
	return e + ((d + c) + (b + a));
}
real(8) function code(a, b, c, d, e)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8), intent (in) :: e
    code = e + ((d + c) + (b + a))
end function
public static double code(double a, double b, double c, double d, double e) {
	return e + ((d + c) + (b + a));
}
def code(a, b, c, d, e):
	return e + ((d + c) + (b + a))
function code(a, b, c, d, e)
	return Float64(e + Float64(Float64(d + c) + Float64(b + a)))
end
function tmp = code(a, b, c, d, e)
	tmp = e + ((d + c) + (b + a));
end
code[a_, b_, c_, d_, e_] := N[(e + N[(N[(d + c), $MachinePrecision] + N[(b + a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
e + \left(\left(d + c\right) + \left(b + a\right)\right)
\end{array}
Derivation
  1. Initial program 99.4%

    \[\left(\left(\left(e + d\right) + c\right) + b\right) + a \]
  2. Step-by-step derivation
    1. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(\left(e + d\right) + c\right) + b\right), \color{blue}{a}\right) \]
    2. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(e + d\right) + \left(c + b\right)\right), a\right) \]
    3. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(c + b\right) + \left(e + d\right)\right), a\right) \]
    4. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(c + \left(b + \left(e + d\right)\right)\right), a\right) \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(b + \left(e + d\right)\right)\right), a\right) \]
    6. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(\left(e + d\right) + b\right)\right), a\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\left(e + d\right), b\right)\right), a\right) \]
    8. +-lowering-+.f6499.4%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), b\right)\right), a\right) \]
  3. Simplified99.4%

    \[\leadsto \color{blue}{\left(c + \left(\left(e + d\right) + b\right)\right) + a} \]
  4. Add Preprocessing
  5. Step-by-step derivation
    1. associate-+r+N/A

      \[\leadsto \left(\left(c + \left(e + d\right)\right) + b\right) + a \]
    2. +-commutativeN/A

      \[\leadsto \left(\left(\left(e + d\right) + c\right) + b\right) + a \]
    3. associate-+l+N/A

      \[\leadsto \left(\left(e + d\right) + c\right) + \color{blue}{\left(b + a\right)} \]
    4. associate-+l+N/A

      \[\leadsto \left(e + \left(d + c\right)\right) + \left(\color{blue}{b} + a\right) \]
    5. associate-+l+N/A

      \[\leadsto e + \color{blue}{\left(\left(d + c\right) + \left(b + a\right)\right)} \]
    6. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(e, \color{blue}{\left(\left(d + c\right) + \left(b + a\right)\right)}\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(e, \mathsf{+.f64}\left(\left(d + c\right), \color{blue}{\left(b + a\right)}\right)\right) \]
    8. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(e, \mathsf{+.f64}\left(\left(c + d\right), \left(\color{blue}{b} + a\right)\right)\right) \]
    9. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(e, \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, d\right), \left(\color{blue}{b} + a\right)\right)\right) \]
    10. +-lowering-+.f6499.6%

      \[\leadsto \mathsf{+.f64}\left(e, \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, d\right), \mathsf{+.f64}\left(b, \color{blue}{a}\right)\right)\right) \]
  6. Applied egg-rr99.6%

    \[\leadsto \color{blue}{e + \left(\left(c + d\right) + \left(b + a\right)\right)} \]
  7. Final simplification99.6%

    \[\leadsto e + \left(\left(d + c\right) + \left(b + a\right)\right) \]
  8. Add Preprocessing

Alternative 4: 25.7% accurate, 1.3× speedup?

\[\begin{array}{l} \\ d + \left(b + \left(e + c\right)\right) \end{array} \]
(FPCore (a b c d e) :precision binary64 (+ d (+ b (+ e c))))
double code(double a, double b, double c, double d, double e) {
	return d + (b + (e + c));
}
real(8) function code(a, b, c, d, e)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8), intent (in) :: e
    code = d + (b + (e + c))
end function
public static double code(double a, double b, double c, double d, double e) {
	return d + (b + (e + c));
}
def code(a, b, c, d, e):
	return d + (b + (e + c))
function code(a, b, c, d, e)
	return Float64(d + Float64(b + Float64(e + c)))
end
function tmp = code(a, b, c, d, e)
	tmp = d + (b + (e + c));
end
code[a_, b_, c_, d_, e_] := N[(d + N[(b + N[(e + c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
d + \left(b + \left(e + c\right)\right)
\end{array}
Derivation
  1. Initial program 99.4%

    \[\left(\left(\left(e + d\right) + c\right) + b\right) + a \]
  2. Step-by-step derivation
    1. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(\left(e + d\right) + c\right) + b\right), \color{blue}{a}\right) \]
    2. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(e + d\right) + \left(c + b\right)\right), a\right) \]
    3. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(c + b\right) + \left(e + d\right)\right), a\right) \]
    4. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(c + \left(b + \left(e + d\right)\right)\right), a\right) \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(b + \left(e + d\right)\right)\right), a\right) \]
    6. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(\left(e + d\right) + b\right)\right), a\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\left(e + d\right), b\right)\right), a\right) \]
    8. +-lowering-+.f6499.4%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), b\right)\right), a\right) \]
  3. Simplified99.4%

    \[\leadsto \color{blue}{\left(c + \left(\left(e + d\right) + b\right)\right) + a} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0

    \[\leadsto \color{blue}{b + \left(c + \left(d + e\right)\right)} \]
  6. Step-by-step derivation
    1. associate-+r+N/A

      \[\leadsto \left(b + c\right) + \color{blue}{\left(d + e\right)} \]
    2. +-commutativeN/A

      \[\leadsto \left(d + e\right) + \color{blue}{\left(b + c\right)} \]
    3. associate-+l+N/A

      \[\leadsto d + \color{blue}{\left(e + \left(b + c\right)\right)} \]
    4. +-commutativeN/A

      \[\leadsto d + \left(\left(b + c\right) + \color{blue}{e}\right) \]
    5. associate-+r+N/A

      \[\leadsto d + \left(b + \color{blue}{\left(c + e\right)}\right) \]
    6. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(d, \color{blue}{\left(b + \left(c + e\right)\right)}\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \color{blue}{\left(c + e\right)}\right)\right) \]
    8. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \left(e + \color{blue}{c}\right)\right)\right) \]
    9. +-lowering-+.f6425.7%

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \mathsf{+.f64}\left(e, \color{blue}{c}\right)\right)\right) \]
  7. Simplified25.7%

    \[\leadsto \color{blue}{d + \left(b + \left(e + c\right)\right)} \]
  8. Add Preprocessing

Alternative 5: 25.7% accurate, 1.3× speedup?

\[\begin{array}{l} \\ c + \left(d + \left(e + b\right)\right) \end{array} \]
(FPCore (a b c d e) :precision binary64 (+ c (+ d (+ e b))))
double code(double a, double b, double c, double d, double e) {
	return c + (d + (e + b));
}
real(8) function code(a, b, c, d, e)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8), intent (in) :: e
    code = c + (d + (e + b))
end function
public static double code(double a, double b, double c, double d, double e) {
	return c + (d + (e + b));
}
def code(a, b, c, d, e):
	return c + (d + (e + b))
function code(a, b, c, d, e)
	return Float64(c + Float64(d + Float64(e + b)))
end
function tmp = code(a, b, c, d, e)
	tmp = c + (d + (e + b));
end
code[a_, b_, c_, d_, e_] := N[(c + N[(d + N[(e + b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c + \left(d + \left(e + b\right)\right)
\end{array}
Derivation
  1. Initial program 99.4%

    \[\left(\left(\left(e + d\right) + c\right) + b\right) + a \]
  2. Step-by-step derivation
    1. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(\left(e + d\right) + c\right) + b\right), \color{blue}{a}\right) \]
    2. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(e + d\right) + \left(c + b\right)\right), a\right) \]
    3. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(c + b\right) + \left(e + d\right)\right), a\right) \]
    4. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(c + \left(b + \left(e + d\right)\right)\right), a\right) \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(b + \left(e + d\right)\right)\right), a\right) \]
    6. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(\left(e + d\right) + b\right)\right), a\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\left(e + d\right), b\right)\right), a\right) \]
    8. +-lowering-+.f6499.4%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), b\right)\right), a\right) \]
  3. Simplified99.4%

    \[\leadsto \color{blue}{\left(c + \left(\left(e + d\right) + b\right)\right) + a} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0

    \[\leadsto \color{blue}{b + \left(c + \left(d + e\right)\right)} \]
  6. Step-by-step derivation
    1. associate-+r+N/A

      \[\leadsto \left(b + c\right) + \color{blue}{\left(d + e\right)} \]
    2. +-commutativeN/A

      \[\leadsto \left(d + e\right) + \color{blue}{\left(b + c\right)} \]
    3. associate-+l+N/A

      \[\leadsto d + \color{blue}{\left(e + \left(b + c\right)\right)} \]
    4. +-commutativeN/A

      \[\leadsto d + \left(\left(b + c\right) + \color{blue}{e}\right) \]
    5. associate-+r+N/A

      \[\leadsto d + \left(b + \color{blue}{\left(c + e\right)}\right) \]
    6. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(d, \color{blue}{\left(b + \left(c + e\right)\right)}\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \color{blue}{\left(c + e\right)}\right)\right) \]
    8. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \left(e + \color{blue}{c}\right)\right)\right) \]
    9. +-lowering-+.f6425.7%

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \mathsf{+.f64}\left(e, \color{blue}{c}\right)\right)\right) \]
  7. Simplified25.7%

    \[\leadsto \color{blue}{d + \left(b + \left(e + c\right)\right)} \]
  8. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto d + \left(\left(e + c\right) + \color{blue}{b}\right) \]
    2. associate-+l+N/A

      \[\leadsto d + \left(e + \color{blue}{\left(c + b\right)}\right) \]
    3. associate-+l+N/A

      \[\leadsto \left(d + e\right) + \color{blue}{\left(c + b\right)} \]
    4. +-commutativeN/A

      \[\leadsto \left(e + d\right) + \left(\color{blue}{c} + b\right) \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(e + d\right), \color{blue}{\left(c + b\right)}\right) \]
    6. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), \left(\color{blue}{c} + b\right)\right) \]
    7. +-lowering-+.f6425.7%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), \mathsf{+.f64}\left(c, \color{blue}{b}\right)\right) \]
  9. Applied egg-rr25.7%

    \[\leadsto \color{blue}{\left(e + d\right) + \left(c + b\right)} \]
  10. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \left(c + b\right) + \color{blue}{\left(e + d\right)} \]
    2. associate-+r+N/A

      \[\leadsto \left(\left(c + b\right) + e\right) + \color{blue}{d} \]
    3. associate-+r+N/A

      \[\leadsto \left(c + \left(b + e\right)\right) + d \]
    4. +-commutativeN/A

      \[\leadsto \left(c + \left(e + b\right)\right) + d \]
    5. associate-+l+N/A

      \[\leadsto c + \color{blue}{\left(\left(e + b\right) + d\right)} \]
    6. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(c, \color{blue}{\left(\left(e + b\right) + d\right)}\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\left(e + b\right), \color{blue}{d}\right)\right) \]
    8. +-lowering-+.f6425.7%

      \[\leadsto \mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, b\right), d\right)\right) \]
  11. Applied egg-rr25.7%

    \[\leadsto \color{blue}{c + \left(\left(e + b\right) + d\right)} \]
  12. Final simplification25.7%

    \[\leadsto c + \left(d + \left(e + b\right)\right) \]
  13. Add Preprocessing

Alternative 6: 23.2% accurate, 1.8× speedup?

\[\begin{array}{l} \\ d + \left(e + c\right) \end{array} \]
(FPCore (a b c d e) :precision binary64 (+ d (+ e c)))
double code(double a, double b, double c, double d, double e) {
	return d + (e + c);
}
real(8) function code(a, b, c, d, e)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8), intent (in) :: e
    code = d + (e + c)
end function
public static double code(double a, double b, double c, double d, double e) {
	return d + (e + c);
}
def code(a, b, c, d, e):
	return d + (e + c)
function code(a, b, c, d, e)
	return Float64(d + Float64(e + c))
end
function tmp = code(a, b, c, d, e)
	tmp = d + (e + c);
end
code[a_, b_, c_, d_, e_] := N[(d + N[(e + c), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
d + \left(e + c\right)
\end{array}
Derivation
  1. Initial program 99.4%

    \[\left(\left(\left(e + d\right) + c\right) + b\right) + a \]
  2. Step-by-step derivation
    1. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(\left(e + d\right) + c\right) + b\right), \color{blue}{a}\right) \]
    2. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(e + d\right) + \left(c + b\right)\right), a\right) \]
    3. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(c + b\right) + \left(e + d\right)\right), a\right) \]
    4. associate-+l+N/A

      \[\leadsto \mathsf{+.f64}\left(\left(c + \left(b + \left(e + d\right)\right)\right), a\right) \]
    5. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(b + \left(e + d\right)\right)\right), a\right) \]
    6. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(\left(e + d\right) + b\right)\right), a\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\left(e + d\right), b\right)\right), a\right) \]
    8. +-lowering-+.f6499.4%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), b\right)\right), a\right) \]
  3. Simplified99.4%

    \[\leadsto \color{blue}{\left(c + \left(\left(e + d\right) + b\right)\right) + a} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0

    \[\leadsto \color{blue}{b + \left(c + \left(d + e\right)\right)} \]
  6. Step-by-step derivation
    1. associate-+r+N/A

      \[\leadsto \left(b + c\right) + \color{blue}{\left(d + e\right)} \]
    2. +-commutativeN/A

      \[\leadsto \left(d + e\right) + \color{blue}{\left(b + c\right)} \]
    3. associate-+l+N/A

      \[\leadsto d + \color{blue}{\left(e + \left(b + c\right)\right)} \]
    4. +-commutativeN/A

      \[\leadsto d + \left(\left(b + c\right) + \color{blue}{e}\right) \]
    5. associate-+r+N/A

      \[\leadsto d + \left(b + \color{blue}{\left(c + e\right)}\right) \]
    6. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(d, \color{blue}{\left(b + \left(c + e\right)\right)}\right) \]
    7. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \color{blue}{\left(c + e\right)}\right)\right) \]
    8. +-commutativeN/A

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \left(e + \color{blue}{c}\right)\right)\right) \]
    9. +-lowering-+.f6425.7%

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \mathsf{+.f64}\left(e, \color{blue}{c}\right)\right)\right) \]
  7. Simplified25.7%

    \[\leadsto \color{blue}{d + \left(b + \left(e + c\right)\right)} \]
  8. Taylor expanded in b around 0

    \[\leadsto \color{blue}{c + \left(d + e\right)} \]
  9. Step-by-step derivation
    1. associate-+r+N/A

      \[\leadsto \left(c + d\right) + \color{blue}{e} \]
    2. +-commutativeN/A

      \[\leadsto \left(d + c\right) + e \]
    3. associate-+l+N/A

      \[\leadsto d + \color{blue}{\left(c + e\right)} \]
    4. +-lowering-+.f64N/A

      \[\leadsto \mathsf{+.f64}\left(d, \color{blue}{\left(c + e\right)}\right) \]
    5. +-lowering-+.f6423.2%

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(c, \color{blue}{e}\right)\right) \]
  10. Simplified23.2%

    \[\leadsto \color{blue}{d + \left(c + e\right)} \]
  11. Final simplification23.2%

    \[\leadsto d + \left(e + c\right) \]
  12. Add Preprocessing

Alternative 7: 21.2% accurate, 3.0× speedup?

\[\begin{array}{l} \\ e + d \end{array} \]
(FPCore (a b c d e) :precision binary64 (+ e d))
double code(double a, double b, double c, double d, double e) {
	return e + d;
}
real(8) function code(a, b, c, d, e)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8), intent (in) :: e
    code = e + d
end function
public static double code(double a, double b, double c, double d, double e) {
	return e + d;
}
def code(a, b, c, d, e):
	return e + d
function code(a, b, c, d, e)
	return Float64(e + d)
end
% Herbie alternative 7: tmp = e + d; a, b and c are unused.
function tmp = code(a, b, c, d, e)
	tmp = e + d;
end
(* Herbie alternative 7: e + d rounded to machine precision; a, b and c are unused. *)
code[a_, b_, c_, d_, e_] := N[(e + d), $MachinePrecision]
\begin{array}{l}

\\
e + d
\end{array}
Derivation
  1. Initial program 99.4%

    \[\left(\left(\left(e + d\right) + c\right) + b\right) + a \]
  2. Step-by-step derivation
    1. +-lowering-+.f64 N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(\left(e + d\right) + c\right) + b\right), \color{blue}{a}\right) \]
    2. associate-+l+ N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(e + d\right) + \left(c + b\right)\right), a\right) \]
    3. +-commutative N/A

      \[\leadsto \mathsf{+.f64}\left(\left(\left(c + b\right) + \left(e + d\right)\right), a\right) \]
    4. associate-+l+ N/A

      \[\leadsto \mathsf{+.f64}\left(\left(c + \left(b + \left(e + d\right)\right)\right), a\right) \]
    5. +-lowering-+.f64 N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(b + \left(e + d\right)\right)\right), a\right) \]
    6. +-commutative N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(\left(e + d\right) + b\right)\right), a\right) \]
    7. +-lowering-+.f64 N/A

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\left(e + d\right), b\right)\right), a\right) \]
    8. +-lowering-+.f64 99.4%

      \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), b\right)\right), a\right) \]
  3. Simplified 99.4%

    \[\leadsto \color{blue}{\left(c + \left(\left(e + d\right) + b\right)\right) + a} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0

    \[\leadsto \color{blue}{b + \left(c + \left(d + e\right)\right)} \]
  6. Step-by-step derivation
    1. associate-+r+ N/A

      \[\leadsto \left(b + c\right) + \color{blue}{\left(d + e\right)} \]
    2. +-commutative N/A

      \[\leadsto \left(d + e\right) + \color{blue}{\left(b + c\right)} \]
    3. associate-+l+ N/A

      \[\leadsto d + \color{blue}{\left(e + \left(b + c\right)\right)} \]
    4. +-commutative N/A

      \[\leadsto d + \left(\left(b + c\right) + \color{blue}{e}\right) \]
    5. associate-+r+ N/A

      \[\leadsto d + \left(b + \color{blue}{\left(c + e\right)}\right) \]
    6. +-lowering-+.f64 N/A

      \[\leadsto \mathsf{+.f64}\left(d, \color{blue}{\left(b + \left(c + e\right)\right)}\right) \]
    7. +-lowering-+.f64 N/A

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \color{blue}{\left(c + e\right)}\right)\right) \]
    8. +-commutative N/A

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \left(e + \color{blue}{c}\right)\right)\right) \]
    9. +-lowering-+.f64 25.7%

      \[\leadsto \mathsf{+.f64}\left(d, \mathsf{+.f64}\left(b, \mathsf{+.f64}\left(e, \color{blue}{c}\right)\right)\right) \]
  7. Simplified 25.7%

    \[\leadsto \color{blue}{d + \left(b + \left(e + c\right)\right)} \]
  8. Taylor expanded in e around inf

    \[\leadsto \mathsf{+.f64}\left(d, \color{blue}{e}\right) \]
  9. Step-by-step derivation
    1. Simplified 21.2%

      \[\leadsto d + \color{blue}{e} \]
    2. Final simplification 21.2%

      \[\leadsto e + d \]
    3. Add Preprocessing

    Alternative 8: 18.9% accurate, 9.0× speedup?

    \[\begin{array}{l} \\ e \end{array} \]
    (FPCore (a b c d e) :precision binary64 e)
    /* Herbie alternative 8: returns e alone; a, b, c and d are unused. */
    double code(double a, double b, double c, double d, double e) {
    	return e;
    }
    
    ! Herbie alternative 8: result is e alone; a, b, c and d are unused.
    real(8) function code(a, b, c, d, e)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        real(8), intent (in) :: d
        real(8), intent (in) :: e
        code = e
    end function
    
    // Herbie alternative 8: returns e alone; a, b, c and d are unused.
    public static double code(double a, double b, double c, double d, double e) {
    	return e;
    }
    
    def code(a, b, c, d, e):
    	"""Herbie alternative 8: return e unchanged (a, b, c and d are unused)."""
    	return e
    
    # Herbie alternative 8: return e unchanged; a, b, c and d are unused.
    function code(a, b, c, d, e)
    	return e
    end
    
    % Herbie alternative 8: tmp = e; a, b, c and d are unused.
    function tmp = code(a, b, c, d, e)
    	tmp = e;
    end
    
    (* Herbie alternative 8: returns e unchanged; a, b, c and d are unused. *)
    code[a_, b_, c_, d_, e_] := e
    
    \begin{array}{l}
    
    \\
    e
    \end{array}
    
    Derivation
    1. Initial program 99.4%

      \[\left(\left(\left(e + d\right) + c\right) + b\right) + a \]
    2. Step-by-step derivation
      1. +-lowering-+.f64 N/A

        \[\leadsto \mathsf{+.f64}\left(\left(\left(\left(e + d\right) + c\right) + b\right), \color{blue}{a}\right) \]
      2. associate-+l+ N/A

        \[\leadsto \mathsf{+.f64}\left(\left(\left(e + d\right) + \left(c + b\right)\right), a\right) \]
      3. +-commutative N/A

        \[\leadsto \mathsf{+.f64}\left(\left(\left(c + b\right) + \left(e + d\right)\right), a\right) \]
      4. associate-+l+ N/A

        \[\leadsto \mathsf{+.f64}\left(\left(c + \left(b + \left(e + d\right)\right)\right), a\right) \]
      5. +-lowering-+.f64 N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(b + \left(e + d\right)\right)\right), a\right) \]
      6. +-commutative N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \left(\left(e + d\right) + b\right)\right), a\right) \]
      7. +-lowering-+.f64 N/A

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\left(e + d\right), b\right)\right), a\right) \]
      8. +-lowering-+.f64 99.4%

        \[\leadsto \mathsf{+.f64}\left(\mathsf{+.f64}\left(c, \mathsf{+.f64}\left(\mathsf{+.f64}\left(e, d\right), b\right)\right), a\right) \]
    3. Simplified 99.4%

      \[\leadsto \color{blue}{\left(c + \left(\left(e + d\right) + b\right)\right) + a} \]
    4. Add Preprocessing
    5. Taylor expanded in e around inf

      \[\leadsto \color{blue}{e} \]
    6. Step-by-step derivation
      1. Simplified 18.9%

        \[\leadsto \color{blue}{e} \]
      2. Add Preprocessing

      Developer Target 1: 99.6% accurate, 1.0× speedup?

      \[\begin{array}{l} \\ \left(d + \left(c + \left(a + b\right)\right)\right) + e \end{array} \]
      (FPCore (a b c d e) :precision binary64 (+ (+ d (+ c (+ a b))) e))
      /* Developer target 1: sum in ascending magnitude — (a+b), +c, +d, then +e —
         since the precondition bounds a in [1,2] up through e in [16,32]. */
      double code(double a, double b, double c, double d, double e) {
      	return (d + (c + (a + b))) + e;
      }
      
      ! Developer target 1: sum in ascending magnitude — (a+b), +c, +d, then +e —
      ! since the precondition bounds a in [1,2] up through e in [16,32].
      real(8) function code(a, b, c, d, e)
          real(8), intent (in) :: a
          real(8), intent (in) :: b
          real(8), intent (in) :: c
          real(8), intent (in) :: d
          real(8), intent (in) :: e
          code = (d + (c + (a + b))) + e
      end function
      
      // Developer target 1: sum in ascending magnitude — (a+b), +c, +d, then +e —
      // since the precondition bounds a in [1,2] up through e in [16,32].
      public static double code(double a, double b, double c, double d, double e) {
      	return (d + (c + (a + b))) + e;
      }
      
      def code(a, b, c, d, e):
      	"""Developer target 1: sum in ascending magnitude — (a+b), +c, +d, then +e.

      	The precondition bounds a in [1,2] up through e in [16,32], so adding
      	the smallest terms first reduces rounding error.
      	"""
      	return (d + (c + (a + b))) + e
      
      # Developer target 1: sum in ascending magnitude — (a+b), +c, +d, then +e —
      # since the precondition bounds a in [1,2] up through e in [16,32].
      function code(a, b, c, d, e)
      	return Float64(Float64(d + Float64(c + Float64(a + b))) + e)
      end
      
      % Developer target 1: sum in ascending magnitude — (a+b), +c, +d, then +e —
      % since the precondition bounds a in [1,2] up through e in [16,32].
      function tmp = code(a, b, c, d, e)
      	tmp = (d + (c + (a + b))) + e;
      end
      
      (* Developer target 1: sum in ascending magnitude — (a+b), +c, +d, then +e. *)
      code[a_, b_, c_, d_, e_] := N[(N[(d + N[(c + N[(a + b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + e), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \left(d + \left(c + \left(a + b\right)\right)\right) + e
      \end{array}
      

      Reproduce

      ?
      herbie shell --seed 2024163 
      (FPCore (a b c d e)
        :name "Expression 1, p15"
        :precision binary64
        :pre (and (and (and (and (and (and (and (and (and (<= 1.0 a) (<= a 2.0)) (<= 2.0 b)) (<= b 4.0)) (<= 4.0 c)) (<= c 8.0)) (<= 8.0 d)) (<= d 16.0)) (<= 16.0 e)) (<= e 32.0))
      
        :alt
        (! :herbie-platform default (+ (+ d (+ c (+ a b))) e))
      
        (+ (+ (+ (+ e d) c) b) a))