
(FPCore (a b c d) :precision binary64 (* (+ a (+ b (+ c d))) 2.0))
double code(double a, double b, double c, double d) {
    /* Twice the nested sum a + (b + (c + d)); addition order kept as generated
       so floating-point rounding is unchanged. */
    const double cd = c + d;
    const double bcd = b + cd;
    return (a + bcd) * 2.0;
}
real(8) function code(a, b, c, d)
    real(8), intent(in) :: a, b, c, d
    real(8) :: tail
    ! Twice the nested sum a + (b + (c + d)); addition order kept as generated.
    tail = c + d
    code = (a + (b + tail)) * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Twice the nested sum a + (b + (c + d)); addition order kept as generated.
    final double tail = c + d;
    return (a + (b + tail)) * 2.0;
}
def code(a, b, c, d):
    """Return twice the nested sum a + (b + (c + d)), evaluation order preserved."""
    tail = c + d
    return (a + (b + tail)) * 2.0
function code(a, b, c, d)
    # Twice the nested sum; Float64 coercions kept from the generated form.
    tail = Float64(c + d)
    total = Float64(a + Float64(b + tail))
    return Float64(total * 2.0)
end
function tmp = code(a, b, c, d)
    % Twice the nested sum a + (b + (c + d)); addition order kept as generated.
    tail = c + d;
    tmp = (a + (b + tail)) * 2.0;
end
(* Twice the nested sum a + (b + (c + d)); each intermediate is rounded to
   $MachinePrecision via N[...], so the nesting order is semantically significant. *)
code[a_, b_, c_, d_] := N[(N[(a + N[(b + N[(c + d), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(a + \left(b + \left(c + d\right)\right)\right) \cdot 2
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c d) :precision binary64 (* (+ a (+ b (+ c d))) 2.0))
double code(double a, double b, double c, double d) {
    /* Twice the nested sum a + (b + (c + d)); addition order kept as generated. */
    const double cd = c + d;
    const double bcd = b + cd;
    return (a + bcd) * 2.0;
}
real(8) function code(a, b, c, d)
    real(8), intent(in) :: a, b, c, d
    real(8) :: tail
    ! Twice the nested sum a + (b + (c + d)); addition order kept as generated.
    tail = c + d
    code = (a + (b + tail)) * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Twice the nested sum a + (b + (c + d)); addition order kept as generated.
    final double tail = c + d;
    return (a + (b + tail)) * 2.0;
}
def code(a, b, c, d):
    """Return twice the nested sum a + (b + (c + d)), evaluation order preserved."""
    tail = c + d
    return (a + (b + tail)) * 2.0
function code(a, b, c, d)
    # Twice the nested sum; Float64 coercions kept from the generated form.
    tail = Float64(c + d)
    total = Float64(a + Float64(b + tail))
    return Float64(total * 2.0)
end
function tmp = code(a, b, c, d)
    % Twice the nested sum a + (b + (c + d)); addition order kept as generated.
    tail = c + d;
    tmp = (a + (b + tail)) * 2.0;
end
(* Twice the nested sum a + (b + (c + d)); each intermediate is rounded to
   $MachinePrecision via N[...], so the nesting order is semantically significant. *)
code[a_, b_, c_, d_] := N[(N[(a + N[(b + N[(c + d), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(a + \left(b + \left(c + d\right)\right)\right) \cdot 2
\end{array}
(FPCore (a b c d) :precision binary64 (* (fma 1.0 (+ b c) (+ d a)) 2.0))
double code(double a, double b, double c, double d) {
return fma(1.0, (b + c), (d + a)) * 2.0;
}
function code(a, b, c, d)
    # fma(1.0, b+c, d+a): adds the two pair-sums with a single rounding.
    lhs = Float64(b + c)
    rhs = Float64(d + a)
    return Float64(fma(1.0, lhs, rhs) * 2.0)
end
(* fma-form of the doubled sum: (1*(b+c) + (d+a)) * 2, every intermediate
   rounded to $MachinePrecision. *)
code[a_, b_, c_, d_] := N[(N[(1.0 * N[(b + c), $MachinePrecision] + N[(d + a), $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(1, b + c, d + a\right) \cdot 2
\end{array}
Initial program 94.3%
+-commutative 94.3%
associate-+r+ 95.7%
associate-+l+ 100.0%
*-un-lft-identity 100.0%
fma-def 100.0%
Applied egg-rr 100.0%
Final simplification 100.0%
(FPCore (a b c d) :precision binary64 (* 2.0 (+ a (+ b (+ c d)))))
double code(double a, double b, double c, double d) {
    /* 2 * (a + (b + (c + d))); addition order kept as generated. */
    const double cd = c + d;
    const double bcd = b + cd;
    return 2.0 * (a + bcd);
}
real(8) function code(a, b, c, d)
    real(8), intent(in) :: a, b, c, d
    real(8) :: tail
    ! 2 * (a + (b + (c + d))); addition order kept as generated.
    tail = c + d
    code = 2.0d0 * (a + (b + tail))
end function
public static double code(double a, double b, double c, double d) {
    // 2 * (a + (b + (c + d))); addition order kept as generated.
    final double tail = c + d;
    return 2.0 * (a + (b + tail));
}
def code(a, b, c, d):
    """Return 2 * (a + (b + (c + d))), evaluation order preserved."""
    tail = c + d
    return 2.0 * (a + (b + tail))
function code(a, b, c, d)
    # 2 * nested sum; Float64 coercions kept from the generated form.
    tail = Float64(c + d)
    total = Float64(a + Float64(b + tail))
    return Float64(2.0 * total)
end
function tmp = code(a, b, c, d)
    % 2 * (a + (b + (c + d))); addition order kept as generated.
    tail = c + d;
    tmp = 2.0 * (a + (b + tail));
end
(* 2 * (a + (b + (c + d))); each intermediate rounded to $MachinePrecision. *)
code[a_, b_, c_, d_] := N[(2.0 * N[(a + N[(b + N[(c + d), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \left(a + \left(b + \left(c + d\right)\right)\right)
\end{array}
Initial program 94.3%
Final simplification 94.3%
(FPCore (a b c d) :precision binary64 (* 2.0 (+ c (+ a (+ b d)))))
double code(double a, double b, double c, double d) {
    /* 2 * (c + (a + (b + d))); addition order kept as generated. */
    const double bd = b + d;
    const double abd = a + bd;
    return 2.0 * (c + abd);
}
real(8) function code(a, b, c, d)
    real(8), intent(in) :: a, b, c, d
    real(8) :: inner
    ! 2 * (c + (a + (b + d))); addition order kept as generated.
    inner = b + d
    code = 2.0d0 * (c + (a + inner))
end function
public static double code(double a, double b, double c, double d) {
    // 2 * (c + (a + (b + d))); addition order kept as generated.
    final double inner = b + d;
    return 2.0 * (c + (a + inner));
}
def code(a, b, c, d):
    """Return 2 * (c + (a + (b + d))), evaluation order preserved."""
    inner = b + d
    return 2.0 * (c + (a + inner))
function code(a, b, c, d)
    # 2 * (c + (a + (b + d))); Float64 coercions kept from the generated form.
    inner = Float64(b + d)
    total = Float64(c + Float64(a + inner))
    return Float64(2.0 * total)
end
function tmp = code(a, b, c, d)
    % 2 * (c + (a + (b + d))); addition order kept as generated.
    inner = b + d;
    tmp = 2.0 * (c + (a + inner));
end
(* 2 * (c + (a + (b + d))); each intermediate rounded to $MachinePrecision. *)
code[a_, b_, c_, d_] := N[(2.0 * N[(c + N[(a + N[(b + d), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \left(c + \left(a + \left(b + d\right)\right)\right)
\end{array}
Initial program 94.3%
Taylor expanded in a around 0 95.8%
Final simplification 95.8%
(FPCore (a b c d) :precision binary64 (* 2.0 (+ c (+ b (+ d a)))))
double code(double a, double b, double c, double d) {
    /* 2 * (c + (b + (d + a))); addition order kept as generated. */
    const double da = d + a;
    const double bda = b + da;
    return 2.0 * (c + bda);
}
real(8) function code(a, b, c, d)
    real(8), intent(in) :: a, b, c, d
    real(8) :: inner
    ! 2 * (c + (b + (d + a))); addition order kept as generated.
    inner = d + a
    code = 2.0d0 * (c + (b + inner))
end function
public static double code(double a, double b, double c, double d) {
    // 2 * (c + (b + (d + a))); addition order kept as generated.
    final double inner = d + a;
    return 2.0 * (c + (b + inner));
}
def code(a, b, c, d):
    """Return 2 * (c + (b + (d + a))), evaluation order preserved."""
    inner = d + a
    return 2.0 * (c + (b + inner))
function code(a, b, c, d)
    # 2 * (c + (b + (d + a))); Float64 coercions kept from the generated form.
    inner = Float64(d + a)
    total = Float64(c + Float64(b + inner))
    return Float64(2.0 * total)
end
function tmp = code(a, b, c, d)
    % 2 * (c + (b + (d + a))); addition order kept as generated.
    inner = d + a;
    tmp = 2.0 * (c + (b + inner));
end
(* 2 * (c + (b + (d + a))); each intermediate rounded to $MachinePrecision. *)
code[a_, b_, c_, d_] := N[(2.0 * N[(c + N[(b + N[(d + a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \left(c + \left(b + \left(d + a\right)\right)\right)
\end{array}
Initial program 94.3%
Taylor expanded in a around 0 95.8%
associate-+r+ 100.0%
flip-+ 98.6%
Applied egg-rr 98.6%
flip-+ 100.0%
Applied egg-rr 100.0%
Final simplification 100.0%
(FPCore (a b c d) :precision binary64 (* (+ b c) 2.0))
double code(double a, double b, double c, double d) {
    /* Truncated alternative: only (b + c) * 2; a and d are intentionally unused. */
    (void)a;
    (void)d;
    const double bc = b + c;
    return bc * 2.0;
}
real(8) function code(a, b, c, d)
    real(8), intent(in) :: a, b, c, d
    real(8) :: bc
    ! Truncated alternative: only (b + c) * 2; a and d are intentionally unused.
    bc = b + c
    code = bc * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Truncated alternative: only (b + c) * 2; a and d are intentionally unused.
    final double bc = b + c;
    return bc * 2.0;
}
def code(a, b, c, d):
    """Return (b + c) * 2; a and d are intentionally unused in this alternative."""
    bc = b + c
    return bc * 2.0
function code(a, b, c, d)
    # Truncated alternative: only (b + c) * 2; a and d are intentionally unused.
    bc = Float64(b + c)
    return Float64(bc * 2.0)
end
function tmp = code(a, b, c, d)
    % Truncated alternative: only (b + c) * 2; a and d are intentionally unused.
    bc = b + c;
    tmp = bc * 2.0;
end
(* Truncated alternative: (b + c) * 2 at $MachinePrecision; a and d unused. *)
code[a_, b_, c_, d_] := N[(N[(b + c), $MachinePrecision] * 2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(b + c\right) \cdot 2
\end{array}
Initial program 94.3%
Taylor expanded in a around 0 95.8%
Taylor expanded in b around inf 14.4%
Final simplification 14.4%
(FPCore (a b c d) :precision binary64 (* b 2.0))
double code(double a, double b, double c, double d) {
    /* Truncated alternative: only b * 2; a, c, d are intentionally unused. */
    (void)a;
    (void)c;
    (void)d;
    return b * 2.0;
}
real(8) function code(a, b, c, d)
    real(8), intent(in) :: a, b, c, d
    ! Truncated alternative: only b * 2; a, c, d are intentionally unused.
    code = b * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Truncated alternative: only b * 2; a, c, d are intentionally unused.
    final double doubled = b * 2.0;
    return doubled;
}
def code(a, b, c, d):
    """Return b * 2; a, c, d are intentionally unused in this alternative."""
    return b * 2.0
function code(a, b, c, d)
    # Truncated alternative: only b * 2; a, c, d are intentionally unused.
    doubled = Float64(b * 2.0)
    return doubled
end
function tmp = code(a, b, c, d)
    % Truncated alternative: only b * 2; a, c, d are intentionally unused.
    tmp = b * 2.0;
end
(* Truncated alternative: b * 2 at $MachinePrecision; a, c, d unused. *)
code[a_, b_, c_, d_] := N[(b * 2.0), $MachinePrecision]
\begin{array}{l}
\\
b \cdot 2
\end{array}
Initial program 94.3%
Taylor expanded in b around inf 5.9%
Final simplification 5.9%
(FPCore (a b c d) :precision binary64 (* c 2.0))
double code(double a, double b, double c, double d) {
    /* Truncated alternative: only c * 2; a, b, d are intentionally unused. */
    (void)a;
    (void)b;
    (void)d;
    return c * 2.0;
}
real(8) function code(a, b, c, d)
    real(8), intent(in) :: a, b, c, d
    ! Truncated alternative: only c * 2; a, b, d are intentionally unused.
    code = c * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Truncated alternative: only c * 2; a, b, d are intentionally unused.
    final double doubled = c * 2.0;
    return doubled;
}
def code(a, b, c, d):
    """Return c * 2; a, b, d are intentionally unused in this alternative."""
    return c * 2.0
function code(a, b, c, d)
    # Truncated alternative: only c * 2; a, b, d are intentionally unused.
    doubled = Float64(c * 2.0)
    return doubled
end
function tmp = code(a, b, c, d)
    % Truncated alternative: only c * 2; a, b, d are intentionally unused.
    tmp = c * 2.0;
end
(* Truncated alternative: c * 2 at $MachinePrecision; a, b, d unused. *)
code[a_, b_, c_, d_] := N[(c * 2.0), $MachinePrecision]
\begin{array}{l}
\\
c \cdot 2
\end{array}
Initial program 94.3%
Taylor expanded in c around inf 12.0%
Final simplification 12.0%
(FPCore (a b c d) :precision binary64 (+ (* (+ a b) 2.0) (* (+ c d) 2.0)))
double code(double a, double b, double c, double d) {
    /* Distributed form (a+b)*2 + (c+d)*2; pairing kept as generated. */
    const double front = (a + b) * 2.0;
    const double back = (c + d) * 2.0;
    return front + back;
}
real(8) function code(a, b, c, d)
    real(8), intent(in) :: a, b, c, d
    real(8) :: front, back
    ! Distributed form (a+b)*2 + (c+d)*2; pairing kept as generated.
    front = (a + b) * 2.0d0
    back = (c + d) * 2.0d0
    code = front + back
end function
public static double code(double a, double b, double c, double d) {
    // Distributed form (a+b)*2 + (c+d)*2; pairing kept as generated.
    final double front = (a + b) * 2.0;
    final double back = (c + d) * 2.0;
    return front + back;
}
def code(a, b, c, d):
    """Return (a + b) * 2 + (c + d) * 2, pairing preserved."""
    front = (a + b) * 2.0
    back = (c + d) * 2.0
    return front + back
function code(a, b, c, d)
    # Distributed form (a+b)*2 + (c+d)*2; Float64 coercions kept as generated.
    front = Float64(Float64(a + b) * 2.0)
    back = Float64(Float64(c + d) * 2.0)
    return Float64(front + back)
end
function tmp = code(a, b, c, d)
    % Distributed form (a+b)*2 + (c+d)*2; pairing kept as generated.
    front = (a + b) * 2.0;
    back = (c + d) * 2.0;
    tmp = front + back;
end
(* Distributed form (a+b)*2 + (c+d)*2; each intermediate rounded to
   $MachinePrecision. *)
code[a_, b_, c_, d_] := N[(N[(N[(a + b), $MachinePrecision] * 2.0), $MachinePrecision] + N[(N[(c + d), $MachinePrecision] * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(a + b\right) \cdot 2 + \left(c + d\right) \cdot 2
\end{array}
herbie shell --seed 2023229
(FPCore (a b c d)
:name "Expression, p6"
:precision binary64
:pre (and (and (and (and (<= -14.0 a) (<= a -13.0)) (and (<= -3.0 b) (<= b -2.0))) (and (<= 3.0 c) (<= c 3.5))) (and (<= 12.5 d) (<= d 13.5)))
:herbie-target
(+ (* (+ a b) 2.0) (* (+ c d) 2.0))
(* (+ a (+ b (+ c d))) 2.0))