
(FPCore (a b c d) :precision binary64 (* (+ a (+ b (+ c d))) 2.0))
double code(double a, double b, double c, double d) {
    /* Twice the right-associated sum a + (b + (c + d)); association order
       is kept so binary64 rounding is bit-identical to the original. */
    const double sum = a + (b + (c + d));
    return sum * 2.0;
}
! Herbie-generated: twice the right-associated sum a + (b + (c + d)).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    code = (a + (b + (c + d))) * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Twice the right-associated sum a + (b + (c + d)).
    final double sum = a + (b + (c + d));
    return sum * 2.0;
}
def code(a, b, c, d):
    """Return twice the right-associated sum a + (b + (c + d))."""
    inner = b + (c + d)
    return (a + inner) * 2.0
# Herbie-generated Julia: twice the right-associated sum; each Float64(...) makes
# the binary64 rounding of every intermediate explicit.
function code(a, b, c, d) return Float64(Float64(a + Float64(b + Float64(c + d))) * 2.0) end
function tmp = code(a, b, c, d)
    % Twice the right-associated sum a + (b + (c + d)).
    % Restored line structure: MATLAB does not allow statements on the
    % same line as the function declaration.
    tmp = (a + (b + (c + d))) * 2.0;
end
(* Herbie-generated: twice the right-associated sum; N[..., $MachinePrecision] rounds each intermediate to machine precision. *)
code[a_, b_, c_, d_] := N[(N[(a + N[(b + N[(c + d), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(a + \left(b + \left(c + d\right)\right)\right) \cdot 2
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c d) :precision binary64 (* (+ a (+ b (+ c d))) 2.0))
double code(double a, double b, double c, double d) {
    /* Double the four-way sum, accumulating right-to-left as the
       original association order requires. */
    double acc = c + d;
    acc = b + acc;
    acc = a + acc;
    return acc * 2.0;
}
! Herbie-generated: twice the right-associated sum a + (b + (c + d)).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    code = (a + (b + (c + d))) * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Twice the right-associated sum; accumulate right-to-left to
    // preserve the original rounding order exactly.
    double acc = c + d;
    acc = b + acc;
    acc = a + acc;
    return acc * 2.0;
}
def code(a, b, c, d):
    """Double the sum of the four arguments, associated right-to-left."""
    total = a + (b + (c + d))
    return total * 2.0
# Herbie-generated Julia: twice the right-associated sum; each Float64(...) makes
# the binary64 rounding of every intermediate explicit.
function code(a, b, c, d) return Float64(Float64(a + Float64(b + Float64(c + d))) * 2.0) end
function tmp = code(a, b, c, d)
    % Twice the right-associated sum a + (b + (c + d)).
    % Restored line structure: MATLAB does not allow statements on the
    % same line as the function declaration.
    tmp = (a + (b + (c + d))) * 2.0;
end
(* Herbie-generated: twice the right-associated sum; N[..., $MachinePrecision] rounds each intermediate to machine precision. *)
code[a_, b_, c_, d_] := N[(N[(a + N[(b + N[(c + d), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(a + \left(b + \left(c + d\right)\right)\right) \cdot 2
\end{array}
(FPCore (a b c d) :precision binary64 (* (+ (+ a d) (+ b c)) 2.0))
double code(double a, double b, double c, double d) {
    /* Twice the balanced sum (a + d) + (b + c). */
    const double left = a + d;
    const double right = b + c;
    return (left + right) * 2.0;
}
! Herbie-generated: twice the balanced sum (a + d) + (b + c).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    code = ((a + d) + (b + c)) * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Twice the balanced sum (a + d) + (b + c).
    final double left = a + d;
    final double right = b + c;
    return (left + right) * 2.0;
}
def code(a, b, c, d):
    """Return twice the balanced-tree sum (a + d) + (b + c)."""
    left = a + d
    right = b + c
    return (left + right) * 2.0
# Herbie-generated Julia: twice the balanced sum (a + d) + (b + c), with explicit
# Float64 rounding of every intermediate.
function code(a, b, c, d) return Float64(Float64(Float64(a + d) + Float64(b + c)) * 2.0) end
function tmp = code(a, b, c, d)
    % Twice the balanced sum (a + d) + (b + c).
    % Restored line structure: MATLAB does not allow statements on the
    % same line as the function declaration.
    tmp = ((a + d) + (b + c)) * 2.0;
end
(* Herbie-generated: twice the balanced sum (a + d) + (b + c); N[..., $MachinePrecision] rounds each intermediate. *)
code[a_, b_, c_, d_] := N[(N[(N[(a + d), $MachinePrecision] + N[(b + c), $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(a + d\right) + \left(b + c\right)\right) \cdot 2
\end{array}
Initial program 94.1%
+-commutative 94.1%
associate-+r+ 95.1%
+-commutative 95.1%
Simplified 95.1%
Taylor expanded in a around 0 94.1%
associate-+r+ 95.6%
+-commutative 95.6%
associate-+r+ 100.0%
Simplified 100.0%
(FPCore (a b c d) :precision binary64 (if (<= (+ d c) 15.99) (* (+ a d) 2.0) (* 2.0 (+ d (+ a c)))))
double code(double a, double b, double c, double d) {
    /* Regime split at d + c = 15.99; b is dropped in both branches
       (Herbie's Taylor expansion found it negligible on the sampled domain). */
    if ((d + c) <= 15.99) {
        return (a + d) * 2.0;
    }
    return 2.0 * (d + (a + c));
}
! Herbie-generated regime split at d + c = 15.99; b is dropped in both
! branches. implicit none added so no identifier can be implicitly typed.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8) :: tmp
    if ((d + c) <= 15.99d0) then
        tmp = (a + d) * 2.0d0
    else
        tmp = 2.0d0 * (d + (a + c))
    end if
    code = tmp
end function
public static double code(double a, double b, double c, double d) {
    // Regime split at d + c = 15.99; b is dropped in both regimes.
    if ((d + c) <= 15.99) {
        return (a + d) * 2.0;
    }
    return 2.0 * (d + (a + c));
}
def code(a, b, c, d):
    """Herbie regime split: double (a + d) when d + c <= 15.99, else double (d + (a + c)).

    b is dropped in both regimes (the derivation log shows a Taylor expansion
    in b around 0). Restored to valid multi-line Python: the original was an
    if/else flattened onto the def line, which is a SyntaxError.
    """
    tmp = 0
    if (d + c) <= 15.99:
        tmp = (a + d) * 2.0
    else:
        tmp = 2.0 * (d + (a + c))
    return tmp
# Herbie regime split at d + c = 15.99; b is dropped in both branches.
# Restored line structure: the original had multiple statements fused onto
# one line without separators, which does not parse in Julia.
function code(a, b, c, d)
    tmp = 0.0
    if (Float64(d + c) <= 15.99)
        tmp = Float64(Float64(a + d) * 2.0)
    else
        tmp = Float64(2.0 * Float64(d + Float64(a + c)))
    end
    return tmp
end
function tmp_2 = code(a, b, c, d)
    % Regime split at d + c = 15.99; b is dropped in both branches.
    % Restored line structure: MATLAB does not allow statements fused onto
    % the function declaration / if lines.
    tmp = 0.0;
    if ((d + c) <= 15.99)
        tmp = (a + d) * 2.0;
    else
        tmp = 2.0 * (d + (a + c));
    end
    tmp_2 = tmp;
end
(* Herbie-generated regime split at d + c = 15.99; b is dropped in both branches. *)
code[a_, b_, c_, d_] := If[LessEqual[N[(d + c), $MachinePrecision], 15.99], N[(N[(a + d), $MachinePrecision] * 2.0), $MachinePrecision], N[(2.0 * N[(d + N[(a + c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;d + c \leq 15.99:\\
\;\;\;\;\left(a + d\right) \cdot 2\\
\mathbf{else}:\\
\;\;\;\;2 \cdot \left(d + \left(a + c\right)\right)\\
\end{array}
\end{array}
if (+.f64 c d) < 15.9900000000000002
Initial program 94.1%
Taylor expanded in b around 0 5.1%
+-commutative 5.1%
+-commutative 5.1%
associate-+l+ 5.1%
Simplified 5.1%
Taylor expanded in c around 0 13.9%
+-commutative 13.9%
Simplified 13.9%
if 15.9900000000000002 < (+.f64 c d)
Initial program 94.1%
Taylor expanded in b around 0 13.8%
+-commutative 13.8%
+-commutative 13.8%
associate-+l+ 13.8%
Simplified 13.8%
Final simplification 13.9%
(FPCore (a b c d) :precision binary64 (if (<= a -13.635) (* (+ a d) 2.0) (* c 2.0)))
double code(double a, double b, double c, double d) {
    /* Regime split at a = -13.635: low regime doubles (a + d),
       high regime doubles c; b is dropped entirely. */
    if (a <= -13.635) {
        return (a + d) * 2.0;
    }
    return c * 2.0;
}
! Herbie-generated regime split at a = -13.635: low regime doubles (a + d),
! high regime doubles c. implicit none added to forbid implicit typing.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    real(8) :: tmp
    if (a <= (-13.635d0)) then
        tmp = (a + d) * 2.0d0
    else
        tmp = c * 2.0d0
    end if
    code = tmp
end function
public static double code(double a, double b, double c, double d) {
    // Regime split at a = -13.635: low regime doubles (a + d), high regime doubles c.
    if (a <= -13.635) {
        return (a + d) * 2.0;
    }
    return c * 2.0;
}
def code(a, b, c, d):
    """Herbie regime split: double (a + d) when a <= -13.635, else double c.

    b is dropped entirely; d only matters in the low-a regime. Restored to
    valid multi-line Python: the original was an if/else flattened onto the
    def line, which is a SyntaxError.
    """
    tmp = 0
    if a <= -13.635:
        tmp = (a + d) * 2.0
    else:
        tmp = c * 2.0
    return tmp
# Herbie regime split at a = -13.635: low regime doubles (a + d), high regime
# doubles c. Restored line structure: the original fused multiple statements
# onto one line without separators, which does not parse in Julia.
function code(a, b, c, d)
    tmp = 0.0
    if (a <= -13.635)
        tmp = Float64(Float64(a + d) * 2.0)
    else
        tmp = Float64(c * 2.0)
    end
    return tmp
end
function tmp_2 = code(a, b, c, d)
    % Regime split at a = -13.635: low regime doubles (a + d), high regime doubles c.
    % Restored line structure: MATLAB does not allow statements fused onto
    % the function declaration / if lines.
    tmp = 0.0;
    if (a <= -13.635)
        tmp = (a + d) * 2.0;
    else
        tmp = c * 2.0;
    end
    tmp_2 = tmp;
end
(* Herbie-generated regime split at a = -13.635: low regime doubles (a + d), high regime doubles c. *)
code[a_, b_, c_, d_] := If[LessEqual[a, -13.635], N[(N[(a + d), $MachinePrecision] * 2.0), $MachinePrecision], N[(c * 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -13.635:\\
\;\;\;\;\left(a + d\right) \cdot 2\\
\mathbf{else}:\\
\;\;\;\;c \cdot 2\\
\end{array}
\end{array}
if a < -13.6349999999999998
Initial program 93.7%
Taylor expanded in b around 0 7.2%
+-commutative 7.2%
+-commutative 7.2%
associate-+l+ 7.2%
Simplified 7.2%
Taylor expanded in c around 0 11.4%
+-commutative 11.4%
Simplified 11.4%
if -13.6349999999999998 < a
Initial program 94.3%
Taylor expanded in c around inf 14.8%
Final simplification 13.5%
(FPCore (a b c d) :precision binary64 (* 2.0 (+ a (+ c (+ d b)))))
double code(double a, double b, double c, double d) {
    /* 2 * (a + (c + (d + b))), association order preserved. */
    const double tail = c + (d + b);
    return 2.0 * (a + tail);
}
! Herbie-generated: 2 * (a + (c + (d + b))), association order preserved.
! implicit none added so no identifier can be implicitly typed.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    code = 2.0d0 * (a + (c + (d + b)))
end function
public static double code(double a, double b, double c, double d) {
    // 2 * (a + (c + (d + b))), association order preserved.
    final double tail = c + (d + b);
    return 2.0 * (a + tail);
}
def code(a, b, c, d):
    """Return 2 times the sum a + (c + (d + b)), in that association order."""
    tail = c + (d + b)
    return 2.0 * (a + tail)
# Herbie-generated Julia: 2 * (a + (c + (d + b))) with explicit Float64 rounding
# of every intermediate.
function code(a, b, c, d) return Float64(2.0 * Float64(a + Float64(c + Float64(d + b)))) end
function tmp = code(a, b, c, d)
    % 2 * (a + (c + (d + b))), association order preserved.
    % Restored line structure: MATLAB does not allow statements on the
    % same line as the function declaration.
    tmp = 2.0 * (a + (c + (d + b)));
end
(* Herbie-generated: 2 * (a + (c + (d + b))); N[..., $MachinePrecision] rounds each intermediate. *)
code[a_, b_, c_, d_] := N[(2.0 * N[(a + N[(c + N[(d + b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \left(a + \left(c + \left(d + b\right)\right)\right)
\end{array}
Initial program 94.1%
+-commutative 94.1%
associate-+r+ 95.1%
+-commutative 95.1%
Simplified 95.1%
Final simplification 95.1%
(FPCore (a b c d) :precision binary64 (* 2.0 (+ a (+ b (+ d c)))))
double code(double a, double b, double c, double d) {
    /* 2 * (a + (b + (d + c))), association order preserved. */
    double acc = d + c;
    acc = b + acc;
    return 2.0 * (a + acc);
}
! Herbie-generated: 2 * (a + (b + (d + c))), association order preserved.
! implicit none added so no identifier can be implicitly typed.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    code = 2.0d0 * (a + (b + (d + c)))
end function
public static double code(double a, double b, double c, double d) {
    // 2 * (a + (b + (d + c))), association order preserved.
    double acc = d + c;
    acc = b + acc;
    return 2.0 * (a + acc);
}
def code(a, b, c, d):
    """Return 2 times the sum a + (b + (d + c)), in that association order."""
    acc = d + c
    acc = b + acc
    return 2.0 * (a + acc)
# Herbie-generated Julia: 2 * (a + (b + (d + c))) with explicit Float64 rounding
# of every intermediate.
function code(a, b, c, d) return Float64(2.0 * Float64(a + Float64(b + Float64(d + c)))) end
function tmp = code(a, b, c, d)
    % 2 * (a + (b + (d + c))), association order preserved.
    % Restored line structure: MATLAB does not allow statements on the
    % same line as the function declaration.
    tmp = 2.0 * (a + (b + (d + c)));
end
(* Herbie-generated: 2 * (a + (b + (d + c))); N[..., $MachinePrecision] rounds each intermediate. *)
code[a_, b_, c_, d_] := N[(2.0 * N[(a + N[(b + N[(d + c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \left(a + \left(b + \left(d + c\right)\right)\right)
\end{array}
Initial program 94.1%
Final simplification 94.1%
(FPCore (a b c d) :precision binary64 (* c 2.0))
double code(double a, double b, double c, double d) {
    /* Only c survives in this approximation: return c doubled.
       a, b and d are deliberately ignored. */
    (void)a;
    (void)b;
    (void)d;
    return c * 2.0;
}
! Herbie-generated approximation keeping only c: returns c doubled.
! implicit none added so no identifier can be implicitly typed.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    code = c * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Only c contributes in this approximation: return it doubled.
    return 2.0 * c;
}
def code(a, b, c, d):
    """Return c doubled; a, b and d are intentionally unused."""
    doubled = c * 2.0
    return doubled
# Herbie-generated Julia approximation keeping only c: returns c doubled.
function code(a, b, c, d) return Float64(c * 2.0) end
function tmp = code(a, b, c, d)
    % Approximation keeping only c: returns c doubled.
    % Restored line structure: MATLAB does not allow statements on the
    % same line as the function declaration.
    tmp = c * 2.0;
end
(* Herbie-generated approximation keeping only c: returns c doubled. *)
code[a_, b_, c_, d_] := N[(c * 2.0), $MachinePrecision]
\begin{array}{l}
\\
c \cdot 2
\end{array}
Initial program 94.1%
Taylor expanded in c around inf 11.8%
(FPCore (a b c d) :precision binary64 (* b 2.0))
double code(double a, double b, double c, double d) {
    /* Only b survives in this approximation: return b doubled.
       a, c and d are deliberately ignored. */
    (void)a;
    (void)c;
    (void)d;
    return b * 2.0;
}
! Herbie-generated approximation keeping only b: returns b doubled.
! implicit none added so no identifier can be implicitly typed.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    code = b * 2.0d0
end function
public static double code(double a, double b, double c, double d) {
    // Only b contributes in this approximation: return it doubled.
    return 2.0 * b;
}
def code(a, b, c, d):
    """Return b doubled; a, c and d are intentionally unused."""
    doubled = b * 2.0
    return doubled
# Herbie-generated Julia approximation keeping only b: returns b doubled.
function code(a, b, c, d) return Float64(b * 2.0) end
function tmp = code(a, b, c, d)
    % Approximation keeping only b: returns b doubled.
    % Restored line structure: MATLAB does not allow statements on the
    % same line as the function declaration.
    tmp = b * 2.0;
end
(* Herbie-generated approximation keeping only b: returns b doubled. *)
code[a_, b_, c_, d_] := N[(b * 2.0), $MachinePrecision]
\begin{array}{l}
\\
b \cdot 2
\end{array}
Initial program 94.1%
Taylor expanded in b around inf 5.9%
(FPCore (a b c d) :precision binary64 (+ (* (+ a b) 2.0) (* (+ c d) 2.0)))
double code(double a, double b, double c, double d) {
    /* Distributed form: 2*(a + b) + 2*(c + d). */
    const double ab = (a + b) * 2.0;
    const double cd = (c + d) * 2.0;
    return ab + cd;
}
! Herbie-generated distributed form: 2*(a + b) + 2*(c + d).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(a, b, c, d)
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8), intent (in) :: d
    code = ((a + b) * 2.0d0) + ((c + d) * 2.0d0)
end function
public static double code(double a, double b, double c, double d) {
    // Distributed form: 2*(a + b) + 2*(c + d).
    final double ab = (a + b) * 2.0;
    final double cd = (c + d) * 2.0;
    return ab + cd;
}
def code(a, b, c, d):
    """Return 2*(a + b) + 2*(c + d), doubling each pair before the final add."""
    first_pair = (a + b) * 2.0
    second_pair = (c + d) * 2.0
    return first_pair + second_pair
# Herbie-generated Julia distributed form: 2*(a + b) + 2*(c + d), with explicit
# Float64 rounding of every intermediate.
function code(a, b, c, d) return Float64(Float64(Float64(a + b) * 2.0) + Float64(Float64(c + d) * 2.0)) end
function tmp = code(a, b, c, d)
    % Distributed form: 2*(a + b) + 2*(c + d).
    % Restored line structure: MATLAB does not allow statements on the
    % same line as the function declaration.
    tmp = ((a + b) * 2.0) + ((c + d) * 2.0);
end
(* Herbie-generated distributed form: 2*(a + b) + 2*(c + d); N[..., $MachinePrecision] rounds each intermediate. *)
code[a_, b_, c_, d_] := N[(N[(N[(a + b), $MachinePrecision] * 2.0), $MachinePrecision] + N[(N[(c + d), $MachinePrecision] * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(a + b\right) \cdot 2 + \left(c + d\right) \cdot 2
\end{array}
herbie shell --seed 2024116
(FPCore (a b c d)
:name "Expression, p6"
:precision binary64
:pre (and (and (and (and (<= -14.0 a) (<= a -13.0)) (and (<= -3.0 b) (<= b -2.0))) (and (<= 3.0 c) (<= c 3.5))) (and (<= 12.5 d) (<= d 13.5)))
:alt
(! :herbie-platform default (let ((e 2)) (+ (* (+ a b) e) (* (+ c d) e))))
(* (+ a (+ b (+ c d))) 2.0))