-static struct arp_eth_header *
-pull_arp(struct ofpbuf *packet)
-{
- return ofpbuf_try_pull(packet, ARP_ETH_HEADER_LEN);
-}
-
-static struct ip_header *
-pull_ip(struct ofpbuf *packet)
-{
- if (packet->size >= IP_HEADER_LEN) {
- struct ip_header *ip = packet->data;
- int ip_len = IP_IHL(ip->ip_ihl_ver) * 4;
- if (ip_len >= IP_HEADER_LEN && packet->size >= ip_len) {
- return ofpbuf_pull(packet, ip_len);
- }
- }
- return NULL;
-}
-
-static struct tcp_header *
-pull_tcp(struct ofpbuf *packet)
-{
- if (packet->size >= TCP_HEADER_LEN) {
- struct tcp_header *tcp = packet->data;
- int tcp_len = TCP_OFFSET(tcp->tcp_ctl) * 4;
- if (tcp_len >= TCP_HEADER_LEN && packet->size >= tcp_len) {
- return ofpbuf_pull(packet, tcp_len);
- }
- }
- return NULL;
-}
-
-static struct udp_header *
-pull_udp(struct ofpbuf *packet)
-{
- return ofpbuf_try_pull(packet, UDP_HEADER_LEN);
-}
-
-static struct sctp_header *
-pull_sctp(struct ofpbuf *packet)
-{
- return ofpbuf_try_pull(packet, SCTP_HEADER_LEN);
-}
-
-static struct icmp_header *
-pull_icmp(struct ofpbuf *packet)
-{
- return ofpbuf_try_pull(packet, ICMP_HEADER_LEN);
-}
-
-static struct icmp6_hdr *
-pull_icmpv6(struct ofpbuf *packet)
-{
- return ofpbuf_try_pull(packet, sizeof(struct icmp6_hdr));
-}
-
-static void
-parse_mpls(struct ofpbuf *b, struct flow *flow)
-{
- struct mpls_hdr *mh;
-
- while ((mh = ofpbuf_try_pull(b, sizeof *mh))) {
- if (flow->mpls_depth++ == 0) {
- flow->mpls_lse = mh->mpls_lse;
- }
- if (mh->mpls_lse & htonl(MPLS_BOS_MASK)) {
+/* U64 indices for segmented flow classification. */
+const uint8_t flow_segment_u64s[4] = {
+ FLOW_SEGMENT_1_ENDS_AT / sizeof(uint64_t),
+ FLOW_SEGMENT_2_ENDS_AT / sizeof(uint64_t),
+ FLOW_SEGMENT_3_ENDS_AT / sizeof(uint64_t),
+ FLOW_U64S
+};
+
+/* Asserts that field 'f1' follows immediately after 'f0' in struct flow,
+ * without any intervening padding. */
+#define ASSERT_SEQUENTIAL(f0, f1) \
+ BUILD_ASSERT_DECL(offsetof(struct flow, f0) \
+ + MEMBER_SIZEOF(struct flow, f0) \
+ == offsetof(struct flow, f1))
+
+/* Asserts that fields 'f0' and 'f1' are in the same 32-bit aligned word within
+ * struct flow. */
+#define ASSERT_SAME_WORD(f0, f1) \
+ BUILD_ASSERT_DECL(offsetof(struct flow, f0) / 4 \
+ == offsetof(struct flow, f1) / 4)
+
+/* Asserts that 'f0' and 'f1' are both sequential and within the same 32-bit
+ * aligned word in struct flow. */
+#define ASSERT_SEQUENTIAL_SAME_WORD(f0, f1) \
+ ASSERT_SEQUENTIAL(f0, f1); \
+ ASSERT_SAME_WORD(f0, f1)
+
+/* miniflow_extract() assumes the following to be true to optimize the
+ * extraction process. */
+ASSERT_SEQUENTIAL_SAME_WORD(dl_type, vlan_tci);
+
+ASSERT_SEQUENTIAL_SAME_WORD(nw_frag, nw_tos);
+ASSERT_SEQUENTIAL_SAME_WORD(nw_tos, nw_ttl);
+ASSERT_SEQUENTIAL_SAME_WORD(nw_ttl, nw_proto);
+
+/* TCP flags in the middle of a BE64, zeroes in the other half. */
+BUILD_ASSERT_DECL(offsetof(struct flow, tcp_flags) % 8 == 4);
+
+#if WORDS_BIGENDIAN
+#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl) \
+ << 16)
+#else
+#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl))
+#endif
+
+ASSERT_SEQUENTIAL_SAME_WORD(tp_src, tp_dst);
+
+/* Removes 'size' bytes from the head end of '*datap', of size '*sizep', which
+ * must contain at least 'size' bytes of data.  Returns a pointer to the first
+ * byte of data removed. */
+static inline const void *
+data_pull(const void **datap, size_t *sizep, size_t size)
+{
+ const char *data = *datap;
+ *datap = data + size;
+ *sizep -= size;
+ return data;
+}
+
+/* If '*datap' has at least 'size' bytes of data, removes that many bytes from
+ * the head end of '*datap' and returns a pointer to the first byte removed.
+ * Otherwise, returns a null pointer without modifying '*datap'. */
+static inline const void *
+data_try_pull(const void **datap, size_t *sizep, size_t size)
+{
+ return OVS_LIKELY(*sizep >= size) ? data_pull(datap, sizep, size) : NULL;
+}
+
+/* Context for pushing data to a miniflow. */
+struct mf_ctx {
+ uint64_t map;
+ uint64_t *data;
+ uint64_t * const end;
+};
+
+/* miniflow_push_* macros allow filling in miniflow data values in order.
+ * Assertions are needed only when the layout of the struct flow is modified.
+ * 'ofs' is a compile-time constant, which allows most of the code to be
+ * optimized away.  Some GCC versions gave warnings on ALWAYS_INLINE, so these
+ * are defined as macros. */
+
+#if (FLOW_WC_SEQ != 32)
+#define MINIFLOW_ASSERT(X) ovs_assert(X)
+BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
+ "assertions enabled. Consider updating FLOW_WC_SEQ after "
+ "testing")
+#else
+#define MINIFLOW_ASSERT(X)
+#endif
+
+#define miniflow_push_uint64_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0 \
+ && !(MF.map & (UINT64_MAX << (OFS) / 8))); \
+ *MF.data++ = VALUE; \
+ MF.map |= UINT64_C(1) << (OFS) / 8; \
+}
+
+#define miniflow_push_be64_(MF, OFS, VALUE) \
+ miniflow_push_uint64_(MF, OFS, (OVS_FORCE uint64_t)(VALUE))
+
+#define miniflow_push_uint32_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end && \
+ (((OFS) % 8 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 8))) \
+ || ((OFS) % 8 == 4 && MF.map & (UINT64_C(1) << (OFS) / 8) \
+ && !(MF.map & (UINT64_MAX << ((OFS) / 8 + 1)))))); \
+ \
+ if ((OFS) % 8 == 0) { \
+ *(uint32_t *)MF.data = VALUE; \
+ MF.map |= UINT64_C(1) << (OFS) / 8; \
+ } else if ((OFS) % 8 == 4) { \
+ *((uint32_t *)MF.data + 1) = VALUE; \
+ MF.data++; \
+ } \
+}
+
+#define miniflow_push_be32_(MF, OFS, VALUE) \
+ miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))
+
+#define miniflow_push_uint16_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end && \
+ (((OFS) % 8 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 8))) \
+ || ((OFS) % 2 == 0 && MF.map & (UINT64_C(1) << (OFS) / 8) \
+ && !(MF.map & (UINT64_MAX << ((OFS) / 8 + 1)))))); \
+ \
+ if ((OFS) % 8 == 0) { \
+ *(uint16_t *)MF.data = VALUE; \
+ MF.map |= UINT64_C(1) << (OFS) / 8; \
+ } else if ((OFS) % 8 == 2) { \
+ *((uint16_t *)MF.data + 1) = VALUE; \
+ } else if ((OFS) % 8 == 4) { \
+ *((uint16_t *)MF.data + 2) = VALUE; \
+ } else if ((OFS) % 8 == 6) { \
+ *((uint16_t *)MF.data + 3) = VALUE; \
+ MF.data++; \
+ } \
+}
+
+#define miniflow_pad_to_64_(MF, OFS) \
+{ \
+ MINIFLOW_ASSERT((OFS) % 8 != 0); \
+ MINIFLOW_ASSERT(MF.map & (UINT64_C(1) << (OFS) / 8)); \
+ MINIFLOW_ASSERT(!(MF.map & (UINT64_MAX << ((OFS) / 8 + 1)))); \
+ \
+ memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8); \
+ MF.data++; \
+}
+
+#define miniflow_push_be16_(MF, OFS, VALUE) \
+ miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
+
+/* Data at 'valuep' may be unaligned. */
+#define miniflow_push_words_(MF, OFS, VALUEP, N_WORDS) \
+{ \
+ int ofs64 = (OFS) / 8; \
+ \
+ MINIFLOW_ASSERT(MF.data + (N_WORDS) <= MF.end && (OFS) % 8 == 0 \
+ && !(MF.map & (UINT64_MAX << ofs64))); \
+ \
+ memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data); \
+ MF.data += (N_WORDS); \
+ MF.map |= ((UINT64_MAX >> (64 - (N_WORDS))) << ofs64); \
+}
+
+/* Push 32-bit words padded to 64-bits. */
+#define miniflow_push_words_32_(MF, OFS, VALUEP, N_WORDS) \
+{ \
+ int ofs64 = (OFS) / 8; \
+ \
+ MINIFLOW_ASSERT(MF.data + DIV_ROUND_UP(N_WORDS, 2) <= MF.end \
+ && (OFS) % 8 == 0 \
+ && !(MF.map & (UINT64_MAX << ofs64))); \
+ \
+ memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof(uint32_t)); \
+ MF.data += DIV_ROUND_UP(N_WORDS, 2); \
+ MF.map |= ((UINT64_MAX >> (64 - DIV_ROUND_UP(N_WORDS, 2))) << ofs64); \
+ if ((N_WORDS) & 1) { \
+ *((uint32_t *)MF.data - 1) = 0; \
+ } \
+}
+
+/* Data at 'valuep' may be unaligned. */
+/* MACs start 64-aligned, and must be followed by other data or padding. */
+#define miniflow_push_macs_(MF, OFS, VALUEP) \
+{ \
+ int ofs64 = (OFS) / 8; \
+ \
+ MINIFLOW_ASSERT(MF.data + 2 <= MF.end && (OFS) % 8 == 0 \
+ && !(MF.map & (UINT64_MAX << ofs64))); \
+ \
+ memcpy(MF.data, (VALUEP), 2 * ETH_ADDR_LEN); \
+ MF.data += 1; /* First word only. */ \
+ MF.map |= UINT64_C(3) << ofs64; /* Both words. */ \
+}
+
+#define miniflow_push_uint32(MF, FIELD, VALUE) \
+ miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE)
+
+#define miniflow_push_be32(MF, FIELD, VALUE) \
+ miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE)
+
+#define miniflow_push_uint16(MF, FIELD, VALUE) \
+ miniflow_push_uint16_(MF, offsetof(struct flow, FIELD), VALUE)
+
+#define miniflow_push_be16(MF, FIELD, VALUE) \
+ miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)
+
+#define miniflow_pad_to_64(MF, FIELD) \
+ miniflow_pad_to_64_(MF, offsetof(struct flow, FIELD))
+
+#define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS) \
+ miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
+
+#define miniflow_push_words_32(MF, FIELD, VALUEP, N_WORDS) \
+ miniflow_push_words_32_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
+
+#define miniflow_push_macs(MF, FIELD, VALUEP) \
+ miniflow_push_macs_(MF, offsetof(struct flow, FIELD), VALUEP)
+
+/* Pulls the MPLS headers at '*datap' and returns the count of them. */
+static inline int
+parse_mpls(const void **datap, size_t *sizep)
+{
+ const struct mpls_hdr *mh;
+ int count = 0;
+
+ while ((mh = data_try_pull(datap, sizep, sizeof *mh))) {
+ count++;
+ if (mh->mpls_lse.lo & htons(1 << MPLS_BOS_SHIFT)) {