/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "hash.h"
#include "jhash.h"
#include "match.h"
-#include "ofpbuf.h"
+#include "dp-packet.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "odp-util.h"
COVERAGE_DEFINE(flow_extract);
COVERAGE_DEFINE(miniflow_malloc);
-/* U32 indices for segmented flow classification. */
-const uint8_t flow_segment_u32s[4] = {
- FLOW_SEGMENT_1_ENDS_AT / 4,
- FLOW_SEGMENT_2_ENDS_AT / 4,
- FLOW_SEGMENT_3_ENDS_AT / 4,
- FLOW_U32S
+/* U64 indices for segmented flow classification. */
+const uint8_t flow_segment_u64s[4] = {
+ FLOW_SEGMENT_1_ENDS_AT / sizeof(uint64_t),
+ FLOW_SEGMENT_2_ENDS_AT / sizeof(uint64_t),
+ FLOW_SEGMENT_3_ENDS_AT / sizeof(uint64_t),
+ FLOW_U64S
};
+/* Asserts that field 'f1' follows immediately after 'f0' in struct flow,
+ * without any intervening padding. */
+#define ASSERT_SEQUENTIAL(f0, f1) \
+ BUILD_ASSERT_DECL(offsetof(struct flow, f0) \
+ + MEMBER_SIZEOF(struct flow, f0) \
+ == offsetof(struct flow, f1))
+
+/* Asserts that fields 'f0' and 'f1' are in the same 32-bit aligned word within
+ * struct flow. */
+#define ASSERT_SAME_WORD(f0, f1) \
+ BUILD_ASSERT_DECL(offsetof(struct flow, f0) / 4 \
+ == offsetof(struct flow, f1) / 4)
+
+/* Asserts that 'f0' and 'f1' are both sequential and within the same 32-bit
+ * aligned word in struct flow. */
+#define ASSERT_SEQUENTIAL_SAME_WORD(f0, f1) \
+ ASSERT_SEQUENTIAL(f0, f1); \
+ ASSERT_SAME_WORD(f0, f1)
+
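+/* For example, ASSERT_SEQUENTIAL_SAME_WORD(dl_type, vlan_tci) compiles only if
+ * 'vlan_tci' starts right where 'dl_type' ends and both fall within one 32-bit
+ * word, so miniflow_extract() can fill both fields within a single miniflow
+ * word. */
+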
/* miniflow_extract() assumes the following to be true to optimize the
* extraction process. */
-BUILD_ASSERT_DECL(offsetof(struct flow, dl_type) + 2
- == offsetof(struct flow, vlan_tci) &&
- offsetof(struct flow, dl_type) / 4
- == offsetof(struct flow, vlan_tci) / 4 );
-
-BUILD_ASSERT_DECL(offsetof(struct flow, nw_frag) + 3
- == offsetof(struct flow, nw_proto) &&
- offsetof(struct flow, nw_tos) + 2
- == offsetof(struct flow, nw_proto) &&
- offsetof(struct flow, nw_ttl) + 1
- == offsetof(struct flow, nw_proto) &&
- offsetof(struct flow, nw_frag) / 4
- == offsetof(struct flow, nw_tos) / 4 &&
- offsetof(struct flow, nw_ttl) / 4
- == offsetof(struct flow, nw_tos) / 4 &&
- offsetof(struct flow, nw_proto) / 4
- == offsetof(struct flow, nw_tos) / 4);
-
-/* TCP flags in the first half of a BE32, zeroes in the other half. */
-BUILD_ASSERT_DECL(offsetof(struct flow, tcp_flags) + 2
- == offsetof(struct flow, pad2) &&
- offsetof(struct flow, tcp_flags) / 4
- == offsetof(struct flow, pad2) / 4);
+ASSERT_SEQUENTIAL_SAME_WORD(dl_type, vlan_tci);
+
+ASSERT_SEQUENTIAL_SAME_WORD(nw_frag, nw_tos);
+ASSERT_SEQUENTIAL_SAME_WORD(nw_tos, nw_ttl);
+ASSERT_SEQUENTIAL_SAME_WORD(nw_ttl, nw_proto);
+
+/* TCP flags in the middle of a BE64, zeroes in the other half. */
+BUILD_ASSERT_DECL(offsetof(struct flow, tcp_flags) % 8 == 4);
+
#if WORDS_BIGENDIAN
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl) \
<< 16)
#else
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl))
#endif
-BUILD_ASSERT_DECL(offsetof(struct flow, tp_src) + 2
- == offsetof(struct flow, tp_dst) &&
- offsetof(struct flow, tp_src) / 4
- == offsetof(struct flow, tp_dst) / 4);
+ASSERT_SEQUENTIAL_SAME_WORD(tp_src, tp_dst);
/* Removes 'size' bytes from the head end of '*datap', of size '*sizep', which
* must contain at least 'size' bytes of data. Returns the first byte of data
* removed. */
static inline const void *
-data_pull(void **datap, size_t *sizep, size_t size)
+data_pull(const void **datap, size_t *sizep, size_t size)
{
- char *data = (char *)*datap;
+ const char *data = *datap;
*datap = data + size;
*sizep -= size;
return data;
* the head end of '*datap' and returns the first byte removed. Otherwise,
* returns a null pointer without modifying '*datap'. */
static inline const void *
-data_try_pull(void **datap, size_t *sizep, size_t size)
+data_try_pull(const void **datap, size_t *sizep, size_t size)
{
return OVS_LIKELY(*sizep >= size) ? data_pull(datap, sizep, size) : NULL;
}
/* Context for pushing data to a miniflow. */
struct mf_ctx {
uint64_t map;
- uint32_t *data;
- uint32_t * const end;
+ uint64_t *data;
+ uint64_t * const end;
};
/* miniflow_push_* macros allow filling in a miniflow's data values in order.
* away. Some GCC versions gave warnings on ALWAYS_INLINE, so these are
* defined as macros. */
-#if (FLOW_WC_SEQ != 28)
+#if (FLOW_WC_SEQ != 31)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
"assertions enabled. Consider updating FLOW_WC_SEQ after "
               "testing")
#else
#define MINIFLOW_ASSERT(X)
#endif
-#define miniflow_push_uint32_(MF, OFS, VALUE) \
+#define miniflow_push_uint64_(MF, OFS, VALUE) \
{ \
- MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 4 == 0 \
- && !(MF.map & (UINT64_MAX << (OFS) / 4))); \
+ MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0 \
+ && !(MF.map & (UINT64_MAX << (OFS) / 8))); \
*MF.data++ = VALUE; \
- MF.map |= UINT64_C(1) << (OFS) / 4; \
+ MF.map |= UINT64_C(1) << (OFS) / 8; \
}
-#define miniflow_push_be32_(MF, OFS, VALUE) \
- miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))
+#define miniflow_push_be64_(MF, OFS, VALUE) \
+ miniflow_push_uint64_(MF, OFS, (OVS_FORCE uint64_t)(VALUE))
-#define miniflow_push_uint16_(MF, OFS, VALUE) \
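+/* Pushes a 32-bit value into the 64-bit word at byte offset OFS.  A push to
+ * the low half (OFS % 8 == 0) sets the map bit but leaves MF.data in place;
+ * a push to the high half (OFS % 8 == 4) completes the word and advances
+ * MF.data. */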
+#define miniflow_push_uint32_(MF, OFS, VALUE) \
{ \
MINIFLOW_ASSERT(MF.data < MF.end && \
- (((OFS) % 4 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 4))) \
- || ((OFS) % 4 == 2 && MF.map & (UINT64_C(1) << (OFS) / 4) \
- && !(MF.map & (UINT64_MAX << ((OFS) / 4 + 1)))))); \
+ (((OFS) % 8 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 8))) \
+ || ((OFS) % 8 == 4 && MF.map & (UINT64_C(1) << (OFS) / 8) \
+ && !(MF.map & (UINT64_MAX << ((OFS) / 8 + 1)))))); \
+ \
+ if ((OFS) % 8 == 0) { \
+ *(uint32_t *)MF.data = VALUE; \
+ MF.map |= UINT64_C(1) << (OFS) / 8; \
+ } else if ((OFS) % 8 == 4) { \
+ *((uint32_t *)MF.data + 1) = VALUE; \
+ MF.data++; \
+ } \
+}
+
+#define miniflow_push_be32_(MF, OFS, VALUE) \
+ miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))
+
+#define miniflow_push_uint16_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end && \
+ (((OFS) % 8 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 8))) \
+ || ((OFS) % 2 == 0 && MF.map & (UINT64_C(1) << (OFS) / 8) \
+ && !(MF.map & (UINT64_MAX << ((OFS) / 8 + 1)))))); \
\
- if ((OFS) % 4 == 0) { \
+ if ((OFS) % 8 == 0) { \
*(uint16_t *)MF.data = VALUE; \
- MF.map |= UINT64_C(1) << (OFS) / 4; \
- } else if ((OFS) % 4 == 2) { \
+ MF.map |= UINT64_C(1) << (OFS) / 8; \
+ } else if ((OFS) % 8 == 2) { \
*((uint16_t *)MF.data + 1) = VALUE; \
+ } else if ((OFS) % 8 == 4) { \
+ *((uint16_t *)MF.data + 2) = VALUE; \
+ } else if ((OFS) % 8 == 6) { \
+ *((uint16_t *)MF.data + 3) = VALUE; \
MF.data++; \
} \
}
-#define miniflow_push_be16_(MF, OFS, VALUE) \
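+/* Zeroes the bytes remaining in the 64-bit word that OFS falls in and
+ * advances MF.data past it, so that the next push starts on a 64-bit
+ * boundary. */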
+#define miniflow_pad_to_64_(MF, OFS) \
+{ \
+ MINIFLOW_ASSERT((OFS) % 8 != 0); \
+ MINIFLOW_ASSERT(MF.map & (UINT64_C(1) << (OFS) / 8)); \
+ MINIFLOW_ASSERT(!(MF.map & (UINT64_MAX << ((OFS) / 8 + 1)))); \
+ \
+ memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8); \
+ MF.data++; \
+}
+
+#define miniflow_push_be16_(MF, OFS, VALUE) \
miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
/* Data at 'valuep' may be unaligned. */
#define miniflow_push_words_(MF, OFS, VALUEP, N_WORDS) \
{ \
- int ofs32 = (OFS) / 4; \
+ int ofs64 = (OFS) / 8; \
\
- MINIFLOW_ASSERT(MF.data + (N_WORDS) <= MF.end && (OFS) % 4 == 0 \
- && !(MF.map & (UINT64_MAX << ofs32))); \
+ MINIFLOW_ASSERT(MF.data + (N_WORDS) <= MF.end && (OFS) % 8 == 0 \
+ && !(MF.map & (UINT64_MAX << ofs64))); \
\
memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data); \
MF.data += (N_WORDS); \
- MF.map |= ((UINT64_MAX >> (64 - (N_WORDS))) << ofs32); \
+ MF.map |= ((UINT64_MAX >> (64 - (N_WORDS))) << ofs64); \
}
-#define miniflow_push_uint32(MF, FIELD, VALUE) \
- miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE)
+/* Push 32-bit words padded to 64-bits. */
+#define miniflow_push_words_32_(MF, OFS, VALUEP, N_WORDS) \
+{ \
+ int ofs64 = (OFS) / 8; \
+ \
+ MINIFLOW_ASSERT(MF.data + DIV_ROUND_UP(N_WORDS, 2) <= MF.end \
+ && (OFS) % 8 == 0 \
+ && !(MF.map & (UINT64_MAX << ofs64))); \
+ \
+ memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof(uint32_t)); \
+ MF.data += DIV_ROUND_UP(N_WORDS, 2); \
+ MF.map |= ((UINT64_MAX >> (64 - DIV_ROUND_UP(N_WORDS, 2))) << ofs64); \
+ if ((N_WORDS) & 1) { \
+ *((uint32_t *)MF.data - 1) = 0; /* Pad to 64 bits. */ \
+ } \
+}
-#define miniflow_push_be32(MF, FIELD, VALUE) \
- miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE)
+/* Data at 'valuep' may be unaligned. */
+/* MACs start 64-aligned, and must be followed by other data or padding. */
+#define miniflow_push_macs_(MF, OFS, VALUEP) \
+{ \
+ int ofs64 = (OFS) / 8; \
+ \
+ MINIFLOW_ASSERT(MF.data + 2 <= MF.end && (OFS) % 8 == 0 \
+ && !(MF.map & (UINT64_MAX << ofs64))); \
+ \
+ memcpy(MF.data, (VALUEP), 2 * ETH_ADDR_LEN); \
+ MF.data += 1; /* First word only. */ \
+ MF.map |= UINT64_C(3) << ofs64; /* Both words. */ \
+}
-#define miniflow_push_uint32_check(MF, FIELD, VALUE) \
- { if (OVS_LIKELY(VALUE)) { \
- miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE); \
- } \
- }
+#define miniflow_push_uint32(MF, FIELD, VALUE) \
+ miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE)
-#define miniflow_push_be32_check(MF, FIELD, VALUE) \
- { if (OVS_LIKELY(VALUE)) { \
- miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE); \
- } \
- }
+#define miniflow_push_be32(MF, FIELD, VALUE) \
+ miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE)
-#define miniflow_push_uint16(MF, FIELD, VALUE) \
+#define miniflow_push_uint16(MF, FIELD, VALUE) \
miniflow_push_uint16_(MF, offsetof(struct flow, FIELD), VALUE)
-#define miniflow_push_be16(MF, FIELD, VALUE) \
+#define miniflow_push_be16(MF, FIELD, VALUE) \
miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)
+#define miniflow_pad_to_64(MF, FIELD) \
+ miniflow_pad_to_64_(MF, offsetof(struct flow, FIELD))
+
#define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS) \
miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
+#define miniflow_push_words_32(MF, FIELD, VALUEP, N_WORDS) \
+ miniflow_push_words_32_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
+
+#define miniflow_push_macs(MF, FIELD, VALUEP) \
+ miniflow_push_macs_(MF, offsetof(struct flow, FIELD), VALUEP)
+
/* Pulls the MPLS headers at '*datap' and returns the count of them. */
static inline int
-parse_mpls(void **datap, size_t *sizep)
+parse_mpls(const void **datap, size_t *sizep)
{
const struct mpls_hdr *mh;
int count = 0;
}
static inline ovs_be16
-parse_vlan(void **datap, size_t *sizep)
+parse_vlan(const void **datap, size_t *sizep)
{
const struct eth_header *eth = *datap;
}
static inline ovs_be16
-parse_ethertype(void **datap, size_t *sizep)
+parse_ethertype(const void **datap, size_t *sizep)
{
const struct llc_snap_header *llc;
ovs_be16 proto;
}
static inline bool
-parse_icmpv6(void **datap, size_t *sizep, const struct icmp6_hdr *icmp,
+parse_icmpv6(const void **datap, size_t *sizep, const struct icmp6_hdr *icmp,
const struct in6_addr **nd_target,
uint8_t arp_buf[2][ETH_ADDR_LEN])
{
* otherwise UINT16_MAX.
*/
void
-flow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
- struct flow *flow)
+flow_extract(struct dp_packet *packet, struct flow *flow)
{
struct {
struct miniflow mf;
- uint32_t buf[FLOW_U32S];
+ uint64_t buf[FLOW_U64S];
} m;
COVERAGE_INC(flow_extract);
miniflow_initialize(&m.mf, m.buf);
- miniflow_extract(packet, md, &m.mf);
+ miniflow_extract(packet, &m.mf);
miniflow_expand(&m.mf, flow);
}
/* Caller is responsible for initializing 'dst' with enough storage for
- * FLOW_U32S * 4 bytes. */
+ * FLOW_U64S * 8 bytes. */
void
-miniflow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
- struct miniflow *dst)
-{
- void *data = ofpbuf_data(packet);
- size_t size = ofpbuf_size(packet);
- uint32_t *values = miniflow_values(dst);
- struct mf_ctx mf = { 0, values, values + FLOW_U32S };
- char *l2;
+miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
+{
+ const struct pkt_metadata *md = &packet->md;
+ const void *data = dp_packet_data(packet);
+ size_t size = dp_packet_size(packet);
+ uint64_t *values = miniflow_values(dst);
+ struct mf_ctx mf = { 0, values, values + FLOW_U64S };
+ const char *l2;
ovs_be16 dl_type;
uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
/* Metadata. */
- if (md) {
- if (md->tunnel.ip_dst) {
- miniflow_push_words(mf, tunnel, &md->tunnel,
- sizeof md->tunnel / 4);
- }
- miniflow_push_uint32_check(mf, skb_priority, md->skb_priority);
- miniflow_push_uint32_check(mf, pkt_mark, md->pkt_mark);
- miniflow_push_uint32_check(mf, recirc_id, md->recirc_id);
- miniflow_push_uint32(mf, in_port, odp_to_u32(md->in_port.odp_port));
+ if (md->tunnel.ip_dst) {
+ miniflow_push_words(mf, tunnel, &md->tunnel,
+ sizeof md->tunnel / sizeof(uint64_t));
+ }
+ if (md->skb_priority || md->pkt_mark) {
+ miniflow_push_uint32(mf, skb_priority, md->skb_priority);
+ miniflow_push_uint32(mf, pkt_mark, md->pkt_mark);
+ }
+ miniflow_push_uint32(mf, dp_hash, md->dp_hash);
+ miniflow_push_uint32(mf, in_port, odp_to_u32(md->in_port.odp_port));
+ if (md->recirc_id) {
+ miniflow_push_uint32(mf, recirc_id, md->recirc_id);
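+ /* 'conj_id' shares this 64-bit word with 'recirc_id' but is never set at
+ * extraction time, so zero it to complete the word. */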
+ miniflow_pad_to_64(mf, conj_id);
}
/* Initialize packet's layer pointer and offsets. */
l2 = data;
- ofpbuf_set_frame(packet, data);
+ dp_packet_reset_offsets(packet);
/* Must have full Ethernet header to proceed. */
if (OVS_UNLIKELY(size < sizeof(struct eth_header))) {
ovs_be16 vlan_tci;
/* Link layer. */
- BUILD_ASSERT(offsetof(struct flow, dl_dst) + 6
- == offsetof(struct flow, dl_src));
- miniflow_push_words(mf, dl_dst, data, ETH_ADDR_LEN * 2 / 4);
+ ASSERT_SEQUENTIAL(dl_dst, dl_src);
+ miniflow_push_macs(mf, dl_dst, data);
/* dl_type, vlan_tci. */
vlan_tci = parse_vlan(&data, &size);
dl_type = parse_ethertype(&data, &size);
packet->l2_5_ofs = (char *)data - l2;
count = parse_mpls(&data, &size);
- miniflow_push_words(mf, mpls_lse, mpls, count);
+ miniflow_push_words_32(mf, mpls_lse, mpls, count);
}
/* Network layer. */
if (OVS_UNLIKELY(size - tot_len > UINT8_MAX)) {
goto out;
}
- ofpbuf_set_l2_pad_size(packet, size - tot_len);
+ dp_packet_set_l2_pad_size(packet, size - tot_len);
size = tot_len; /* Never pull padding. */
/* Push both source and destination address at once. */
- miniflow_push_words(mf, nw_src, &nh->ip_src, 2);
+ miniflow_push_words(mf, nw_src, &nh->ip_src, 1);
+
+ miniflow_push_be32(mf, ipv6_label, 0); /* Padding for IPv4. */
nw_tos = nh->ip_tos;
nw_ttl = nh->ip_ttl;
if (OVS_UNLIKELY(size - plen > UINT8_MAX)) {
goto out;
}
- ofpbuf_set_l2_pad_size(packet, size - plen);
+ dp_packet_set_l2_pad_size(packet, size - plen);
size = plen; /* Never pull padding. */
miniflow_push_words(mf, ipv6_src, &nh->ip6_src,
- sizeof nh->ip6_src / 4);
+ sizeof nh->ip6_src / 8);
miniflow_push_words(mf, ipv6_dst, &nh->ip6_dst,
- sizeof nh->ip6_dst / 4);
+ sizeof nh->ip6_dst / 8);
tc_flow = get_16aligned_be32(&nh->ip6_flow);
{
ovs_be32 label = tc_flow & htonl(IPV6_LABEL_MASK);
- miniflow_push_be32_check(mf, ipv6_label, label);
+ miniflow_push_be32(mf, ipv6_label, label);
}
nw_tos = ntohl(tc_flow) >> 20;
&& OVS_LIKELY(arp->ar_pro == htons(ETH_TYPE_IP))
&& OVS_LIKELY(arp->ar_hln == ETH_ADDR_LEN)
&& OVS_LIKELY(arp->ar_pln == 4)) {
- miniflow_push_words(mf, nw_src, &arp->ar_spa, 1);
- miniflow_push_words(mf, nw_dst, &arp->ar_tpa, 1);
+ miniflow_push_be32(mf, nw_src,
+ get_16aligned_be32(&arp->ar_spa));
+ miniflow_push_be32(mf, nw_dst,
+ get_16aligned_be32(&arp->ar_tpa));
/* We only match on the lower 8 bits of the opcode. */
if (OVS_LIKELY(ntohs(arp->ar_op) <= 0xff)) {
+ miniflow_push_be32(mf, ipv6_label, 0); /* Pad with ARP. */
miniflow_push_be32(mf, nw_frag, htonl(ntohs(arp->ar_op)));
}
/* Must be adjacent. */
- BUILD_ASSERT(offsetof(struct flow, arp_sha) + 6
- == offsetof(struct flow, arp_tha));
+ ASSERT_SEQUENTIAL(arp_sha, arp_tha);
memcpy(arp_buf[0], arp->ar_sha, ETH_ADDR_LEN);
memcpy(arp_buf[1], arp->ar_tha, ETH_ADDR_LEN);
- miniflow_push_words(mf, arp_sha, arp_buf,
- ETH_ADDR_LEN * 2 / 4);
+ miniflow_push_macs(mf, arp_sha, arp_buf);
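+ /* miniflow_push_macs() marked two 64-bit words in the map but wrote only
+ * 12 bytes; zero the rest of the word that holds tcp_flags. */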
+ miniflow_pad_to_64(mf, tcp_flags);
}
}
goto out;
if (OVS_LIKELY(size >= TCP_HEADER_LEN)) {
const struct tcp_header *tcp = data;
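+ /* tcp_flags sits in the second half of a 64-bit word that overlaps
+ * arp_tha; zero the first half before pushing the flags. */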
+ miniflow_push_be32(mf, arp_tha[2], 0);
miniflow_push_be32(mf, tcp_flags,
TCP_FLAGS_BE32(tcp->tcp_ctl));
- miniflow_push_words(mf, tp_src, &tcp->tcp_src, 1);
+ miniflow_push_be16(mf, tp_src, tcp->tcp_src);
+ miniflow_push_be16(mf, tp_dst, tcp->tcp_dst);
+ miniflow_pad_to_64(mf, igmp_group_ip4);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_UDP)) {
if (OVS_LIKELY(size >= UDP_HEADER_LEN)) {
const struct udp_header *udp = data;
- miniflow_push_words(mf, tp_src, &udp->udp_src, 1);
+ miniflow_push_be16(mf, tp_src, udp->udp_src);
+ miniflow_push_be16(mf, tp_dst, udp->udp_dst);
+ miniflow_pad_to_64(mf, igmp_group_ip4);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_SCTP)) {
if (OVS_LIKELY(size >= SCTP_HEADER_LEN)) {
const struct sctp_header *sctp = data;
- miniflow_push_words(mf, tp_src, &sctp->sctp_src, 1);
+ miniflow_push_be16(mf, tp_src, sctp->sctp_src);
+ miniflow_push_be16(mf, tp_dst, sctp->sctp_dst);
+ miniflow_pad_to_64(mf, igmp_group_ip4);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_ICMP)) {
if (OVS_LIKELY(size >= ICMP_HEADER_LEN)) {
miniflow_push_be16(mf, tp_src, htons(icmp->icmp_type));
miniflow_push_be16(mf, tp_dst, htons(icmp->icmp_code));
+ miniflow_pad_to_64(mf, igmp_group_ip4);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_IGMP)) {
if (OVS_LIKELY(size >= IGMP_HEADER_LEN)) {
memset(arp_buf, 0, sizeof arp_buf);
if (OVS_LIKELY(parse_icmpv6(&data, &size, icmp, &nd_target,
arp_buf))) {
- miniflow_push_words(mf, arp_sha, arp_buf,
- ETH_ADDR_LEN * 2 / 4);
if (nd_target) {
miniflow_push_words(mf, nd_target, nd_target,
- sizeof *nd_target / 4);
+ sizeof *nd_target / 8);
}
+ miniflow_push_macs(mf, arp_sha, arp_buf);
+ miniflow_pad_to_64(mf, tcp_flags);
miniflow_push_be16(mf, tp_src, htons(icmp->icmp6_type));
miniflow_push_be16(mf, tp_dst, htons(icmp->icmp6_code));
+ miniflow_pad_to_64(mf, igmp_group_ip4);
}
}
}
}
- if (md) {
- miniflow_push_uint32_check(mf, dp_hash, md->dp_hash);
- }
out:
dst->map = mf.map;
}
void
flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
{
- uint32_t *flow_u32 = (uint32_t *) flow;
- const uint32_t *wc_u32 = (const uint32_t *) &wildcards->masks;
+ uint64_t *flow_u64 = (uint64_t *) flow;
+ const uint64_t *wc_u64 = (const uint64_t *) &wildcards->masks;
size_t i;
- for (i = 0; i < FLOW_U32S; i++) {
- flow_u32[i] &= wc_u32[i];
+ for (i = 0; i < FLOW_U64S; i++) {
+ flow_u64[i] &= wc_u64[i];
}
}
}
}
-/* Initializes 'fmd' with the metadata found in 'flow'. */
+/* Initializes 'flow_metadata' with the metadata found in 'flow'. */
void
-flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
+flow_get_metadata(const struct flow *flow, struct match *flow_metadata)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);
+ int i;
- fmd->dp_hash = flow->dp_hash;
- fmd->recirc_id = flow->recirc_id;
- fmd->tun_id = flow->tunnel.tun_id;
- fmd->tun_src = flow->tunnel.ip_src;
- fmd->tun_dst = flow->tunnel.ip_dst;
- fmd->metadata = flow->metadata;
- memcpy(fmd->regs, flow->regs, sizeof fmd->regs);
- fmd->pkt_mark = flow->pkt_mark;
- fmd->in_port = flow->in_port.ofp_port;
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
+
+ match_init_catchall(flow_metadata);
+ if (flow->tunnel.tun_id != htonll(0)) {
+ match_set_tun_id(flow_metadata, flow->tunnel.tun_id);
+ }
+ if (flow->tunnel.ip_src != htonl(0)) {
+ match_set_tun_src(flow_metadata, flow->tunnel.ip_src);
+ }
+ if (flow->tunnel.ip_dst != htonl(0)) {
+ match_set_tun_dst(flow_metadata, flow->tunnel.ip_dst);
+ }
+ if (flow->tunnel.gbp_id != htons(0)) {
+ match_set_tun_gbp_id(flow_metadata, flow->tunnel.gbp_id);
+ }
+ if (flow->tunnel.gbp_flags) {
+ match_set_tun_gbp_flags(flow_metadata, flow->tunnel.gbp_flags);
+ }
+ if (flow->metadata != htonll(0)) {
+ match_set_metadata(flow_metadata, flow->metadata);
+ }
+
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ if (flow->regs[i]) {
+ match_set_reg(flow_metadata, i, flow->regs[i]);
+ }
+ }
+
+ if (flow->pkt_mark != 0) {
+ match_set_pkt_mark(flow_metadata, flow->pkt_mark);
+ }
+
+ match_set_in_port(flow_metadata, flow->in_port.ofp_port);
}
char *
/* As this function is most often used for formatting a packet in a
* packet-in message, skip formatting the packet context fields that are
- * all-zeroes (Openflow spec encourages leaving out all-zeroes context
- * fields from the packet-in messages). We make an exception with the
- * 'in_port' field, which we always format, as packets usually have an
- * in_port, and 0 is a port just like any other port. */
+ * all-zeroes to make the print-out easier on the eyes. This means that a
+ * missing context field implies a zero value for that field. This is
+ * similar to OpenFlow encoding of these fields, as the specification
+ * states that all-zeroes context fields should not be encoded in the
+ * packet-in messages. */
+ if (!flow->in_port.ofp_port) {
+ WC_UNMASK_FIELD(wc, in_port);
+ }
if (!flow->skb_priority) {
WC_UNMASK_FIELD(wc, skb_priority);
}
if (!flow->recirc_id) {
WC_UNMASK_FIELD(wc, recirc_id);
}
+ if (!flow->dp_hash) {
+ WC_UNMASK_FIELD(wc, dp_hash);
+ }
for (int i = 0; i < FLOW_N_REGS; i++) {
if (!flow->regs[i]) {
WC_UNMASK_FIELD(wc, regs[i]);
memset(&wc->masks, 0x0, sizeof wc->masks);
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
if (flow->tunnel.ip_dst) {
if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
WC_MASK_FIELD(wc, tunnel.ip_ttl);
WC_MASK_FIELD(wc, tunnel.tp_src);
WC_MASK_FIELD(wc, tunnel.tp_dst);
+ WC_MASK_FIELD(wc, tunnel.gbp_id);
+ WC_MASK_FIELD(wc, tunnel.gbp_flags);
} else if (flow->tunnel.tun_id) {
WC_MASK_FIELD(wc, tunnel.tun_id);
}
- /* metadata and regs wildcarded. */
+ /* metadata, regs, and conj_id wildcarded. */
WC_MASK_FIELD(wc, skb_priority);
WC_MASK_FIELD(wc, pkt_mark);
flow_wc_map(const struct flow *flow)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
uint64_t map = (flow->tunnel.ip_dst) ? MINIFLOW_MAP(tunnel) : 0;
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
wc->masks.actset_output = 0;
+ wc->masks.conj_id = 0;
}
/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
bool
flow_wildcards_is_catchall(const struct flow_wildcards *wc)
{
- const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
+ const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
size_t i;
- for (i = 0; i < FLOW_U32S; i++) {
- if (wc_u32[i]) {
+ for (i = 0; i < FLOW_U64S; i++) {
+ if (wc_u64[i]) {
return false;
}
}
const struct flow_wildcards *src1,
const struct flow_wildcards *src2)
{
- uint32_t *dst_u32 = (uint32_t *) &dst->masks;
- const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
- const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
+ uint64_t *dst_u64 = (uint64_t *) &dst->masks;
+ const uint64_t *src1_u64 = (const uint64_t *) &src1->masks;
+ const uint64_t *src2_u64 = (const uint64_t *) &src2->masks;
size_t i;
- for (i = 0; i < FLOW_U32S; i++) {
- dst_u32[i] = src1_u32[i] & src2_u32[i];
+ for (i = 0; i < FLOW_U64S; i++) {
+ dst_u64[i] = src1_u64[i] & src2_u64[i];
}
}
const struct flow_wildcards *src1,
const struct flow_wildcards *src2)
{
- uint32_t *dst_u32 = (uint32_t *) &dst->masks;
- const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
- const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
+ uint64_t *dst_u64 = (uint64_t *) &dst->masks;
+ const uint64_t *src1_u64 = (const uint64_t *) &src1->masks;
+ const uint64_t *src2_u64 = (const uint64_t *) &src2->masks;
size_t i;
- for (i = 0; i < FLOW_U32S; i++) {
- dst_u32[i] = src1_u32[i] | src2_u32[i];
+ for (i = 0; i < FLOW_U64S; i++) {
+ dst_u64[i] = src1_u64[i] | src2_u64[i];
}
}
flow_wildcards_has_extra(const struct flow_wildcards *a,
const struct flow_wildcards *b)
{
- const uint32_t *a_u32 = (const uint32_t *) &a->masks;
- const uint32_t *b_u32 = (const uint32_t *) &b->masks;
+ const uint64_t *a_u64 = (const uint64_t *) &a->masks;
+ const uint64_t *b_u64 = (const uint64_t *) &b->masks;
size_t i;
- for (i = 0; i < FLOW_U32S; i++) {
- if ((a_u32[i] & b_u32[i]) != b_u32[i]) {
+ for (i = 0; i < FLOW_U64S; i++) {
+ if ((a_u64[i] & b_u64[i]) != b_u64[i]) {
return true;
}
}
flow_equal_except(const struct flow *a, const struct flow *b,
const struct flow_wildcards *wc)
{
- const uint32_t *a_u32 = (const uint32_t *) a;
- const uint32_t *b_u32 = (const uint32_t *) b;
- const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
+ const uint64_t *a_u64 = (const uint64_t *) a;
+ const uint64_t *b_u64 = (const uint64_t *) b;
+ const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
size_t i;
- for (i = 0; i < FLOW_U32S; i++) {
- if ((a_u32[i] ^ b_u32[i]) & wc_u32[i]) {
+ for (i = 0; i < FLOW_U64S; i++) {
+ if ((a_u64[i] ^ b_u64[i]) & wc_u64[i]) {
return false;
}
}
/* Separate loops for better optimization. */
if (dl_type == htons(ETH_TYPE_IPV6)) {
- uint64_t map = MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst)
- | MINIFLOW_MAP(tp_src); /* Covers both ports */
- uint32_t value;
+ uint64_t map = MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst);
+ uint64_t value;
MINIFLOW_FOR_EACH_IN_MAP(value, flow, map) {
- hash = hash_add(hash, value);
+ hash = hash_add64(hash, value);
}
} else {
- uint64_t map = MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
- | MINIFLOW_MAP(tp_src); /* Covers both ports */
- uint32_t value;
-
- MINIFLOW_FOR_EACH_IN_MAP(value, flow, map) {
- hash = hash_add(hash, value);
- }
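+ /* nw_src and nw_dst are the 32-bit halves of a single miniflow word. */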
+ hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_src));
+ hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_dst));
}
+ /* Add both ports at once. */
+ hash = hash_add(hash, MINIFLOW_GET_U32(flow, tp_src));
hash = hash_finish(hash, 42); /* Arbitrary number. */
}
return hash;
}
-BUILD_ASSERT_DECL(offsetof(struct flow, tp_src) + 2
- == offsetof(struct flow, tp_dst) &&
- offsetof(struct flow, tp_src) / 4
- == offsetof(struct flow, tp_dst) / 4);
-BUILD_ASSERT_DECL(offsetof(struct flow, ipv6_src) + 16
- == offsetof(struct flow, ipv6_dst));
+ASSERT_SEQUENTIAL_SAME_WORD(tp_src, tp_dst);
+ASSERT_SEQUENTIAL(ipv6_src, ipv6_dst);
/* Calculates the 5-tuple hash from the given flow. */
uint32_t
uint32_t hash = basis;
if (flow) {
- const uint32_t *flow_u32 = (const uint32_t *)flow;
-
hash = hash_add(hash, flow->nw_proto);
if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
- int ofs = offsetof(struct flow, ipv6_src) / 4;
- int end = ofs + 2 * sizeof flow->ipv6_src / 4;
+ const uint64_t *flow_u64 = (const uint64_t *)flow;
+ int ofs = offsetof(struct flow, ipv6_src) / 8;
+ int end = ofs + 2 * sizeof flow->ipv6_src / 8;
- while (ofs < end) {
- hash = hash_add(hash, flow_u32[ofs++]);
+ for (; ofs < end; ofs++) {
+ hash = hash_add64(hash, flow_u64[ofs]);
}
} else {
hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_src);
hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
}
- hash = hash_add(hash, flow_u32[offsetof(struct flow, tp_src) / 4]);
-
+ /* Add both ports at once. */
+ hash = hash_add(hash,
+ ((const uint32_t *)flow)[offsetof(struct flow, tp_src)
+ / sizeof(uint32_t)]);
hash = hash_finish(hash, 42); /* Arbitrary number. */
}
return hash;
flow_hash_in_wildcards(const struct flow *flow,
const struct flow_wildcards *wc, uint32_t basis)
{
- const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
- const uint32_t *flow_u32 = (const uint32_t *) flow;
+ const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
+ const uint64_t *flow_u64 = (const uint64_t *) flow;
uint32_t hash;
size_t i;
hash = basis;
- for (i = 0; i < FLOW_U32S; i++) {
- hash = hash_add(hash, flow_u32[i] & wc_u32[i]);
+ for (i = 0; i < FLOW_U64S; i++) {
+ hash = hash_add64(hash, flow_u64[i] & wc_u64[i]);
}
- return hash_finish(hash, 4 * FLOW_U32S);
+ return hash_finish(hash, 8 * FLOW_U64S);
}
/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
/* dl_type is always masked. */
if (eth_type_mpls(flow->dl_type)) {
int i;
- int len = FLOW_MAX_MPLS_LABELS;
+ int cnt;
- for (i = 0; i < len; i++) {
+ cnt = 0;
+ for (i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
if (wc) {
wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
}
if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
return i + 1;
}
+ if (flow->mpls_lse[i]) {
+ cnt++;
+ }
}
-
- return len;
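+ /* No bottom-of-stack bit was found; return the number of non-zero LSEs. */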
+ return cnt;
} else {
return 0;
}
flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
- /* Clear all L3 and L4 fields. */
- BUILD_ASSERT(FLOW_WC_SEQ == 28);
+ /* Clear all L3 and L4 fields and dp_hash. */
+ BUILD_ASSERT(FLOW_WC_SEQ == 31);
memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
+ flow->dp_hash = 0;
}
flow->dl_type = mpls_eth_type;
}
}
static size_t
-flow_compose_l4(struct ofpbuf *b, const struct flow *flow)
+flow_compose_l4(struct dp_packet *p, const struct flow *flow)
{
size_t l4_len = 0;
struct tcp_header *tcp;
l4_len = sizeof *tcp;
- tcp = ofpbuf_put_zeros(b, l4_len);
+ tcp = dp_packet_put_zeros(p, l4_len);
tcp->tcp_src = flow->tp_src;
tcp->tcp_dst = flow->tp_dst;
tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
struct udp_header *udp;
l4_len = sizeof *udp;
- udp = ofpbuf_put_zeros(b, l4_len);
+ udp = dp_packet_put_zeros(p, l4_len);
udp->udp_src = flow->tp_src;
udp->udp_dst = flow->tp_dst;
} else if (flow->nw_proto == IPPROTO_SCTP) {
struct sctp_header *sctp;
l4_len = sizeof *sctp;
- sctp = ofpbuf_put_zeros(b, l4_len);
+ sctp = dp_packet_put_zeros(p, l4_len);
sctp->sctp_src = flow->tp_src;
sctp->sctp_dst = flow->tp_dst;
} else if (flow->nw_proto == IPPROTO_ICMP) {
struct icmp_header *icmp;
l4_len = sizeof *icmp;
- icmp = ofpbuf_put_zeros(b, l4_len);
+ icmp = dp_packet_put_zeros(p, l4_len);
icmp->icmp_type = ntohs(flow->tp_src);
icmp->icmp_code = ntohs(flow->tp_dst);
icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
struct igmp_header *igmp;
l4_len = sizeof *igmp;
- igmp = ofpbuf_put_zeros(b, l4_len);
+ igmp = dp_packet_put_zeros(p, l4_len);
igmp->igmp_type = ntohs(flow->tp_src);
igmp->igmp_code = ntohs(flow->tp_dst);
put_16aligned_be32(&igmp->group, flow->igmp_group_ip4);
struct icmp6_hdr *icmp;
l4_len = sizeof *icmp;
- icmp = ofpbuf_put_zeros(b, l4_len);
+ icmp = dp_packet_put_zeros(p, l4_len);
icmp->icmp6_type = ntohs(flow->tp_src);
icmp->icmp6_code = ntohs(flow->tp_dst);
struct nd_opt_hdr *nd_opt;
l4_len += sizeof *nd_target;
- nd_target = ofpbuf_put_zeros(b, sizeof *nd_target);
+ nd_target = dp_packet_put_zeros(p, sizeof *nd_target);
*nd_target = flow->nd_target;
if (!eth_addr_is_zero(flow->arp_sha)) {
l4_len += 8;
- nd_opt = ofpbuf_put_zeros(b, 8);
+ nd_opt = dp_packet_put_zeros(p, 8);
nd_opt->nd_opt_len = 1;
nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
}
if (!eth_addr_is_zero(flow->arp_tha)) {
l4_len += 8;
- nd_opt = ofpbuf_put_zeros(b, 8);
+ nd_opt = dp_packet_put_zeros(p, 8);
nd_opt->nd_opt_len = 1;
nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
}
}
icmp->icmp6_cksum = (OVS_FORCE uint16_t)
- csum(icmp, (char *)ofpbuf_tail(b) - (char *)icmp);
+ csum(icmp, (char *)dp_packet_tail(p) - (char *)icmp);
}
}
return l4_len;
* valid. It hasn't got some checksums filled in, for one, and lots of fields
* are just zeroed.) */
void
-flow_compose(struct ofpbuf *b, const struct flow *flow)
+flow_compose(struct dp_packet *p, const struct flow *flow)
{
size_t l4_len;
/* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
- eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
+ eth_compose(p, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
- struct eth_header *eth = ofpbuf_l2(b);
- eth->eth_type = htons(ofpbuf_size(b));
+ struct eth_header *eth = dp_packet_l2(p);
+ eth->eth_type = htons(dp_packet_size(p));
return;
}
if (flow->vlan_tci & htons(VLAN_CFI)) {
- eth_push_vlan(b, htons(ETH_TYPE_VLAN), flow->vlan_tci);
+ eth_push_vlan(p, htons(ETH_TYPE_VLAN), flow->vlan_tci);
}
if (flow->dl_type == htons(ETH_TYPE_IP)) {
struct ip_header *ip;
- ip = ofpbuf_put_zeros(b, sizeof *ip);
+ ip = dp_packet_put_zeros(p, sizeof *ip);
ip->ip_ihl_ver = IP_IHL_VER(5, 4);
ip->ip_tos = flow->nw_tos;
ip->ip_ttl = flow->nw_ttl;
}
}
- ofpbuf_set_l4(b, ofpbuf_tail(b));
+ dp_packet_set_l4(p, dp_packet_tail(p));
- l4_len = flow_compose_l4(b, flow);
+ l4_len = flow_compose_l4(p, flow);
- ip = ofpbuf_l3(b);
- ip->ip_tot_len = htons(b->l4_ofs - b->l3_ofs + l4_len);
+ ip = dp_packet_l3(p);
+ ip->ip_tot_len = htons(p->l4_ofs - p->l3_ofs + l4_len);
ip->ip_csum = csum(ip, sizeof *ip);
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
struct ovs_16aligned_ip6_hdr *nh;
- nh = ofpbuf_put_zeros(b, sizeof *nh);
+ nh = dp_packet_put_zeros(p, sizeof *nh);
put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
htonl(flow->nw_tos << 20) | flow->ipv6_label);
nh->ip6_hlim = flow->nw_ttl;
memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));
- ofpbuf_set_l4(b, ofpbuf_tail(b));
+ dp_packet_set_l4(p, dp_packet_tail(p));
- l4_len = flow_compose_l4(b, flow);
+ l4_len = flow_compose_l4(p, flow);
- nh = ofpbuf_l3(b);
+ nh = dp_packet_l3(p);
nh->ip6_plen = htons(l4_len);
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
flow->dl_type == htons(ETH_TYPE_RARP)) {
struct arp_eth_header *arp;
- arp = ofpbuf_put_zeros(b, sizeof *arp);
- ofpbuf_set_l3(b, arp);
+ arp = dp_packet_put_zeros(p, sizeof *arp);
+ dp_packet_set_l3(p, arp);
arp->ar_hrd = htons(1);
arp->ar_pro = htons(ETH_TYPE_IP);
arp->ar_hln = ETH_ADDR_LEN;
if (eth_type_mpls(flow->dl_type)) {
int n;
- b->l2_5_ofs = b->l3_ofs;
+ p->l2_5_ofs = p->l3_ofs;
for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
break;
}
}
while (n > 0) {
- push_mpls(b, flow->dl_type, flow->mpls_lse[--n]);
+ push_mpls(p, flow->dl_type, flow->mpls_lse[--n]);
}
}
}
return count_1bits(flow->map);
}
-static uint32_t *
+static uint64_t *
miniflow_alloc_values(struct miniflow *flow, int n)
{
int size = MINIFLOW_VALUES_SIZE(n);
/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
* the caller. The caller must have already initialized 'dst->map' properly
- * to indicate the significant uint32_t elements of 'src'. 'n' must be the
+ * to indicate the significant uint64_t elements of 'src'. 'n' must be the
* number of 1-bits in 'dst->map'.
*
* Normally the significant elements are the ones that are non-zero. However,
* so that the flow and mask always have the same maps.
*
* This function initializes values (either inline if possible or with
- * malloc() otherwise) and copies the uint32_t elements of 'src' indicated by
+ * malloc() otherwise) and copies the uint64_t elements of 'src' indicated by
* 'dst->map' into it. */
static void
miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
{
- const uint32_t *src_u32 = (const uint32_t *) src;
- uint32_t *dst_u32 = miniflow_alloc_values(dst, n);
- uint64_t map;
+ const uint64_t *src_u64 = (const uint64_t *) src;
+ uint64_t *dst_u64 = miniflow_alloc_values(dst, n);
+ int idx;
- for (map = dst->map; map; map = zero_rightmost_1bit(map)) {
- *dst_u32++ = src_u32[raw_ctz(map)];
+ MAP_FOR_EACH_INDEX(idx, dst->map) {
+ *dst_u64++ = src_u64[idx];
}
}
void
miniflow_init(struct miniflow *dst, const struct flow *src)
{
- const uint32_t *src_u32 = (const uint32_t *) src;
+ const uint64_t *src_u64 = (const uint64_t *) src;
unsigned int i;
int n;
n = 0;
dst->map = 0;
- for (i = 0; i < FLOW_U32S; i++) {
- if (src_u32[i]) {
+ for (i = 0; i < FLOW_U64S; i++) {
+ if (src_u64[i]) {
dst->map |= UINT64_C(1) << i;
n++;
}
miniflow_clone(struct miniflow *dst, const struct miniflow *src)
{
int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
- uint32_t *values;
+ uint64_t *values;
dst->map = src->map;
if (size <= sizeof dst->inline_values) {
flow_union_with_miniflow(dst, src);
}
-/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'flow'
- * were expanded into a "struct flow". */
-static uint32_t
-miniflow_get(const struct miniflow *flow, unsigned int u32_ofs)
-{
- return flow->map & (UINT64_C(1) << u32_ofs)
- ? miniflow_get__(flow, u32_ofs) : 0;
-}
-
/* Returns true if 'a' and 'b' are equal miniflows, false otherwise. */
bool
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
{
- const uint32_t *ap = miniflow_get_u32_values(a);
- const uint32_t *bp = miniflow_get_u32_values(b);
- const uint64_t a_map = a->map;
- const uint64_t b_map = b->map;
+ const uint64_t *ap = miniflow_get_values(a);
+ const uint64_t *bp = miniflow_get_values(b);
- if (OVS_LIKELY(a_map == b_map)) {
+ if (OVS_LIKELY(a->map == b->map)) {
int count = miniflow_n_values(a);
return !memcmp(ap, bp, count * sizeof *ap);
} else {
uint64_t map;
- for (map = a_map | b_map; map; map = zero_rightmost_1bit(map)) {
+ for (map = a->map | b->map; map; map = zero_rightmost_1bit(map)) {
uint64_t bit = rightmost_1bit(map);
- uint64_t a_value = a_map & bit ? *ap++ : 0;
- uint64_t b_value = b_map & bit ? *bp++ : 0;
- if (a_value != b_value) {
+ if ((a->map & bit ? *ap++ : 0) != (b->map & bit ? *bp++ : 0)) {
return false;
}
}
miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
const struct minimask *mask)
{
- const uint32_t *p = miniflow_get_u32_values(&mask->masks);
- uint64_t map;
-
- for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
- int ofs = raw_ctz(map);
+ const uint64_t *p = miniflow_get_values(&mask->masks);
+ int idx;
- if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p++) {
+ MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
+ if ((miniflow_get(a, idx) ^ miniflow_get(b, idx)) & *p++) {
return false;
}
}
miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
const struct minimask *mask)
{
- const uint32_t *b_u32 = (const uint32_t *) b;
- const uint32_t *p = miniflow_get_u32_values(&mask->masks);
- uint64_t map;
-
- for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
- int ofs = raw_ctz(map);
+ const uint64_t *b_u64 = (const uint64_t *) b;
+ const uint64_t *p = miniflow_get_values(&mask->masks);
+ int idx;
- if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p++) {
+ MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
+ if ((miniflow_get(a, idx) ^ b_u64[idx]) & *p++) {
return false;
}
}
/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
*
- * The caller must provide room for FLOW_U32S "uint32_t"s in 'storage', for use
+ * The caller must provide room for FLOW_U64S "uint64_t"s in 'storage', for use
* by 'dst_'. The caller must *not* free 'dst_' with minimask_destroy(). */
void
minimask_combine(struct minimask *dst_,
const struct minimask *a_, const struct minimask *b_,
- uint32_t storage[FLOW_U32S])
+ uint64_t storage[FLOW_U64S])
{
struct miniflow *dst = &dst_->masks;
- uint32_t *dst_values = storage;
+ uint64_t *dst_values = storage;
const struct miniflow *a = &a_->masks;
const struct miniflow *b = &b_->masks;
- uint64_t map;
- int n = 0;
+ int idx;
dst->values_inline = false;
dst->offline_values = storage;
dst->map = 0;
- for (map = a->map & b->map; map; map = zero_rightmost_1bit(map)) {
- int ofs = raw_ctz(map);
+ MAP_FOR_EACH_INDEX(idx, a->map & b->map) {
/* Both 'a' and 'b' have non-zero data at 'idx'. */
- uint32_t mask = miniflow_get__(a, ofs) & miniflow_get__(b, ofs);
+ uint64_t mask = miniflow_get__(a, idx) & miniflow_get__(b, idx);
if (mask) {
- dst->map |= rightmost_1bit(map);
- dst_values[n++] = mask;
+ dst->map |= UINT64_C(1) << idx;
+ *dst_values++ = mask;
}
}
}
miniflow_expand(&mask->masks, &wc->masks);
}
-/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'mask'
- * were expanded into a "struct flow_wildcards". */
-uint32_t
-minimask_get(const struct minimask *mask, unsigned int u32_ofs)
-{
- return miniflow_get(&mask->masks, u32_ofs);
-}
-
/* Returns true if 'a' and 'b' are the same flow mask, false otherwise.
* Minimasks may not have zero data values, so for the minimasks to be the
* same, they need to have the same map and the same data values. */
minimask_equal(const struct minimask *a, const struct minimask *b)
{
return a->masks.map == b->masks.map &&
- !memcmp(miniflow_get_u32_values(&a->masks),
- miniflow_get_u32_values(&b->masks),
+ !memcmp(miniflow_get_values(&a->masks),
+ miniflow_get_values(&b->masks),
count_1bits(a->masks.map) * sizeof *a->masks.inline_values);
}
bool
minimask_has_extra(const struct minimask *a, const struct minimask *b)
{
- const uint32_t *ap = miniflow_get_u32_values(&a->masks);
- const uint32_t *bp = miniflow_get_u32_values(&b->masks);
+ const uint64_t *ap = miniflow_get_values(&a->masks);
+ const uint64_t *bp = miniflow_get_values(&b->masks);
int idx;
MAP_FOR_EACH_INDEX(idx, b->masks.map) {
- uint32_t b_u32 = *bp++;
+ uint64_t b_u64 = *bp++;
- /* 'b_u32' is non-zero, check if the data in 'a' is either zero
- * or misses some of the bits in 'b_u32'. */
+ /* 'b_u64' is non-zero, check if the data in 'a' is either zero
+ * or misses some of the bits in 'b_u64'. */
if (!(a->masks.map & (UINT64_C(1) << idx))
- || ((miniflow_values_get__(ap, a->masks.map, idx) & b_u32)
- != b_u32)) {
+ || ((miniflow_values_get__(ap, a->masks.map, idx) & b_u64)
+ != b_u64)) {
return true; /* 'a' wildcards some bits 'b' doesn't. */
}
}