/* Context for pushing data to a miniflow. */
struct mf_ctx {
- uint64_t map;
+ struct miniflow maps;
uint64_t *data;
uint64_t * const end;
};
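
For orientation, a minimal standalone sketch (not part of the patch) of how a
64-bit word offset into struct flow now selects a bit in one of the two maps.
The FLOW_TNL_U64S value and the offsets here are hypothetical:

/* Illustrative only: mirrors the logic of miniflow_set_map() below. */
#include <stdint.h>
#include <stdio.h>

#define FLOW_TNL_U64S 4   /* Hypothetical; flow.h defines the real value. */

struct maps { uint64_t tnl_map, pkt_map; };

static void
set_map_bit(struct maps *m, unsigned int ofs_u64)
{
    if (ofs_u64 < FLOW_TNL_U64S) {
        m->tnl_map |= UINT64_C(1) << ofs_u64;                   /* Tunnel word. */
    } else {
        m->pkt_map |= UINT64_C(1) << (ofs_u64 - FLOW_TNL_U64S); /* Packet word. */
    }
}

int
main(void)
{
    struct maps m = { 0, 0 };

    set_map_bit(&m, 2);   /* Tunnel word 2 -> tnl_map bit 2. */
    set_map_bit(&m, 5);   /* Packet word 1 -> pkt_map bit 1. */
    printf("tnl=%#llx pkt=%#llx\n",
           (unsigned long long) m.tnl_map,
           (unsigned long long) m.pkt_map);   /* Prints tnl=0x4 pkt=0x2. */
    return 0;
}
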
* away. Some GCC versions gave warnings on ALWAYS_INLINE, so these are
* defined as macros. */
-#if (FLOW_WC_SEQ != 31)
+#if (FLOW_WC_SEQ != 33)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
"assertions enabled. Consider updating FLOW_WC_SEQ after "
#define MINIFLOW_ASSERT(X)
#endif
-#define miniflow_push_uint64_(MF, OFS, VALUE) \
-{ \
- MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0 \
- && !(MF.map & (UINT64_MAX << (OFS) / 8))); \
- *MF.data++ = VALUE; \
- MF.map |= UINT64_C(1) << (OFS) / 8; \
+#define miniflow_set_map(MF, OFS) \
+ if ((OFS) < FLOW_TNL_U64S) { \
+ MINIFLOW_ASSERT(!(MF.maps.tnl_map & (UINT64_MAX << (OFS))) \
+ && !MF.maps.pkt_map); \
+ MF.maps.tnl_map |= UINT64_C(1) << (OFS); \
+ } else { \
+ MINIFLOW_ASSERT(!(MF.maps.pkt_map \
+ & UINT64_MAX << ((OFS) - FLOW_TNL_U64S))); \
+ MF.maps.pkt_map |= UINT64_C(1) << ((OFS) - FLOW_TNL_U64S); \
+ }
+
+#define miniflow_assert_in_map(MF, OFS) \
+ if ((OFS) < FLOW_TNL_U64S) { \
+ MINIFLOW_ASSERT(MF.maps.tnl_map & UINT64_C(1) << (OFS) \
+ && !(MF.maps.tnl_map & UINT64_MAX << ((OFS) + 1)) \
+ && !MF.maps.pkt_map); \
+ } else { \
+        MINIFLOW_ASSERT(MF.maps.pkt_map                                \
+                        & UINT64_C(1) << ((OFS) - FLOW_TNL_U64S)       \
+                        && !(MF.maps.pkt_map & UINT64_MAX              \
+                             << ((OFS) - FLOW_TNL_U64S + 1)));         \
+ }
+
+#define miniflow_push_uint64_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0); \
+ *MF.data++ = VALUE; \
+ miniflow_set_map(MF, OFS / 8); \
}
#define miniflow_push_be64_(MF, OFS, VALUE) \
miniflow_push_uint64_(MF, OFS, (OVS_FORCE uint64_t)(VALUE))
-#define miniflow_push_uint32_(MF, OFS, VALUE) \
-{ \
- MINIFLOW_ASSERT(MF.data < MF.end && \
- (((OFS) % 8 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 8))) \
- || ((OFS) % 8 == 4 && MF.map & (UINT64_C(1) << (OFS) / 8) \
- && !(MF.map & (UINT64_MAX << ((OFS) / 8 + 1)))))); \
+#define miniflow_push_uint32_(MF, OFS, VALUE) \
+ { \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
\
if ((OFS) % 8 == 0) { \
+ miniflow_set_map(MF, OFS / 8); \
*(uint32_t *)MF.data = VALUE; \
- MF.map |= UINT64_C(1) << (OFS) / 8; \
} else if ((OFS) % 8 == 4) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
*((uint32_t *)MF.data + 1) = VALUE; \
MF.data++; \
} \
#define miniflow_push_uint16_(MF, OFS, VALUE) \
{ \
- MINIFLOW_ASSERT(MF.data < MF.end && \
- (((OFS) % 8 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 8))) \
- || ((OFS) % 2 == 0 && MF.map & (UINT64_C(1) << (OFS) / 8) \
- && !(MF.map & (UINT64_MAX << ((OFS) / 8 + 1)))))); \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
\
if ((OFS) % 8 == 0) { \
+ miniflow_set_map(MF, OFS / 8); \
*(uint16_t *)MF.data = VALUE; \
- MF.map |= UINT64_C(1) << (OFS) / 8; \
} else if ((OFS) % 8 == 2) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
*((uint16_t *)MF.data + 1) = VALUE; \
} else if ((OFS) % 8 == 4) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
*((uint16_t *)MF.data + 2) = VALUE; \
} else if ((OFS) % 8 == 6) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
*((uint16_t *)MF.data + 3) = VALUE; \
MF.data++; \
} \
}
#define miniflow_pad_to_64_(MF, OFS) \
-{ \
+{ \
MINIFLOW_ASSERT((OFS) % 8 != 0); \
- MINIFLOW_ASSERT(MF.map & (UINT64_C(1) << (OFS) / 8)); \
- MINIFLOW_ASSERT(!(MF.map & (UINT64_MAX << ((OFS) / 8 + 1)))); \
+ miniflow_assert_in_map(MF, OFS / 8); \
\
memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8); \
MF.data++; \
#define miniflow_push_be16_(MF, OFS, VALUE) \
miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
+#define miniflow_set_maps(MF, OFS, N_WORDS) \
+{ \
+ size_t ofs = (OFS); \
+ size_t n_words = (N_WORDS); \
+ uint64_t n_words_mask = UINT64_MAX >> (64 - n_words); \
+ \
+ MINIFLOW_ASSERT(n_words && MF.data + n_words <= MF.end); \
+ if (ofs < FLOW_TNL_U64S) { \
+ MINIFLOW_ASSERT(!(MF.maps.tnl_map & UINT64_MAX << ofs) \
+ && !MF.maps.pkt_map); \
+ MF.maps.tnl_map |= n_words_mask << ofs; \
+ if (n_words > FLOW_TNL_U64S - ofs) { \
+ MF.maps.pkt_map |= n_words_mask >> (FLOW_TNL_U64S - ofs); \
+ } \
+ } else { \
+ ofs -= FLOW_TNL_U64S; \
+ MINIFLOW_ASSERT(!(MF.maps.pkt_map & (UINT64_MAX << ofs))); \
+ MF.maps.pkt_map |= n_words_mask << ofs; \
+ } \
+}
+
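A worked sketch of the bit accounting done by miniflow_set_maps() above, with
a hypothetical FLOW_TNL_U64S of 4 (the real value comes from flow.h); runnable
standalone:

/* Illustrative only: replicates the macro's map updates as a function. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define TNL_U64S 4   /* Hypothetical stand-in for FLOW_TNL_U64S. */

struct maps { uint64_t tnl_map, pkt_map; };

static void
set_maps(struct maps *m, size_t ofs, size_t n_words)
{
    uint64_t n_words_mask = UINT64_MAX >> (64 - n_words);

    if (ofs < TNL_U64S) {
        m->tnl_map |= n_words_mask << ofs;
        if (n_words > TNL_U64S - ofs) {            /* Crosses into pkt_map. */
            m->pkt_map |= n_words_mask >> (TNL_U64S - ofs);
        }
    } else {
        m->pkt_map |= n_words_mask << (ofs - TNL_U64S);
    }
}

int
main(void)
{
    struct maps m = { 0, 0 };

    set_maps(&m, 0, 4);   /* Whole tunnel region: tnl_map = 0b1111. */
    assert(m.tnl_map == 0xf && m.pkt_map == 0);

    set_maps(&m, 6, 2);   /* Packet words 2..3: pkt_map = 0b1100. */
    assert(m.pkt_map == 0xc);
    return 0;
}
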
/* Data at 'valuep' may be unaligned. */
#define miniflow_push_words_(MF, OFS, VALUEP, N_WORDS) \
{ \
- int ofs64 = (OFS) / 8; \
- \
- MINIFLOW_ASSERT(MF.data + (N_WORDS) <= MF.end && (OFS) % 8 == 0 \
- && !(MF.map & (UINT64_MAX << ofs64))); \
- \
- memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data); \
- MF.data += (N_WORDS); \
- MF.map |= ((UINT64_MAX >> (64 - (N_WORDS))) << ofs64); \
+ MINIFLOW_ASSERT((OFS) % 8 == 0); \
+ miniflow_set_maps(MF, (OFS) / 8, (N_WORDS)); \
+ memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data); \
+ MF.data += (N_WORDS); \
}
/* Push 32-bit words padded to 64-bits. */
#define miniflow_push_words_32_(MF, OFS, VALUEP, N_WORDS) \
{ \
- int ofs64 = (OFS) / 8; \
- \
- MINIFLOW_ASSERT(MF.data + DIV_ROUND_UP(N_WORDS, 2) <= MF.end \
- && (OFS) % 8 == 0 \
- && !(MF.map & (UINT64_MAX << ofs64))); \
- \
+ miniflow_set_maps(MF, (OFS) / 8, DIV_ROUND_UP(N_WORDS, 2)); \
memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof(uint32_t)); \
MF.data += DIV_ROUND_UP(N_WORDS, 2); \
- MF.map |= ((UINT64_MAX >> (64 - DIV_ROUND_UP(N_WORDS, 2))) << ofs64); \
if ((N_WORDS) & 1) { \
*((uint32_t *)MF.data - 1) = 0; \
} \
/* MACs start 64-aligned, and must be followed by other data or padding. */
#define miniflow_push_macs_(MF, OFS, VALUEP) \
{ \
- int ofs64 = (OFS) / 8; \
- \
- MINIFLOW_ASSERT(MF.data + 2 <= MF.end && (OFS) % 8 == 0 \
- && !(MF.map & (UINT64_MAX << ofs64))); \
- \
+ miniflow_set_maps(MF, (OFS) / 8, 2); \
memcpy(MF.data, (VALUEP), 2 * ETH_ADDR_LEN); \
MF.data += 1; /* First word only. */ \
- MF.map |= UINT64_C(3) << ofs64; /* Both words. */ \
}
#define miniflow_push_uint32(MF, FIELD, VALUE) \
COVERAGE_INC(flow_extract);
- miniflow_initialize(&m.mf, m.buf);
miniflow_extract(packet, &m.mf);
miniflow_expand(&m.mf, flow);
}
const void *data = dp_packet_data(packet);
size_t size = dp_packet_size(packet);
uint64_t *values = miniflow_values(dst);
- struct mf_ctx mf = { 0, values, values + FLOW_U64S };
+ struct mf_ctx mf = { { 0, 0 }, values, values + FLOW_U64S };
const char *l2;
ovs_be16 dl_type;
uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
/* Metadata. */
if (md->tunnel.ip_dst) {
miniflow_push_words(mf, tunnel, &md->tunnel,
- sizeof md->tunnel / sizeof(uint64_t));
+ offsetof(struct flow_tnl, metadata) /
+ sizeof(uint64_t));
+ if (md->tunnel.metadata.opt_map) {
+ miniflow_push_words(mf, tunnel.metadata, &md->tunnel.metadata,
+ sizeof md->tunnel.metadata / sizeof(uint64_t));
+ }
}
if (md->skb_priority || md->pkt_mark) {
miniflow_push_uint32(mf, skb_priority, md->skb_priority);
}
}
out:
- dst->map = mf.map;
+ *dst = mf.maps;
}
/* For every bit of a field that is wildcarded in 'wildcards', sets the
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
match_init_catchall(flow_metadata);
if (flow->tunnel.tun_id != htonll(0)) {
match_set_tun_id(flow_metadata, flow->tunnel.tun_id);
}
+ if (flow->tunnel.flags & FLOW_TNL_PUB_F_MASK) {
+ match_set_tun_flags(flow_metadata,
+ flow->tunnel.flags & FLOW_TNL_PUB_F_MASK);
+ }
if (flow->tunnel.ip_src != htonl(0)) {
match_set_tun_src(flow_metadata, flow->tunnel.ip_src);
}
if (flow->tunnel.gbp_flags) {
match_set_tun_gbp_flags(flow_metadata, flow->tunnel.gbp_flags);
}
+ tun_metadata_get_fmd(&flow->tunnel.metadata, flow_metadata);
if (flow->metadata != htonll(0)) {
match_set_metadata(flow_metadata, flow->metadata);
}
uint32_t bad = 0;
if (!flags) {
+ ds_put_char(ds, '0');
return;
}
while (flags) {
void
format_flags_masked(struct ds *ds, const char *name,
const char *(*bit_to_string)(uint32_t), uint32_t flags,
- uint32_t mask)
+ uint32_t mask, uint32_t max_mask)
{
if (name) {
ds_put_format(ds, "%s=", name);
}
+
+ if (mask == max_mask) {
+ format_flags(ds, bit_to_string, flags, '|');
+ return;
+ }
+
+ if (!mask) {
+ ds_put_cstr(ds, "0/0");
+ return;
+ }
+
while (mask) {
uint32_t bit = rightmost_1bit(mask);
const char *s = bit_to_string(bit);
}
}
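
A hedged usage sketch for the new 'max_mask' parameter (the flag bits and the
example_bit_to_string() helper are invented for illustration; assumes the
in-tree dynamic string API from "openvswitch/dynamic-string.h"):

/* Hypothetical bit-name lookup for two flag bits. */
static const char *
example_bit_to_string(uint32_t bit)
{
    switch (bit) {
    case 1 << 1: return "syn";
    case 1 << 4: return "ack";
    default: return NULL;
    }
}

static void
format_example(struct ds *ds)
{
    /* Exact-match mask: prints "flags=syn|ack" with no "/mask" suffix. */
    format_flags_masked(ds, "flags", example_bit_to_string,
                        (1 << 1) | (1 << 4), UINT32_MAX, UINT32_MAX);

    ds_put_char(ds, ' ');

    /* Empty mask: prints "flags=0/0". */
    format_flags_masked(ds, "flags", example_bit_to_string,
                        0, 0, UINT32_MAX);
}
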
+/* Scans a string 's' of flags, using 'bit_to_string' to look up flag names,
+ * and returns the number of characters parsed.  Scanning continues until the
+ * character 'end' is reached.
+ *
+ * In the event of a failure, a negative error code will be returned. In
+ * addition, if 'res_string' is non-NULL then a descriptive string will
+ * be returned incorporating the identifying string 'field_name'. This
+ * error string must be freed by the caller.
+ *
+ * Upon success, the flag values will be stored in 'res_flags' and
+ * optionally 'res_mask', if it is non-NULL (if it is NULL then any masks
+ * present in the original string will be considered an error). The
+ * caller may restrict the acceptable set of values through the mask
+ * 'allowed'. */
+int
+parse_flags(const char *s, const char *(*bit_to_string)(uint32_t),
+ char end, const char *field_name, char **res_string,
+ uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
+{
+ uint32_t result = 0;
+ int n;
+
+ /* Parse masked flags in numeric format? */
+ if (res_mask && ovs_scan(s, "%"SCNi32"/%"SCNi32"%n",
+ res_flags, res_mask, &n) && n > 0) {
+ if (*res_flags & ~allowed || *res_mask & ~allowed) {
+ goto unknown;
+ }
+ return n;
+ }
+
+ n = 0;
+
+ if (res_mask && (*s == '+' || *s == '-')) {
+ uint32_t flags = 0, mask = 0;
+
+ /* Parse masked flags. */
+ while (s[0] != end) {
+ bool set;
+ uint32_t bit;
+ size_t len;
+
+ if (s[0] == '+') {
+ set = true;
+ } else if (s[0] == '-') {
+ set = false;
+ } else {
+ if (res_string) {
+ *res_string = xasprintf("%s: %s must be preceded by '+' "
+ "(for SET) or '-' (NOT SET)", s,
+ field_name);
+ }
+ return -EINVAL;
+ }
+ s++;
+ n++;
+
+ for (bit = 1; bit; bit <<= 1) {
+ const char *fname = bit_to_string(bit);
+
+ if (!fname) {
+ continue;
+ }
+
+ len = strlen(fname);
+ if (strncmp(s, fname, len) ||
+ (s[len] != '+' && s[len] != '-' && s[len] != end)) {
+ continue;
+ }
+
+ if (mask & bit) {
+ /* bit already set. */
+ if (res_string) {
+ *res_string = xasprintf("%s: Each %s flag can be "
+ "specified only once", s,
+ field_name);
+ }
+ return -EINVAL;
+ }
+ if (!(bit & allowed)) {
+ goto unknown;
+ }
+ if (set) {
+ flags |= bit;
+ }
+ mask |= bit;
+ break;
+ }
+
+ if (!bit) {
+ goto unknown;
+ }
+ s += len;
+ n += len;
+ }
+
+ *res_flags = flags;
+ *res_mask = mask;
+ return n;
+ }
+
+ /* Parse unmasked flags. If a flag is present, it is set, otherwise
+ * it is not set. */
+ while (s[n] != end) {
+ unsigned long long int flags;
+ uint32_t bit;
+ int n0;
+
+ if (ovs_scan(&s[n], "%lli%n", &flags, &n0)) {
+ if (flags & ~allowed) {
+ goto unknown;
+ }
+ n += n0 + (s[n + n0] == '|');
+ result |= flags;
+ continue;
+ }
+
+ for (bit = 1; bit; bit <<= 1) {
+ const char *name = bit_to_string(bit);
+ size_t len;
+
+ if (!name) {
+ continue;
+ }
+
+ len = strlen(name);
+ if (!strncmp(s + n, name, len) &&
+ (s[n + len] == '|' || s[n + len] == end)) {
+ if (!(bit & allowed)) {
+ goto unknown;
+ }
+ result |= bit;
+ n += len + (s[n + len] == '|');
+ break;
+ }
+ }
+
+ if (!bit) {
+ goto unknown;
+ }
+ }
+
+ *res_flags = result;
+ if (res_mask) {
+ *res_mask = UINT32_MAX;
+ }
+ if (res_string) {
+ *res_string = NULL;
+ }
+ return n;
+
+unknown:
+ if (res_string) {
+ *res_string = xasprintf("%s: unknown %s flag(s)", s, field_name);
+ }
+ return -EINVAL;
+}
+
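A hedged usage sketch for parse_flags(), reusing the hypothetical
example_bit_to_string() from the earlier sketch (the "tcp_flags" field name is
illustrative; assumes <stdlib.h> for free()):

static void
parse_example(void)
{
    uint32_t flags, mask;
    char *error = NULL;
    int n;

    /* Masked form: "+syn-ack" parses to flags = syn, mask = syn|ack,
     * and returns n = 8 (the whole string was consumed). */
    n = parse_flags("+syn-ack", example_bit_to_string, '\0', "tcp_flags",
                    &error, &flags, UINT32_MAX, &mask);
    if (n < 0) {
        /* On failure 'error' holds a malloc()'d description. */
        free(error);
    }
}
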
void
flow_format(struct ds *ds, const struct flow *flow)
{
memset(&wc->masks, 0x0, sizeof wc->masks);
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
if (flow->tunnel.ip_dst) {
if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
WC_MASK_FIELD(wc, tunnel.tp_dst);
WC_MASK_FIELD(wc, tunnel.gbp_id);
WC_MASK_FIELD(wc, tunnel.gbp_flags);
+
+ if (flow->tunnel.metadata.opt_map) {
+ wc->masks.tunnel.metadata.opt_map = flow->tunnel.metadata.opt_map;
+ WC_MASK_FIELD(wc, tunnel.metadata.opts);
+ }
} else if (flow->tunnel.tun_id) {
WC_MASK_FIELD(wc, tunnel.tun_id);
}
* optimal.
*
* This is a less precise version of flow_wildcards_init_for_packet() above. */
-uint64_t
-flow_wc_map(const struct flow *flow)
+void
+flow_wc_map(const struct flow *flow, struct miniflow *map)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
- uint64_t map = (flow->tunnel.ip_dst) ? MINIFLOW_MAP(tunnel) : 0;
+ map->tnl_map = 0;
+ if (flow->tunnel.ip_dst) {
+ map->tnl_map = MINIFLOW_TNL_MAP(tunnel);
+ if (!flow->tunnel.metadata.opt_map) {
+ map->tnl_map &= ~MINIFLOW_TNL_MAP(tunnel.metadata);
+ }
+ }
/* Metadata fields that can appear on packet input. */
- map |= MINIFLOW_MAP(skb_priority) | MINIFLOW_MAP(pkt_mark)
- | MINIFLOW_MAP(recirc_id) | MINIFLOW_MAP(dp_hash)
- | MINIFLOW_MAP(in_port)
- | MINIFLOW_MAP(dl_dst) | MINIFLOW_MAP(dl_src)
- | MINIFLOW_MAP(dl_type) | MINIFLOW_MAP(vlan_tci);
+ map->pkt_map = MINIFLOW_PKT_MAP(skb_priority) | MINIFLOW_PKT_MAP(pkt_mark)
+ | MINIFLOW_PKT_MAP(recirc_id) | MINIFLOW_PKT_MAP(dp_hash)
+ | MINIFLOW_PKT_MAP(in_port)
+ | MINIFLOW_PKT_MAP(dl_dst) | MINIFLOW_PKT_MAP(dl_src)
+ | MINIFLOW_PKT_MAP(dl_type) | MINIFLOW_PKT_MAP(vlan_tci);
/* Ethertype-dependent fields. */
if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
- map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
- | MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
- | MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
+ map->pkt_map |= MINIFLOW_PKT_MAP(nw_src) | MINIFLOW_PKT_MAP(nw_dst)
+ | MINIFLOW_PKT_MAP(nw_proto) | MINIFLOW_PKT_MAP(nw_frag)
+ | MINIFLOW_PKT_MAP(nw_tos) | MINIFLOW_PKT_MAP(nw_ttl);
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
- map |= MINIFLOW_MAP(igmp_group_ip4);
+ map->pkt_map |= MINIFLOW_PKT_MAP(igmp_group_ip4);
} else {
- map |= MINIFLOW_MAP(tcp_flags)
- | MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
+ map->pkt_map |= MINIFLOW_PKT_MAP(tcp_flags)
+ | MINIFLOW_PKT_MAP(tp_src) | MINIFLOW_PKT_MAP(tp_dst);
}
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
- map |= MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst)
- | MINIFLOW_MAP(ipv6_label)
- | MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
- | MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
+ map->pkt_map |= MINIFLOW_PKT_MAP(ipv6_src) | MINIFLOW_PKT_MAP(ipv6_dst)
+ | MINIFLOW_PKT_MAP(ipv6_label)
+ | MINIFLOW_PKT_MAP(nw_proto) | MINIFLOW_PKT_MAP(nw_frag)
+ | MINIFLOW_PKT_MAP(nw_tos) | MINIFLOW_PKT_MAP(nw_ttl);
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
- map |= MINIFLOW_MAP(nd_target)
- | MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
+ map->pkt_map |= MINIFLOW_PKT_MAP(nd_target)
+ | MINIFLOW_PKT_MAP(arp_sha) | MINIFLOW_PKT_MAP(arp_tha);
} else {
- map |= MINIFLOW_MAP(tcp_flags)
- | MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
+ map->pkt_map |= MINIFLOW_PKT_MAP(tcp_flags)
+ | MINIFLOW_PKT_MAP(tp_src) | MINIFLOW_PKT_MAP(tp_dst);
}
} else if (eth_type_mpls(flow->dl_type)) {
- map |= MINIFLOW_MAP(mpls_lse);
+ map->pkt_map |= MINIFLOW_PKT_MAP(mpls_lse);
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
flow->dl_type == htons(ETH_TYPE_RARP)) {
- map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
- | MINIFLOW_MAP(nw_proto)
- | MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
+ map->pkt_map |= MINIFLOW_PKT_MAP(nw_src) | MINIFLOW_PKT_MAP(nw_dst)
+ | MINIFLOW_PKT_MAP(nw_proto)
+ | MINIFLOW_PKT_MAP(arp_sha) | MINIFLOW_PKT_MAP(arp_tha);
}
-
- return map;
}
/* Clear the metadata and register wildcard masks. They are not packet
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
/* Separate loops for better optimization. */
if (dl_type == htons(ETH_TYPE_IPV6)) {
- uint64_t map = MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst);
+ struct miniflow maps = { 0, MINIFLOW_PKT_MAP(ipv6_src)
+ | MINIFLOW_PKT_MAP(ipv6_dst) };
uint64_t value;
- MINIFLOW_FOR_EACH_IN_MAP(value, flow, map) {
+ MINIFLOW_FOR_EACH_IN_PKT_MAP(value, flow, maps) {
hash = hash_add64(hash, value);
}
} else {
return jhash_bytes(&fields, sizeof fields, basis);
}
+/* Hashes 'flow' based on its L3 through L4 protocol information. */
+uint32_t
+flow_hash_symmetric_l3l4(const struct flow *flow, uint32_t basis,
+ bool inc_udp_ports)
+{
+ uint32_t hash = basis;
+
+    /* UDP source and destination ports are taken into account only if
+     * 'inc_udp_ports' is true; TCP and SCTP ports are always included. */
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ hash = hash_add(hash,
+ (OVS_FORCE uint32_t) (flow->nw_src ^ flow->nw_dst));
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ /* IPv6 addresses are 64-bit aligned inside struct flow. */
+ const uint64_t *a = ALIGNED_CAST(uint64_t *, flow->ipv6_src.s6_addr);
+ const uint64_t *b = ALIGNED_CAST(uint64_t *, flow->ipv6_dst.s6_addr);
+
+        for (size_t i = 0; i < sizeof flow->ipv6_src / sizeof *a; i++) {
+ hash = hash_add64(hash, a[i] ^ b[i]);
+ }
+ } else {
+ /* Cannot hash non-IP flows */
+ return 0;
+ }
+
+ hash = hash_add(hash, flow->nw_proto);
+ if (flow->nw_proto == IPPROTO_TCP || flow->nw_proto == IPPROTO_SCTP ||
+ (inc_udp_ports && flow->nw_proto == IPPROTO_UDP)) {
+ hash = hash_add(hash,
+ (OVS_FORCE uint16_t) (flow->tp_src ^ flow->tp_dst));
+ }
+
+ return hash_finish(hash, basis);
+}
+
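The property that makes this hash useful for bonding and multipath selection
is that reversing a flow's direction leaves the hash unchanged; a minimal
sketch of that invariant (check_symmetry() is a hypothetical test helper):

/* Illustrative only: the XOR-based lane mixing makes the hash
 * direction-agnostic for both IPv4 and IPv6. */
static void
check_symmetry(const struct flow *flow, uint32_t basis)
{
    struct flow reversed = *flow;

    reversed.nw_src = flow->nw_dst;       /* IPv4 addresses. */
    reversed.nw_dst = flow->nw_src;
    reversed.ipv6_src = flow->ipv6_dst;   /* IPv6 addresses. */
    reversed.ipv6_dst = flow->ipv6_src;
    reversed.tp_src = flow->tp_dst;       /* L4 ports. */
    reversed.tp_dst = flow->tp_src;

    ovs_assert(flow_hash_symmetric_l3l4(flow, basis, true)
               == flow_hash_symmetric_l3l4(&reversed, basis, true));
}
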
/* Initialize a flow with random fields that matter for nx_hash_fields. */
void
flow_random_hash_fields(struct flow *flow)
wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
break;
+ case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP:
+ if (is_ip_any(flow) && flow->nw_proto == IPPROTO_UDP) {
+ memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
+ memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
+ }
+ /* no break */
+ case NX_HASH_FIELDS_SYMMETRIC_L3L4:
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
+ memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
+ memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
+ } else {
+ break; /* non-IP flow */
+ }
+
+ memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+ if (flow->nw_proto == IPPROTO_TCP || flow->nw_proto == IPPROTO_SCTP) {
+ memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
+ memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
+ }
+ break;
+
default:
OVS_NOT_REACHED();
}
case NX_HASH_FIELDS_SYMMETRIC_L4:
return flow_hash_symmetric_l4(flow, basis);
+
+ case NX_HASH_FIELDS_SYMMETRIC_L3L4:
+ return flow_hash_symmetric_l3l4(flow, basis, false);
+
+ case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP:
+ return flow_hash_symmetric_l3l4(flow, basis, true);
+
}
OVS_NOT_REACHED();
switch (fields) {
case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
+ case NX_HASH_FIELDS_SYMMETRIC_L3L4: return "symmetric_l3l4";
+ case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP: return "symmetric_l3l4+udp";
default: return "<unknown>";
}
}
flow_hash_fields_valid(enum nx_hash_fields fields)
{
return fields == NX_HASH_FIELDS_ETH_SRC
- || fields == NX_HASH_FIELDS_SYMMETRIC_L4;
+ || fields == NX_HASH_FIELDS_SYMMETRIC_L4
+ || fields == NX_HASH_FIELDS_SYMMETRIC_L3L4
+ || fields == NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP;
}
/* Returns a hash value for the bits of 'flow' that are active based on
flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
/* Clear all L3 and L4 fields and dp_hash. */
- BUILD_ASSERT(FLOW_WC_SEQ == 31);
+ BUILD_ASSERT(FLOW_WC_SEQ == 33);
memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
flow->dp_hash = 0;
\f
/* Compressed flow. */
-static int
-miniflow_n_values(const struct miniflow *flow)
-{
- return count_1bits(flow->map);
-}
-
-static uint64_t *
-miniflow_alloc_values(struct miniflow *flow, int n)
-{
- int size = MINIFLOW_VALUES_SIZE(n);
-
- if (size <= sizeof flow->inline_values) {
- flow->values_inline = true;
- return flow->inline_values;
- } else {
- COVERAGE_INC(miniflow_malloc);
- flow->values_inline = false;
- flow->offline_values = xmalloc(size);
- return flow->offline_values;
- }
-}
-
/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
- * the caller. The caller must have already initialized 'dst->map' properly
- * to indicate the significant uint64_t elements of 'src'. 'n' must be the
- * number of 1-bits in 'dst->map'.
+ * the caller. The caller must have already computed 'dst->tnl_map' and
+ * 'dst->pkt_map' properly to indicate the significant uint64_t elements of
+ * 'src'.
*
* Normally the significant elements are the ones that are non-zero. However,
* when a miniflow is initialized from a (mini)mask, the values can be zeroes,
- * so that the flow and mask always have the same maps.
- *
- * This function initializes values (either inline if possible or with
- * malloc() otherwise) and copies the uint64_t elements of 'src' indicated by
- * 'dst->map' into it. */
-static void
-miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
+ * so that the flow and mask always have the same maps. */
+void
+miniflow_init(struct miniflow *dst, const struct flow *src)
{
const uint64_t *src_u64 = (const uint64_t *) src;
- uint64_t *dst_u64 = miniflow_alloc_values(dst, n);
- int idx;
+ uint64_t *dst_u64 = miniflow_values(dst);
+ size_t idx;
- MAP_FOR_EACH_INDEX(idx, dst->map) {
+ MAPS_FOR_EACH_INDEX(idx, *dst) {
*dst_u64++ = src_u64[idx];
}
}
-/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
- * with miniflow_destroy().
- * Always allocates offline storage. */
+/* Initialize the maps of 'flow' from 'src'. */
void
-miniflow_init(struct miniflow *dst, const struct flow *src)
+miniflow_map_init(struct miniflow *flow, const struct flow *src)
{
const uint64_t *src_u64 = (const uint64_t *) src;
- unsigned int i;
- int n;
-
- /* Initialize dst->map, counting the number of nonzero elements. */
- n = 0;
- dst->map = 0;
+ int i;
- for (i = 0; i < FLOW_U64S; i++) {
+ /* Initialize map, counting the number of nonzero elements. */
+ flow->tnl_map = 0;
+ for (i = 0; i < FLOW_TNL_U64S; i++) {
if (src_u64[i]) {
- dst->map |= UINT64_C(1) << i;
- n++;
+ flow->tnl_map |= UINT64_C(1) << i;
+ }
+ }
+ src_u64 += FLOW_TNL_U64S;
+ flow->pkt_map = 0;
+ for (i = 0; i < FLOW_U64S - FLOW_TNL_U64S; i++) {
+ if (src_u64[i]) {
+ flow->pkt_map |= UINT64_C(1) << i;
}
}
-
- miniflow_init__(dst, src, n);
}
-/* Initializes 'dst' as a copy of 'src', using 'mask->map' as 'dst''s map. The
- * caller must eventually free 'dst' with miniflow_destroy(). */
-void
-miniflow_init_with_minimask(struct miniflow *dst, const struct flow *src,
- const struct minimask *mask)
+/* Allocates 'n' miniflows, consecutive in memory, initializing the
+ * map of each from 'src'.
+ * Returns the size of the miniflow data. */
+size_t
+miniflow_alloc(struct miniflow *dsts[], size_t n, const struct miniflow *src)
{
- dst->map = mask->masks.map;
- miniflow_init__(dst, src, miniflow_n_values(dst));
-}
+ size_t n_values = miniflow_n_values(src);
+ size_t data_size = MINIFLOW_VALUES_SIZE(n_values);
+ struct miniflow *dst = xmalloc(n * (sizeof *src + data_size));
+ unsigned int i;
-/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
- * with miniflow_destroy(). */
-void
-miniflow_clone(struct miniflow *dst, const struct miniflow *src)
-{
- int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
- uint64_t *values;
+ COVERAGE_INC(miniflow_malloc);
- dst->map = src->map;
- if (size <= sizeof dst->inline_values) {
- dst->values_inline = true;
- values = dst->inline_values;
- } else {
- dst->values_inline = false;
- COVERAGE_INC(miniflow_malloc);
- dst->offline_values = xmalloc(size);
- values = dst->offline_values;
+ for (i = 0; i < n; i++) {
+ *dst = *src; /* Copy maps. */
+ dsts[i] = dst;
+ dst += 1; /* Just past the maps. */
+ dst = (struct miniflow *)((uint64_t *)dst + n_values); /* Skip data. */
}
- memcpy(values, miniflow_get_values(src), size);
+ return data_size;
}
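
A hedged sketch of the intended calling pattern (alloc_two_example() is
hypothetical; miniflow_create() below is the in-tree single-miniflow case):

/* Illustrative only: two miniflows sharing one map layout, e.g. a flow
 * and its mask, carved out of a single allocation. */
static void
alloc_two_example(const struct flow *flow)
{
    struct miniflow tmp;
    struct miniflow *mfs[2];

    miniflow_map_init(&tmp, flow);   /* Phase 1: compute the maps. */
    miniflow_alloc(mfs, 2, &tmp);    /* Phase 2: allocate both at once. */
    miniflow_init(mfs[0], flow);     /* Phase 3: fill in the values. */

    /* ... */

    free(mfs[0]);   /* One allocation: freeing the first frees both. */
}
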
-/* Initializes 'dst' as a copy of 'src'. The caller must have allocated
- * 'dst' to have inline space all data in 'src'. */
-void
-miniflow_clone_inline(struct miniflow *dst, const struct miniflow *src,
- size_t n_values)
+/* Returns a miniflow copy of 'src'. The caller must eventually free() the
+ * returned miniflow. */
+struct miniflow *
+miniflow_create(const struct flow *src)
{
- dst->values_inline = true;
- dst->map = src->map;
- memcpy(dst->inline_values, miniflow_get_values(src),
- MINIFLOW_VALUES_SIZE(n_values));
-}
+ struct miniflow tmp;
+ struct miniflow *dst;
-/* Initializes 'dst' with the data in 'src', destroying 'src'.
- * The caller must eventually free 'dst' with miniflow_destroy().
- * 'dst' must be regularly sized miniflow, but 'src' can have
- * storage for more than the default MINI_N_INLINE inline
- * values. */
-void
-miniflow_move(struct miniflow *dst, struct miniflow *src)
-{
- int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
-
- dst->map = src->map;
- if (size <= sizeof dst->inline_values) {
- dst->values_inline = true;
- memcpy(dst->inline_values, miniflow_get_values(src), size);
- miniflow_destroy(src);
- } else if (src->values_inline) {
- dst->values_inline = false;
- COVERAGE_INC(miniflow_malloc);
- dst->offline_values = xmalloc(size);
- memcpy(dst->offline_values, src->inline_values, size);
- } else {
- dst->values_inline = false;
- dst->offline_values = src->offline_values;
- }
+ miniflow_map_init(&tmp, src);
+
+ miniflow_alloc(&dst, 1, &tmp);
+ miniflow_init(dst, src);
+ return dst;
}
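
Since a miniflow is now a single allocation holding both the maps and the
inline values, the old miniflow_destroy() is gone and plain free() ends the
lifecycle, e.g. (sketch):

    struct miniflow *mf = miniflow_create(&flow);
    /* ... use 'mf' ... */
    free(mf);
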
-/* Frees any memory owned by 'flow'. Does not free the storage in which 'flow'
- * itself resides; the caller is responsible for that. */
+/* Initializes 'dst' as a copy of 'src'. The caller must have allocated
+ * 'dst' to have inline space for 'n_values' data in 'src'. */
void
-miniflow_destroy(struct miniflow *flow)
+miniflow_clone(struct miniflow *dst, const struct miniflow *src,
+ size_t n_values)
{
- if (!flow->values_inline) {
- free(flow->offline_values);
- }
+ *dst = *src; /* Copy maps. */
+ memcpy(miniflow_values(dst), miniflow_get_values(src),
+ MINIFLOW_VALUES_SIZE(n_values));
}
/* Initializes 'dst' as a copy of 'src'. */
flow_union_with_miniflow(dst, src);
}
-/* Returns true if 'a' and 'b' are the equal miniflow, false otherwise. */
+/* Returns true if 'a' and 'b' are equal miniflows, false otherwise. */
bool
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
{
const uint64_t *ap = miniflow_get_values(a);
const uint64_t *bp = miniflow_get_values(b);
- if (OVS_LIKELY(a->map == b->map)) {
- int count = miniflow_n_values(a);
-
- return !memcmp(ap, bp, count * sizeof *ap);
+ if (OVS_LIKELY(a->tnl_map == b->tnl_map && a->pkt_map == b->pkt_map)) {
+ return !memcmp(ap, bp, miniflow_n_values(a) * sizeof *ap);
} else {
uint64_t map;
- for (map = a->map | b->map; map; map = zero_rightmost_1bit(map)) {
+ map = a->tnl_map | b->tnl_map;
+ for (; map; map = zero_rightmost_1bit(map)) {
+ uint64_t bit = rightmost_1bit(map);
+
+ if ((a->tnl_map & bit ? *ap++ : 0)
+ != (b->tnl_map & bit ? *bp++ : 0)) {
+ return false;
+ }
+ }
+ map = a->pkt_map | b->pkt_map;
+ for (; map; map = zero_rightmost_1bit(map)) {
uint64_t bit = rightmost_1bit(map);
- if ((a->map & bit ? *ap++ : 0) != (b->map & bit ? *bp++ : 0)) {
+ if ((a->pkt_map & bit ? *ap++ : 0)
+ != (b->pkt_map & bit ? *bp++ : 0)) {
return false;
}
}
const struct minimask *mask)
{
const uint64_t *p = miniflow_get_values(&mask->masks);
- int idx;
+ size_t idx;
- MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
+ MAPS_FOR_EACH_INDEX(idx, mask->masks) {
if ((miniflow_get(a, idx) ^ miniflow_get(b, idx)) & *p++) {
return false;
}
{
const uint64_t *b_u64 = (const uint64_t *) b;
const uint64_t *p = miniflow_get_values(&mask->masks);
- int idx;
+ size_t idx;
- MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
+ MAPS_FOR_EACH_INDEX(idx, mask->masks) {
if ((miniflow_get(a, idx) ^ b_u64[idx]) & *p++) {
return false;
}
}
\f
-/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
- * with minimask_destroy(). */
void
minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
{
miniflow_init(&mask->masks, &wc->masks);
}
-/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
- * with minimask_destroy(). */
-void
-minimask_clone(struct minimask *dst, const struct minimask *src)
-{
- miniflow_clone(&dst->masks, &src->masks);
-}
-
-/* Initializes 'dst' with the data in 'src', destroying 'src'.
- * The caller must eventually free 'dst' with minimask_destroy(). */
-void
-minimask_move(struct minimask *dst, struct minimask *src)
+/* Returns a minimask copy of 'wc'. The caller must eventually free the
+ * returned minimask with free(). */
+struct minimask *
+minimask_create(const struct flow_wildcards *wc)
{
- miniflow_move(&dst->masks, &src->masks);
+ return (struct minimask *)miniflow_create(&wc->masks);
}
/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
*
- * The caller must provide room for FLOW_U64S "uint64_t"s in 'storage', for use
- * by 'dst_'. The caller must *not* free 'dst_' with minimask_destroy(). */
+ * The caller must provide room for FLOW_U64S "uint64_t"s in 'storage', which
+ * must follow '*dst_' in memory, for use by 'dst_'. The caller must *not*
+ * free 'dst_' with free(). */
void
minimask_combine(struct minimask *dst_,
const struct minimask *a_, const struct minimask *b_,
uint64_t *dst_values = storage;
const struct miniflow *a = &a_->masks;
const struct miniflow *b = &b_->masks;
- int idx;
-
- dst->values_inline = false;
- dst->offline_values = storage;
+ const uint64_t *ap = miniflow_get_values(a);
+ const uint64_t *bp = miniflow_get_values(b);
+ size_t idx;
- dst->map = 0;
- MAP_FOR_EACH_INDEX(idx, a->map & b->map) {
+ dst->tnl_map = 0;
+ MAP_FOR_EACH_INDEX(idx, a->tnl_map & b->tnl_map) {
/* Both 'a' and 'b' have non-zero data at 'idx'. */
- uint64_t mask = miniflow_get__(a, idx) & miniflow_get__(b, idx);
+ uint64_t mask = *miniflow_values_get__(ap, a->tnl_map, idx)
+ & *miniflow_values_get__(bp, b->tnl_map, idx);
if (mask) {
- dst->map |= UINT64_C(1) << idx;
+ dst->tnl_map |= UINT64_C(1) << idx;
*dst_values++ = mask;
}
}
-}
+ dst->pkt_map = 0;
+ ap += count_1bits(a->tnl_map); /* Skip tnl_map values. */
+ bp += count_1bits(b->tnl_map); /* Skip tnl_map values. */
+ MAP_FOR_EACH_INDEX(idx, a->pkt_map & b->pkt_map) {
+ /* Both 'a' and 'b' have non-zero data at 'idx'. */
+ uint64_t mask = *miniflow_values_get__(ap, a->pkt_map, idx)
+ & *miniflow_values_get__(bp, b->pkt_map, idx);
-/* Frees any memory owned by 'mask'. Does not free the storage in which 'mask'
- * itself resides; the caller is responsible for that. */
-void
-minimask_destroy(struct minimask *mask)
-{
- miniflow_destroy(&mask->masks);
+ if (mask) {
+ dst->pkt_map |= UINT64_C(1) << idx;
+ *dst_values++ = mask;
+ }
+ }
}
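
The storage contract above can be met with a simple aggregate that places the
value array immediately after the mask; a minimal sketch (the wrapper struct
is illustrative, not from the patch):

/* Illustrative only. */
static void
combine_example(const struct minimask *a, const struct minimask *b)
{
    struct {
        struct minimask mask;
        uint64_t storage[FLOW_U64S];   /* Must directly follow 'mask'. */
    } tmp;

    minimask_combine(&tmp.mask, a, b, tmp.storage);
    /* 'tmp.mask' is now the bitwise AND of 'a' and 'b'. */
}
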
-/* Initializes 'dst' as a copy of 'src'. */
+/* Initializes 'wc' as a copy of 'mask'. */
void
minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
{
bool
minimask_equal(const struct minimask *a, const struct minimask *b)
{
- return a->masks.map == b->masks.map &&
- !memcmp(miniflow_get_values(&a->masks),
- miniflow_get_values(&b->masks),
- count_1bits(a->masks.map) * sizeof *a->masks.inline_values);
+ return a->masks.tnl_map == b->masks.tnl_map
+ && a->masks.pkt_map == b->masks.pkt_map &&
+ !memcmp(miniflow_get_values(&a->masks), miniflow_get_values(&b->masks),
+ MINIFLOW_VALUES_SIZE(miniflow_n_values(&a->masks)));
}
/* Returns true if at least one bit matched by 'b' is wildcarded by 'a',
{
const uint64_t *ap = miniflow_get_values(&a->masks);
const uint64_t *bp = miniflow_get_values(&b->masks);
- int idx;
+ size_t idx;
- MAP_FOR_EACH_INDEX(idx, b->masks.map) {
+ MAP_FOR_EACH_INDEX(idx, b->masks.tnl_map) {
uint64_t b_u64 = *bp++;
/* 'b_u64' is non-zero, check if the data in 'a' is either zero
* or misses some of the bits in 'b_u64'. */
- if (!(a->masks.map & (UINT64_C(1) << idx))
- || ((miniflow_values_get__(ap, a->masks.map, idx) & b_u64)
+ if (!(a->masks.tnl_map & (UINT64_C(1) << idx))
+ || ((*miniflow_values_get__(ap, a->masks.tnl_map, idx) & b_u64)
+ != b_u64)) {
+ return true; /* 'a' wildcards some bits 'b' doesn't. */
+ }
+ }
+ ap += count_1bits(a->masks.tnl_map); /* Skip tnl_map values. */
+ MAP_FOR_EACH_INDEX(idx, b->masks.pkt_map) {
+ uint64_t b_u64 = *bp++;
+
+ if (!(a->masks.pkt_map & (UINT64_C(1) << idx))
+ || ((*miniflow_values_get__(ap, a->masks.pkt_map, idx) & b_u64)
!= b_u64)) {
return true; /* 'a' wildcards some bits 'b' doesn't. */
}