/* miniflow_push_* macros allow filling in a miniflow data values in order.
* Assertions are needed only when the layout of the struct flow is modified.
 * 'ofs' is a compile-time constant, which allows most of the code to be optimized
- * away. Some GCC versions gave warnigns on ALWAYS_INLINE, so these are
+ * away. Some GCC versions gave warnings on ALWAYS_INLINE, so these are
* defined as macros. */
-#if (FLOW_WC_SEQ != 26)
+#if (FLOW_WC_SEQ != 27)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
+BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
+ "assertions enabled. Consider updating FLOW_WC_SEQ after "
+ "testing")
#else
#define MINIFLOW_ASSERT(X)
#endif
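The miniflow_push_* macros themselves are outside this hunk. As a minimal sketch, assuming struct mf_ctx's members are named 'map', 'data', and 'end' as the initializer in miniflow_extract() below suggests, a full-word push boils down to:

/* Sketch only, not the actual definition: the real macros also handle
 * sub-word fields and rely on 'ofs' being a compile-time constant so
 * that these checks optimize away. */
#define MINIFLOW_PUSH_U32_SKETCH(MF, FIELD, VALUE)                  \
    {                                                               \
        unsigned int ofs = offsetof(struct flow, FIELD);            \
                                                                    \
        MINIFLOW_ASSERT((MF).data < (MF).end && ofs % 4 == 0);      \
        *(MF).data++ = (VALUE);                                     \
        (MF).map |= UINT64_C(1) << (ofs / 4);                       \
    }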
break;
}
}
- return MAX(count, FLOW_MAX_MPLS_LABELS);
+ return MIN(count, FLOW_MAX_MPLS_LABELS);
}
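The MAX() to MIN() change above fixes a clamping bug; a quick worked example with hypothetical values:

/* If count == 5 and FLOW_MAX_MPLS_LABELS == 3, then MAX(5, 3) == 5,
 * which claims more labels than struct flow can store, whereas
 * MIN(5, 3) == 3 correctly clamps to the storable label count. */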
static inline ovs_be16
(icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
- *nd_target = data_try_pull(datap, sizep, sizeof *nd_target);
+ *nd_target = data_try_pull(datap, sizep, sizeof **nd_target);
if (OVS_UNLIKELY(!*nd_target)) {
return false;
}
flow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
struct flow *flow)
{
- uint32_t buf[FLOW_U32S];
- struct miniflow mf;
+ struct {
+ struct miniflow mf;
+ uint32_t buf[FLOW_U32S];
+ } m;
COVERAGE_INC(flow_extract);
- miniflow_initialize(&mf, buf);
- miniflow_extract(packet, md, &mf);
- miniflow_expand(&mf, flow);
+ miniflow_initialize(&m.mf, m.buf);
+ miniflow_extract(packet, md, &m.mf);
+ miniflow_expand(&m.mf, flow);
}
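The wrapper struct keeps the value buffer contiguous with the miniflow header; assuming typical struct layout with no unusual padding, the on-stack picture is:

/*
 *   &m:  +-----------------+--------------------------+
 *        | struct miniflow | uint32_t buf[FLOW_U32S]  |
 *        +-----------------+--------------------------+
 *
 * miniflow_initialize(&m.mf, m.buf) can then treat the two as one
 * contiguous, cache-friendly block. */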
-/* Caller is responsible for initializing 'dst->values' with enough storage
- * for FLOW_U32S * 4 bytes. */
+/* Caller is responsible for initializing 'dst' with enough storage for
+ * FLOW_U32S * 4 bytes. */
void
miniflow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
struct miniflow *dst)
{
void *data = ofpbuf_data(packet);
size_t size = ofpbuf_size(packet);
+ uint32_t *values = miniflow_values(dst);
+ struct mf_ctx mf = { 0, values, values + FLOW_U32S };
char *l2;
- struct mf_ctx mf = { 0, dst->values, dst->values + FLOW_U32S };
ovs_be16 dl_type;
uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
miniflow_push_be16(mf, tp_src, htons(icmp->icmp_type));
miniflow_push_be16(mf, tp_dst, htons(icmp->icmp_code));
}
+ } else if (OVS_LIKELY(nw_proto == IPPROTO_IGMP)) {
+ if (OVS_LIKELY(size >= IGMP_HEADER_LEN)) {
+ const struct igmp_header *igmp = data;
+
+ miniflow_push_be16(mf, tp_src, htons(igmp->igmp_type));
+ miniflow_push_be16(mf, tp_dst, htons(igmp->igmp_code));
+ miniflow_push_be32(mf, igmp_group_ip4,
+ get_16aligned_be32(&igmp->group));
+ }
} else if (OVS_LIKELY(nw_proto == IPPROTO_ICMPV6)) {
if (OVS_LIKELY(size >= sizeof(struct icmp6_hdr))) {
const struct in6_addr *nd_target = NULL;
memset(arp_buf, 0, sizeof arp_buf);
if (OVS_LIKELY(parse_icmpv6(&data, &size, icmp, &nd_target,
arp_buf))) {
+ miniflow_push_words(mf, arp_sha, arp_buf,
+ ETH_ADDR_LEN * 2 / 4);
if (nd_target) {
miniflow_push_words(mf, nd_target, nd_target,
sizeof *nd_target / 4);
}
- miniflow_push_words(mf, arp_sha, arp_buf,
- ETH_ADDR_LEN * 2 / 4);
miniflow_push_be16(mf, tp_src, htons(icmp->icmp6_type));
miniflow_push_be16(mf, tp_dst, htons(icmp->icmp6_code));
}
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 27);
fmd->dp_hash = flow->dp_hash;
fmd->recirc_id = flow->recirc_id;
return "csum";
case FLOW_TNL_F_KEY:
return "key";
+ case FLOW_TNL_F_OAM:
+ return "oam";
default:
return NULL;
}
flow_format(struct ds *ds, const struct flow *flow)
{
struct match match;
+ struct flow_wildcards *wc = &match.wc;
match_wc_init(&match, flow);
+
+ /* As this function is most often used for formatting a packet in a
+ * packet-in message, skip formatting the packet context fields that are
+ * all-zeroes (OpenFlow spec encourages leaving out all-zeroes context
+ * fields from the packet-in messages). We make an exception with the
+ * 'in_port' field, which we always format, as packets usually have an
+ * in_port, and 0 is a port just like any other port. */
+ if (!flow->skb_priority) {
+ WC_UNMASK_FIELD(wc, skb_priority);
+ }
+ if (!flow->pkt_mark) {
+ WC_UNMASK_FIELD(wc, pkt_mark);
+ }
+ if (!flow->recirc_id) {
+ WC_UNMASK_FIELD(wc, recirc_id);
+ }
+ for (int i = 0; i < FLOW_N_REGS; i++) {
+ if (!flow->regs[i]) {
+ WC_UNMASK_FIELD(wc, regs[i]);
+ }
+ }
+ if (!flow->metadata) {
+ WC_UNMASK_FIELD(wc, metadata);
+ }
+
match_format(&match, ds, OFP_DEFAULT_PRIORITY);
}
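WC_UNMASK_FIELD() above pairs with WC_MASK_FIELD() used in flow_wildcards_init_for_packet() below; their definitions are outside this hunk, but a sketch of the assumed pattern is:

/* Assumed helper definitions (sketch): all-ones mask bytes exact-match
 * a field, all-zeroes mask bytes wildcard it. */
#define WC_MASK_FIELD(wc, field) \
    memset(&(wc)->masks.field, 0xff, sizeof (wc)->masks.field)
#define WC_UNMASK_FIELD(wc, field) \
    memset(&(wc)->masks.field, 0, sizeof (wc)->masks.field)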
memset(&wc->masks, 0, sizeof wc->masks);
}
+/* Converts a flow into flow wildcards. It sets the wildcard masks based on
+ * the packet headers extracted into 'flow'. It will not set the mask for fields
+ * that do not make sense for the packet type. OpenFlow-only metadata is
+ * wildcarded, but other metadata is unconditionally exact-matched. */
+void flow_wildcards_init_for_packet(struct flow_wildcards *wc,
+ const struct flow *flow)
+{
+ memset(&wc->masks, 0x0, sizeof wc->masks);
+
+ /* Update this function whenever struct flow changes. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 27);
+
+ if (flow->tunnel.ip_dst) {
+ if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
+ WC_MASK_FIELD(wc, tunnel.tun_id);
+ }
+ WC_MASK_FIELD(wc, tunnel.ip_src);
+ WC_MASK_FIELD(wc, tunnel.ip_dst);
+ WC_MASK_FIELD(wc, tunnel.flags);
+ WC_MASK_FIELD(wc, tunnel.ip_tos);
+ WC_MASK_FIELD(wc, tunnel.ip_ttl);
+ WC_MASK_FIELD(wc, tunnel.tp_src);
+ WC_MASK_FIELD(wc, tunnel.tp_dst);
+ } else if (flow->tunnel.tun_id) {
+ WC_MASK_FIELD(wc, tunnel.tun_id);
+ }
+
+ /* metadata and regs wildcarded. */
+
+ WC_MASK_FIELD(wc, skb_priority);
+ WC_MASK_FIELD(wc, pkt_mark);
+ WC_MASK_FIELD(wc, recirc_id);
+ WC_MASK_FIELD(wc, dp_hash);
+ WC_MASK_FIELD(wc, in_port);
+
+ WC_MASK_FIELD(wc, dl_dst);
+ WC_MASK_FIELD(wc, dl_src);
+ WC_MASK_FIELD(wc, dl_type);
+ WC_MASK_FIELD(wc, vlan_tci);
+
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ WC_MASK_FIELD(wc, nw_src);
+ WC_MASK_FIELD(wc, nw_dst);
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ WC_MASK_FIELD(wc, ipv6_src);
+ WC_MASK_FIELD(wc, ipv6_dst);
+ WC_MASK_FIELD(wc, ipv6_label);
+ } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
+ flow->dl_type == htons(ETH_TYPE_RARP)) {
+ WC_MASK_FIELD(wc, nw_src);
+ WC_MASK_FIELD(wc, nw_dst);
+ WC_MASK_FIELD(wc, nw_proto);
+ WC_MASK_FIELD(wc, arp_sha);
+ WC_MASK_FIELD(wc, arp_tha);
+ return;
+ } else if (eth_type_mpls(flow->dl_type)) {
+ for (int i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
+ WC_MASK_FIELD(wc, mpls_lse[i]);
+ if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
+ break;
+ }
+ }
+ return;
+ } else {
+ return; /* Unknown ethertype. */
+ }
+
+ /* IPv4 or IPv6. */
+ WC_MASK_FIELD(wc, nw_frag);
+ WC_MASK_FIELD(wc, nw_tos);
+ WC_MASK_FIELD(wc, nw_ttl);
+ WC_MASK_FIELD(wc, nw_proto);
+
+ /* No transport layer header in later fragments. */
+ if (!(flow->nw_frag & FLOW_NW_FRAG_LATER) &&
+ (flow->nw_proto == IPPROTO_ICMP ||
+ flow->nw_proto == IPPROTO_ICMPV6 ||
+ flow->nw_proto == IPPROTO_TCP ||
+ flow->nw_proto == IPPROTO_UDP ||
+ flow->nw_proto == IPPROTO_SCTP ||
+ flow->nw_proto == IPPROTO_IGMP)) {
+ WC_MASK_FIELD(wc, tp_src);
+ WC_MASK_FIELD(wc, tp_dst);
+
+ if (flow->nw_proto == IPPROTO_TCP) {
+ WC_MASK_FIELD(wc, tcp_flags);
+ } else if (flow->nw_proto == IPPROTO_ICMPV6) {
+ WC_MASK_FIELD(wc, arp_sha);
+ WC_MASK_FIELD(wc, arp_tha);
+ WC_MASK_FIELD(wc, nd_target);
+ } else if (flow->nw_proto == IPPROTO_IGMP) {
+ WC_MASK_FIELD(wc, igmp_group_ip4);
+ }
+ }
+}
+
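A brief usage sketch, assuming a struct flow already filled in by flow_extract():

struct flow_wildcards wc;

flow_wildcards_init_for_packet(&wc, &flow);
/* For an IPv4 TCP packet, wc.masks.tp_src is now all-ones (exact match)
 * while wc.masks.ipv6_src remains all-zeroes (wildcarded). */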
+/* Returns a map of possible fields for a packet of the same type as 'flow'.
+ * Including extra bits in the returned mask is not wrong; it is just less
+ * optimal.
+ *
+ * This is a less precise version of flow_wildcards_init_for_packet() above. */
+uint64_t
+flow_wc_map(const struct flow *flow)
+{
+ /* Update this function whenever struct flow changes. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 27);
+
+ uint64_t map = (flow->tunnel.ip_dst) ? MINIFLOW_MAP(tunnel) : 0;
+
+ /* Metadata fields that can appear on packet input. */
+ map |= MINIFLOW_MAP(skb_priority) | MINIFLOW_MAP(pkt_mark)
+ | MINIFLOW_MAP(recirc_id) | MINIFLOW_MAP(dp_hash)
+ | MINIFLOW_MAP(in_port)
+ | MINIFLOW_MAP(dl_dst) | MINIFLOW_MAP(dl_src)
+ | MINIFLOW_MAP(dl_type) | MINIFLOW_MAP(vlan_tci);
+
+ /* Ethertype-dependent fields. */
+ if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
+ map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
+ | MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
+ | MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
+ if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
+ map |= MINIFLOW_MAP(igmp_group_ip4);
+ } else {
+ map |= MINIFLOW_MAP(tcp_flags)
+ | MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
+ }
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ map |= MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst)
+ | MINIFLOW_MAP(ipv6_label)
+ | MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
+ | MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
+ if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
+ map |= MINIFLOW_MAP(nd_target)
+ | MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
+ } else {
+ map |= MINIFLOW_MAP(tcp_flags)
+ | MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
+ }
+ } else if (eth_type_mpls(flow->dl_type)) {
+ map |= MINIFLOW_MAP(mpls_lse);
+ } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
+ flow->dl_type == htons(ETH_TYPE_RARP)) {
+ map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
+ | MINIFLOW_MAP(nw_proto)
+ | MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
+ }
+
+ return map;
+}
+
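MINIFLOW_MAP() is defined elsewhere; conceptually it yields one map bit per 32-bit word that the field occupies in struct flow. A sketch, assuming the DIV_ROUND_UP() helper from util.h:

/* Sketch (assumption), not the actual definition. */
#define MINIFLOW_MAP_SKETCH(FIELD)                                        \
    (((UINT64_C(1) << DIV_ROUND_UP(sizeof ((struct flow *) 0)->FIELD, 4)) \
      - 1)                                                                \
     << (offsetof(struct flow, FIELD) / 4))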
/* Clear the metadata and register wildcard masks. They are not packet
* header fields. */
void
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
+ /* Update this function whenever struct flow changes. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 27);
+
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
}
wc->masks.regs[idx] = mask;
}
+/* Sets the wildcard mask for extended register 'idx' in 'wc' to 'mask'.
+ * (A 0-bit indicates a wildcard bit.) */
+void
+flow_wildcards_set_xreg_mask(struct flow_wildcards *wc, int idx, uint64_t mask)
+{
+ flow_set_xreg(&wc->masks, idx, mask);
+}
+
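flow_set_xreg() is not shown in this hunk; the assumed behavior, given that each extended register spans two 32-bit registers, would be:

/* Sketch (assumption): split the 64-bit value across two consecutive
 * 32-bit registers, high half in the even-indexed register. */
static void
flow_set_xreg_sketch(struct flow *flow, int idx, uint64_t value)
{
    flow->regs[idx * 2] = value >> 32;
    flow->regs[idx * 2 + 1] = value;
}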
/* Calculates the 5-tuple hash from the given miniflow.
* This returns the same value as flow_hash_5tuple for the corresponding
* flow. */
if (flow) {
ovs_be16 dl_type = MINIFLOW_GET_BE16(flow, dl_type);
- hash = mhash_add(hash, MINIFLOW_GET_U8(flow, nw_proto));
+ hash = hash_add(hash, MINIFLOW_GET_U8(flow, nw_proto));
/* Separate loops for better optimization. */
if (dl_type == htons(ETH_TYPE_IPV6)) {
uint32_t value;
MINIFLOW_FOR_EACH_IN_MAP(value, flow, map) {
- hash = mhash_add(hash, value);
+ hash = hash_add(hash, value);
}
} else {
uint64_t map = MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
uint32_t value;
MINIFLOW_FOR_EACH_IN_MAP(value, flow, map) {
- hash = mhash_add(hash, value);
+ hash = hash_add(hash, value);
}
}
- hash = mhash_finish(hash, 42); /* Arbitrary number. */
+ hash = hash_finish(hash, 42); /* Arbitrary number. */
}
return hash;
}
if (flow) {
const uint32_t *flow_u32 = (const uint32_t *)flow;
- hash = mhash_add(hash, flow->nw_proto);
+ hash = hash_add(hash, flow->nw_proto);
if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
int ofs = offsetof(struct flow, ipv6_src) / 4;
int end = ofs + 2 * sizeof flow->ipv6_src / 4;
while (ofs < end) {
- hash = mhash_add(hash, flow_u32[ofs++]);
+ hash = hash_add(hash, flow_u32[ofs++]);
}
} else {
- hash = mhash_add(hash, (OVS_FORCE uint32_t) flow->nw_src);
- hash = mhash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
+ hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_src);
+ hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
}
- hash = mhash_add(hash, flow_u32[offsetof(struct flow, tp_src) / 4]);
+ hash = hash_add(hash, flow_u32[offsetof(struct flow, tp_src) / 4]);
- hash = mhash_finish(hash, 42); /* Arbitrary number. */
+ hash = hash_finish(hash, 42); /* Arbitrary number. */
}
return hash;
}
hash = basis;
for (i = 0; i < FLOW_U32S; i++) {
- hash = mhash_add(hash, flow_u32[i] & wc_u32[i]);
+ hash = hash_add(hash, flow_u32[i] & wc_u32[i]);
}
- return mhash_finish(hash, 4 * FLOW_U32S);
+ return hash_finish(hash, 4 * FLOW_U32S);
}
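The mhash_*() to hash_*() renames throughout suggest a thin indirection layer over the hash implementation; assuming murmurhash remains the backend, the wrappers would reduce to:

/* Sketch (assumption): generic names so the underlying hash function can
 * be swapped without touching every caller. */
static inline uint32_t hash_add(uint32_t hash, uint32_t data)
{
    return mhash_add(hash, data);
}

static inline uint32_t hash_finish(uint32_t hash, uint32_t final)
{
    return mhash_finish(hash, final);
}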
/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
int
flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
{
- if (wc) {
- wc->masks.dl_type = OVS_BE16_MAX;
- }
+ /* dl_type is always masked. */
if (eth_type_mpls(flow->dl_type)) {
int i;
int len = FLOW_MAX_MPLS_LABELS;
*
* - BoS: 1.
*
- * If the new label is the second or label MPLS label in 'flow', it is
+ * If the new label is the second or later MPLS label in 'flow', it is
 * generated as:
*
* - label: Copied from outer label.
ovs_assert(eth_type_mpls(mpls_eth_type));
ovs_assert(n < FLOW_MAX_MPLS_LABELS);
- memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
if (n) {
int i;
+ if (wc) {
+ memset(&wc->masks.mpls_lse, 0xff, sizeof *wc->masks.mpls_lse * n);
+ }
for (i = n; i >= 1; i--) {
flow->mpls_lse[i] = flow->mpls_lse[i - 1];
}
- flow->mpls_lse[0] = (flow->mpls_lse[1]
- & htonl(~MPLS_BOS_MASK));
+ flow->mpls_lse[0] = (flow->mpls_lse[1] & htonl(~MPLS_BOS_MASK));
} else {
int label = 0; /* IPv4 Explicit Null. */
int tc = 0;
if (is_ip_any(flow)) {
tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
- wc->masks.nw_tos |= IP_DSCP_MASK;
+ if (wc) {
+ wc->masks.nw_tos |= IP_DSCP_MASK;
+ wc->masks.nw_ttl = 0xff;
+ }
if (flow->nw_ttl) {
ttl = flow->nw_ttl;
}
- wc->masks.nw_ttl = 0xff;
}
flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
/* Clear all L3 and L4 fields. */
- BUILD_ASSERT(FLOW_WC_SEQ == 26);
+ BUILD_ASSERT(FLOW_WC_SEQ == 27);
memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
}
if (n == 0) {
/* Nothing to pop. */
return false;
- } else if (n == FLOW_MAX_MPLS_LABELS
- && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
- /* Can't pop because we don't know what to fill in mpls_lse[n - 1]. */
- return false;
+ } else if (n == FLOW_MAX_MPLS_LABELS) {
+ if (wc) {
+ wc->masks.mpls_lse[n - 1] |= htonl(MPLS_BOS_MASK);
+ }
+ if (!(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
+ /* Can't pop because we don't know what to fill in mpls_lse[n - 1]. */
+ return false;
+ }
}
- memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
+ if (wc) {
+ memset(&wc->masks.mpls_lse[1], 0xff,
+ sizeof *wc->masks.mpls_lse * (n - 1));
+ }
for (i = 1; i < n; i++) {
flow->mpls_lse[i - 1] = flow->mpls_lse[i];
}
icmp->icmp_type = ntohs(flow->tp_src);
icmp->icmp_code = ntohs(flow->tp_dst);
icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
+ } else if (flow->nw_proto == IPPROTO_IGMP) {
+ struct igmp_header *igmp;
+
+ l4_len = sizeof *igmp;
+ igmp = ofpbuf_put_zeros(b, l4_len);
+ igmp->igmp_type = ntohs(flow->tp_src);
+ igmp->igmp_code = ntohs(flow->tp_dst);
+ put_16aligned_be32(&igmp->group, flow->igmp_group_ip4);
+ igmp->igmp_csum = csum(igmp, IGMP_HEADER_LEN);
} else if (flow->nw_proto == IPPROTO_ICMPV6) {
struct icmp6_hdr *icmp;
l4_len = flow_compose_l4(b, flow);
+ ip = ofpbuf_l3(b);
ip->ip_tot_len = htons(b->l4_ofs - b->l3_ofs + l4_len);
ip->ip_csum = csum(ip, sizeof *ip);
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
l4_len = flow_compose_l4(b, flow);
+ nh = ofpbuf_l3(b);
nh->ip6_plen = htons(l4_len);
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
flow->dl_type == htons(ETH_TYPE_RARP)) {
static uint32_t *
miniflow_alloc_values(struct miniflow *flow, int n)
{
- if (n <= MINI_N_INLINE) {
+ int size = MINIFLOW_VALUES_SIZE(n);
+
+ if (size <= sizeof flow->inline_values) {
+ flow->values_inline = true;
return flow->inline_values;
} else {
COVERAGE_INC(miniflow_malloc);
- return xmalloc(n * sizeof *flow->values);
+ flow->values_inline = false;
+ flow->offline_values = xmalloc(size);
+ return flow->offline_values;
}
}
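MINIFLOW_VALUES_SIZE() is used above but defined elsewhere; the assumed definition is simply the byte size of a given number of 32-bit values:

/* Sketch (assumption). */
#define MINIFLOW_VALUES_SIZE_SKETCH(COUNT) ((COUNT) * sizeof(uint32_t))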
* when a miniflow is initialized from a (mini)mask, the values can be zeroes,
* so that the flow and mask always have the same maps.
*
- * This function initializes 'dst->values' (either inline if possible or with
+ * This function initializes the values (either inline if possible or with
* malloc() otherwise) and copies the uint32_t elements of 'src' indicated by
* 'dst->map' into it. */
static void
miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
{
const uint32_t *src_u32 = (const uint32_t *) src;
- unsigned int ofs;
+ uint32_t *dst_u32 = miniflow_alloc_values(dst, n);
uint64_t map;
- dst->values = miniflow_alloc_values(dst, n);
- ofs = 0;
for (map = dst->map; map; map = zero_rightmost_1bit(map)) {
- dst->values[ofs++] = src_u32[raw_ctz(map)];
+ *dst_u32++ = src_u32[raw_ctz(map)];
}
}
/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
- * with miniflow_destroy(). */
+ * with miniflow_destroy().
+ * Always allocates offline storage. */
void
miniflow_init(struct miniflow *dst, const struct flow *src)
{
void
miniflow_clone(struct miniflow *dst, const struct miniflow *src)
{
- int n = miniflow_n_values(src);
+ int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
+ uint32_t *values;
+
+ dst->map = src->map;
+ if (size <= sizeof dst->inline_values) {
+ dst->values_inline = true;
+ values = dst->inline_values;
+ } else {
+ dst->values_inline = false;
+ COVERAGE_INC(miniflow_malloc);
+ dst->offline_values = xmalloc(size);
+ values = dst->offline_values;
+ }
+ memcpy(values, miniflow_get_values(src), size);
+}
+
+/* Initializes 'dst' as a copy of 'src'. The caller must have allocated
+ * 'dst' to have inline space for all data in 'src'. */
+void
+miniflow_clone_inline(struct miniflow *dst, const struct miniflow *src,
+ size_t n_values)
+{
+ dst->values_inline = true;
dst->map = src->map;
- dst->values = miniflow_alloc_values(dst, n);
- memcpy(dst->values, src->values, n * sizeof *dst->values);
+ memcpy(dst->inline_values, miniflow_get_values(src),
+ MINIFLOW_VALUES_SIZE(n_values));
}
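A usage sketch for miniflow_clone_inline(), reusing the wrapper-struct idiom from flow_extract() above to guarantee sufficient inline space (any miniflow fits in FLOW_U32S words):

struct {
    struct miniflow mf;
    uint32_t buf[FLOW_U32S];
} m;

miniflow_clone_inline(&m.mf, src, miniflow_n_values(src));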
/* Initializes 'dst' with the data in 'src', destroying 'src'.
- * The caller must eventually free 'dst' with miniflow_destroy(). */
+ * The caller must eventually free 'dst' with miniflow_destroy().
+ * 'dst' must be a regularly sized miniflow, but 'src' can have
+ * storage for more than the default MINI_N_INLINE inline
+ * values. */
void
miniflow_move(struct miniflow *dst, struct miniflow *src)
{
- if (src->values == src->inline_values) {
- dst->values = dst->inline_values;
- memcpy(dst->values, src->values,
- miniflow_n_values(src) * sizeof *dst->values);
+ int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
+
+ dst->map = src->map;
+ if (size <= sizeof dst->inline_values) {
+ dst->values_inline = true;
+ memcpy(dst->inline_values, miniflow_get_values(src), size);
+ miniflow_destroy(src);
+ } else if (src->values_inline) {
+ dst->values_inline = false;
+ COVERAGE_INC(miniflow_malloc);
+ dst->offline_values = xmalloc(size);
+ memcpy(dst->offline_values, src->inline_values, size);
} else {
- dst->values = src->values;
+ dst->values_inline = false;
+ dst->offline_values = src->offline_values;
}
- dst->map = src->map;
}
/* Frees any memory owned by 'flow'. Does not free the storage in which 'flow'
void
miniflow_destroy(struct miniflow *flow)
{
- if (flow->values != flow->inline_values) {
- free(flow->values);
+ if (!flow->values_inline) {
+ free(flow->offline_values);
}
}
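With the inline/offline split, ownership follows 'values_inline'; a minimal lifecycle sketch:

struct miniflow mf;

miniflow_init(&mf, &flow);  /* May malloc offline storage for the values. */
/* ... use 'mf' ... */
miniflow_destroy(&mf);      /* Frees the values only if they are offline. */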
miniflow_get(const struct miniflow *flow, unsigned int u32_ofs)
{
return (flow->map & UINT64_C(1) << u32_ofs)
- ? *(flow->values +
+ ? *(miniflow_get_u32_values(flow) +
count_1bits(flow->map & ((UINT64_C(1) << u32_ofs) - 1)))
: 0;
}
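The count_1bits() expression above is the classic compressed-array lookup; a worked example with illustrative values:

/* With map = 0b1011, values are stored for u32 offsets 0, 1, and 3.
 * For u32_ofs = 3, the map bit is set and
 *   count_1bits(map & ((UINT64_C(1) << 3) - 1)) == count_1bits(0b011) == 2,
 * so the third stored value is returned. For u32_ofs = 2 the map bit is
 * clear and 0 is returned without reading the array. */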
-/* Returns true if 'a' and 'b' are the same flow, false otherwise. */
+/* Returns true if 'a' and 'b' are equal miniflows, false otherwise. */
bool
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
{
- const uint32_t *ap = a->values;
- const uint32_t *bp = b->values;
+ const uint32_t *ap = miniflow_get_u32_values(a);
+ const uint32_t *bp = miniflow_get_u32_values(b);
const uint64_t a_map = a->map;
const uint64_t b_map = b->map;
- uint64_t map;
- if (a_map == b_map) {
- for (map = a_map; map; map = zero_rightmost_1bit(map)) {
- if (*ap++ != *bp++) {
- return false;
- }
- }
+ if (OVS_LIKELY(a_map == b_map)) {
+ int count = miniflow_n_values(a);
+
+ return !memcmp(ap, bp, count * sizeof *ap);
} else {
+ uint64_t map;
+
for (map = a_map | b_map; map; map = zero_rightmost_1bit(map)) {
uint64_t bit = rightmost_1bit(map);
uint64_t a_value = a_map & bit ? *ap++ : 0;
miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
const struct minimask *mask)
{
- const uint32_t *p;
+ const uint32_t *p = miniflow_get_u32_values(&mask->masks);
uint64_t map;
- p = mask->masks.values;
-
for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
int ofs = raw_ctz(map);
- if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p) {
+ if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p++) {
return false;
}
- p++;
}
return true;
const struct minimask *mask)
{
const uint32_t *b_u32 = (const uint32_t *) b;
- const uint32_t *p;
+ const uint32_t *p = miniflow_get_u32_values(&mask->masks);
uint64_t map;
- p = mask->masks.values;
-
for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
int ofs = raw_ctz(map);
- if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p) {
+ if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p++) {
return false;
}
- p++;
}
return true;
uint32_t storage[FLOW_U32S])
{
struct miniflow *dst = &dst_->masks;
+ uint32_t *dst_values = storage;
const struct miniflow *a = &a_->masks;
const struct miniflow *b = &b_->masks;
uint64_t map;
int n = 0;
- dst->values = storage;
+ dst->values_inline = false;
+ dst->offline_values = storage;
dst->map = 0;
for (map = a->map & b->map; map; map = zero_rightmost_1bit(map)) {
if (mask) {
dst->map |= rightmost_1bit(map);
- dst->values[n++] = mask;
+ dst_values[n++] = mask;
}
}
}
bool
minimask_has_extra(const struct minimask *a, const struct minimask *b)
{
- const uint32_t *p = b->masks.values;
+ const uint32_t *p = miniflow_get_u32_values(&b->masks);
uint64_t map;
for (map = b->masks.map; map; map = zero_rightmost_1bit(map)) {