/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/ip6.h>

#include "byte-order.h"
#include "dynamic-string.h"
#include "openflow/openflow.h"
#include "unaligned.h"

COVERAGE_DEFINE(flow_extract);
COVERAGE_DEFINE(miniflow_malloc);
/* U32 indices for segmented flow classification. */
const uint8_t flow_segment_u32s[4] = {
    FLOW_SEGMENT_1_ENDS_AT / 4,
    FLOW_SEGMENT_2_ENDS_AT / 4,
    FLOW_SEGMENT_3_ENDS_AT / 4,
    FLOW_U32S
};
/* miniflow_extract() assumes the following to be true to optimize the
 * extraction process. */
BUILD_ASSERT_DECL(offsetof(struct flow, dl_type) + 2
                  == offsetof(struct flow, vlan_tci) &&
                  offsetof(struct flow, dl_type) / 4
                  == offsetof(struct flow, vlan_tci) / 4);

BUILD_ASSERT_DECL(offsetof(struct flow, nw_frag) + 3
                  == offsetof(struct flow, nw_proto) &&
                  offsetof(struct flow, nw_tos) + 2
                  == offsetof(struct flow, nw_proto) &&
                  offsetof(struct flow, nw_ttl) + 1
                  == offsetof(struct flow, nw_proto) &&
                  offsetof(struct flow, nw_frag) / 4
                  == offsetof(struct flow, nw_tos) / 4 &&
                  offsetof(struct flow, nw_ttl) / 4
                  == offsetof(struct flow, nw_tos) / 4 &&
                  offsetof(struct flow, nw_proto) / 4
                  == offsetof(struct flow, nw_tos) / 4);
/* TCP flags in the first half of a BE32, zeroes in the other half. */
BUILD_ASSERT_DECL(offsetof(struct flow, tcp_flags) + 2
                  == offsetof(struct flow, pad2) &&
                  offsetof(struct flow, tcp_flags) / 4
                  == offsetof(struct flow, pad2) / 4);

#if WORDS_BIGENDIAN
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl) \
                                 << 16)
#else
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl))
#endif
BUILD_ASSERT_DECL(offsetof(struct flow, tp_src) + 2
                  == offsetof(struct flow, tp_dst) &&
                  offsetof(struct flow, tp_src) / 4
                  == offsetof(struct flow, tp_dst) / 4);
/* Removes 'size' bytes from the head end of '*datap', of size '*sizep', which
 * must contain at least 'size' bytes of data.  Returns the first byte of data
 * removed. */
static inline const void *
data_pull(void **datap, size_t *sizep, size_t size)
{
    char *data = (char *)*datap;
    *datap = data + size;
    *sizep -= size;
    return data;
}

/* If '*datap' has at least 'size' bytes of data, removes that many bytes from
 * the head end of '*datap' and returns the first byte removed.  Otherwise,
 * returns a null pointer without modifying '*datap'. */
static inline const void *
data_try_pull(void **datap, size_t *sizep, size_t size)
{
    return OVS_LIKELY(*sizep >= size) ? data_pull(datap, sizep, size) : NULL;
}
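
/* Illustrative usage sketch (not part of the original file): pulling a
 * fixed-size header out of a packet buffer with the helpers above.  'pkt'
 * and 'pkt_len' are hypothetical:
 *
 *     void *data = pkt;
 *     size_t size = pkt_len;
 *     const struct eth_header *eth;
 *
 *     eth = data_try_pull(&data, &size, sizeof *eth);
 *     if (eth) {
 *         'data' now points just past the Ethernet header and 'size' has
 *         shrunk by sizeof *eth; on a too-short buffer 'eth' is NULL and
 *         '*datap' is left untouched.
 *     }
 */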
/* Context for pushing data to a miniflow. */
struct mf_ctx {
    uint64_t map;
    uint32_t *data;
    uint32_t * const end;
};
/* miniflow_push_* macros allow filling in miniflow data values in order.
 * Assertions are needed only when the layout of the struct flow is modified.
 * 'ofs' is a compile-time constant, which allows most of the code to be
 * optimized away.  Some GCC versions gave warnings on ALWAYS_INLINE, so
 * these are defined as macros. */

#if (FLOW_WC_SEQ != 28)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
              "assertions enabled. Consider updating FLOW_WC_SEQ after "
              "testing")
#else
#define MINIFLOW_ASSERT(X)
#endif
#define miniflow_push_uint32_(MF, OFS, VALUE)                   \
{                                                               \
    MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 4 == 0          \
                    && !(MF.map & (UINT64_MAX << (OFS) / 4)));  \
    *MF.data++ = VALUE;                                         \
    MF.map |= UINT64_C(1) << (OFS) / 4;                         \
}

#define miniflow_push_be32_(MF, OFS, VALUE) \
    miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))
#define miniflow_push_uint16_(MF, OFS, VALUE)                           \
{                                                                       \
    MINIFLOW_ASSERT(MF.data < MF.end &&                                 \
                    (((OFS) % 4 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 4))) \
                     || ((OFS) % 4 == 2 && MF.map & (UINT64_C(1) << (OFS) / 4) \
                         && !(MF.map & (UINT64_MAX << ((OFS) / 4 + 1)))))); \
                                                                        \
    if ((OFS) % 4 == 0) {                                               \
        *(uint16_t *)MF.data = VALUE;                                   \
        MF.map |= UINT64_C(1) << (OFS) / 4;                             \
    } else if ((OFS) % 4 == 2) {                                        \
        *((uint16_t *)MF.data + 1) = VALUE;                             \
        MF.data++;                                                      \
    }                                                                   \
}

#define miniflow_push_be16_(MF, OFS, VALUE)                     \
    miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
/* Data at 'valuep' may be unaligned. */
#define miniflow_push_words_(MF, OFS, VALUEP, N_WORDS)          \
{                                                               \
    int ofs32 = (OFS) / 4;                                      \
                                                                \
    MINIFLOW_ASSERT(MF.data + (N_WORDS) <= MF.end && (OFS) % 4 == 0     \
                    && !(MF.map & (UINT64_MAX << ofs32)));      \
                                                                \
    memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data);     \
    MF.data += (N_WORDS);                                       \
    MF.map |= ((UINT64_MAX >> (64 - (N_WORDS))) << ofs32);      \
}
#define miniflow_push_uint32(MF, FIELD, VALUE)                  \
    miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_be32(MF, FIELD, VALUE)                    \
    miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_uint32_check(MF, FIELD, VALUE)                    \
    { if (OVS_LIKELY(VALUE)) {                                          \
            miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE); \
        }                                                               \
    }

#define miniflow_push_be32_check(MF, FIELD, VALUE)                      \
    { if (OVS_LIKELY(VALUE)) {                                          \
            miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE); \
        }                                                               \
    }

#define miniflow_push_uint16(MF, FIELD, VALUE)                  \
    miniflow_push_uint16_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_be16(MF, FIELD, VALUE)                    \
    miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS)                 \
    miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
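
/* Illustrative sketch (not part of the original file): the push macros must
 * be invoked in increasing 'struct flow' offset order, since each one
 * appends to 'mf.data' and records the corresponding u32 index in 'mf.map'.
 * With a 'struct mf_ctx mf' initialized as in miniflow_extract():
 *
 *     miniflow_push_be16(mf, dl_type, htons(ETH_TYPE_IP));
 *     miniflow_push_be16(mf, vlan_tci, htons(0));        (same u32 word)
 *     miniflow_push_be32(mf, nw_src, htonl(0x0a000001)); (later offset only)
 *
 * Pushing a field whose offset precedes one already pushed would trip
 * MINIFLOW_ASSERT() in an assertion-enabled build. */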
/* Pulls the MPLS headers at '*datap' and returns the count of them. */
static inline int
parse_mpls(void **datap, size_t *sizep)
{
    const struct mpls_hdr *mh;
    int count = 0;

    while ((mh = data_try_pull(datap, sizep, sizeof *mh))) {
        count++;
        if (mh->mpls_lse.lo & htons(1 << MPLS_BOS_SHIFT)) {
            break;
        }
    }
    return MIN(count, FLOW_MAX_MPLS_LABELS);
}
static inline ovs_be16
parse_vlan(void **datap, size_t *sizep)
{
    const struct eth_header *eth = *datap;

    struct qtag_prefix {
        ovs_be16 eth_type;      /* ETH_TYPE_VLAN */
        ovs_be16 tci;
    };

    data_pull(datap, sizep, ETH_ADDR_LEN * 2);

    if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
        if (OVS_LIKELY(*sizep
                       >= sizeof(struct qtag_prefix) + sizeof(ovs_be16))) {
            const struct qtag_prefix *qp = data_pull(datap, sizep, sizeof *qp);
            return qp->tci | htons(VLAN_CFI);
        }
    }
    return 0;
}
static inline ovs_be16
parse_ethertype(void **datap, size_t *sizep)
{
    const struct llc_snap_header *llc;
    ovs_be16 proto;

    proto = *(ovs_be16 *) data_pull(datap, sizep, sizeof proto);
    if (OVS_LIKELY(ntohs(proto) >= ETH_TYPE_MIN)) {
        return proto;
    }

    if (OVS_UNLIKELY(*sizep < sizeof *llc)) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    llc = *datap;
    if (OVS_UNLIKELY(llc->llc.llc_dsap != LLC_DSAP_SNAP
                     || llc->llc.llc_ssap != LLC_SSAP_SNAP
                     || llc->llc.llc_cntl != LLC_CNTL_SNAP
                     || memcmp(llc->snap.snap_org, SNAP_ORG_ETHERNET,
                               sizeof llc->snap.snap_org))) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    data_pull(datap, sizep, sizeof *llc);

    if (OVS_LIKELY(ntohs(llc->snap.snap_type) >= ETH_TYPE_MIN)) {
        return llc->snap.snap_type;
    }

    return htons(FLOW_DL_TYPE_NONE);
}
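
/* Illustrative sketch (not part of the original file): how the two parsers
 * above cooperate on a frame.  parse_vlan() first skips the two MAC
 * addresses, then consumes an 802.1Q tag if one is present;
 * parse_ethertype() then resolves the Ethertype, including LLC/SNAP
 * encapsulation:
 *
 *     ovs_be16 tci = parse_vlan(&data, &size);        (0 if untagged)
 *     ovs_be16 type = parse_ethertype(&data, &size);  (a real Ethertype,
 *                                                      or FLOW_DL_TYPE_NONE)
 */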
static inline bool
parse_icmpv6(void **datap, size_t *sizep, const struct icmp6_hdr *icmp,
             const struct in6_addr **nd_target,
             uint8_t arp_buf[2][ETH_ADDR_LEN])
{
    if (icmp->icmp6_code == 0 &&
        (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
         icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {

        *nd_target = data_try_pull(datap, sizep, sizeof **nd_target);
        if (OVS_UNLIKELY(!*nd_target)) {
            return false;
        }

        while (*sizep >= 8) {
            /* The minimum size of an option is 8 bytes, which also is
             * the size of Ethernet link-layer options. */
            const struct nd_opt_hdr *nd_opt = *datap;
            int opt_len = nd_opt->nd_opt_len * 8;

            if (!opt_len || opt_len > *sizep) {
                goto invalid;
            }

            /* Store the link layer address if the appropriate option is
             * provided.  It is considered an error if the same link
             * layer option is specified twice. */
            if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
                && opt_len == 8) {
                if (OVS_LIKELY(eth_addr_is_zero(arp_buf[0]))) {
                    memcpy(arp_buf[0], nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
                       && opt_len == 8) {
                if (OVS_LIKELY(eth_addr_is_zero(arp_buf[1]))) {
                    memcpy(arp_buf[1], nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            }

            if (OVS_UNLIKELY(!data_try_pull(datap, sizep, opt_len))) {
                goto invalid;
            }
        }
    }

    return true;

invalid:
    return false;
}
/* Initializes 'flow' members from 'packet' and 'md'.
 *
 * Initializes 'packet' header l2 pointer to the start of the Ethernet
 * header, and the layer offsets as follows:
 *
 *    - packet->l2_5_ofs to the start of the MPLS shim header, or UINT16_MAX
 *      when there is no MPLS shim header.
 *
 *    - packet->l3_ofs to just past the Ethernet header, or just past the
 *      vlan_header if one is present, to the first byte of the payload of the
 *      Ethernet frame.  UINT16_MAX if the frame is too short to contain an
 *      Ethernet header.
 *
 *    - packet->l4_ofs to just past the IPv4 header, if one is present and
 *      has at least the content used for the fields of interest for the flow,
 *      otherwise UINT16_MAX.
 */
void
flow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
             struct flow *flow)
{
    struct {
        struct miniflow mf;
        uint32_t buf[FLOW_U32S];
    } m;

    COVERAGE_INC(flow_extract);

    miniflow_initialize(&m.mf, m.buf);
    miniflow_extract(packet, md, &m.mf);
    miniflow_expand(&m.mf, flow);
}
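
/* Illustrative usage sketch (not part of the original file): extracting a
 * flow from a received packet.  The input port value is hypothetical:
 *
 *     struct pkt_metadata md = PKT_METADATA_INITIALIZER(u32_to_odp(1));
 *     struct flow flow;
 *
 *     flow_extract(&packet, &md, &flow);
 *     'flow' now holds the parsed headers and 'packet' has its l2_5/l3/l4
 *     offsets initialized as described above.
 */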
/* Caller is responsible for initializing 'dst' with enough storage for
 * FLOW_U32S * 4 bytes. */
void
miniflow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
                 struct miniflow *dst)
{
    void *data = ofpbuf_data(packet);
    size_t size = ofpbuf_size(packet);
    uint32_t *values = miniflow_values(dst);
    struct mf_ctx mf = { 0, values, values + FLOW_U32S };
    char *l2;
    ovs_be16 dl_type;
    uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;

    if (md->tunnel.ip_dst) {
        miniflow_push_words(mf, tunnel, &md->tunnel,
                            sizeof md->tunnel / 4);
    }
    miniflow_push_uint32_check(mf, skb_priority, md->skb_priority);
    miniflow_push_uint32_check(mf, pkt_mark, md->pkt_mark);
    miniflow_push_uint32_check(mf, recirc_id, md->recirc_id);
    miniflow_push_uint32(mf, in_port, odp_to_u32(md->in_port.odp_port));

    /* Initialize packet's layer pointer and offsets. */
    l2 = data;
    ofpbuf_set_frame(packet, data);

    /* Must have full Ethernet header to proceed. */
    if (OVS_UNLIKELY(size < sizeof(struct eth_header))) {
        goto out;
    } else {
        ovs_be16 vlan_tci;

        /* Link layer. */
        BUILD_ASSERT(offsetof(struct flow, dl_dst) + 6
                     == offsetof(struct flow, dl_src));
        miniflow_push_words(mf, dl_dst, data, ETH_ADDR_LEN * 2 / 4);

        /* dl_type, vlan_tci. */
        vlan_tci = parse_vlan(&data, &size);
        dl_type = parse_ethertype(&data, &size);
        miniflow_push_be16(mf, dl_type, dl_type);
        miniflow_push_be16(mf, vlan_tci, vlan_tci);
    }

    /* Parse MPLS. */
    if (OVS_UNLIKELY(eth_type_mpls(dl_type))) {
        int count;
        const void *mpls = data;

        packet->l2_5_ofs = (char *)data - l2;
        count = parse_mpls(&data, &size);
        miniflow_push_words(mf, mpls_lse, mpls, count);
    }

    /* Network layer. */
    packet->l3_ofs = (char *)data - l2;

    nw_frag = 0;
    if (OVS_LIKELY(dl_type == htons(ETH_TYPE_IP))) {
        const struct ip_header *nh = data;
        int ip_len;

        if (OVS_UNLIKELY(size < IP_HEADER_LEN)) {
            goto out;
        }
        ip_len = IP_IHL(nh->ip_ihl_ver) * 4;

        if (OVS_UNLIKELY(ip_len < IP_HEADER_LEN)) {
            goto out;
        }

        /* Push both source and destination address at once. */
        miniflow_push_words(mf, nw_src, &nh->ip_src, 2);

        nw_tos = nh->ip_tos;
        nw_ttl = nh->ip_ttl;
        nw_proto = nh->ip_proto;
        if (OVS_UNLIKELY(IP_IS_FRAGMENT(nh->ip_frag_off))) {
            nw_frag = FLOW_NW_FRAG_ANY;
            if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
                nw_frag |= FLOW_NW_FRAG_LATER;
            }
        }
        if (OVS_UNLIKELY(size < ip_len)) {
            goto out;
        }
        data_pull(&data, &size, ip_len);
    } else if (dl_type == htons(ETH_TYPE_IPV6)) {
        const struct ovs_16aligned_ip6_hdr *nh;
        ovs_be32 tc_flow;

        if (OVS_UNLIKELY(size < sizeof *nh)) {
            goto out;
        }
        nh = data_pull(&data, &size, sizeof *nh);

        miniflow_push_words(mf, ipv6_src, &nh->ip6_src,
                            sizeof nh->ip6_src / 4);
        miniflow_push_words(mf, ipv6_dst, &nh->ip6_dst,
                            sizeof nh->ip6_dst / 4);

        tc_flow = get_16aligned_be32(&nh->ip6_flow);
        {
            ovs_be32 label = tc_flow & htonl(IPV6_LABEL_MASK);
            miniflow_push_be32_check(mf, ipv6_label, label);
        }

        nw_tos = ntohl(tc_flow) >> 20;
        nw_ttl = nh->ip6_hlim;
        nw_proto = nh->ip6_nxt;

        while (1) {
            if (OVS_LIKELY((nw_proto != IPPROTO_HOPOPTS)
                           && (nw_proto != IPPROTO_ROUTING)
                           && (nw_proto != IPPROTO_DSTOPTS)
                           && (nw_proto != IPPROTO_AH)
                           && (nw_proto != IPPROTO_FRAGMENT))) {
                /* It's either a terminal header (e.g., TCP, UDP) or one we
                 * don't understand.  In either case, we're done with the
                 * packet, so use it to fill in 'nw_proto'. */
                break;
            }

            /* We only verify that at least 8 bytes of the next header are
             * available, but many of these headers are longer.  Ensure that
             * accesses within the extension header are within those first 8
             * bytes.  All extension headers are required to be at least 8
             * bytes. */
            if (OVS_UNLIKELY(size < 8)) {
                goto out;
            }

            if ((nw_proto == IPPROTO_HOPOPTS)
                || (nw_proto == IPPROTO_ROUTING)
                || (nw_proto == IPPROTO_DSTOPTS)) {
                /* These headers, while different, have the fields we care
                 * about in the same location and with the same
                 * interpretation. */
                const struct ip6_ext *ext_hdr = data;
                nw_proto = ext_hdr->ip6e_nxt;
                if (OVS_UNLIKELY(!data_try_pull(&data, &size,
                                                (ext_hdr->ip6e_len + 1) * 8))) {
                    goto out;
                }
            } else if (nw_proto == IPPROTO_AH) {
                /* A standard AH definition isn't available, but the fields
                 * we care about are in the same location as the generic
                 * option header--only the header length is calculated
                 * differently. */
                const struct ip6_ext *ext_hdr = data;
                nw_proto = ext_hdr->ip6e_nxt;
                if (OVS_UNLIKELY(!data_try_pull(&data, &size,
                                                (ext_hdr->ip6e_len + 2) * 4))) {
                    goto out;
                }
            } else if (nw_proto == IPPROTO_FRAGMENT) {
                const struct ovs_16aligned_ip6_frag *frag_hdr = data;

                nw_proto = frag_hdr->ip6f_nxt;
                if (!data_try_pull(&data, &size, sizeof *frag_hdr)) {
                    goto out;
                }

                /* We only process the first fragment. */
                if (frag_hdr->ip6f_offlg != htons(0)) {
                    nw_frag = FLOW_NW_FRAG_ANY;
                    if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) {
                        nw_frag |= FLOW_NW_FRAG_LATER;
                        nw_proto = IPPROTO_FRAGMENT;
                        break;
                    }
                }
            }
        }
    } else {
        if (dl_type == htons(ETH_TYPE_ARP) ||
            dl_type == htons(ETH_TYPE_RARP)) {
            uint8_t arp_buf[2][ETH_ADDR_LEN];
            const struct arp_eth_header *arp = (const struct arp_eth_header *)
                data_try_pull(&data, &size, ARP_ETH_HEADER_LEN);

            if (OVS_LIKELY(arp) && OVS_LIKELY(arp->ar_hrd == htons(1))
                && OVS_LIKELY(arp->ar_pro == htons(ETH_TYPE_IP))
                && OVS_LIKELY(arp->ar_hln == ETH_ADDR_LEN)
                && OVS_LIKELY(arp->ar_pln == 4)) {
                miniflow_push_words(mf, nw_src, &arp->ar_spa, 1);
                miniflow_push_words(mf, nw_dst, &arp->ar_tpa, 1);

                /* We only match on the lower 8 bits of the opcode. */
                if (OVS_LIKELY(ntohs(arp->ar_op) <= 0xff)) {
                    miniflow_push_be32(mf, nw_frag, htonl(ntohs(arp->ar_op)));
                }

                /* Must be adjacent. */
                BUILD_ASSERT(offsetof(struct flow, arp_sha) + 6
                             == offsetof(struct flow, arp_tha));

                memcpy(arp_buf[0], arp->ar_sha, ETH_ADDR_LEN);
                memcpy(arp_buf[1], arp->ar_tha, ETH_ADDR_LEN);
                miniflow_push_words(mf, arp_sha, arp_buf,
                                    ETH_ADDR_LEN * 2 / 4);
            }
        }
        goto out;
    }

    packet->l4_ofs = (char *)data - l2;
    miniflow_push_be32(mf, nw_frag,
                       BYTES_TO_BE32(nw_frag, nw_tos, nw_ttl, nw_proto));

    if (OVS_LIKELY(!(nw_frag & FLOW_NW_FRAG_LATER))) {
        if (OVS_LIKELY(nw_proto == IPPROTO_TCP)) {
            if (OVS_LIKELY(size >= TCP_HEADER_LEN)) {
                const struct tcp_header *tcp = data;

                miniflow_push_be32(mf, tcp_flags,
                                   TCP_FLAGS_BE32(tcp->tcp_ctl));
                miniflow_push_words(mf, tp_src, &tcp->tcp_src, 1);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_UDP)) {
            if (OVS_LIKELY(size >= UDP_HEADER_LEN)) {
                const struct udp_header *udp = data;

                miniflow_push_words(mf, tp_src, &udp->udp_src, 1);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_SCTP)) {
            if (OVS_LIKELY(size >= SCTP_HEADER_LEN)) {
                const struct sctp_header *sctp = data;

                miniflow_push_words(mf, tp_src, &sctp->sctp_src, 1);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMP)) {
            if (OVS_LIKELY(size >= ICMP_HEADER_LEN)) {
                const struct icmp_header *icmp = data;

                miniflow_push_be16(mf, tp_src, htons(icmp->icmp_type));
                miniflow_push_be16(mf, tp_dst, htons(icmp->icmp_code));
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_IGMP)) {
            if (OVS_LIKELY(size >= IGMP_HEADER_LEN)) {
                const struct igmp_header *igmp = data;

                miniflow_push_be16(mf, tp_src, htons(igmp->igmp_type));
                miniflow_push_be16(mf, tp_dst, htons(igmp->igmp_code));
                miniflow_push_be32(mf, igmp_group_ip4,
                                   get_16aligned_be32(&igmp->group));
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMPV6)) {
            if (OVS_LIKELY(size >= sizeof(struct icmp6_hdr))) {
                const struct in6_addr *nd_target = NULL;
                uint8_t arp_buf[2][ETH_ADDR_LEN];
                const struct icmp6_hdr *icmp = data_pull(&data, &size,
                                                         sizeof *icmp);
                memset(arp_buf, 0, sizeof arp_buf);
                if (OVS_LIKELY(parse_icmpv6(&data, &size, icmp, &nd_target,
                                            arp_buf))) {
                    miniflow_push_words(mf, arp_sha, arp_buf,
                                        ETH_ADDR_LEN * 2 / 4);
                    if (nd_target) {
                        miniflow_push_words(mf, nd_target, nd_target,
                                            sizeof *nd_target / 4);
                    }
                    miniflow_push_be16(mf, tp_src, htons(icmp->icmp6_type));
                    miniflow_push_be16(mf, tp_dst, htons(icmp->icmp6_code));
                }
            }
        }
    }
    miniflow_push_uint32_check(mf, dp_hash, md->dp_hash);

 out:
    dst->map = mf.map;
}

/* For every bit of a field that is wildcarded in 'wildcards', sets the
 * corresponding bit in 'flow' to zero. */
void
flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
{
    uint32_t *flow_u32 = (uint32_t *) flow;
    const uint32_t *wc_u32 = (const uint32_t *) &wildcards->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        flow_u32[i] &= wc_u32[i];
    }
}

void
flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->nw_proto != IPPROTO_ICMP) {
        memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
    } else {
        wc->masks.tp_src = htons(0xff);
        wc->masks.tp_dst = htons(0xff);
    }
}
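
/* Illustrative sketch (not part of the original file): for TCP, UDP, and
 * SCTP the full 16-bit ports are significant, but for ICMP 'tp_src' and
 * 'tp_dst' carry only the 8-bit type and code:
 *
 *     struct flow_wildcards wc;
 *
 *     flow_wildcards_init_catchall(&wc);
 *     flow_unwildcard_tp_ports(&flow, &wc);
 *     TCP flow:  wc.masks.tp_src == OVS_BE16_MAX
 *     ICMP flow: wc.masks.tp_src == htons(0xff)
 */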

/* Initializes 'fmd' with the metadata found in 'flow'. */
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);

    fmd->dp_hash = flow->dp_hash;
    fmd->recirc_id = flow->recirc_id;
    fmd->tun_id = flow->tunnel.tun_id;
    fmd->tun_src = flow->tunnel.ip_src;
    fmd->tun_dst = flow->tunnel.ip_dst;
    fmd->metadata = flow->metadata;
    memcpy(fmd->regs, flow->regs, sizeof fmd->regs);
    fmd->pkt_mark = flow->pkt_mark;
    fmd->in_port = flow->in_port.ofp_port;
}

char *
flow_to_string(const struct flow *flow)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    flow_format(&ds, flow);
    return ds_cstr(&ds);
}

const char *
flow_tun_flag_to_string(uint32_t flags)
{
    switch (flags) {
    case FLOW_TNL_F_DONT_FRAGMENT:
        return "df";
    case FLOW_TNL_F_CSUM:
        return "csum";
    default:
        return NULL;
    }
}

void
format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
             uint32_t flags, char del)
{
    uint32_t bad = 0;

    if (!flags) {
        return;
    }

    while (flags) {
        uint32_t bit = rightmost_1bit(flags);
        const char *s;

        s = bit_to_string(bit);
        if (s) {
            ds_put_format(ds, "%s%c", s, del);
        } else {
            bad |= bit;
        }

        flags &= ~bit;
    }

    if (bad) {
        ds_put_format(ds, "0x%"PRIx32"%c", bad, del);
    }
    ds_chomp(ds, del);
}
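
/* Illustrative usage sketch (not part of the original file): rendering
 * tunnel flags with format_flags() and the bit-to-string callback above:
 *
 *     struct ds s = DS_EMPTY_INITIALIZER;
 *
 *     format_flags(&s, flow_tun_flag_to_string,
 *                  FLOW_TNL_F_DONT_FRAGMENT | FLOW_TNL_F_CSUM, ',');
 *     ds_cstr(&s) is now df,csum; bits without a name would be appended
 *     as a hexadecimal constant instead.
 *     ds_destroy(&s);
 */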

void
format_flags_masked(struct ds *ds, const char *name,
                    const char *(*bit_to_string)(uint32_t), uint32_t flags,
                    uint32_t mask)
{
    if (name) {
        ds_put_format(ds, "%s=", name);
    }

    while (mask) {
        uint32_t bit = rightmost_1bit(mask);
        const char *s = bit_to_string(bit);

        ds_put_format(ds, "%s%s", (flags & bit) ? "+" : "-",
                      s ? s : "[Unknown]");
        mask &= ~bit;
    }
}

void
flow_format(struct ds *ds, const struct flow *flow)
{
    struct match match;
    struct flow_wildcards *wc = &match.wc;

    match_wc_init(&match, flow);

    /* As this function is most often used for formatting a packet in a
     * packet-in message, skip formatting the packet context fields that are
     * all-zeroes (the OpenFlow spec encourages leaving out all-zeroes context
     * fields from the packet-in messages).  We make an exception with the
     * 'in_port' field, which we always format, as packets usually have an
     * in_port, and 0 is a port just like any other port. */
    if (!flow->skb_priority) {
        WC_UNMASK_FIELD(wc, skb_priority);
    }
    if (!flow->pkt_mark) {
        WC_UNMASK_FIELD(wc, pkt_mark);
    }
    if (!flow->recirc_id) {
        WC_UNMASK_FIELD(wc, recirc_id);
    }
    for (int i = 0; i < FLOW_N_REGS; i++) {
        if (!flow->regs[i]) {
            WC_UNMASK_FIELD(wc, regs[i]);
        }
    }
    if (!flow->metadata) {
        WC_UNMASK_FIELD(wc, metadata);
    }

    match_format(&match, ds, OFP_DEFAULT_PRIORITY);
}

void
flow_print(FILE *stream, const struct flow *flow)
{
    char *s = flow_to_string(flow);
    fputs(s, stream);
    free(s);
}

/* flow_wildcards functions. */

/* Initializes 'wc' as a set of wildcards that matches every packet. */
void
flow_wildcards_init_catchall(struct flow_wildcards *wc)
{
    memset(&wc->masks, 0, sizeof wc->masks);
}

/* Converts a flow into flow wildcards.  It sets the wildcard masks based on
 * the packet headers extracted to 'flow'.  It will not set the mask for fields
 * that do not make sense for the packet type.  OpenFlow-only metadata is
 * wildcarded, but other metadata is unconditionally exact-matched. */
void flow_wildcards_init_for_packet(struct flow_wildcards *wc,
                                    const struct flow *flow)
{
    memset(&wc->masks, 0x0, sizeof wc->masks);

    /* Update this function whenever struct flow changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);

    if (flow->tunnel.ip_dst) {
        if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
            WC_MASK_FIELD(wc, tunnel.tun_id);
        }
        WC_MASK_FIELD(wc, tunnel.ip_src);
        WC_MASK_FIELD(wc, tunnel.ip_dst);
        WC_MASK_FIELD(wc, tunnel.flags);
        WC_MASK_FIELD(wc, tunnel.ip_tos);
        WC_MASK_FIELD(wc, tunnel.ip_ttl);
        WC_MASK_FIELD(wc, tunnel.tp_src);
        WC_MASK_FIELD(wc, tunnel.tp_dst);
    } else if (flow->tunnel.tun_id) {
        WC_MASK_FIELD(wc, tunnel.tun_id);
    }

    /* metadata and regs wildcarded. */

    WC_MASK_FIELD(wc, skb_priority);
    WC_MASK_FIELD(wc, pkt_mark);
    WC_MASK_FIELD(wc, recirc_id);
    WC_MASK_FIELD(wc, dp_hash);
    WC_MASK_FIELD(wc, in_port);

    /* actset_output wildcarded. */

    WC_MASK_FIELD(wc, dl_dst);
    WC_MASK_FIELD(wc, dl_src);
    WC_MASK_FIELD(wc, dl_type);
    WC_MASK_FIELD(wc, vlan_tci);

    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        WC_MASK_FIELD(wc, nw_src);
        WC_MASK_FIELD(wc, nw_dst);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        WC_MASK_FIELD(wc, ipv6_src);
        WC_MASK_FIELD(wc, ipv6_dst);
        WC_MASK_FIELD(wc, ipv6_label);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        WC_MASK_FIELD(wc, nw_src);
        WC_MASK_FIELD(wc, nw_dst);
        WC_MASK_FIELD(wc, nw_proto);
        WC_MASK_FIELD(wc, arp_sha);
        WC_MASK_FIELD(wc, arp_tha);
        return;
    } else if (eth_type_mpls(flow->dl_type)) {
        for (int i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
            WC_MASK_FIELD(wc, mpls_lse[i]);
            if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
                break;
            }
        }
        return;
    } else {
        return; /* Unknown ethertype. */
    }

    WC_MASK_FIELD(wc, nw_frag);
    WC_MASK_FIELD(wc, nw_tos);
    WC_MASK_FIELD(wc, nw_ttl);
    WC_MASK_FIELD(wc, nw_proto);

    /* No transport layer header in later fragments. */
    if (!(flow->nw_frag & FLOW_NW_FRAG_LATER) &&
        (flow->nw_proto == IPPROTO_ICMP ||
         flow->nw_proto == IPPROTO_ICMPV6 ||
         flow->nw_proto == IPPROTO_TCP ||
         flow->nw_proto == IPPROTO_UDP ||
         flow->nw_proto == IPPROTO_SCTP ||
         flow->nw_proto == IPPROTO_IGMP)) {
        WC_MASK_FIELD(wc, tp_src);
        WC_MASK_FIELD(wc, tp_dst);

        if (flow->nw_proto == IPPROTO_TCP) {
            WC_MASK_FIELD(wc, tcp_flags);
        } else if (flow->nw_proto == IPPROTO_ICMPV6) {
            WC_MASK_FIELD(wc, arp_sha);
            WC_MASK_FIELD(wc, arp_tha);
            WC_MASK_FIELD(wc, nd_target);
        } else if (flow->nw_proto == IPPROTO_IGMP) {
            WC_MASK_FIELD(wc, igmp_group_ip4);
        }
    }
}
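
/* Illustrative sketch (not part of the original file): for a TCP/IPv4
 * packet, flow_wildcards_init_for_packet() exact-matches the Ethernet,
 * IPv4, and TCP fields while fields of other packet types stay wildcarded:
 *
 *     struct flow_wildcards wc;
 *
 *     flow_wildcards_init_for_packet(&wc, &flow);
 *     wc.masks.nw_src == OVS_BE32_MAX and wc.masks.tcp_flags is set,
 *     but wc.masks.ipv6_src remains all-zeroes.
 */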

/* Return a map of possible fields for a packet of the same type as 'flow'.
 * Including extra bits in the returned mask is not wrong, it is just less
 * optimal.
 *
 * This is a less precise version of flow_wildcards_init_for_packet() above. */
uint64_t
flow_wc_map(const struct flow *flow)
{
    /* Update this function whenever struct flow changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);

    uint64_t map = (flow->tunnel.ip_dst) ? MINIFLOW_MAP(tunnel) : 0;

    /* Metadata fields that can appear on packet input. */
    map |= MINIFLOW_MAP(skb_priority) | MINIFLOW_MAP(pkt_mark)
        | MINIFLOW_MAP(recirc_id) | MINIFLOW_MAP(dp_hash)
        | MINIFLOW_MAP(in_port)
        | MINIFLOW_MAP(dl_dst) | MINIFLOW_MAP(dl_src)
        | MINIFLOW_MAP(dl_type) | MINIFLOW_MAP(vlan_tci);

    /* Ethertype-dependent fields. */
    if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
        map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
            | MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
            | MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
        if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
            map |= MINIFLOW_MAP(igmp_group_ip4);
        } else {
            map |= MINIFLOW_MAP(tcp_flags)
                | MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
        }
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        map |= MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst)
            | MINIFLOW_MAP(ipv6_label)
            | MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
            | MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
        if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
            map |= MINIFLOW_MAP(nd_target)
                | MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
        } else {
            map |= MINIFLOW_MAP(tcp_flags)
                | MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
        }
    } else if (eth_type_mpls(flow->dl_type)) {
        map |= MINIFLOW_MAP(mpls_lse);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
            | MINIFLOW_MAP(nw_proto)
            | MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
    }

    return map;
}
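
/* Illustrative sketch (not part of the original file): flow_wc_map() yields
 * a superset of the u32 positions that miniflow_extract() may fill for a
 * packet like 'flow', so a caller can test whether a field can occur at all:
 *
 *     uint64_t map = flow_wc_map(&flow);
 *
 *     if (map & MINIFLOW_MAP(tp_src)) {
 *         A transport header is possible for this packet type.
 *     }
 */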

/* Clear the metadata and register wildcard masks.  They are not packet
 * header fields. */
void
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
    /* Update this function whenever struct flow changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);

    memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
    memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
    wc->masks.actset_output = 0;
}

/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
 * fields. */
bool
flow_wildcards_is_catchall(const struct flow_wildcards *wc)
{
    const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        if (wc_u32[i]) {
            return false;
        }
    }
    return true;
}

/* Sets 'dst' as the bitwise AND of wildcards in 'src1' and 'src2'.
 * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded
 * in 'src1' or 'src2' or both. */
void
flow_wildcards_and(struct flow_wildcards *dst,
                   const struct flow_wildcards *src1,
                   const struct flow_wildcards *src2)
{
    uint32_t *dst_u32 = (uint32_t *) &dst->masks;
    const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
    const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        dst_u32[i] = src1_u32[i] & src2_u32[i];
    }
}

/* Sets 'dst' as the bitwise OR of wildcards in 'src1' and 'src2'.  That
 * is, a bit or a field is wildcarded in 'dst' only if it is wildcarded
 * in both 'src1' and 'src2'. */
void
flow_wildcards_or(struct flow_wildcards *dst,
                  const struct flow_wildcards *src1,
                  const struct flow_wildcards *src2)
{
    uint32_t *dst_u32 = (uint32_t *) &dst->masks;
    const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
    const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        dst_u32[i] = src1_u32[i] | src2_u32[i];
    }
}

/* Returns a hash of the wildcards in 'wc'. */
uint32_t
flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
{
    return flow_hash(&wc->masks, basis);
}

/* Returns true if 'a' and 'b' represent the same wildcards, false if they are
 * different. */
bool
flow_wildcards_equal(const struct flow_wildcards *a,
                     const struct flow_wildcards *b)
{
    return flow_equal(&a->masks, &b->masks);
}

/* Returns true if at least one bit or field is wildcarded in 'a' but not in
 * 'b', false otherwise. */
bool
flow_wildcards_has_extra(const struct flow_wildcards *a,
                         const struct flow_wildcards *b)
{
    const uint32_t *a_u32 = (const uint32_t *) &a->masks;
    const uint32_t *b_u32 = (const uint32_t *) &b->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        if ((a_u32[i] & b_u32[i]) != b_u32[i]) {
            return true;
        }
    }
    return false;
}

/* Returns true if 'a' and 'b' are equal, except that 0-bits (wildcarded bits)
 * in 'wc' do not need to be equal in 'a' and 'b'. */
bool
flow_equal_except(const struct flow *a, const struct flow *b,
                  const struct flow_wildcards *wc)
{
    const uint32_t *a_u32 = (const uint32_t *) a;
    const uint32_t *b_u32 = (const uint32_t *) b;
    const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        if ((a_u32[i] ^ b_u32[i]) & wc_u32[i]) {
            return false;
        }
    }
    return true;
}

/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
 * (A 0-bit indicates a wildcard bit.) */
void
flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
{
    wc->masks.regs[idx] = mask;
}

/* Sets the wildcard mask for extended register 'idx' in 'wc' to 'mask'.
 * (A 0-bit indicates a wildcard bit.) */
void
flow_wildcards_set_xreg_mask(struct flow_wildcards *wc, int idx, uint64_t mask)
{
    flow_set_xreg(&wc->masks, idx, mask);
}

/* Calculates the 5-tuple hash from the given miniflow.
 * This returns the same value as flow_hash_5tuple for the corresponding
 * flow. */
uint32_t
miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis)
{
    uint32_t hash = basis;

    if (flow) {
        ovs_be16 dl_type = MINIFLOW_GET_BE16(flow, dl_type);

        hash = hash_add(hash, MINIFLOW_GET_U8(flow, nw_proto));

        /* Separate loops for better optimization. */
        if (dl_type == htons(ETH_TYPE_IPV6)) {
            uint64_t map = MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst)
                | MINIFLOW_MAP(tp_src); /* Covers both ports. */
            uint32_t value;

            MINIFLOW_FOR_EACH_IN_MAP(value, flow, map) {
                hash = hash_add(hash, value);
            }
        } else {
            uint64_t map = MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
                | MINIFLOW_MAP(tp_src); /* Covers both ports. */
            uint32_t value;

            MINIFLOW_FOR_EACH_IN_MAP(value, flow, map) {
                hash = hash_add(hash, value);
            }
        }
        hash = hash_finish(hash, 42); /* Arbitrary number. */
    }
    return hash;
}

BUILD_ASSERT_DECL(offsetof(struct flow, tp_src) + 2
                  == offsetof(struct flow, tp_dst) &&
                  offsetof(struct flow, tp_src) / 4
                  == offsetof(struct flow, tp_dst) / 4);
BUILD_ASSERT_DECL(offsetof(struct flow, ipv6_src) + 16
                  == offsetof(struct flow, ipv6_dst));

/* Calculates the 5-tuple hash from the given flow. */
uint32_t
flow_hash_5tuple(const struct flow *flow, uint32_t basis)
{
    uint32_t hash = basis;

    if (flow) {
        const uint32_t *flow_u32 = (const uint32_t *)flow;

        hash = hash_add(hash, flow->nw_proto);

        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            int ofs = offsetof(struct flow, ipv6_src) / 4;
            int end = ofs + 2 * sizeof flow->ipv6_src / 4;

            while (ofs < end) {
                hash = hash_add(hash, flow_u32[ofs++]);
            }
        } else {
            hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_src);
            hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
        }
        hash = hash_add(hash, flow_u32[offsetof(struct flow, tp_src) / 4]);

        hash = hash_finish(hash, 42); /* Arbitrary number. */
    }
    return hash;
}
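
/* Illustrative sketch (not part of the original file): the two 5-tuple
 * hashes above are intended to agree, which is what allows hashing a
 * miniflow without expanding it first:
 *
 *     struct miniflow m;
 *
 *     miniflow_init(&m, &flow);
 *     ovs_assert(miniflow_hash_5tuple(&m, 0) == flow_hash_5tuple(&flow, 0));
 *     miniflow_destroy(&m);
 */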

/* Hashes 'flow' based on its L2 through L4 protocol information. */
uint32_t
flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
{
    struct {
        union {
            ovs_be32 ipv4_addr;
            struct in6_addr ipv6_addr;
        };
        ovs_be16 eth_type;
        ovs_be16 vlan_tci;
        ovs_be16 tp_port;
        uint8_t eth_addr[ETH_ADDR_LEN];
        uint8_t ip_proto;
    } fields;

    int i;

    memset(&fields, 0, sizeof fields);
    for (i = 0; i < ETH_ADDR_LEN; i++) {
        fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
    }
    fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
    fields.eth_type = flow->dl_type;

    /* UDP source and destination port are not taken into account because they
     * will not necessarily be symmetric in a bidirectional flow. */
    if (fields.eth_type == htons(ETH_TYPE_IP)) {
        fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;
        }
    } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
        const uint8_t *a = &flow->ipv6_src.s6_addr[0];
        const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
        uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];

        for (i = 0; i < 16; i++) {
            ipv6_addr[i] = a[i] ^ b[i];
        }
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;
        }
    }
    return jhash_bytes(&fields, sizeof fields, basis);
}
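
/* Illustrative note (not part of the original file): because addresses and
 * ports are folded in with XOR, swapping source and destination leaves the
 * hash unchanged, so both directions of a connection select the same bucket:
 *
 *     If 'rev' equals 'fwd' with nw_src/nw_dst and tp_src/tp_dst swapped:
 *     ovs_assert(flow_hash_symmetric_l4(&fwd, 0)
 *                == flow_hash_symmetric_l4(&rev, 0));
 */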

/* Initialize a flow with random fields that matter for nx_hash_fields. */
void
flow_random_hash_fields(struct flow *flow)
{
    uint16_t rnd = random_uint16();

    /* Initialize to all zeros. */
    memset(flow, 0, sizeof *flow);

    eth_addr_random(flow->dl_src);
    eth_addr_random(flow->dl_dst);

    flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);

    /* Make most of the random flows IPv4, some IPv6, and rest random. */
    flow->dl_type = rnd < 0x8000 ? htons(ETH_TYPE_IP) :
        rnd < 0xc000 ? htons(ETH_TYPE_IPV6) : (OVS_FORCE ovs_be16)rnd;

    if (dl_type_is_ip_any(flow->dl_type)) {
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            flow->nw_src = (OVS_FORCE ovs_be32)random_uint32();
            flow->nw_dst = (OVS_FORCE ovs_be32)random_uint32();
        } else {
            random_bytes(&flow->ipv6_src, sizeof flow->ipv6_src);
            random_bytes(&flow->ipv6_dst, sizeof flow->ipv6_dst);
        }
        /* Make most of IP flows TCP, some UDP or SCTP, and rest random. */
        rnd = random_uint16();
        flow->nw_proto = rnd < 0x8000 ? IPPROTO_TCP :
            rnd < 0xc000 ? IPPROTO_UDP :
            rnd < 0xd000 ? IPPROTO_SCTP : (uint8_t)rnd;
        if (flow->nw_proto == IPPROTO_TCP ||
            flow->nw_proto == IPPROTO_UDP ||
            flow->nw_proto == IPPROTO_SCTP) {
            flow->tp_src = (OVS_FORCE ovs_be16)random_uint16();
            flow->tp_dst = (OVS_FORCE ovs_be16)random_uint16();
        }
    }
}

/* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
void
flow_mask_hash_fields(const struct flow *flow, struct flow_wildcards *wc,
                      enum nx_hash_fields fields)
{
    switch (fields) {
    case NX_HASH_FIELDS_ETH_SRC:
        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
        break;

    case NX_HASH_FIELDS_SYMMETRIC_L4:
        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
            memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
            memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
        }
        if (is_ip_any(flow)) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            flow_unwildcard_tp_ports(flow, wc);
        }
        wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
        break;

    default:
        OVS_NOT_REACHED();
    }
}

/* Hashes the portions of 'flow' designated by 'fields'. */
uint32_t
flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
                 uint16_t basis)
{
    switch (fields) {
    case NX_HASH_FIELDS_ETH_SRC:
        return jhash_bytes(flow->dl_src, sizeof flow->dl_src, basis);

    case NX_HASH_FIELDS_SYMMETRIC_L4:
        return flow_hash_symmetric_l4(flow, basis);
    }

    OVS_NOT_REACHED();
}

/* Returns a string representation of 'fields'. */
const char *
flow_hash_fields_to_str(enum nx_hash_fields fields)
{
    switch (fields) {
    case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
    case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
    default: return "<unknown>";
    }
}

/* Returns true if the value of 'fields' is supported.  Otherwise false. */
bool
flow_hash_fields_valid(enum nx_hash_fields fields)
{
    return fields == NX_HASH_FIELDS_ETH_SRC
        || fields == NX_HASH_FIELDS_SYMMETRIC_L4;
}

/* Returns a hash value for the bits of 'flow' that are active based on
 * 'wc', given 'basis'. */
uint32_t
flow_hash_in_wildcards(const struct flow *flow,
                       const struct flow_wildcards *wc, uint32_t basis)
{
    const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
    const uint32_t *flow_u32 = (const uint32_t *) flow;
    uint32_t hash;
    size_t i;

    hash = basis;
    for (i = 0; i < FLOW_U32S; i++) {
        hash = hash_add(hash, flow_u32[i] & wc_u32[i]);
    }
    return hash_finish(hash, 4 * FLOW_U32S);
}

/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.0 "dl_vlan" value:
 *
 *      - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
 *        that VLAN.  Any existing PCP match is unchanged (it becomes 0 if
 *        'flow' previously matched packets without a VLAN header).
 *
 *      - If it is OFP_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
 *        without a VLAN tag.
 *
 *      - Other values of 'vid' should not be used. */
void
flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
{
    if (vid == htons(OFP10_VLAN_NONE)) {
        flow->vlan_tci = htons(0);
    } else {
        vid &= htons(VLAN_VID_MASK);
        flow->vlan_tci &= ~htons(VLAN_VID_MASK);
        flow->vlan_tci |= htons(VLAN_CFI) | vid;
    }
}

/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
 * plus CFI). */
void
flow_set_vlan_vid(struct flow *flow, ovs_be16 vid)
{
    ovs_be16 mask = htons(VLAN_VID_MASK | VLAN_CFI);
    flow->vlan_tci &= ~mask;
    flow->vlan_tci |= vid & mask;
}

/* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the
 * range 0 through 7.
 *
 * This function has no effect on the VLAN ID that 'flow' matches.
 *
 * After calling this function, 'flow' will not match packets without a VLAN
 * header. */
void
flow_set_vlan_pcp(struct flow *flow, uint8_t pcp)
{
    pcp &= 0x07;
    flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
    flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
}
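
/* Illustrative usage sketch (not part of the original file): composing an
 * OpenFlow 1.0 style VLAN match with the setters above, using hypothetical
 * VID 42 and priority 5:
 *
 *     flow_set_dl_vlan(&flow, htons(42));
 *     flow_set_vlan_pcp(&flow, 5);
 *     flow.vlan_tci == htons(VLAN_CFI | (5 << VLAN_PCP_SHIFT) | 42)
 */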

/* Returns the number of MPLS LSEs present in 'flow'.
 *
 * Returns 0 if the 'dl_type' of 'flow' is not an MPLS ethernet type.
 * Otherwise traverses 'flow''s MPLS label stack stopping at the
 * first entry that has the BoS bit set.  If no such entry exists then
 * the maximum number of LSEs that can be stored in 'flow' is returned. */
int
flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
{
    /* dl_type is always masked. */
    if (eth_type_mpls(flow->dl_type)) {
        int i;
        int len = FLOW_MAX_MPLS_LABELS;

        for (i = 0; i < len; i++) {
            if (wc) {
                wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
            }
            if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
                return i + 1;
            }
        }
        return len;
    } else {
        return 0;
    }
}

/* Returns the number of consecutive MPLS LSEs, starting at the
 * innermost LSE, that are common in 'a' and 'b'.
 *
 * 'an' must be flow_count_mpls_labels(a).
 * 'bn' must be flow_count_mpls_labels(b). */
int
flow_count_common_mpls_labels(const struct flow *a, int an,
                              const struct flow *b, int bn,
                              struct flow_wildcards *wc)
{
    int min_n = MIN(an, bn);
    if (min_n == 0) {
        return 0;
    } else {
        int common_n = 0;
        int a_last = an - 1;
        int b_last = bn - 1;
        int i;

        for (i = 0; i < min_n; i++) {
            if (wc) {
                wc->masks.mpls_lse[a_last - i] = OVS_BE32_MAX;
                wc->masks.mpls_lse[b_last - i] = OVS_BE32_MAX;
            }
            if (a->mpls_lse[a_last - i] != b->mpls_lse[b_last - i]) {
                break;
            } else {
                common_n++;
            }
        }
        return common_n;
    }
}

/* Adds a new outermost MPLS label to 'flow' and changes 'flow''s Ethernet type
 * to 'mpls_eth_type', which must be an MPLS Ethertype.
 *
 * If the new label is the first MPLS label in 'flow', it is generated as:
 *
 *     - label: 2, if 'flow' is IPv6, otherwise 0.
 *
 *     - TTL: IPv4 or IPv6 TTL, if present and nonzero, otherwise 64.
 *
 *     - TC: IPv4 or IPv6 TOS, if present, otherwise 0.
 *
 *     - BoS: 1.
 *
 * If the new label is the second or later MPLS label in 'flow', it is
 * generated as:
 *
 *     - label: Copied from outer label.
 *
 *     - TTL: Copied from outer label.
 *
 *     - TC: Copied from outer label.
 *
 *     - BoS: 0.
 *
 * 'n' must be flow_count_mpls_labels(flow).  'n' must be less than
 * FLOW_MAX_MPLS_LABELS (because otherwise flow->mpls_lse[] would overflow). */
void
flow_push_mpls(struct flow *flow, int n, ovs_be16 mpls_eth_type,
               struct flow_wildcards *wc)
{
    ovs_assert(eth_type_mpls(mpls_eth_type));
    ovs_assert(n < FLOW_MAX_MPLS_LABELS);

    if (n) {
        int i;

        if (wc) {
            memset(&wc->masks.mpls_lse, 0xff, sizeof *wc->masks.mpls_lse * n);
        }
        for (i = n; i >= 1; i--) {
            flow->mpls_lse[i] = flow->mpls_lse[i - 1];
        }
        flow->mpls_lse[0] = (flow->mpls_lse[1] & htonl(~MPLS_BOS_MASK));
    } else {
        int label = 0;          /* IPv4 Explicit Null. */
        int tc = 0;
        int ttl = 64;

        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            label = 2;
        }

        if (is_ip_any(flow)) {
            tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
            if (wc) {
                wc->masks.nw_tos |= IP_DSCP_MASK;
                wc->masks.nw_ttl = 0xff;
            }

            if (flow->nw_ttl) {
                ttl = flow->nw_ttl;
            }
        }

        flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));

        /* Clear all L3 and L4 fields. */
        BUILD_ASSERT(FLOW_WC_SEQ == 28);
        memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
               sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
    }
    flow->dl_type = mpls_eth_type;
}

/* Tries to remove the outermost MPLS label from 'flow'.  Returns true if
 * successful, false otherwise.  On success, sets 'flow''s Ethernet type to
 * 'eth_type'.
 *
 * 'n' must be flow_count_mpls_labels(flow). */
bool
flow_pop_mpls(struct flow *flow, int n, ovs_be16 eth_type,
              struct flow_wildcards *wc)
{
    int i;

    if (n == 0) {
        /* Nothing to pop. */
        return false;
    } else if (n == FLOW_MAX_MPLS_LABELS) {
        if (wc) {
            wc->masks.mpls_lse[n - 1] |= htonl(MPLS_BOS_MASK);
        }
        if (!(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
            /* Can't pop because don't know what to fill in mpls_lse[n - 1]. */
            return false;
        }
    }

    if (wc) {
        memset(&wc->masks.mpls_lse[1], 0xff,
               sizeof *wc->masks.mpls_lse * (n - 1));
    }
    for (i = 1; i < n; i++) {
        flow->mpls_lse[i - 1] = flow->mpls_lse[i];
    }
    flow->mpls_lse[n - 1] = 0;
    flow->dl_type = eth_type;
    return true;
}

/* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
 * as an OpenFlow 1.1 "mpls_label" value. */
void
flow_set_mpls_label(struct flow *flow, int idx, ovs_be32 label)
{
    set_mpls_lse_label(&flow->mpls_lse[idx], label);
}

/* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the
 * range 0 through 255. */
void
flow_set_mpls_ttl(struct flow *flow, int idx, uint8_t ttl)
{
    set_mpls_lse_ttl(&flow->mpls_lse[idx], ttl);
}

/* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the
 * range 0 through 7. */
void
flow_set_mpls_tc(struct flow *flow, int idx, uint8_t tc)
{
    set_mpls_lse_tc(&flow->mpls_lse[idx], tc);
}

/* Sets the MPLS BOS bit that 'flow' matches to 'bos', which should be 0
 * or 1. */
void
flow_set_mpls_bos(struct flow *flow, int idx, uint8_t bos)
{
    set_mpls_lse_bos(&flow->mpls_lse[idx], bos);
}

/* Sets the entire MPLS LSE. */
void
flow_set_mpls_lse(struct flow *flow, int idx, ovs_be32 lse)
{
    flow->mpls_lse[idx] = lse;
}
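
/* Illustrative usage sketch (not part of the original file): the per-field
 * setters above compose into a full LSE; label 100, TC 0, TTL 64, and BoS 1
 * are hypothetical values:
 *
 *     flow_set_mpls_label(&flow, 0, htonl(100));
 *     flow_set_mpls_tc(&flow, 0, 0);
 *     flow_set_mpls_ttl(&flow, 0, 64);
 *     flow_set_mpls_bos(&flow, 0, 1);
 *     Equivalent to one flow_set_mpls_lse() call with the same values.
 */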

static int
flow_compose_l4(struct ofpbuf *b, const struct flow *flow)
{
    int l4_len = 0;

    if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
        || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
        if (flow->nw_proto == IPPROTO_TCP) {
            struct tcp_header *tcp;

            l4_len = sizeof *tcp;
            tcp = ofpbuf_put_zeros(b, l4_len);
            tcp->tcp_src = flow->tp_src;
            tcp->tcp_dst = flow->tp_dst;
            tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
        } else if (flow->nw_proto == IPPROTO_UDP) {
            struct udp_header *udp;

            l4_len = sizeof *udp;
            udp = ofpbuf_put_zeros(b, l4_len);
            udp->udp_src = flow->tp_src;
            udp->udp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_SCTP) {
            struct sctp_header *sctp;

            l4_len = sizeof *sctp;
            sctp = ofpbuf_put_zeros(b, l4_len);
            sctp->sctp_src = flow->tp_src;
            sctp->sctp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_ICMP) {
            struct icmp_header *icmp;

            l4_len = sizeof *icmp;
            icmp = ofpbuf_put_zeros(b, l4_len);
            icmp->icmp_type = ntohs(flow->tp_src);
            icmp->icmp_code = ntohs(flow->tp_dst);
            icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
        } else if (flow->nw_proto == IPPROTO_IGMP) {
            struct igmp_header *igmp;

            l4_len = sizeof *igmp;
            igmp = ofpbuf_put_zeros(b, l4_len);
            igmp->igmp_type = ntohs(flow->tp_src);
            igmp->igmp_code = ntohs(flow->tp_dst);
            put_16aligned_be32(&igmp->group, flow->igmp_group_ip4);
            igmp->igmp_csum = csum(igmp, IGMP_HEADER_LEN);
        } else if (flow->nw_proto == IPPROTO_ICMPV6) {
            struct icmp6_hdr *icmp;

            l4_len = sizeof *icmp;
            icmp = ofpbuf_put_zeros(b, l4_len);
            icmp->icmp6_type = ntohs(flow->tp_src);
            icmp->icmp6_code = ntohs(flow->tp_dst);

            if (icmp->icmp6_code == 0 &&
                (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
                 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
                struct in6_addr *nd_target;
                struct nd_opt_hdr *nd_opt;

                l4_len += sizeof *nd_target;
                nd_target = ofpbuf_put_zeros(b, sizeof *nd_target);
                *nd_target = flow->nd_target;

                if (!eth_addr_is_zero(flow->arp_sha)) {
                    l4_len += 8;
                    nd_opt = ofpbuf_put_zeros(b, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
                    memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
                }
                if (!eth_addr_is_zero(flow->arp_tha)) {
                    l4_len += 8;
                    nd_opt = ofpbuf_put_zeros(b, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
                    memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
                }
            }
            icmp->icmp6_cksum = (OVS_FORCE uint16_t)
                csum(icmp, (char *)ofpbuf_tail(b) - (char *)icmp);
        }
    }
    return l4_len;
}

/* Puts into 'b' a packet that flow_extract() would parse as having the given
 * 'flow'.
 *
 * (This is useful only for testing, obviously, and the packet isn't really
 * valid.  It hasn't got some checksums filled in, for one, and lots of fields
 * are just zeroed.) */
void
flow_compose(struct ofpbuf *b, const struct flow *flow)
{
    int l4_len;

    /* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
    eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
    if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
        struct eth_header *eth = ofpbuf_l2(b);
        eth->eth_type = htons(ofpbuf_size(b));
        return;
    }

    if (flow->vlan_tci & htons(VLAN_CFI)) {
        eth_push_vlan(b, htons(ETH_TYPE_VLAN), flow->vlan_tci);
    }

    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        struct ip_header *ip;

        ip = ofpbuf_put_zeros(b, sizeof *ip);
        ip->ip_ihl_ver = IP_IHL_VER(5, 4);
        ip->ip_tos = flow->nw_tos;
        ip->ip_ttl = flow->nw_ttl;
        ip->ip_proto = flow->nw_proto;
        put_16aligned_be32(&ip->ip_src, flow->nw_src);
        put_16aligned_be32(&ip->ip_dst, flow->nw_dst);

        if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
            ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
            if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
                ip->ip_frag_off |= htons(100);
            }
        }

        ofpbuf_set_l4(b, ofpbuf_tail(b));

        l4_len = flow_compose_l4(b, flow);

        ip = ofpbuf_l3(b);
        ip->ip_tot_len = htons(b->l4_ofs - b->l3_ofs + l4_len);
        ip->ip_csum = csum(ip, sizeof *ip);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        struct ovs_16aligned_ip6_hdr *nh;

        nh = ofpbuf_put_zeros(b, sizeof *nh);
        put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
                           htonl(flow->nw_tos << 20) | flow->ipv6_label);
        nh->ip6_hlim = flow->nw_ttl;
        nh->ip6_nxt = flow->nw_proto;

        memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
        memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));

        ofpbuf_set_l4(b, ofpbuf_tail(b));

        l4_len = flow_compose_l4(b, flow);

        nh = ofpbuf_l3(b);
        nh->ip6_plen = htons(l4_len);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        struct arp_eth_header *arp;

        arp = ofpbuf_put_zeros(b, sizeof *arp);
        ofpbuf_set_l3(b, arp);
        arp->ar_hrd = htons(1);
        arp->ar_pro = htons(ETH_TYPE_IP);
        arp->ar_hln = ETH_ADDR_LEN;
        arp->ar_pln = 4;
        arp->ar_op = htons(flow->nw_proto);

        if (flow->nw_proto == ARP_OP_REQUEST ||
            flow->nw_proto == ARP_OP_REPLY) {
            put_16aligned_be32(&arp->ar_spa, flow->nw_src);
            put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
            memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
            memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);
        }
    }

    if (eth_type_mpls(flow->dl_type)) {
        int n;

        b->l2_5_ofs = b->l3_ofs;
        for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
            if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
                break;
            }
        }
        while (n > 0) {
            push_mpls(b, flow->dl_type, flow->mpls_lse[--n]);
        }
    }
}

/* Compressed flow. */

static int
miniflow_n_values(const struct miniflow *flow)
{
    return count_1bits(flow->map);
}

static uint32_t *
miniflow_alloc_values(struct miniflow *flow, int n)
{
    int size = MINIFLOW_VALUES_SIZE(n);

    if (size <= sizeof flow->inline_values) {
        flow->values_inline = true;
        return flow->inline_values;
    } else {
        COVERAGE_INC(miniflow_malloc);
        flow->values_inline = false;
        flow->offline_values = xmalloc(size);
        return flow->offline_values;
    }
}

/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
 * the caller.  The caller must have already initialized 'dst->map' properly
 * to indicate the significant uint32_t elements of 'src'.  'n' must be the
 * number of 1-bits in 'dst->map'.
 *
 * Normally the significant elements are the ones that are non-zero.  However,
 * when a miniflow is initialized from a (mini)mask, the values can be zeroes,
 * so that the flow and mask always have the same maps.
 *
 * This function initializes values (either inline if possible or with
 * malloc() otherwise) and copies the uint32_t elements of 'src' indicated by
 * 'dst->map' into it. */
static void
miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
{
    const uint32_t *src_u32 = (const uint32_t *) src;
    uint32_t *dst_u32 = miniflow_alloc_values(dst, n);
    uint64_t map;

    for (map = dst->map; map; map = zero_rightmost_1bit(map)) {
        *dst_u32++ = src_u32[raw_ctz(map)];
    }
}

/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free
 * 'dst' with miniflow_destroy().
 * Always allocates offline storage. */
void
miniflow_init(struct miniflow *dst, const struct flow *src)
{
    const uint32_t *src_u32 = (const uint32_t *) src;
    unsigned int i;
    int n;

    /* Initialize dst->map, counting the number of nonzero elements. */
    n = 0;
    dst->map = 0;

    for (i = 0; i < FLOW_U32S; i++) {
        if (src_u32[i]) {
            dst->map |= UINT64_C(1) << i;
            n++;
        }
    }

    miniflow_init__(dst, src, n);
}

/* Initializes 'dst' as a copy of 'src', using 'mask->map' as 'dst''s map.  The
 * caller must eventually free 'dst' with miniflow_destroy(). */
void
miniflow_init_with_minimask(struct miniflow *dst, const struct flow *src,
                            const struct minimask *mask)
{
    dst->map = mask->masks.map;
    miniflow_init__(dst, src, miniflow_n_values(dst));
}

/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free
 * 'dst' with miniflow_destroy(). */
void
miniflow_clone(struct miniflow *dst, const struct miniflow *src)
{
    int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
    uint32_t *values;

    dst->map = src->map;
    if (size <= sizeof dst->inline_values) {
        dst->values_inline = true;
        values = dst->inline_values;
    } else {
        dst->values_inline = false;
        COVERAGE_INC(miniflow_malloc);
        dst->offline_values = xmalloc(size);
        values = dst->offline_values;
    }
    memcpy(values, miniflow_get_values(src), size);
}

/* Initializes 'dst' as a copy of 'src'.  The caller must have allocated
 * 'dst' to have inline space for all data in 'src'. */
void
miniflow_clone_inline(struct miniflow *dst, const struct miniflow *src,
                      size_t n_values)
{
    dst->values_inline = true;
    dst->map = src->map;
    memcpy(dst->inline_values, miniflow_get_values(src),
           MINIFLOW_VALUES_SIZE(n_values));
}

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 * The caller must eventually free 'dst' with miniflow_destroy().
 * 'dst' must be a regularly sized miniflow, but 'src' can have
 * storage for more than the default MINI_N_INLINE inline
 * values. */
void
miniflow_move(struct miniflow *dst, struct miniflow *src)
{
    int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));

    dst->map = src->map;
    if (size <= sizeof dst->inline_values) {
        dst->values_inline = true;
        memcpy(dst->inline_values, miniflow_get_values(src), size);
        miniflow_destroy(src);
    } else if (src->values_inline) {
        dst->values_inline = false;
        COVERAGE_INC(miniflow_malloc);
        dst->offline_values = xmalloc(size);
        memcpy(dst->offline_values, src->inline_values, size);
    } else {
        dst->values_inline = false;
        dst->offline_values = src->offline_values;
    }
}

/* Frees any memory owned by 'flow'.  Does not free the storage in which
 * 'flow' itself resides; the caller is responsible for that. */
void
miniflow_destroy(struct miniflow *flow)
{
    if (!flow->values_inline) {
        free(flow->offline_values);
    }
}

/* Initializes 'dst' as a copy of 'src'. */
void
miniflow_expand(const struct miniflow *src, struct flow *dst)
{
    memset(dst, 0, sizeof *dst);
    flow_union_with_miniflow(dst, src);
}

/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'flow'
 * were expanded into a "struct flow". */
static uint32_t
miniflow_get(const struct miniflow *flow, unsigned int u32_ofs)
{
    return (flow->map & UINT64_C(1) << u32_ofs)
        ? *(miniflow_get_u32_values(flow) +
            count_1bits(flow->map & ((UINT64_C(1) << u32_ofs) - 1)))
        : 0;
}
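
/* Illustrative sketch (not part of the original file): how the map/popcount
 * indexing in miniflow_get() works.  If only u32 offsets 3 and 7 are present
 * in 'map', the value for offset 7 is the second packed element:
 *
 *     map                  = ...0 1000 1000
 *     map & ((1 << 7) - 1) = ...0 0000 1000   (one 1-bit below bit 7)
 *     count_1bits(...)     = 1                => values[1] holds offset 7
 */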

/* Returns true if 'a' and 'b' are equal miniflows, false otherwise. */
bool
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
{
    const uint32_t *ap = miniflow_get_u32_values(a);
    const uint32_t *bp = miniflow_get_u32_values(b);
    const uint64_t a_map = a->map;
    const uint64_t b_map = b->map;

    if (OVS_LIKELY(a_map == b_map)) {
        int count = miniflow_n_values(a);

        return !memcmp(ap, bp, count * sizeof *ap);
    } else {
        uint64_t map;

        for (map = a_map | b_map; map; map = zero_rightmost_1bit(map)) {
            uint64_t bit = rightmost_1bit(map);
            uint64_t a_value = a_map & bit ? *ap++ : 0;
            uint64_t b_value = b_map & bit ? *bp++ : 0;

            if (a_value != b_value) {
                return false;
            }
        }
    }

    return true;
}

/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
 * in 'mask', false if they differ. */
bool
miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
                           const struct minimask *mask)
{
    const uint32_t *p = miniflow_get_u32_values(&mask->masks);
    uint64_t map;

    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);

        if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p++) {
            return false;
        }
    }

    return true;
}

/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
 * in 'mask', false if they differ. */
bool
miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
                                const struct minimask *mask)
{
    const uint32_t *b_u32 = (const uint32_t *) b;
    const uint32_t *p = miniflow_get_u32_values(&mask->masks);
    uint64_t map;

    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);

        if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p++) {
            return false;
        }
    }

    return true;
}

/* Initializes 'mask' as a copy of 'wc'.  The caller must eventually free
 * 'mask' with minimask_destroy(). */
void
minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
{
    miniflow_init(&mask->masks, &wc->masks);
}

/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free
 * 'dst' with minimask_destroy(). */
void
minimask_clone(struct minimask *dst, const struct minimask *src)
{
    miniflow_clone(&dst->masks, &src->masks);
}

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 * The caller must eventually free 'dst' with minimask_destroy(). */
void
minimask_move(struct minimask *dst, struct minimask *src)
{
    miniflow_move(&dst->masks, &src->masks);
}

/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
 *
 * The caller must provide room for FLOW_U32S "uint32_t"s in 'storage', for use
 * by 'dst_'.  The caller must *not* free 'dst_' with minimask_destroy(). */
void
minimask_combine(struct minimask *dst_,
                 const struct minimask *a_, const struct minimask *b_,
                 uint32_t storage[FLOW_U32S])
{
    struct miniflow *dst = &dst_->masks;
    uint32_t *dst_values = storage;
    const struct miniflow *a = &a_->masks;
    const struct miniflow *b = &b_->masks;
    uint64_t map;
    int n = 0;

    dst->values_inline = false;
    dst->offline_values = storage;

    dst->map = 0;
    for (map = a->map & b->map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);
        uint32_t mask = miniflow_get(a, ofs) & miniflow_get(b, ofs);

        if (mask) {
            dst->map |= rightmost_1bit(map);
            dst_values[n++] = mask;
        }
    }
}

/* Frees any memory owned by 'mask'.  Does not free the storage in which
 * 'mask' itself resides; the caller is responsible for that. */
void
minimask_destroy(struct minimask *mask)
{
    miniflow_destroy(&mask->masks);
}

/* Initializes 'wc' as a copy of 'mask'. */
void
minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
{
    miniflow_expand(&mask->masks, &wc->masks);
}

/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'mask'
 * were expanded into a "struct flow_wildcards". */
uint32_t
minimask_get(const struct minimask *mask, unsigned int u32_ofs)
{
    return miniflow_get(&mask->masks, u32_ofs);
}

/* Returns true if 'a' and 'b' are the same flow mask, false otherwise. */
bool
minimask_equal(const struct minimask *a, const struct minimask *b)
{
    return miniflow_equal(&a->masks, &b->masks);
}

/* Returns true if at least one bit matched by 'b' is wildcarded by 'a',
 * false otherwise. */
bool
minimask_has_extra(const struct minimask *a, const struct minimask *b)
{
    const uint32_t *p = miniflow_get_u32_values(&b->masks);
    uint64_t map;

    for (map = b->masks.map; map; map = zero_rightmost_1bit(map)) {
        uint32_t a_u32 = minimask_get(a, raw_ctz(map));
        uint32_t b_u32 = *p++;

        if ((a_u32 & b_u32) != b_u32) {
            return true;
        }
    }

    return false;
}