/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include <sys/types.h>
#include "flow.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "byte-order.h"
#include "coverage.h"
#include "csum.h"
#include "dynamic-string.h"
#include "hash.h"
#include "jhash.h"
#include "match.h"
#include "dp-packet.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "odp-util.h"
#include "random.h"
#include "unaligned.h"
COVERAGE_DEFINE(flow_extract);
COVERAGE_DEFINE(miniflow_malloc);
/* U64 indices for segmented flow classification. */
const uint8_t flow_segment_u64s[4] = {
    FLOW_SEGMENT_1_ENDS_AT / sizeof(uint64_t),
    FLOW_SEGMENT_2_ENDS_AT / sizeof(uint64_t),
    FLOW_SEGMENT_3_ENDS_AT / sizeof(uint64_t),
    FLOW_U64S,
};
/* Asserts that field 'f1' follows immediately after 'f0' in struct flow,
 * without any intervening padding. */
#define ASSERT_SEQUENTIAL(f0, f1)                       \
    BUILD_ASSERT_DECL(offsetof(struct flow, f0)         \
                      + MEMBER_SIZEOF(struct flow, f0)  \
                      == offsetof(struct flow, f1))
/* Asserts that fields 'f0' and 'f1' are in the same 32-bit aligned word within
 * struct flow. */
#define ASSERT_SAME_WORD(f0, f1)                        \
    BUILD_ASSERT_DECL(offsetof(struct flow, f0) / 4     \
                      == offsetof(struct flow, f1) / 4)
/* Asserts that 'f0' and 'f1' are both sequential and within the same 32-bit
 * aligned word in struct flow. */
#define ASSERT_SEQUENTIAL_SAME_WORD(f0, f1)     \
    ASSERT_SEQUENTIAL(f0, f1);                  \
    ASSERT_SAME_WORD(f0, f1)
/* miniflow_extract() assumes the following to be true to optimize the
 * extraction process. */
ASSERT_SEQUENTIAL_SAME_WORD(dl_type, vlan_tci);

ASSERT_SEQUENTIAL_SAME_WORD(nw_frag, nw_tos);
ASSERT_SEQUENTIAL_SAME_WORD(nw_tos, nw_ttl);
ASSERT_SEQUENTIAL_SAME_WORD(nw_ttl, nw_proto);
/* TCP flags in the middle of a BE64, zeroes in the other half. */
BUILD_ASSERT_DECL(offsetof(struct flow, tcp_flags) % 8 == 4);

#if WORDS_BIGENDIAN
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl) \
                                 << 16)
#else
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl))
#endif
ASSERT_SEQUENTIAL_SAME_WORD(tp_src, tp_dst);
/* Removes 'size' bytes from the head end of '*datap', of size '*sizep', which
 * must contain at least 'size' bytes of data.  Returns the first byte of data
 * removed. */
static inline const void *
data_pull(const void **datap, size_t *sizep, size_t size)
{
    const char *data = *datap;

    *datap = data + size;
    *sizep -= size;

    return data;
}
/* If '*datap' has at least 'size' bytes of data, removes that many bytes from
 * the head end of '*datap' and returns the first byte removed.  Otherwise,
 * returns a null pointer without modifying '*datap'. */
static inline const void *
data_try_pull(const void **datap, size_t *sizep, size_t size)
{
    return OVS_LIKELY(*sizep >= size) ? data_pull(datap, sizep, size) : NULL;
}
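
/* Illustrative sketch, not part of the original file: a typical use of the
 * pull helpers above.  Parsing code keeps a ('data', 'size') pair tracking
 * the unparsed remainder of a packet; data_try_pull() advances it only when
 * enough bytes remain, while data_pull() is used once the caller has already
 * checked the size.  The helper name and 'value' parameter are
 * hypothetical. */
static inline bool
example_pull_be16(const void **datap, size_t *sizep, ovs_be16 *value)
{
    const void *field = data_try_pull(datap, sizep, sizeof *value);

    if (!field) {
        return false;           /* Fewer than 2 bytes left; '*datap' intact. */
    }
    memcpy(value, field, sizeof *value);  /* Packet data may be unaligned. */
    return true;
}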
/* Context for pushing data to a miniflow. */
struct mf_ctx {
    struct miniflow maps;
    uint64_t *data;
    uint64_t * const end;
};
/* miniflow_push_* macros allow filling in miniflow data values in order.
 * Assertions are needed only when the layout of the struct flow is modified.
 * 'ofs' is a compile-time constant, which allows most of the code to be
 * optimized away.  Some GCC versions gave warnings on ALWAYS_INLINE, so these
 * are defined as macros. */

#if (FLOW_WC_SEQ != 33)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
              "assertions enabled. Consider updating FLOW_WC_SEQ after "
              "testing")
#else
#define MINIFLOW_ASSERT(X)
#endif
#define miniflow_set_map(MF, OFS)                                       \
{                                                                       \
    unsigned int ofs = (OFS);                                           \
                                                                        \
    if (ofs < FLOW_TNL_U64S) {                                          \
        MINIFLOW_ASSERT(!(MF.maps.tnl_map & (UINT64_MAX << ofs))        \
                        && !MF.maps.pkt_map);                           \
        MF.maps.tnl_map |= UINT64_C(1) << ofs;                          \
    } else {                                                            \
        ofs -= FLOW_TNL_U64S;                                           \
        MINIFLOW_ASSERT(!(MF.maps.pkt_map & (UINT64_MAX << ofs)));      \
        MF.maps.pkt_map |= UINT64_C(1) << ofs;                          \
    }                                                                   \
}
#define miniflow_assert_in_map(MF, OFS)                                 \
{                                                                       \
    unsigned int ofs = (OFS);                                           \
                                                                        \
    if (ofs < FLOW_TNL_U64S) {                                          \
        MINIFLOW_ASSERT(MF.maps.tnl_map & UINT64_C(1) << ofs            \
                        && !(MF.maps.tnl_map & UINT64_MAX << (ofs + 1)) \
                        && !MF.maps.pkt_map);                           \
    } else {                                                            \
        ofs -= FLOW_TNL_U64S;                                           \
        MINIFLOW_ASSERT(MF.maps.pkt_map & UINT64_C(1) << ofs            \
                        && !(MF.maps.pkt_map & UINT64_MAX << (ofs + 1))); \
    }                                                                   \
}
#define miniflow_push_uint64_(MF, OFS, VALUE)                   \
{                                                               \
    MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0);        \
    *MF.data++ = VALUE;                                         \
    miniflow_set_map(MF, OFS / 8);                              \
}

#define miniflow_push_be64_(MF, OFS, VALUE)                     \
    miniflow_push_uint64_(MF, OFS, (OVS_FORCE uint64_t)(VALUE))
#define miniflow_push_uint32_(MF, OFS, VALUE)                   \
{                                                               \
    MINIFLOW_ASSERT(MF.data < MF.end);                          \
                                                                \
    if ((OFS) % 8 == 0) {                                       \
        miniflow_set_map(MF, OFS / 8);                          \
        *(uint32_t *)MF.data = VALUE;                           \
    } else if ((OFS) % 8 == 4) {                                \
        miniflow_assert_in_map(MF, OFS / 8);                    \
        *((uint32_t *)MF.data + 1) = VALUE;                     \
        MF.data++;                                              \
    }                                                           \
}

#define miniflow_push_be32_(MF, OFS, VALUE)                     \
    miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))
#define miniflow_push_uint16_(MF, OFS, VALUE)                   \
{                                                               \
    MINIFLOW_ASSERT(MF.data < MF.end);                          \
                                                                \
    if ((OFS) % 8 == 0) {                                       \
        miniflow_set_map(MF, OFS / 8);                          \
        *(uint16_t *)MF.data = VALUE;                           \
    } else if ((OFS) % 8 == 2) {                                \
        miniflow_assert_in_map(MF, OFS / 8);                    \
        *((uint16_t *)MF.data + 1) = VALUE;                     \
    } else if ((OFS) % 8 == 4) {                                \
        miniflow_assert_in_map(MF, OFS / 8);                    \
        *((uint16_t *)MF.data + 2) = VALUE;                     \
    } else if ((OFS) % 8 == 6) {                                \
        miniflow_assert_in_map(MF, OFS / 8);                    \
        *((uint16_t *)MF.data + 3) = VALUE;                     \
        MF.data++;                                              \
    }                                                           \
}
#define miniflow_pad_to_64_(MF, OFS)                            \
{                                                               \
    MINIFLOW_ASSERT((OFS) % 8 != 0);                            \
    miniflow_assert_in_map(MF, OFS / 8);                        \
                                                                \
    memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8);   \
    MF.data++;                                                  \
}

#define miniflow_push_be16_(MF, OFS, VALUE)                     \
    miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
#define miniflow_set_maps(MF, OFS, N_WORDS)                             \
{                                                                       \
    unsigned int ofs = (OFS);                                           \
    unsigned int n_words = (N_WORDS);                                   \
    uint64_t n_words_mask = UINT64_MAX >> (64 - n_words);               \
                                                                        \
    MINIFLOW_ASSERT(n_words && MF.data + n_words <= MF.end);            \
    if (ofs < FLOW_TNL_U64S) {                                          \
        MINIFLOW_ASSERT(!(MF.maps.tnl_map & UINT64_MAX << ofs)          \
                        && !MF.maps.pkt_map);                           \
        MF.maps.tnl_map |= n_words_mask << ofs;                         \
        if (n_words > FLOW_TNL_U64S - ofs) {                            \
            MF.maps.pkt_map |= n_words_mask >> (FLOW_TNL_U64S - ofs);   \
        }                                                               \
    } else {                                                            \
        ofs -= FLOW_TNL_U64S;                                           \
        MINIFLOW_ASSERT(!(MF.maps.pkt_map & (UINT64_MAX << ofs)));      \
        MF.maps.pkt_map |= n_words_mask << ofs;                         \
    }                                                                   \
}
/* Data at 'valuep' may be unaligned. */
#define miniflow_push_words_(MF, OFS, VALUEP, N_WORDS)          \
{                                                               \
    MINIFLOW_ASSERT((OFS) % 8 == 0);                            \
    miniflow_set_maps(MF, (OFS) / 8, (N_WORDS));                \
    memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data);     \
    MF.data += (N_WORDS);                                       \
}
/* Push 32-bit words padded to 64-bits. */
#define miniflow_push_words_32_(MF, OFS, VALUEP, N_WORDS)       \
{                                                               \
    miniflow_set_maps(MF, (OFS) / 8, DIV_ROUND_UP(N_WORDS, 2)); \
    memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof(uint32_t));    \
    MF.data += DIV_ROUND_UP(N_WORDS, 2);                        \
    if ((N_WORDS) & 1) {                                        \
        *((uint32_t *)MF.data - 1) = 0;                         \
    }                                                           \
}
/* Data at 'valuep' may be unaligned. */
/* MACs start 64-aligned, and must be followed by other data or padding. */
#define miniflow_push_macs_(MF, OFS, VALUEP)                    \
{                                                               \
    miniflow_set_maps(MF, (OFS) / 8, 2);                        \
    memcpy(MF.data, (VALUEP), 2 * ETH_ADDR_LEN);                \
    MF.data += 1;                   /* First word only. */      \
}
#define miniflow_push_uint32(MF, FIELD, VALUE)                      \
    miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_be32(MF, FIELD, VALUE)                        \
    miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_uint16(MF, FIELD, VALUE)                      \
    miniflow_push_uint16_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_be16(MF, FIELD, VALUE)                        \
    miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_pad_to_64(MF, FIELD)                               \
    miniflow_pad_to_64_(MF, offsetof(struct flow, FIELD))

#define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS)             \
    miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)

#define miniflow_push_words_32(MF, FIELD, VALUEP, N_WORDS)          \
    miniflow_push_words_32_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)

#define miniflow_push_macs(MF, FIELD, VALUEP)                       \
    miniflow_push_macs_(MF, offsetof(struct flow, FIELD), VALUEP)
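
/* Illustrative sketch, not part of the original file: the push macros above
 * must be invoked in increasing 'struct flow' offset order, and a 64-bit
 * word, once started, must be completed before the next one begins.  The
 * link-layer prologue of miniflow_extract() below is the canonical pattern;
 * this hypothetical helper just isolates it.  Real code would then store
 * mf.maps into the destination miniflow, as miniflow_extract() does. */
static inline void
example_push_link_layer(uint64_t *values, size_t n_values,
                        const void *eth_dst_src,
                        ovs_be16 dl_type, ovs_be16 vlan_tci)
{
    struct mf_ctx mf = { { 0, 0 }, values, values + n_values };

    /* dl_dst/dl_src are 12 bytes: one full word plus half of a second. */
    miniflow_push_macs(mf, dl_dst, eth_dst_src);
    /* dl_type (offset % 8 == 4) and vlan_tci (% 8 == 6) complete the second
     * word, so the % 8 == 6 case advances MF.data past it. */
    miniflow_push_be16(mf, dl_type, dl_type);
    miniflow_push_be16(mf, vlan_tci, vlan_tci);
}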
/* Pulls the MPLS headers at '*datap' and returns the count of them. */
static inline int
parse_mpls(const void **datap, size_t *sizep)
{
    const struct mpls_hdr *mh;
    int count = 0;

    while ((mh = data_try_pull(datap, sizep, sizeof *mh))) {
        count++;
        if (mh->mpls_lse.lo & htons(1 << MPLS_BOS_SHIFT)) {
            break;
        }
    }
    return MIN(count, FLOW_MAX_MPLS_LABELS);
}
static inline ovs_be16
parse_vlan(const void **datap, size_t *sizep)
{
    const struct eth_header *eth = *datap;

    struct qtag_prefix {
        ovs_be16 eth_type;      /* ETH_TYPE_VLAN */
        ovs_be16 tci;
    };

    data_pull(datap, sizep, ETH_ADDR_LEN * 2);

    if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
        if (OVS_LIKELY(*sizep
                       >= sizeof(struct qtag_prefix) + sizeof(ovs_be16))) {
            const struct qtag_prefix *qp = data_pull(datap, sizep, sizeof *qp);
            return qp->tci | htons(VLAN_CFI);
        }
    }
    return 0;
}
static inline ovs_be16
parse_ethertype(const void **datap, size_t *sizep)
{
    const struct llc_snap_header *llc;
    ovs_be16 proto;

    proto = *(ovs_be16 *) data_pull(datap, sizep, sizeof proto);
    if (OVS_LIKELY(ntohs(proto) >= ETH_TYPE_MIN)) {
        return proto;
    }

    if (OVS_UNLIKELY(*sizep < sizeof *llc)) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    llc = *datap;
    if (OVS_UNLIKELY(llc->llc.llc_dsap != LLC_DSAP_SNAP
                     || llc->llc.llc_ssap != LLC_SSAP_SNAP
                     || llc->llc.llc_cntl != LLC_CNTL_SNAP
                     || memcmp(llc->snap.snap_org, SNAP_ORG_ETHERNET,
                               sizeof llc->snap.snap_org))) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    data_pull(datap, sizep, sizeof *llc);

    if (OVS_LIKELY(ntohs(llc->snap.snap_type) >= ETH_TYPE_MIN)) {
        return llc->snap.snap_type;
    }

    return htons(FLOW_DL_TYPE_NONE);
}
static inline bool
parse_icmpv6(const void **datap, size_t *sizep, const struct icmp6_hdr *icmp,
             const struct in6_addr **nd_target,
             uint8_t arp_buf[2][ETH_ADDR_LEN])
{
    if (icmp->icmp6_code == 0 &&
        (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
         icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {

        *nd_target = data_try_pull(datap, sizep, sizeof **nd_target);
        if (OVS_UNLIKELY(!*nd_target)) {
            return false;
        }

        while (*sizep >= 8) {
            /* The minimum size of an option is 8 bytes, which also is
             * the size of Ethernet link-layer options. */
            const struct nd_opt_hdr *nd_opt = *datap;
            int opt_len = nd_opt->nd_opt_len * 8;

            if (!opt_len || opt_len > *sizep) {
                return true;
            }

            /* Store the link layer address if the appropriate option is
             * provided.  It is considered an error if the same link
             * layer option is specified twice. */
            if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
                && opt_len == 8) {
                if (OVS_LIKELY(eth_addr_is_zero(arp_buf[0]))) {
                    memcpy(arp_buf[0], nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
                       && opt_len == 8) {
                if (OVS_LIKELY(eth_addr_is_zero(arp_buf[1]))) {
                    memcpy(arp_buf[1], nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            }

            if (OVS_UNLIKELY(!data_try_pull(datap, sizep, opt_len))) {
                goto invalid;
            }
        }
    }

    return true;

invalid:
    *nd_target = NULL;
    return false;
}
/* Initializes 'flow' members from 'packet', whose metadata is in
 * 'packet->md'.
 *
 * Initializes 'packet' header l2 pointer to the start of the Ethernet
 * header, and the layer offsets as follows:
 *
 *    - packet->l2_5_ofs to the start of the MPLS shim header, or UINT16_MAX
 *      when there is no MPLS shim header.
 *
 *    - packet->l3_ofs to just past the Ethernet header, or just past the
 *      vlan_header if one is present, to the first byte of the payload of the
 *      Ethernet frame.  UINT16_MAX if the frame is too short to contain an
 *      Ethernet header.
 *
 *    - packet->l4_ofs to just past the IPv4 header, if one is present and
 *      has at least the content used for the fields of interest for the flow,
 *      otherwise UINT16_MAX.
 */
void
flow_extract(struct dp_packet *packet, struct flow *flow)
{
    struct {
        struct miniflow mf;
        uint64_t buf[FLOW_U64S];
    } m;

    COVERAGE_INC(flow_extract);

    miniflow_extract(packet, &m.mf);
    miniflow_expand(&m.mf, flow);
}
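
/* Illustrative sketch, not part of the original file: flow_extract() above
 * shows the idiom for a stack-allocated miniflow -- 'struct miniflow' holds
 * only the two bitmaps, so it must be immediately followed by enough storage
 * for the values it indexes.  A caller that wants to keep the compressed
 * form instead of expanding it can do the same.  The function name is
 * hypothetical. */
static inline void
example_extract_miniflow(struct dp_packet *packet)
{
    struct {
        struct miniflow mf;
        uint64_t buf[FLOW_U64S];   /* Storage for miniflow_values(&m.mf). */
    } m;

    miniflow_extract(packet, &m.mf);
    /* m.mf now indexes into m.buf; it can be used directly, e.g. with
     * miniflow_hash_5tuple(&m.mf, 0), without materializing a full
     * struct flow. */
}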
/* Caller is responsible for initializing 'dst' with enough storage for
 * FLOW_U64S * 8 bytes. */
void
miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
{
    const struct pkt_metadata *md = &packet->md;
    const void *data = dp_packet_data(packet);
    size_t size = dp_packet_size(packet);
    uint64_t *values = miniflow_values(dst);
    struct mf_ctx mf = { { 0, 0 }, values, values + FLOW_U64S };
    const char *l2;
    ovs_be16 dl_type;
    uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;

    /* Metadata. */
    if (md->tunnel.ip_dst) {
        miniflow_push_words(mf, tunnel, &md->tunnel,
                            offsetof(struct flow_tnl, metadata) /
                            sizeof(uint64_t));

        if (!(md->tunnel.flags & FLOW_TNL_F_UDPIF)) {
            if (md->tunnel.metadata.present.map) {
                miniflow_push_words(mf, tunnel.metadata, &md->tunnel.metadata,
                                    sizeof md->tunnel.metadata /
                                    sizeof(uint64_t));
            }
        } else {
            if (md->tunnel.metadata.present.len) {
                miniflow_push_words(mf, tunnel.metadata.present,
                                    &md->tunnel.metadata.present, 1);
                miniflow_push_words(mf, tunnel.metadata.opts.gnv,
                                    md->tunnel.metadata.opts.gnv,
                                    DIV_ROUND_UP(md->tunnel.metadata.present.len,
                                                 sizeof(uint64_t)));
            }
        }
    }
    if (md->skb_priority || md->pkt_mark) {
        miniflow_push_uint32(mf, skb_priority, md->skb_priority);
        miniflow_push_uint32(mf, pkt_mark, md->pkt_mark);
    }
    miniflow_push_uint32(mf, dp_hash, md->dp_hash);
    miniflow_push_uint32(mf, in_port, odp_to_u32(md->in_port.odp_port));
    if (md->recirc_id) {
        miniflow_push_uint32(mf, recirc_id, md->recirc_id);
        miniflow_pad_to_64(mf, conj_id);
    }

    /* Initialize packet's layer pointer and offsets. */
    l2 = data;
    dp_packet_reset_offsets(packet);

    /* Must have full Ethernet header to proceed. */
    if (OVS_UNLIKELY(size < sizeof(struct eth_header))) {
        goto out;
    } else {
        ovs_be16 vlan_tci;

        /* Link layer. */
        ASSERT_SEQUENTIAL(dl_dst, dl_src);
        miniflow_push_macs(mf, dl_dst, data);
        /* dl_type, vlan_tci. */
        vlan_tci = parse_vlan(&data, &size);
        dl_type = parse_ethertype(&data, &size);
        miniflow_push_be16(mf, dl_type, dl_type);
        miniflow_push_be16(mf, vlan_tci, vlan_tci);
    }

    /* Parse mpls. */
    if (OVS_UNLIKELY(eth_type_mpls(dl_type))) {
        int count;
        const void *mpls = data;

        packet->l2_5_ofs = (char *)data - l2;
        count = parse_mpls(&data, &size);
        miniflow_push_words_32(mf, mpls_lse, mpls, count);
    }

    /* Network layer. */
    packet->l3_ofs = (char *)data - l2;

    nw_frag = 0;
    if (OVS_LIKELY(dl_type == htons(ETH_TYPE_IP))) {
        const struct ip_header *nh = data;
        int ip_len;
        uint16_t tot_len;

        if (OVS_UNLIKELY(size < IP_HEADER_LEN)) {
            goto out;
        }
        ip_len = IP_IHL(nh->ip_ihl_ver) * 4;

        if (OVS_UNLIKELY(ip_len < IP_HEADER_LEN)) {
            goto out;
        }
        if (OVS_UNLIKELY(size < ip_len)) {
            goto out;
        }
        tot_len = ntohs(nh->ip_tot_len);
        if (OVS_UNLIKELY(tot_len > size)) {
            goto out;
        }
        if (OVS_UNLIKELY(size - tot_len > UINT8_MAX)) {
            goto out;
        }
        dp_packet_set_l2_pad_size(packet, size - tot_len);
        size = tot_len;   /* Never pull padding. */

        /* Push both source and destination address at once. */
        miniflow_push_words(mf, nw_src, &nh->ip_src, 1);

        miniflow_push_be32(mf, ipv6_label, 0); /* Padding for IPv4. */

        nw_tos = nh->ip_tos;
        nw_ttl = nh->ip_ttl;
        nw_proto = nh->ip_proto;
        if (OVS_UNLIKELY(IP_IS_FRAGMENT(nh->ip_frag_off))) {
            nw_frag = FLOW_NW_FRAG_ANY;
            if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
                nw_frag |= FLOW_NW_FRAG_LATER;
            }
        }
        data_pull(&data, &size, ip_len);
    } else if (dl_type == htons(ETH_TYPE_IPV6)) {
        const struct ovs_16aligned_ip6_hdr *nh;
        ovs_be32 tc_flow;
        uint16_t plen;

        if (OVS_UNLIKELY(size < sizeof *nh)) {
            goto out;
        }
        nh = data_pull(&data, &size, sizeof *nh);

        plen = ntohs(nh->ip6_plen);
        if (OVS_UNLIKELY(plen > size)) {
            goto out;
        }
        /* Jumbo Payload option not supported yet. */
        if (OVS_UNLIKELY(size - plen > UINT8_MAX)) {
            goto out;
        }
        dp_packet_set_l2_pad_size(packet, size - plen);
        size = plen;   /* Never pull padding. */

        miniflow_push_words(mf, ipv6_src, &nh->ip6_src,
                            sizeof nh->ip6_src / 8);
        miniflow_push_words(mf, ipv6_dst, &nh->ip6_dst,
                            sizeof nh->ip6_dst / 8);

        tc_flow = get_16aligned_be32(&nh->ip6_flow);
        {
            ovs_be32 label = tc_flow & htonl(IPV6_LABEL_MASK);
            miniflow_push_be32(mf, ipv6_label, label);
        }

        nw_tos = ntohl(tc_flow) >> 20;
        nw_ttl = nh->ip6_hlim;
        nw_proto = nh->ip6_nxt;

        while (1) {
            if (OVS_LIKELY((nw_proto != IPPROTO_HOPOPTS)
                           && (nw_proto != IPPROTO_ROUTING)
                           && (nw_proto != IPPROTO_DSTOPTS)
                           && (nw_proto != IPPROTO_AH)
                           && (nw_proto != IPPROTO_FRAGMENT))) {
                /* It's either a terminal header (e.g., TCP, UDP) or one we
                 * don't understand.  In either case, we're done with the
                 * packet, so use it to fill in 'nw_proto'. */
                break;
            }

            /* We only verify that at least 8 bytes of the next header are
             * available, but many of these headers are longer.  Ensure that
             * accesses within the extension header are within those first 8
             * bytes. All extension headers are required to be at least 8
             * bytes. */
            if (OVS_UNLIKELY(size < 8)) {
                goto out;
            }

            if ((nw_proto == IPPROTO_HOPOPTS)
                || (nw_proto == IPPROTO_ROUTING)
                || (nw_proto == IPPROTO_DSTOPTS)) {
                /* These headers, while different, have the fields we care
                 * about in the same location and with the same
                 * interpretation. */
                const struct ip6_ext *ext_hdr = data;
                nw_proto = ext_hdr->ip6e_nxt;
                if (OVS_UNLIKELY(!data_try_pull(&data, &size,
                                                (ext_hdr->ip6e_len + 1) * 8))) {
                    goto out;
                }
            } else if (nw_proto == IPPROTO_AH) {
                /* A standard AH definition isn't available, but the fields
                 * we care about are in the same location as the generic
                 * option header--only the header length is calculated
                 * differently. */
                const struct ip6_ext *ext_hdr = data;
                nw_proto = ext_hdr->ip6e_nxt;
                if (OVS_UNLIKELY(!data_try_pull(&data, &size,
                                                (ext_hdr->ip6e_len + 2) * 4))) {
                    goto out;
                }
            } else if (nw_proto == IPPROTO_FRAGMENT) {
                const struct ovs_16aligned_ip6_frag *frag_hdr = data;

                nw_proto = frag_hdr->ip6f_nxt;
                if (!data_try_pull(&data, &size, sizeof *frag_hdr)) {
                    goto out;
                }

                /* We only process the first fragment. */
                if (frag_hdr->ip6f_offlg != htons(0)) {
                    nw_frag = FLOW_NW_FRAG_ANY;
                    if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) {
                        nw_frag |= FLOW_NW_FRAG_LATER;
                        nw_proto = IPPROTO_FRAGMENT;
                        break;
                    }
                }
            }
        }
    } else {
        if (dl_type == htons(ETH_TYPE_ARP) ||
            dl_type == htons(ETH_TYPE_RARP)) {
            uint8_t arp_buf[2][ETH_ADDR_LEN];
            const struct arp_eth_header *arp = (const struct arp_eth_header *)
                data_try_pull(&data, &size, ARP_ETH_HEADER_LEN);

            if (OVS_LIKELY(arp) && OVS_LIKELY(arp->ar_hrd == htons(1))
                && OVS_LIKELY(arp->ar_pro == htons(ETH_TYPE_IP))
                && OVS_LIKELY(arp->ar_hln == ETH_ADDR_LEN)
                && OVS_LIKELY(arp->ar_pln == 4)) {
                miniflow_push_be32(mf, nw_src,
                                   get_16aligned_be32(&arp->ar_spa));
                miniflow_push_be32(mf, nw_dst,
                                   get_16aligned_be32(&arp->ar_tpa));

                /* We only match on the lower 8 bits of the opcode. */
                if (OVS_LIKELY(ntohs(arp->ar_op) <= 0xff)) {
                    miniflow_push_be32(mf, ipv6_label, 0); /* Pad with ARP. */
                    miniflow_push_be32(mf, nw_frag, htonl(ntohs(arp->ar_op)));
                }

                /* Must be adjacent. */
                ASSERT_SEQUENTIAL(arp_sha, arp_tha);

                memcpy(arp_buf[0], arp->ar_sha, ETH_ADDR_LEN);
                memcpy(arp_buf[1], arp->ar_tha, ETH_ADDR_LEN);
                miniflow_push_macs(mf, arp_sha, arp_buf);
                miniflow_pad_to_64(mf, tcp_flags);
            }
        }
        goto out;
    }

    packet->l4_ofs = (char *)data - l2;
    miniflow_push_be32(mf, nw_frag,
                       BYTES_TO_BE32(nw_frag, nw_tos, nw_ttl, nw_proto));

    if (OVS_LIKELY(!(nw_frag & FLOW_NW_FRAG_LATER))) {
        if (OVS_LIKELY(nw_proto == IPPROTO_TCP)) {
            if (OVS_LIKELY(size >= TCP_HEADER_LEN)) {
                const struct tcp_header *tcp = data;

                miniflow_push_be32(mf, arp_tha[2], 0);
                miniflow_push_be32(mf, tcp_flags,
                                   TCP_FLAGS_BE32(tcp->tcp_ctl));
                miniflow_push_be16(mf, tp_src, tcp->tcp_src);
                miniflow_push_be16(mf, tp_dst, tcp->tcp_dst);
                miniflow_pad_to_64(mf, igmp_group_ip4);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_UDP)) {
            if (OVS_LIKELY(size >= UDP_HEADER_LEN)) {
                const struct udp_header *udp = data;

                miniflow_push_be16(mf, tp_src, udp->udp_src);
                miniflow_push_be16(mf, tp_dst, udp->udp_dst);
                miniflow_pad_to_64(mf, igmp_group_ip4);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_SCTP)) {
            if (OVS_LIKELY(size >= SCTP_HEADER_LEN)) {
                const struct sctp_header *sctp = data;

                miniflow_push_be16(mf, tp_src, sctp->sctp_src);
                miniflow_push_be16(mf, tp_dst, sctp->sctp_dst);
                miniflow_pad_to_64(mf, igmp_group_ip4);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMP)) {
            if (OVS_LIKELY(size >= ICMP_HEADER_LEN)) {
                const struct icmp_header *icmp = data;

                miniflow_push_be16(mf, tp_src, htons(icmp->icmp_type));
                miniflow_push_be16(mf, tp_dst, htons(icmp->icmp_code));
                miniflow_pad_to_64(mf, igmp_group_ip4);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_IGMP)) {
            if (OVS_LIKELY(size >= IGMP_HEADER_LEN)) {
                const struct igmp_header *igmp = data;

                miniflow_push_be16(mf, tp_src, htons(igmp->igmp_type));
                miniflow_push_be16(mf, tp_dst, htons(igmp->igmp_code));
                miniflow_push_be32(mf, igmp_group_ip4,
                                   get_16aligned_be32(&igmp->group));
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMPV6)) {
            if (OVS_LIKELY(size >= sizeof(struct icmp6_hdr))) {
                const struct in6_addr *nd_target = NULL;
                uint8_t arp_buf[2][ETH_ADDR_LEN];
                const struct icmp6_hdr *icmp = data_pull(&data, &size,
                                                         sizeof *icmp);
                memset(arp_buf, 0, sizeof arp_buf);
                if (OVS_LIKELY(parse_icmpv6(&data, &size, icmp, &nd_target,
                                            arp_buf))) {
                    if (nd_target) {
                        miniflow_push_words(mf, nd_target, nd_target,
                                            sizeof *nd_target / 8);
                    }
                    miniflow_push_macs(mf, arp_sha, arp_buf);
                    miniflow_pad_to_64(mf, tcp_flags);
                    miniflow_push_be16(mf, tp_src, htons(icmp->icmp6_type));
                    miniflow_push_be16(mf, tp_dst, htons(icmp->icmp6_code));
                    miniflow_pad_to_64(mf, igmp_group_ip4);
                }
            }
        }
    }
 out:
    *dst = mf.maps;
}
/* For every bit of a field that is wildcarded in 'wildcards', sets the
 * corresponding bit in 'flow' to zero. */
void
flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
{
    uint64_t *flow_u64 = (uint64_t *) flow;
    const uint64_t *wc_u64 = (const uint64_t *) &wildcards->masks;
    size_t i;

    for (i = 0; i < FLOW_U64S; i++) {
        flow_u64[i] &= wc_u64[i];
    }
}
void
flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->nw_proto != IPPROTO_ICMP) {
        memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
    } else {
        wc->masks.tp_src = htons(0xff);
        wc->masks.tp_dst = htons(0xff);
    }
}
/* Initializes 'flow_metadata' with the metadata found in 'flow'. */
void
flow_get_metadata(const struct flow *flow, struct match *flow_metadata)
{
    int i;

    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);

    match_init_catchall(flow_metadata);
    if (flow->tunnel.tun_id != htonll(0)) {
        match_set_tun_id(flow_metadata, flow->tunnel.tun_id);
    }
    if (flow->tunnel.flags & FLOW_TNL_PUB_F_MASK) {
        match_set_tun_flags(flow_metadata,
                            flow->tunnel.flags & FLOW_TNL_PUB_F_MASK);
    }
    if (flow->tunnel.ip_src != htonl(0)) {
        match_set_tun_src(flow_metadata, flow->tunnel.ip_src);
    }
    if (flow->tunnel.ip_dst != htonl(0)) {
        match_set_tun_dst(flow_metadata, flow->tunnel.ip_dst);
    }
    if (flow->tunnel.gbp_id != htons(0)) {
        match_set_tun_gbp_id(flow_metadata, flow->tunnel.gbp_id);
    }
    if (flow->tunnel.gbp_flags) {
        match_set_tun_gbp_flags(flow_metadata, flow->tunnel.gbp_flags);
    }
    tun_metadata_get_fmd(&flow->tunnel, flow_metadata);
    if (flow->metadata != htonll(0)) {
        match_set_metadata(flow_metadata, flow->metadata);
    }

    for (i = 0; i < FLOW_N_REGS; i++) {
        if (flow->regs[i]) {
            match_set_reg(flow_metadata, i, flow->regs[i]);
        }
    }

    if (flow->pkt_mark != 0) {
        match_set_pkt_mark(flow_metadata, flow->pkt_mark);
    }

    match_set_in_port(flow_metadata, flow->in_port.ofp_port);
}
char *
flow_to_string(const struct flow *flow)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    flow_format(&ds, flow);
    return ds_cstr(&ds);
}
const char *
flow_tun_flag_to_string(uint32_t flags)
{
    switch (flags) {
    case FLOW_TNL_F_DONT_FRAGMENT:
        return "df";
    case FLOW_TNL_F_CSUM:
        return "csum";
    case FLOW_TNL_F_KEY:
        return "key";
    case FLOW_TNL_F_OAM:
        return "oam";
    default:
        return NULL;
    }
}

void
format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
             uint32_t flags, char del)
{
    uint32_t bad = 0;

    if (!flags) {
        ds_put_char(ds, '0');
        return;
    }

    while (flags) {
        uint32_t bit = rightmost_1bit(flags);
        const char *s;

        s = bit_to_string(bit);
        if (s) {
            ds_put_format(ds, "%s%c", s, del);
        } else {
            bad |= bit;
        }

        flags &= ~bit;
    }

    if (bad) {
        ds_put_format(ds, "0x%"PRIx32"%c", bad, del);
    }
    ds_chomp(ds, del);
}
void
format_flags_masked(struct ds *ds, const char *name,
                    const char *(*bit_to_string)(uint32_t), uint32_t flags,
                    uint32_t mask, uint32_t max_mask)
{
    if (name) {
        ds_put_format(ds, "%s=", name);
    }

    if (mask == max_mask) {
        format_flags(ds, bit_to_string, flags, '|');
        return;
    }

    if (!mask) {
        ds_put_cstr(ds, "0/0");
        return;
    }

    while (mask) {
        uint32_t bit = rightmost_1bit(mask);
        const char *s = bit_to_string(bit);

        ds_put_format(ds, "%s%s", (flags & bit) ? "+" : "-",
                      s ? s : "[Unknown]");
        mask &= ~bit;
    }
}
/* Scans a string 's' of flags to determine their numerical value and
 * returns the number of characters parsed using 'bit_to_string' to
 * lookup flag names.  Scanning continues until the character 'end' is
 * reached.
 *
 * In the event of a failure, a negative error code will be returned.  In
 * addition, if 'res_string' is non-NULL then a descriptive string will
 * be returned incorporating the identifying string 'field_name'.  This
 * error string must be freed by the caller.
 *
 * Upon success, the flag values will be stored in 'res_flags' and
 * optionally 'res_mask', if it is non-NULL (if it is NULL then any masks
 * present in the original string will be considered an error).  The
 * caller may restrict the acceptable set of values through the mask
 * 'allowed'. */
int
parse_flags(const char *s, const char *(*bit_to_string)(uint32_t),
            char end, const char *field_name, char **res_string,
            uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
{
    uint32_t result = 0;
    int n;

    /* Parse masked flags in numeric format? */
    if (res_mask && ovs_scan(s, "%"SCNi32"/%"SCNi32"%n",
                             res_flags, res_mask, &n) && n > 0) {
        if (*res_flags & ~allowed || *res_mask & ~allowed) {
            goto unknown;
        }
        return n;
    }

    n = 0;

    if (res_mask && (*s == '+' || *s == '-')) {
        uint32_t flags = 0, mask = 0;

        /* Parse masked flags. */
        while (s[0] != end) {
            bool set;
            uint32_t bit;
            size_t len;

            if (s[0] == '+') {
                set = true;
            } else if (s[0] == '-') {
                set = false;
            } else {
                if (res_string) {
                    *res_string = xasprintf("%s: %s must be preceded by '+' "
                                            "(for SET) or '-' (NOT SET)", s,
                                            field_name);
                }
                return -EINVAL;
            }
            s++;
            n++;

            for (bit = 1; bit; bit <<= 1) {
                const char *fname = bit_to_string(bit);

                if (!fname) {
                    continue;
                }

                len = strlen(fname);
                if (strncmp(s, fname, len) ||
                    (s[len] != '+' && s[len] != '-' && s[len] != end)) {
                    continue;
                }

                if (mask & bit) {
                    /* bit already set. */
                    if (res_string) {
                        *res_string = xasprintf("%s: Each %s flag can be "
                                                "specified only once", s,
                                                field_name);
                    }
                    return -EINVAL;
                }
                if (!(bit & allowed)) {
                    goto unknown;
                }
                if (set) {
                    flags |= bit;
                }
                mask |= bit;
                break;
            }

            if (!bit) {
                goto unknown;
            }
            s += len;
            n += len;
        }

        *res_flags = flags;
        *res_mask = mask;
        return n;
    }

    /* Parse unmasked flags.  If a flag is present, it is set, otherwise
     * it is not set. */
    while (s[n] != end) {
        unsigned long long int flags;
        uint32_t bit;
        int n0;

        if (ovs_scan(&s[n], "%lli%n", &flags, &n0)) {
            if (flags & ~allowed) {
                goto unknown;
            }
            n += n0 + (s[n + n0] == '|');
            result |= flags;
            continue;
        }

        for (bit = 1; bit; bit <<= 1) {
            const char *name = bit_to_string(bit);
            size_t len;

            if (!name) {
                continue;
            }

            len = strlen(name);
            if (!strncmp(s + n, name, len) &&
                (s[n + len] == '|' || s[n + len] == end)) {
                if (!(bit & allowed)) {
                    goto unknown;
                }
                result |= bit;
                n += len + (s[n + len] == '|');
                break;
            }
        }

        if (!bit) {
            goto unknown;
        }
    }

    *res_flags = result;
    if (res_mask) {
        *res_mask = UINT32_MAX;
    }
    if (res_string) {
        *res_string = NULL;
    }
    return n;

unknown:
    if (res_string) {
        *res_string = xasprintf("%s: unknown %s flag(s)", s, field_name);
    }
    return -EINVAL;
}
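
/* Illustrative usage, not part of the original file: parsing a tunnel flag
 * string such as "df|csum", or a masked form like "+df-csum", with
 * parse_flags() and the flow_tun_flag_to_string() callback defined above.
 * The wrapper and its 'allowed' parameter are hypothetical. */
static inline int
example_parse_tun_flags(const char *s, uint32_t allowed,
                        uint32_t *flags, uint32_t *mask)
{
    char *error = NULL;
    int n = parse_flags(s, flow_tun_flag_to_string, '\0', "tun_flags",
                        &error, flags, allowed, mask);

    if (n < 0) {
        free(error);           /* On failure 'error' describes the problem. */
    }
    return n;                  /* Number of characters consumed. */
}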
void
flow_format(struct ds *ds, const struct flow *flow)
{
    struct match match;
    struct flow_wildcards *wc = &match.wc;

    match_wc_init(&match, flow);

    /* As this function is most often used for formatting a packet in a
     * packet-in message, skip formatting the packet context fields that are
     * all-zeroes to make the print-out easier on the eyes.  This means that a
     * missing context field implies a zero value for that field.  This is
     * similar to OpenFlow encoding of these fields, as the specification
     * states that all-zeroes context fields should not be encoded in the
     * packet-in messages. */
    if (!flow->in_port.ofp_port) {
        WC_UNMASK_FIELD(wc, in_port);
    }
    if (!flow->skb_priority) {
        WC_UNMASK_FIELD(wc, skb_priority);
    }
    if (!flow->pkt_mark) {
        WC_UNMASK_FIELD(wc, pkt_mark);
    }
    if (!flow->recirc_id) {
        WC_UNMASK_FIELD(wc, recirc_id);
    }
    if (!flow->dp_hash) {
        WC_UNMASK_FIELD(wc, dp_hash);
    }
    for (int i = 0; i < FLOW_N_REGS; i++) {
        if (!flow->regs[i]) {
            WC_UNMASK_FIELD(wc, regs[i]);
        }
    }
    if (!flow->metadata) {
        WC_UNMASK_FIELD(wc, metadata);
    }

    match_format(&match, ds, OFP_DEFAULT_PRIORITY);
}
void
flow_print(FILE *stream, const struct flow *flow)
{
    char *s = flow_to_string(flow);
    fputs(s, stream);
    free(s);
}
/* flow_wildcards functions. */

/* Initializes 'wc' as a set of wildcards that matches every packet. */
void
flow_wildcards_init_catchall(struct flow_wildcards *wc)
{
    memset(&wc->masks, 0, sizeof wc->masks);
}
/* Converts a flow into flow wildcards.  It sets the wildcard masks based on
 * the packet headers extracted to 'flow'.  It will not set the mask for fields
 * that do not make sense for the packet type.  OpenFlow-only metadata is
 * wildcarded, but other metadata is unconditionally exact-matched. */
void flow_wildcards_init_for_packet(struct flow_wildcards *wc,
                                    const struct flow *flow)
{
    memset(&wc->masks, 0x0, sizeof wc->masks);

    /* Update this function whenever struct flow changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);

    if (flow->tunnel.ip_dst) {
        if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
            WC_MASK_FIELD(wc, tunnel.tun_id);
        }
        WC_MASK_FIELD(wc, tunnel.ip_src);
        WC_MASK_FIELD(wc, tunnel.ip_dst);
        WC_MASK_FIELD(wc, tunnel.flags);
        WC_MASK_FIELD(wc, tunnel.ip_tos);
        WC_MASK_FIELD(wc, tunnel.ip_ttl);
        WC_MASK_FIELD(wc, tunnel.tp_src);
        WC_MASK_FIELD(wc, tunnel.tp_dst);
        WC_MASK_FIELD(wc, tunnel.gbp_id);
        WC_MASK_FIELD(wc, tunnel.gbp_flags);

        if (!(flow->tunnel.flags & FLOW_TNL_F_UDPIF)) {
            if (flow->tunnel.metadata.present.map) {
                wc->masks.tunnel.metadata.present.map =
                    flow->tunnel.metadata.present.map;
                WC_MASK_FIELD(wc, tunnel.metadata.opts.u8);
            }
        } else {
            WC_MASK_FIELD(wc, tunnel.metadata.present.len);
            memset(wc->masks.tunnel.metadata.opts.gnv, 0xff,
                   flow->tunnel.metadata.present.len);
        }
    } else if (flow->tunnel.tun_id) {
        WC_MASK_FIELD(wc, tunnel.tun_id);
    }

    /* metadata, regs, and conj_id wildcarded. */

    WC_MASK_FIELD(wc, skb_priority);
    WC_MASK_FIELD(wc, pkt_mark);
    WC_MASK_FIELD(wc, recirc_id);
    WC_MASK_FIELD(wc, dp_hash);
    WC_MASK_FIELD(wc, in_port);

    /* actset_output wildcarded. */

    WC_MASK_FIELD(wc, dl_dst);
    WC_MASK_FIELD(wc, dl_src);
    WC_MASK_FIELD(wc, dl_type);
    WC_MASK_FIELD(wc, vlan_tci);

    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        WC_MASK_FIELD(wc, nw_src);
        WC_MASK_FIELD(wc, nw_dst);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        WC_MASK_FIELD(wc, ipv6_src);
        WC_MASK_FIELD(wc, ipv6_dst);
        WC_MASK_FIELD(wc, ipv6_label);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        WC_MASK_FIELD(wc, nw_src);
        WC_MASK_FIELD(wc, nw_dst);
        WC_MASK_FIELD(wc, nw_proto);
        WC_MASK_FIELD(wc, arp_sha);
        WC_MASK_FIELD(wc, arp_tha);
        return;
    } else if (eth_type_mpls(flow->dl_type)) {
        for (int i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
            WC_MASK_FIELD(wc, mpls_lse[i]);
            if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
                break;
            }
        }
        return;
    } else {
        return; /* Unknown ethertype. */
    }

    /* IPv4 or IPv6. */
    WC_MASK_FIELD(wc, nw_frag);
    WC_MASK_FIELD(wc, nw_tos);
    WC_MASK_FIELD(wc, nw_ttl);
    WC_MASK_FIELD(wc, nw_proto);

    /* No transport layer header in later fragments. */
    if (!(flow->nw_frag & FLOW_NW_FRAG_LATER) &&
        (flow->nw_proto == IPPROTO_ICMP ||
         flow->nw_proto == IPPROTO_ICMPV6 ||
         flow->nw_proto == IPPROTO_TCP ||
         flow->nw_proto == IPPROTO_UDP ||
         flow->nw_proto == IPPROTO_SCTP ||
         flow->nw_proto == IPPROTO_IGMP)) {
        WC_MASK_FIELD(wc, tp_src);
        WC_MASK_FIELD(wc, tp_dst);

        if (flow->nw_proto == IPPROTO_TCP) {
            WC_MASK_FIELD(wc, tcp_flags);
        } else if (flow->nw_proto == IPPROTO_ICMPV6) {
            WC_MASK_FIELD(wc, arp_sha);
            WC_MASK_FIELD(wc, arp_tha);
            WC_MASK_FIELD(wc, nd_target);
        } else if (flow->nw_proto == IPPROTO_IGMP) {
            WC_MASK_FIELD(wc, igmp_group_ip4);
        }
    }
}
/* Return a map of possible fields for a packet of the same type as 'flow'.
 * Including extra bits in the returned mask is not wrong, it is just less
 * optimal.
 *
 * This is a less precise version of flow_wildcards_init_for_packet() above. */
void
flow_wc_map(const struct flow *flow, struct miniflow *map)
{
    /* Update this function whenever struct flow changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);

    map->tnl_map = 0;
    if (flow->tunnel.ip_dst) {
        map->tnl_map |= MINIFLOW_TNL_MAP__(tunnel,
                                           offsetof(struct flow_tnl, metadata));
        if (!(flow->tunnel.flags & FLOW_TNL_F_UDPIF)) {
            if (flow->tunnel.metadata.present.map) {
                map->tnl_map |= MINIFLOW_TNL_MAP(tunnel.metadata);
            }
        } else {
            map->tnl_map |= MINIFLOW_TNL_MAP(tunnel.metadata.present.len);
            map->tnl_map |= MINIFLOW_TNL_MAP__(tunnel.metadata.opts.gnv,
                                               flow->tunnel.metadata.present.len);
        }
    }

    /* Metadata fields that can appear on packet input. */
    map->pkt_map = MINIFLOW_PKT_MAP(skb_priority) | MINIFLOW_PKT_MAP(pkt_mark)
        | MINIFLOW_PKT_MAP(recirc_id) | MINIFLOW_PKT_MAP(dp_hash)
        | MINIFLOW_PKT_MAP(in_port)
        | MINIFLOW_PKT_MAP(dl_dst) | MINIFLOW_PKT_MAP(dl_src)
        | MINIFLOW_PKT_MAP(dl_type) | MINIFLOW_PKT_MAP(vlan_tci);

    /* Ethertype-dependent fields. */
    if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
        map->pkt_map |= MINIFLOW_PKT_MAP(nw_src) | MINIFLOW_PKT_MAP(nw_dst)
            | MINIFLOW_PKT_MAP(nw_proto) | MINIFLOW_PKT_MAP(nw_frag)
            | MINIFLOW_PKT_MAP(nw_tos) | MINIFLOW_PKT_MAP(nw_ttl);
        if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
            map->pkt_map |= MINIFLOW_PKT_MAP(igmp_group_ip4);
        } else {
            map->pkt_map |= MINIFLOW_PKT_MAP(tcp_flags)
                | MINIFLOW_PKT_MAP(tp_src) | MINIFLOW_PKT_MAP(tp_dst);
        }
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        map->pkt_map |= MINIFLOW_PKT_MAP(ipv6_src) | MINIFLOW_PKT_MAP(ipv6_dst)
            | MINIFLOW_PKT_MAP(ipv6_label)
            | MINIFLOW_PKT_MAP(nw_proto) | MINIFLOW_PKT_MAP(nw_frag)
            | MINIFLOW_PKT_MAP(nw_tos) | MINIFLOW_PKT_MAP(nw_ttl);
        if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
            map->pkt_map |= MINIFLOW_PKT_MAP(nd_target)
                | MINIFLOW_PKT_MAP(arp_sha) | MINIFLOW_PKT_MAP(arp_tha);
        } else {
            map->pkt_map |= MINIFLOW_PKT_MAP(tcp_flags)
                | MINIFLOW_PKT_MAP(tp_src) | MINIFLOW_PKT_MAP(tp_dst);
        }
    } else if (eth_type_mpls(flow->dl_type)) {
        map->pkt_map |= MINIFLOW_PKT_MAP(mpls_lse);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        map->pkt_map |= MINIFLOW_PKT_MAP(nw_src) | MINIFLOW_PKT_MAP(nw_dst)
            | MINIFLOW_PKT_MAP(nw_proto)
            | MINIFLOW_PKT_MAP(arp_sha) | MINIFLOW_PKT_MAP(arp_tha);
    }
}
/* Clear the metadata and register wildcard masks. They are not packet
 * header fields. */
void
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
    /* Update this function whenever struct flow changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);

    memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
    memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
    wc->masks.actset_output = 0;
    wc->masks.conj_id = 0;
}
/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
 * fields. */
bool
flow_wildcards_is_catchall(const struct flow_wildcards *wc)
{
    const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
    size_t i;

    for (i = 0; i < FLOW_U64S; i++) {
        if (wc_u64[i]) {
            return false;
        }
    }
    return true;
}
/* Sets 'dst' as the bitwise AND of wildcards in 'src1' and 'src2'.
 * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded
 * in 'src1' or 'src2' or both. */
void
flow_wildcards_and(struct flow_wildcards *dst,
                   const struct flow_wildcards *src1,
                   const struct flow_wildcards *src2)
{
    uint64_t *dst_u64 = (uint64_t *) &dst->masks;
    const uint64_t *src1_u64 = (const uint64_t *) &src1->masks;
    const uint64_t *src2_u64 = (const uint64_t *) &src2->masks;
    size_t i;

    for (i = 0; i < FLOW_U64S; i++) {
        dst_u64[i] = src1_u64[i] & src2_u64[i];
    }
}
/* Sets 'dst' as the bitwise OR of wildcards in 'src1' and 'src2'.  That
 * is, a bit or a field is wildcarded in 'dst' only if it is wildcarded
 * in both 'src1' and 'src2'. */
void
flow_wildcards_or(struct flow_wildcards *dst,
                  const struct flow_wildcards *src1,
                  const struct flow_wildcards *src2)
{
    uint64_t *dst_u64 = (uint64_t *) &dst->masks;
    const uint64_t *src1_u64 = (const uint64_t *) &src1->masks;
    const uint64_t *src2_u64 = (const uint64_t *) &src2->masks;
    size_t i;

    for (i = 0; i < FLOW_U64S; i++) {
        dst_u64[i] = src1_u64[i] | src2_u64[i];
    }
}
/* Returns a hash of the wildcards in 'wc'. */
uint32_t
flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
{
    return flow_hash(&wc->masks, basis);
}
/* Returns true if 'a' and 'b' represent the same wildcards, false if they are
 * different. */
bool
flow_wildcards_equal(const struct flow_wildcards *a,
                     const struct flow_wildcards *b)
{
    return flow_equal(&a->masks, &b->masks);
}
/* Returns true if at least one bit or field is wildcarded in 'a' but not in
 * 'b', false otherwise. */
bool
flow_wildcards_has_extra(const struct flow_wildcards *a,
                         const struct flow_wildcards *b)
{
    const uint64_t *a_u64 = (const uint64_t *) &a->masks;
    const uint64_t *b_u64 = (const uint64_t *) &b->masks;
    size_t i;

    for (i = 0; i < FLOW_U64S; i++) {
        if ((a_u64[i] & b_u64[i]) != b_u64[i]) {
            return true;
        }
    }
    return false;
}
/* Returns true if 'a' and 'b' are equal, except that 0-bits (wildcarded bits)
 * in 'wc' do not need to be equal in 'a' and 'b'. */
bool
flow_equal_except(const struct flow *a, const struct flow *b,
                  const struct flow_wildcards *wc)
{
    const uint64_t *a_u64 = (const uint64_t *) a;
    const uint64_t *b_u64 = (const uint64_t *) b;
    const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
    size_t i;

    for (i = 0; i < FLOW_U64S; i++) {
        if ((a_u64[i] ^ b_u64[i]) & wc_u64[i]) {
            return false;
        }
    }
    return true;
}
/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
 * (A 0-bit indicates a wildcard bit.) */
void
flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
{
    wc->masks.regs[idx] = mask;
}
/* Sets the wildcard mask for extended register 'idx' in 'wc' to 'mask'.
 * (A 0-bit indicates a wildcard bit.) */
void
flow_wildcards_set_xreg_mask(struct flow_wildcards *wc, int idx, uint64_t mask)
{
    flow_set_xreg(&wc->masks, idx, mask);
}
/* Calculates the 5-tuple hash from the given miniflow.
 * This returns the same value as flow_hash_5tuple for the corresponding
 * flow. */
uint32_t
miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis)
{
    uint32_t hash = basis;

    if (flow) {
        ovs_be16 dl_type = MINIFLOW_GET_BE16(flow, dl_type);

        hash = hash_add(hash, MINIFLOW_GET_U8(flow, nw_proto));

        /* Separate loops for better optimization. */
        if (dl_type == htons(ETH_TYPE_IPV6)) {
            struct miniflow maps = { 0, MINIFLOW_PKT_MAP(ipv6_src)
                                     | MINIFLOW_PKT_MAP(ipv6_dst) };
            uint64_t value;

            MINIFLOW_FOR_EACH_IN_PKT_MAP(value, flow, maps) {
                hash = hash_add64(hash, value);
            }
        } else {
            hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_src));
            hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_dst));
        }
        /* Add both ports at once. */
        hash = hash_add(hash, MINIFLOW_GET_U32(flow, tp_src));
        hash = hash_finish(hash, 42); /* Arbitrary number. */
    }
    return hash;
}
ASSERT_SEQUENTIAL_SAME_WORD(tp_src, tp_dst);
ASSERT_SEQUENTIAL(ipv6_src, ipv6_dst);
/* Calculates the 5-tuple hash from the given flow. */
uint32_t
flow_hash_5tuple(const struct flow *flow, uint32_t basis)
{
    uint32_t hash = basis;

    if (flow) {
        hash = hash_add(hash, flow->nw_proto);

        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            const uint64_t *flow_u64 = (const uint64_t *)flow;
            int ofs = offsetof(struct flow, ipv6_src) / 8;
            int end = ofs + 2 * sizeof flow->ipv6_src / 8;

            for (; ofs < end; ofs++) {
                hash = hash_add64(hash, flow_u64[ofs]);
            }
        } else {
            hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_src);
            hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
        }
        /* Add both ports at once. */
        hash = hash_add(hash,
                        ((const uint32_t *)flow)[offsetof(struct flow, tp_src)
                                                 / sizeof(uint32_t)]);
        hash = hash_finish(hash, 42); /* Arbitrary number. */
    }
    return hash;
}
/* Hashes 'flow' based on its L2 through L4 protocol information. */
uint32_t
flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
{
    struct {
        union {
            ovs_be32 ipv4_addr;
            struct in6_addr ipv6_addr;
        };
        ovs_be16 eth_type;
        ovs_be16 vlan_tci;
        ovs_be16 tp_port;
        uint8_t eth_addr[ETH_ADDR_LEN];
        uint8_t ip_proto;
    } fields;

    int i;

    memset(&fields, 0, sizeof fields);
    for (i = 0; i < ETH_ADDR_LEN; i++) {
        fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
    }
    fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
    fields.eth_type = flow->dl_type;

    /* UDP source and destination port are not taken into account because they
     * will not necessarily be symmetric in a bidirectional flow. */
    if (fields.eth_type == htons(ETH_TYPE_IP)) {
        fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;
        }
    } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
        const uint8_t *a = &flow->ipv6_src.s6_addr[0];
        const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
        uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];

        for (i = 0; i < 16; i++) {
            ipv6_addr[i] = a[i] ^ b[i];
        }
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;
        }
    }
    return jhash_bytes(&fields, sizeof fields, basis);
}
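
/* Illustrative check, not part of the original file: every endpoint field in
 * 'fields' above is an XOR of the two directions, so swapping source and
 * destination yields the same hash.  This hypothetical helper demonstrates
 * the property for an IPv4 flow. */
static inline bool
example_symmetric_l4_agrees(const struct flow *fwd, uint32_t basis)
{
    struct flow rev = *fwd;

    /* Swap the endpoint fields; everything else is direction-independent. */
    memcpy(rev.dl_src, fwd->dl_dst, ETH_ADDR_LEN);
    memcpy(rev.dl_dst, fwd->dl_src, ETH_ADDR_LEN);
    rev.nw_src = fwd->nw_dst;
    rev.nw_dst = fwd->nw_src;
    rev.tp_src = fwd->tp_dst;
    rev.tp_dst = fwd->tp_src;

    return flow_hash_symmetric_l4(&rev, basis)
           == flow_hash_symmetric_l4(fwd, basis);
}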
/* Hashes 'flow' based on its L3 through L4 protocol information. */
uint32_t
flow_hash_symmetric_l3l4(const struct flow *flow, uint32_t basis,
                         bool inc_udp_ports)
{
    uint32_t hash = basis;

    /* UDP source and destination port are also taken into account. */
    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        hash = hash_add(hash,
                        (OVS_FORCE uint32_t) (flow->nw_src ^ flow->nw_dst));
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        /* IPv6 addresses are 64-bit aligned inside struct flow. */
        const uint64_t *a = ALIGNED_CAST(uint64_t *, flow->ipv6_src.s6_addr);
        const uint64_t *b = ALIGNED_CAST(uint64_t *, flow->ipv6_dst.s6_addr);

        for (int i = 0; i < sizeof flow->ipv6_src / sizeof *a; i++) {
            hash = hash_add64(hash, a[i] ^ b[i]);
        }
    } else {
        /* Cannot hash non-IP flows */
        return 0;
    }

    hash = hash_add(hash, flow->nw_proto);
    if (flow->nw_proto == IPPROTO_TCP || flow->nw_proto == IPPROTO_SCTP ||
        (inc_udp_ports && flow->nw_proto == IPPROTO_UDP)) {
        hash = hash_add(hash,
                        (OVS_FORCE uint16_t) (flow->tp_src ^ flow->tp_dst));
    }

    return hash_finish(hash, basis);
}
/* Initialize a flow with random fields that matter for nx_hash_fields. */
void
flow_random_hash_fields(struct flow *flow)
{
    uint16_t rnd = random_uint16();

    /* Initialize to all zeros. */
    memset(flow, 0, sizeof *flow);

    eth_addr_random(flow->dl_src);
    eth_addr_random(flow->dl_dst);

    flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);

    /* Make most of the random flows IPv4, some IPv6, and rest random. */
    flow->dl_type = rnd < 0x8000 ? htons(ETH_TYPE_IP) :
        rnd < 0xc000 ? htons(ETH_TYPE_IPV6) : (OVS_FORCE ovs_be16)rnd;

    if (dl_type_is_ip_any(flow->dl_type)) {
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            flow->nw_src = (OVS_FORCE ovs_be32)random_uint32();
            flow->nw_dst = (OVS_FORCE ovs_be32)random_uint32();
        } else {
            random_bytes(&flow->ipv6_src, sizeof flow->ipv6_src);
            random_bytes(&flow->ipv6_dst, sizeof flow->ipv6_dst);
        }
        /* Make most of IP flows TCP, some UDP or SCTP, and rest random. */
        rnd = random_uint16();
        flow->nw_proto = rnd < 0x8000 ? IPPROTO_TCP :
            rnd < 0xc000 ? IPPROTO_UDP :
            rnd < 0xd000 ? IPPROTO_SCTP : (uint8_t)rnd;
        if (flow->nw_proto == IPPROTO_TCP ||
            flow->nw_proto == IPPROTO_UDP ||
            flow->nw_proto == IPPROTO_SCTP) {
            flow->tp_src = (OVS_FORCE ovs_be16)random_uint16();
            flow->tp_dst = (OVS_FORCE ovs_be16)random_uint16();
        }
    }
}
/* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
void
flow_mask_hash_fields(const struct flow *flow, struct flow_wildcards *wc,
                      enum nx_hash_fields fields)
{
    switch (fields) {
    case NX_HASH_FIELDS_ETH_SRC:
        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
        break;

    case NX_HASH_FIELDS_SYMMETRIC_L4:
        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
            memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
            memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
        }
        if (is_ip_any(flow)) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            flow_unwildcard_tp_ports(flow, wc);
        }
        wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
        break;

    case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP:
        if (is_ip_any(flow) && flow->nw_proto == IPPROTO_UDP) {
            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
            memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
        }
        /* Fall through. */
    case NX_HASH_FIELDS_SYMMETRIC_L3L4:
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
            memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
            memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
        } else {
            break; /* non-IP flow */
        }

        memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
        if (flow->nw_proto == IPPROTO_TCP || flow->nw_proto == IPPROTO_SCTP) {
            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
            memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
        }
        break;

    default:
        OVS_NOT_REACHED();
    }
}
/* Hashes the portions of 'flow' designated by 'fields'. */
uint32_t
flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
                 uint16_t basis)
{
    switch (fields) {

    case NX_HASH_FIELDS_ETH_SRC:
        return jhash_bytes(flow->dl_src, sizeof flow->dl_src, basis);

    case NX_HASH_FIELDS_SYMMETRIC_L4:
        return flow_hash_symmetric_l4(flow, basis);

    case NX_HASH_FIELDS_SYMMETRIC_L3L4:
        return flow_hash_symmetric_l3l4(flow, basis, false);

    case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP:
        return flow_hash_symmetric_l3l4(flow, basis, true);

    }

    OVS_NOT_REACHED();
}
/* Returns a string representation of 'fields'. */
const char *
flow_hash_fields_to_str(enum nx_hash_fields fields)
{
    switch (fields) {
    case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
    case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
    case NX_HASH_FIELDS_SYMMETRIC_L3L4: return "symmetric_l3l4";
    case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP: return "symmetric_l3l4+udp";
    default: return "<unknown>";
    }
}
/* Returns true if the value of 'fields' is supported.  Otherwise false. */
bool
flow_hash_fields_valid(enum nx_hash_fields fields)
{
    return fields == NX_HASH_FIELDS_ETH_SRC
        || fields == NX_HASH_FIELDS_SYMMETRIC_L4
        || fields == NX_HASH_FIELDS_SYMMETRIC_L3L4
        || fields == NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP;
}
/* Returns a hash value for the bits of 'flow' that are active based on
 * 'wc', given 'basis'. */
uint32_t
flow_hash_in_wildcards(const struct flow *flow,
                       const struct flow_wildcards *wc, uint32_t basis)
{
    const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
    const uint64_t *flow_u64 = (const uint64_t *) flow;
    uint32_t hash;
    size_t i;

    hash = basis;
    for (i = 0; i < FLOW_U64S; i++) {
        hash = hash_add64(hash, flow_u64[i] & wc_u64[i]);
    }
    return hash_finish(hash, 8 * FLOW_U64S);
}
/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.0 "dl_vlan" value:
 *
 *    - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
 *      that VLAN.  Any existing PCP match is unchanged (it becomes 0 if
 *      'flow' previously matched packets without a VLAN header).
 *
 *    - If it is OFP_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
 *      without a VLAN tag.
 *
 *    - Other values of 'vid' should not be used. */
void
flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
{
    if (vid == htons(OFP10_VLAN_NONE)) {
        flow->vlan_tci = htons(0);
    } else {
        vid &= htons(VLAN_VID_MASK);
        flow->vlan_tci &= ~htons(VLAN_VID_MASK);
        flow->vlan_tci |= htons(VLAN_CFI) | vid;
    }
}
/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
 * plus CFI). */
void
flow_set_vlan_vid(struct flow *flow, ovs_be16 vid)
{
    ovs_be16 mask = htons(VLAN_VID_MASK | VLAN_CFI);
    flow->vlan_tci &= ~mask;
    flow->vlan_tci |= vid & mask;
}
/* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the
 * range 0...7.
 *
 * This function has no effect on the VLAN ID that 'flow' matches.
 *
 * After calling this function, 'flow' will not match packets without a VLAN
 * header. */
void
flow_set_vlan_pcp(struct flow *flow, uint8_t pcp)
{
    pcp &= 0x07;
    flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
    flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
}
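
/* Illustrative sketch, not part of the original file: combining the VLAN
 * setters above.  Starting from a flow that matches untagged packets,
 * setting VID 10 and PCP 3 leaves vlan_tci matching a tagged packet:
 * CFI | (3 << VLAN_PCP_SHIFT) | 10.  The function name is hypothetical. */
static inline void
example_set_vlan_10_pcp_3(struct flow *flow)
{
    flow_set_dl_vlan(flow, htons(10));  /* Sets the VID and the CFI bit. */
    flow_set_vlan_pcp(flow, 3);         /* Sets the PCP, keeps VID and CFI. */
}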
/* Returns the number of MPLS LSEs present in 'flow'.
 *
 * Returns 0 if the 'dl_type' of 'flow' is not an MPLS ethernet type.
 * Otherwise traverses 'flow''s MPLS label stack stopping at the
 * first entry that has the BoS bit set. If no such entry exists then
 * the maximum number of LSEs that can be stored in 'flow' is returned.
 */
int
flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
{
    /* dl_type is always masked. */
    if (eth_type_mpls(flow->dl_type)) {
        int i;
        int cnt;

        cnt = 0;
        for (i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
            if (wc) {
                wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
            }
            if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
                return i + 1;
            }
            if (flow->mpls_lse[i]) {
                cnt++;
            }
        }
        return cnt;
    } else {
        return 0;
    }
}
/* Returns the number of consecutive MPLS LSEs, starting at the
 * innermost LSE, that are common in 'a' and 'b'.
 *
 * 'an' must be flow_count_mpls_labels(a).
 * 'bn' must be flow_count_mpls_labels(b).
 */
int
flow_count_common_mpls_labels(const struct flow *a, int an,
                              const struct flow *b, int bn,
                              struct flow_wildcards *wc)
{
    int min_n = MIN(an, bn);
    if (min_n == 0) {
        return 0;
    } else {
        int common_n = 0;
        int a_last = an - 1;
        int b_last = bn - 1;
        int i;

        for (i = 0; i < min_n; i++) {
            if (wc) {
                wc->masks.mpls_lse[a_last - i] = OVS_BE32_MAX;
                wc->masks.mpls_lse[b_last - i] = OVS_BE32_MAX;
            }
            if (a->mpls_lse[a_last - i] != b->mpls_lse[b_last - i]) {
                break;
            } else {
                common_n++;
            }
        }

        return common_n;
    }
}
/* Adds a new outermost MPLS label to 'flow' and changes 'flow''s Ethernet type
 * to 'mpls_eth_type', which must be an MPLS Ethertype.
 *
 * If the new label is the first MPLS label in 'flow', it is generated as:
 *
 *     - label: 2, if 'flow' is IPv6, otherwise 0.
 *
 *     - TTL: IPv4 or IPv6 TTL, if present and nonzero, otherwise 64.
 *
 *     - TC: IPv4 or IPv6 TOS, if present, otherwise 0.
 *
 *     - BoS: 1.
 *
 * If the new label is the second or a later MPLS label in 'flow', it is
 * generated as:
 *
 *     - label: Copied from outer label.
 *
 *     - TTL: Copied from outer label.
 *
 *     - TC: Copied from outer label.
 *
 *     - BoS: 0.
 *
 * 'n' must be flow_count_mpls_labels(flow).  'n' must be less than
 * FLOW_MAX_MPLS_LABELS (because otherwise flow->mpls_lse[] would overflow).
 */
void
flow_push_mpls(struct flow *flow, int n, ovs_be16 mpls_eth_type,
               struct flow_wildcards *wc)
{
    ovs_assert(eth_type_mpls(mpls_eth_type));
    ovs_assert(n < FLOW_MAX_MPLS_LABELS);

    if (n) {
        int i;

        if (wc) {
            memset(&wc->masks.mpls_lse, 0xff, sizeof *wc->masks.mpls_lse * n);
        }
        for (i = n; i >= 1; i--) {
            flow->mpls_lse[i] = flow->mpls_lse[i - 1];
        }
        flow->mpls_lse[0] = (flow->mpls_lse[1] & htonl(~MPLS_BOS_MASK));
    } else {
        int label = 0;          /* IPv4 Explicit Null. */
        int tc = 0;
        int ttl = 64;

        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            label = 2;
        }

        if (is_ip_any(flow)) {
            tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
            if (wc) {
                wc->masks.nw_tos |= IP_DSCP_MASK;
                wc->masks.nw_ttl = 0xff;
            }

            if (flow->nw_ttl) {
                ttl = flow->nw_ttl;
            }
        }

        flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));

        /* Clear all L3 and L4 fields and dp_hash. */
        BUILD_ASSERT(FLOW_WC_SEQ == 33);
        memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
               sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
        flow->dp_hash = 0;
    }
    flow->dl_type = mpls_eth_type;
}
/* Tries to remove the outermost MPLS label from 'flow'.  Returns true if
 * successful, false otherwise.  On success, sets 'flow''s Ethernet type to
 * 'eth_type'.
 *
 * 'n' must be flow_count_mpls_labels(flow). */
bool
flow_pop_mpls(struct flow *flow, int n, ovs_be16 eth_type,
              struct flow_wildcards *wc)
{
    int i;

    if (n == 0) {
        /* Nothing to pop. */
        return false;
    } else if (n == FLOW_MAX_MPLS_LABELS) {
        if (wc) {
            wc->masks.mpls_lse[n - 1] |= htonl(MPLS_BOS_MASK);
        }
        if (!(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
            /* Can't pop because don't know what to fill in mpls_lse[n - 1]. */
            return false;
        }
    }

    if (wc) {
        memset(&wc->masks.mpls_lse[1], 0xff,
               sizeof *wc->masks.mpls_lse * (n - 1));
    }
    for (i = 1; i < n; i++) {
        flow->mpls_lse[i - 1] = flow->mpls_lse[i];
    }
    flow->mpls_lse[n - 1] = 0;
    flow->dl_type = eth_type;
    return true;
}
/* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
 * as an OpenFlow 1.1 "mpls_label" value. */
void
flow_set_mpls_label(struct flow *flow, int idx, ovs_be32 label)
{
    set_mpls_lse_label(&flow->mpls_lse[idx], label);
}

/* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the
 * range 0...255. */
void
flow_set_mpls_ttl(struct flow *flow, int idx, uint8_t ttl)
{
    set_mpls_lse_ttl(&flow->mpls_lse[idx], ttl);
}

/* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the
 * range 0...7. */
void
flow_set_mpls_tc(struct flow *flow, int idx, uint8_t tc)
{
    set_mpls_lse_tc(&flow->mpls_lse[idx], tc);
}

/* Sets the MPLS BOS bit that 'flow' matches to 'bos', which should be 0 or
 * 1. */
void
flow_set_mpls_bos(struct flow *flow, int idx, uint8_t bos)
{
    set_mpls_lse_bos(&flow->mpls_lse[idx], bos);
}

/* Sets the entire MPLS LSE. */
void
flow_set_mpls_lse(struct flow *flow, int idx, ovs_be32 lse)
{
    flow->mpls_lse[idx] = lse;
}
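
/* Illustrative sketch, not part of the original file: rewriting one field of
 * an existing label stack entry with the setters above, e.g. a TTL decrement
 * on the outermost LSE.  The function name is hypothetical;
 * mpls_lse_to_ttl() is the accessor from packets.h. */
static inline void
example_decrement_mpls_ttl(struct flow *flow)
{
    uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);

    if (ttl > 0) {
        flow_set_mpls_ttl(flow, 0, ttl - 1);
    }
}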
static size_t
flow_compose_l4(struct dp_packet *p, const struct flow *flow)
{
    size_t l4_len = 0;

    if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
        || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
        if (flow->nw_proto == IPPROTO_TCP) {
            struct tcp_header *tcp;

            l4_len = sizeof *tcp;
            tcp = dp_packet_put_zeros(p, l4_len);
            tcp->tcp_src = flow->tp_src;
            tcp->tcp_dst = flow->tp_dst;
            tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
        } else if (flow->nw_proto == IPPROTO_UDP) {
            struct udp_header *udp;

            l4_len = sizeof *udp;
            udp = dp_packet_put_zeros(p, l4_len);
            udp->udp_src = flow->tp_src;
            udp->udp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_SCTP) {
            struct sctp_header *sctp;

            l4_len = sizeof *sctp;
            sctp = dp_packet_put_zeros(p, l4_len);
            sctp->sctp_src = flow->tp_src;
            sctp->sctp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_ICMP) {
            struct icmp_header *icmp;

            l4_len = sizeof *icmp;
            icmp = dp_packet_put_zeros(p, l4_len);
            icmp->icmp_type = ntohs(flow->tp_src);
            icmp->icmp_code = ntohs(flow->tp_dst);
            icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
        } else if (flow->nw_proto == IPPROTO_IGMP) {
            struct igmp_header *igmp;

            l4_len = sizeof *igmp;
            igmp = dp_packet_put_zeros(p, l4_len);
            igmp->igmp_type = ntohs(flow->tp_src);
            igmp->igmp_code = ntohs(flow->tp_dst);
            put_16aligned_be32(&igmp->group, flow->igmp_group_ip4);
            igmp->igmp_csum = csum(igmp, IGMP_HEADER_LEN);
        } else if (flow->nw_proto == IPPROTO_ICMPV6) {
            struct icmp6_hdr *icmp;

            l4_len = sizeof *icmp;
            icmp = dp_packet_put_zeros(p, l4_len);
            icmp->icmp6_type = ntohs(flow->tp_src);
            icmp->icmp6_code = ntohs(flow->tp_dst);

            if (icmp->icmp6_code == 0 &&
                (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
                 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
                struct in6_addr *nd_target;
                struct nd_opt_hdr *nd_opt;

                l4_len += sizeof *nd_target;
                nd_target = dp_packet_put_zeros(p, sizeof *nd_target);
                *nd_target = flow->nd_target;

                if (!eth_addr_is_zero(flow->arp_sha)) {
                    l4_len += 8;
                    nd_opt = dp_packet_put_zeros(p, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
                    memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
                }
                if (!eth_addr_is_zero(flow->arp_tha)) {
                    l4_len += 8;
                    nd_opt = dp_packet_put_zeros(p, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
                    memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
                }
            }
            icmp->icmp6_cksum = (OVS_FORCE uint16_t)
                csum(icmp, (char *)dp_packet_tail(p) - (char *)icmp);
        }
    }
    return l4_len;
}
/* Puts into 'p' a packet that flow_extract() would parse as having the given
 * 'flow'.
 *
 * (This is useful only for testing, obviously, and the packet isn't really
 * valid.  It hasn't got some checksums filled in, for one, and lots of fields
 * are just zeroed.) */
void
flow_compose(struct dp_packet *p, const struct flow *flow)
{
    size_t l4_len;

    /* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
    eth_compose(p, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
    if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
        struct eth_header *eth = dp_packet_l2(p);
        eth->eth_type = htons(dp_packet_size(p));
        return;
    }

    if (flow->vlan_tci & htons(VLAN_CFI)) {
        eth_push_vlan(p, htons(ETH_TYPE_VLAN), flow->vlan_tci);
    }

    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        struct ip_header *ip;

        ip = dp_packet_put_zeros(p, sizeof *ip);
        ip->ip_ihl_ver = IP_IHL_VER(5, 4);
        ip->ip_tos = flow->nw_tos;
        ip->ip_ttl = flow->nw_ttl;
        ip->ip_proto = flow->nw_proto;
        put_16aligned_be32(&ip->ip_src, flow->nw_src);
        put_16aligned_be32(&ip->ip_dst, flow->nw_dst);

        if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
            ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
            if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
                /* Any nonzero offset marks this as a later fragment. */
                ip->ip_frag_off |= htons(100);
            }
        }

        dp_packet_set_l4(p, dp_packet_tail(p));

        l4_len = flow_compose_l4(p, flow);

        ip = dp_packet_l3(p);
        ip->ip_tot_len = htons(p->l4_ofs - p->l3_ofs + l4_len);
        ip->ip_csum = csum(ip, sizeof *ip);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        struct ovs_16aligned_ip6_hdr *nh;

        nh = dp_packet_put_zeros(p, sizeof *nh);
        put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
                           htonl(flow->nw_tos << 20) | flow->ipv6_label);
        nh->ip6_hlim = flow->nw_ttl;
        nh->ip6_nxt = flow->nw_proto;

        memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
        memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));

        dp_packet_set_l4(p, dp_packet_tail(p));

        l4_len = flow_compose_l4(p, flow);

        nh = dp_packet_l3(p);
        nh->ip6_plen = htons(l4_len);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        struct arp_eth_header *arp;

        arp = dp_packet_put_zeros(p, sizeof *arp);
        dp_packet_set_l3(p, arp);
        arp->ar_hrd = htons(1);
        arp->ar_pro = htons(ETH_TYPE_IP);
        arp->ar_hln = ETH_ADDR_LEN;
        arp->ar_pln = 4;
        arp->ar_op = htons(flow->nw_proto);

        if (flow->nw_proto == ARP_OP_REQUEST ||
            flow->nw_proto == ARP_OP_REPLY) {
            put_16aligned_be32(&arp->ar_spa, flow->nw_src);
            put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
            memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
            memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);
        }
    }

    if (eth_type_mpls(flow->dl_type)) {
        int n;

        p->l2_5_ofs = p->l3_ofs;
        for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
            if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
                break;
            }
        }
        while (n > 0) {
            push_mpls(p, flow->dl_type, flow->mpls_lse[--n]);
        }
    }
}
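
/* Illustrative sketch (assumed test-style usage, not copied from the test
 * suite): composing a minimal TCP/IPv4 packet from a hand-built flow:
 *
 *     struct flow flow;
 *     struct dp_packet p;
 *
 *     memset(&flow, 0, sizeof flow);
 *     flow.dl_type = htons(ETH_TYPE_IP);
 *     flow.nw_proto = IPPROTO_TCP;
 *     flow.nw_ttl = 64;
 *     flow.tp_src = htons(1234);
 *     flow.tp_dst = htons(80);
 *
 *     dp_packet_init(&p, 0);
 *     flow_compose(&p, &flow);
 *     // ...parse 'p' with flow_extract(), feed it to a datapath, etc...
 *     dp_packet_uninit(&p);
 */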
/* Compressed flow. */

/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
 * the caller.  The caller must have already computed 'dst->tnl_map' and
 * 'dst->pkt_map' properly to indicate the significant uint64_t elements of
 * 'src'.
 *
 * Normally the significant elements are the ones that are non-zero.  However,
 * when a miniflow is initialized from a (mini)mask, the values can be zeroes,
 * so that the flow and mask always have the same maps. */
void
miniflow_init(struct miniflow *dst, const struct flow *src)
{
    const uint64_t *src_u64 = (const uint64_t *) src;
    uint64_t *dst_u64 = miniflow_values(dst);
    size_t idx;

    MAPS_FOR_EACH_INDEX(idx, *dst) {
        *dst_u64++ = src_u64[idx];
    }
}
/* Initializes the maps of 'flow' from 'src'. */
void
miniflow_map_init(struct miniflow *flow, const struct flow *src)
{
    const uint64_t *src_u64 = (const uint64_t *) src;
    int i;

    /* Initialize map, counting the number of nonzero elements. */
    flow->tnl_map = 0;
    for (i = 0; i < FLOW_TNL_U64S; i++) {
        if (src_u64[i]) {
            flow->tnl_map |= UINT64_C(1) << i;
        }
    }
    src_u64 += FLOW_TNL_U64S;
    flow->pkt_map = 0;
    for (i = 0; i < FLOW_U64S - FLOW_TNL_U64S; i++) {
        if (src_u64[i]) {
            flow->pkt_map |= UINT64_C(1) << i;
        }
    }
}
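
/* Sketch of the map semantics (illustrative only): each bit of 'tnl_map' and
 * 'pkt_map' stands for one uint64_t of struct flow, set iff that word is
 * nonzero, e.g.:
 *
 *     struct flow flow;
 *     struct miniflow tmp;
 *
 *     memset(&flow, 0, sizeof flow);
 *     flow.dl_type = htons(ETH_TYPE_IP);  // one nonzero non-tunnel word
 *     miniflow_map_init(&tmp, &flow);
 *     // Now tmp.tnl_map == 0, tmp.pkt_map has exactly one bit set, and
 *     // miniflow_n_values(&tmp) == 1.
 */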
/* Allocates 'n' count of miniflows, consecutive in memory, initializing the
 * maps of each from 'src'.
 * Returns the size of the miniflow data. */
size_t
miniflow_alloc(struct miniflow *dsts[], size_t n, const struct miniflow *src)
{
    size_t n_values = miniflow_n_values(src);
    size_t data_size = MINIFLOW_VALUES_SIZE(n_values);
    struct miniflow *dst = xmalloc(n * (sizeof *src + data_size));
    size_t i;

    COVERAGE_INC(miniflow_malloc);

    for (i = 0; i < n; i++) {
        *dst = *src;   /* Copy maps. */
        dsts[i] = dst;
        dst += 1;      /* Just past the maps. */
        dst = (struct miniflow *)((uint64_t *)dst + n_values); /* Skip data. */
    }
    return data_size;
}
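
/* Illustrative caller-side sketch ('tmp', 'flow_a', and 'flow_b' are assumed
 * to exist): allocating two miniflows that share 'tmp's maps, then filling
 * each in:
 *
 *     struct miniflow *mfs[2];
 *
 *     miniflow_alloc(mfs, 2, &tmp);
 *     miniflow_init(mfs[0], &flow_a);  // flows whose nonzero words
 *     miniflow_init(mfs[1], &flow_b);  // match 'tmp's maps
 *     ...
 *     free(mfs[0]);   // one free() releases the whole block
 */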
/* Returns a miniflow copy of 'src'.  The caller must eventually free() the
 * returned miniflow. */
struct miniflow *
miniflow_create(const struct flow *src)
{
    struct miniflow tmp;
    struct miniflow *dst;

    miniflow_map_init(&tmp, src);

    miniflow_alloc(&dst, 1, &tmp);
    miniflow_init(dst, src);
    return dst;
}
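
/* Illustrative round trip (sketch): compressing a flow and then expanding it
 * back should reproduce the original exactly:
 *
 *     struct miniflow *mf = miniflow_create(&flow);
 *     struct flow copy;
 *
 *     miniflow_expand(mf, &copy);
 *     ovs_assert(flow_equal(&flow, &copy));
 *     free(mf);
 */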
/* Initializes 'dst' as a copy of 'src'.  The caller must have allocated
 * 'dst' to have inline space for 'n_values' data in 'src'. */
void
miniflow_clone(struct miniflow *dst, const struct miniflow *src,
               size_t n_values)
{
    *dst = *src;   /* Copy maps. */
    memcpy(miniflow_values(dst), miniflow_get_values(src),
           MINIFLOW_VALUES_SIZE(n_values));
}
/* Initializes 'dst' as a copy of 'src'. */
void
miniflow_expand(const struct miniflow *src, struct flow *dst)
{
    memset(dst, 0, sizeof *dst);
    flow_union_with_miniflow(dst, src);
}
/* Returns true if 'a' and 'b' are equal miniflows, false otherwise. */
bool
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
{
    const uint64_t *ap = miniflow_get_values(a);
    const uint64_t *bp = miniflow_get_values(b);

    if (OVS_LIKELY(a->tnl_map == b->tnl_map && a->pkt_map == b->pkt_map)) {
        return !memcmp(ap, bp, miniflow_n_values(a) * sizeof *ap);
    } else {
        uint64_t map;

        map = a->tnl_map | b->tnl_map;
        for (; map; map = zero_rightmost_1bit(map)) {
            uint64_t bit = rightmost_1bit(map);

            if ((a->tnl_map & bit ? *ap++ : 0)
                != (b->tnl_map & bit ? *bp++ : 0)) {
                return false;
            }
        }
        map = a->pkt_map | b->pkt_map;
        for (; map; map = zero_rightmost_1bit(map)) {
            uint64_t bit = rightmost_1bit(map);

            if ((a->pkt_map & bit ? *ap++ : 0)
                != (b->pkt_map & bit ? *bp++ : 0)) {
                return false;
            }
        }
    }

    return true;
}
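
/* Note (illustrative): because a miniflow initialized from a (mini)mask may
 * store explicit zeros, two equal miniflows can carry different maps; the
 * slow path above treats a missing word and a stored zero as the same value:
 *
 *     // a: {pkt_map: 0b01, values: [5]}
 *     // b: {pkt_map: 0b11, values: [5, 0]}
 *     // miniflow_equal(&a, &b) == true
 */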
/* Returns false if 'a' and 'b' differ at the places where there are 1-bits
 * in 'mask', true otherwise. */
bool
miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
                           const struct minimask *mask)
{
    const uint64_t *p = miniflow_get_values(&mask->masks);
    size_t idx;

    MAPS_FOR_EACH_INDEX(idx, mask->masks) {
        if ((miniflow_get(a, idx) ^ miniflow_get(b, idx)) & *p++) {
            return false;
        }
    }

    return true;
}
/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
 * in 'mask', false if they differ. */
bool
miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
                                const struct minimask *mask)
{
    const uint64_t *b_u64 = (const uint64_t *) b;
    const uint64_t *p = miniflow_get_values(&mask->masks);
    size_t idx;

    MAPS_FOR_EACH_INDEX(idx, mask->masks) {
        if ((miniflow_get(a, idx) ^ b_u64[idx]) & *p++) {
            return false;
        }
    }

    return true;
}
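
/* Illustrative use (sketch; 'rule' and 'pkt_flow' are hypothetical names):
 * checking whether a packet still matches a rule under the rule's mask:
 *
 *     if (miniflow_equal_flow_in_minimask(rule->match, &pkt_flow,
 *                                         rule->mask)) {
 *         // 'pkt_flow' agrees with the rule wherever the mask has 1-bits.
 *     }
 */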
/* Initializes 'mask' as a minimask copy of 'wc'. */
void
minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
{
    miniflow_init(&mask->masks, &wc->masks);
}
/* Returns a minimask copy of 'wc'.  The caller must eventually free the
 * returned minimask with free(). */
struct minimask *
minimask_create(const struct flow_wildcards *wc)
{
    return (struct minimask *) miniflow_create(&wc->masks);
}
/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
 *
 * The caller must provide room for FLOW_U64S "uint64_t"s in 'storage', which
 * must follow '*dst_' in memory, for use by 'dst_'.  The caller must *not*
 * free 'dst_' with free(). */
void
minimask_combine(struct minimask *dst_,
                 const struct minimask *a_, const struct minimask *b_,
                 uint64_t storage[FLOW_U64S])
{
    struct miniflow *dst = &dst_->masks;
    uint64_t *dst_values = storage;
    const struct miniflow *a = &a_->masks;
    const struct miniflow *b = &b_->masks;
    const uint64_t *ap = miniflow_get_values(a);
    const uint64_t *bp = miniflow_get_values(b);
    size_t idx;

    dst->tnl_map = 0;
    MAP_FOR_EACH_INDEX(idx, a->tnl_map & b->tnl_map) {
        /* Both 'a' and 'b' have non-zero data at 'idx'. */
        uint64_t mask = *miniflow_values_get__(ap, a->tnl_map, idx)
            & *miniflow_values_get__(bp, b->tnl_map, idx);

        if (mask) {
            dst->tnl_map |= UINT64_C(1) << idx;
            *dst_values++ = mask;
        }
    }
    dst->pkt_map = 0;
    ap += count_1bits(a->tnl_map);   /* Skip tnl_map values. */
    bp += count_1bits(b->tnl_map);   /* Skip tnl_map values. */
    MAP_FOR_EACH_INDEX(idx, a->pkt_map & b->pkt_map) {
        /* Both 'a' and 'b' have non-zero data at 'idx'. */
        uint64_t mask = *miniflow_values_get__(ap, a->pkt_map, idx)
            & *miniflow_values_get__(bp, b->pkt_map, idx);

        if (mask) {
            dst->pkt_map |= UINT64_C(1) << idx;
            *dst_values++ = mask;
        }
    }
}
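
/* Typical call pattern (sketch; the anonymous struct is illustrative): the
 * destination and its value storage are laid out together, so the combined
 * mask's values land immediately after its maps, as required above:
 *
 *     struct {
 *         struct minimask minimask;
 *         uint64_t storage[FLOW_U64S];
 *     } m;
 *
 *     minimask_combine(&m.minimask, a, b, m.storage);
 *     // Use &m.minimask in place; it must not be free()d.
 */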
/* Initializes 'wc' as a copy of 'mask'. */
void
minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
{
    miniflow_expand(&mask->masks, &wc->masks);
}
/* Returns true if 'a' and 'b' are the same flow mask, false otherwise.
 * Minimasks may not have zero data values, so for the minimasks to be the
 * same, they need to have the same map and the same data values. */
bool
minimask_equal(const struct minimask *a, const struct minimask *b)
{
    return a->masks.tnl_map == b->masks.tnl_map
        && a->masks.pkt_map == b->masks.pkt_map
        && !memcmp(miniflow_get_values(&a->masks),
                   miniflow_get_values(&b->masks),
                   MINIFLOW_VALUES_SIZE(miniflow_n_values(&a->masks)));
}
/* Returns true if at least one bit matched by 'b' is wildcarded by 'a',
 * false otherwise. */
bool
minimask_has_extra(const struct minimask *a, const struct minimask *b)
{
    const uint64_t *ap = miniflow_get_values(&a->masks);
    const uint64_t *bp = miniflow_get_values(&b->masks);
    size_t idx;

    MAP_FOR_EACH_INDEX(idx, b->masks.tnl_map) {
        uint64_t b_u64 = *bp++;

        /* 'b_u64' is non-zero, check if the data in 'a' is either zero
         * or misses some of the bits in 'b_u64'. */
        if (!(a->masks.tnl_map & (UINT64_C(1) << idx))
            || ((*miniflow_values_get__(ap, a->masks.tnl_map, idx) & b_u64)
                != b_u64)) {
            return true; /* 'a' wildcards some bits 'b' doesn't. */
        }
    }
    ap += count_1bits(a->masks.tnl_map);   /* Skip tnl_map values. */
    MAP_FOR_EACH_INDEX(idx, b->masks.pkt_map) {
        uint64_t b_u64 = *bp++;

        if (!(a->masks.pkt_map & (UINT64_C(1) << idx))
            || ((*miniflow_values_get__(ap, a->masks.pkt_map, idx) & b_u64)
                != b_u64)) {
            return true; /* 'a' wildcards some bits 'b' doesn't. */
        }
    }