2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include <arpa/inet.h>
23 #include <netinet/in.h>
24 #include <netinet/icmp6.h>
28 #include "byte-order.h"
31 #include "dynamic-string.h"
38 #include "unaligned.h"
41 #include "openvswitch/vlog.h"
43 VLOG_DEFINE_THIS_MODULE(odp_util);
45 /* The interface between userspace and kernel uses an "OVS_*" prefix.
46 * Since this is fairly non-specific for the OVS userspace components,
47 * "ODP_*" (Open vSwitch Datapath) is used as the prefix for
48 * interactions with the datapath.
51 /* The set of characters that may separate one action or one key attribute
53 static const char *delimiters = ", \t\r\n";
/* Per-attribute-type length table entry; 'next' points at a nested table
 * for attributes whose payload is itself a set of Netlink attributes. */
57 const struct attr_len_tbl *next;
/* Sentinel "lengths" stored in attr_len_tbl entries and returned by the
 * odp_action_len()/odp_key_attr_len() helpers below. */
60 #define ATTR_LEN_INVALID -1
61 #define ATTR_LEN_VARIABLE -2
62 #define ATTR_LEN_NESTED -3
/* Forward declarations for parse/format helpers defined later in the file. */
64 static int parse_odp_key_mask_attr(const char *, const struct simap *port_names,
65 struct ofpbuf *, struct ofpbuf *);
66 static void format_odp_key_attr(const struct nlattr *a,
67 const struct nlattr *ma,
68 const struct hmap *portno_names, struct ds *ds,
71 static struct nlattr *generate_all_wildcard_mask(const struct attr_len_tbl tbl[],
72 int max, struct ofpbuf *,
73 const struct nlattr *key);
74 /* Returns one of the following for the action with the given OVS_ACTION_ATTR_*
77 * - For an action whose argument has a fixed length, returns that
78 * nonnegative length in bytes.
80 * - For an action with a variable-length argument, returns ATTR_LEN_VARIABLE.
82 * - For an invalid 'type', returns ATTR_LEN_INVALID. */
84 odp_action_len(uint16_t type)
/* Out-of-range types cannot be valid actions. */
86 if (type > OVS_ACTION_ATTR_MAX) {
90 switch ((enum ovs_action_attr) type) {
91 case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
92 case OVS_ACTION_ATTR_TUNNEL_PUSH: return ATTR_LEN_VARIABLE;
93 case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
94 case OVS_ACTION_ATTR_USERSPACE: return ATTR_LEN_VARIABLE;
95 case OVS_ACTION_ATTR_PUSH_VLAN: return sizeof(struct ovs_action_push_vlan);
96 case OVS_ACTION_ATTR_POP_VLAN: return 0;
97 case OVS_ACTION_ATTR_PUSH_MPLS: return sizeof(struct ovs_action_push_mpls);
98 case OVS_ACTION_ATTR_POP_MPLS: return sizeof(ovs_be16);
99 case OVS_ACTION_ATTR_RECIRC: return sizeof(uint32_t);
100 case OVS_ACTION_ATTR_HASH: return sizeof(struct ovs_action_hash);
101 case OVS_ACTION_ATTR_SET: return ATTR_LEN_VARIABLE;
102 case OVS_ACTION_ATTR_SET_MASKED: return ATTR_LEN_VARIABLE;
103 case OVS_ACTION_ATTR_SAMPLE: return ATTR_LEN_VARIABLE;
/* UNSPEC and the enum's end marker are never legal on the wire. */
105 case OVS_ACTION_ATTR_UNSPEC:
106 case __OVS_ACTION_ATTR_MAX:
107 return ATTR_LEN_INVALID;
110 return ATTR_LEN_INVALID;
113 /* Returns a string form of 'attr'. The return value is either a statically
114 * allocated constant string or the 'bufsize'-byte buffer 'namebuf'. 'bufsize'
115 * should be at least OVS_KEY_ATTR_BUFSIZE. */
/* "key" (3 chars) + maximum decimal digits of an unsigned int + NUL. */
116 enum { OVS_KEY_ATTR_BUFSIZE = 3 + INT_STRLEN(unsigned int) + 1 };
118 ovs_key_attr_to_string(enum ovs_key_attr attr, char *namebuf, size_t bufsize)
121 case OVS_KEY_ATTR_UNSPEC: return "unspec";
122 case OVS_KEY_ATTR_ENCAP: return "encap";
123 case OVS_KEY_ATTR_PRIORITY: return "skb_priority";
124 case OVS_KEY_ATTR_SKB_MARK: return "skb_mark";
125 case OVS_KEY_ATTR_TUNNEL: return "tunnel";
126 case OVS_KEY_ATTR_IN_PORT: return "in_port";
127 case OVS_KEY_ATTR_ETHERNET: return "eth";
128 case OVS_KEY_ATTR_VLAN: return "vlan";
129 case OVS_KEY_ATTR_ETHERTYPE: return "eth_type";
130 case OVS_KEY_ATTR_IPV4: return "ipv4";
131 case OVS_KEY_ATTR_IPV6: return "ipv6";
132 case OVS_KEY_ATTR_TCP: return "tcp";
133 case OVS_KEY_ATTR_TCP_FLAGS: return "tcp_flags";
134 case OVS_KEY_ATTR_UDP: return "udp";
135 case OVS_KEY_ATTR_SCTP: return "sctp";
136 case OVS_KEY_ATTR_ICMP: return "icmp";
137 case OVS_KEY_ATTR_ICMPV6: return "icmpv6";
138 case OVS_KEY_ATTR_ARP: return "arp";
139 case OVS_KEY_ATTR_ND: return "nd";
140 case OVS_KEY_ATTR_MPLS: return "mpls";
141 case OVS_KEY_ATTR_DP_HASH: return "dp_hash";
142 case OVS_KEY_ATTR_RECIRC_ID: return "recirc_id";
144 case __OVS_KEY_ATTR_MAX:
/* Unknown attribute: format a generic "keyN" name into 'namebuf'. */
146 snprintf(namebuf, bufsize, "key%u", (unsigned int) attr);
/* Formats an action of unknown type as "actionN(hex bytes...)" into 'ds',
 * used as a fallback when no type-specific formatter applies. */
152 format_generic_odp_action(struct ds *ds, const struct nlattr *a)
154 size_t len = nl_attr_get_size(a);
156 ds_put_format(ds, "action%"PRId16, nl_attr_type(a));
158 const uint8_t *unspec;
161 unspec = nl_attr_get(a);
/* Hex-dump the raw payload, with '(' before the first byte and a space
 * between subsequent bytes. */
162 for (i = 0; i < len; i++) {
163 ds_put_char(ds, i ? ' ': '(');
164 ds_put_format(ds, "%02x", unspec[i]);
166 ds_put_char(ds, ')');
/* Formats an OVS_ACTION_ATTR_SAMPLE action as
 * "sample(sample=P%,actions(...))" into 'ds'. */
171 format_odp_sample_action(struct ds *ds, const struct nlattr *attr)
173 static const struct nl_policy ovs_sample_policy[] = {
174 [OVS_SAMPLE_ATTR_PROBABILITY] = { .type = NL_A_U32 },
175 [OVS_SAMPLE_ATTR_ACTIONS] = { .type = NL_A_NESTED }
177 struct nlattr *a[ARRAY_SIZE(ovs_sample_policy)];
179 const struct nlattr *nla_acts;
182 ds_put_cstr(ds, "sample");
184 if (!nl_parse_nested(attr, ovs_sample_policy, a, ARRAY_SIZE(a))) {
185 ds_put_cstr(ds, "(error)");
/* The u32 probability is scaled to a human-readable percentage. */
189 percentage = (100.0 * nl_attr_get_u32(a[OVS_SAMPLE_ATTR_PROBABILITY])) /
192 ds_put_format(ds, "(sample=%.1f%%,", percentage);
/* Recursively format the nested action list. */
194 ds_put_cstr(ds, "actions(");
195 nla_acts = nl_attr_get(a[OVS_SAMPLE_ATTR_ACTIONS]);
196 len = nl_attr_get_size(a[OVS_SAMPLE_ATTR_ACTIONS]);
197 format_odp_actions(ds, nla_acts, len);
198 ds_put_format(ds, "))");
/* Maps a slow-path reason bit to its short name, via the SPR macro table. */
202 slow_path_reason_to_string(uint32_t reason)
204 switch ((enum slow_path_reason) reason) {
205 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
/* Maps a slow-path reason to its longer human-readable explanation, using
 * the same SPR macro table expansion. */
214 slow_path_reason_to_explanation(enum slow_path_reason reason)
217 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
/* Parses a flags string 's' (e.g. "0x1/0x3", "+csum-key", or "csum,key")
 * using 'bit_to_string' to map individual bits to names.  Flag bits are
 * stored in '*res_flags'; when 'res_mask' is nonnull, masked ("+/-") syntax
 * is accepted and the mask is stored in '*res_mask'.  Bits outside 'allowed'
 * are rejected.  Returns the number of characters consumed on success,
 * otherwise a negative errno value (see the -EINVAL return below). */
226 parse_flags(const char *s, const char *(*bit_to_string)(uint32_t),
227 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
232 /* Parse masked flags in numeric format? */
233 if (res_mask && ovs_scan(s, "%"SCNi32"/%"SCNi32"%n",
234 res_flags, res_mask, &n) && n > 0) {
/* Reject bits that the caller did not allow. */
235 if (*res_flags & ~allowed || *res_mask & ~allowed) {
/* Symbolic masked syntax: "+name" sets a bit, "-name" clears it. */
243 if (res_mask && (*s == '+' || *s == '-')) {
244 uint32_t flags = 0, mask = 0;
246 /* Parse masked flags. */
247 while (s[0] != ')') {
254 } else if (s[0] == '-') {
/* Flag name runs until the next '+', '-', or closing ')'. */
262 name_len = strcspn(s, "+-)");
/* Scan every single-bit value looking for a name match. */
264 for (bit = 1; bit; bit <<= 1) {
265 const char *fname = bit_to_string(bit);
273 if (len != name_len) {
276 if (!strncmp(s, fname, len)) {
278 /* bit already set. */
281 if (!(bit & allowed)) {
293 return -EINVAL; /* Unknown flag name */
304 /* Parse unmasked flags. If a flag is present, it is set, otherwise
306 while (s[n] != ')') {
307 unsigned long long int flags;
/* Accept a numeric flags word mixed in with symbolic names. */
311 if (ovs_scan(&s[n], "%lli%n", &flags, &n0)) {
312 if (flags & ~allowed) {
/* Skip an optional ',' separator after the number. */
315 n += n0 + (s[n + n0] == ',');
320 for (bit = 1; bit; bit <<= 1) {
321 const char *name = bit_to_string(bit);
/* A name matches only when followed by ',' or ')'. */
329 if (!strncmp(s + n, name, len) &&
330 (s[n + len] == ',' || s[n + len] == ')')) {
331 if (!(bit & allowed)) {
335 n += len + (s[n + len] == ',');
/* Unmasked syntax implies an exact match on all bits. */
347 *res_mask = UINT32_MAX;
/* Formats an OVS_ACTION_ATTR_USERSPACE action into 'ds' as
 * "userspace(pid=N,...)", decoding any userdata cookie it recognizes
 * (sFlow, slow_path, flow_sample, IPFIX) and hex-dumping anything else. */
353 format_odp_userspace_action(struct ds *ds, const struct nlattr *attr)
355 static const struct nl_policy ovs_userspace_policy[] = {
356 [OVS_USERSPACE_ATTR_PID] = { .type = NL_A_U32 },
357 [OVS_USERSPACE_ATTR_USERDATA] = { .type = NL_A_UNSPEC,
359 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = { .type = NL_A_U32,
362 struct nlattr *a[ARRAY_SIZE(ovs_userspace_policy)];
363 const struct nlattr *userdata_attr;
364 const struct nlattr *tunnel_out_port_attr;
366 if (!nl_parse_nested(attr, ovs_userspace_policy, a, ARRAY_SIZE(a))) {
367 ds_put_cstr(ds, "userspace(error)");
371 ds_put_format(ds, "userspace(pid=%"PRIu32,
372 nl_attr_get_u32(a[OVS_USERSPACE_ATTR_PID]));
374 userdata_attr = a[OVS_USERSPACE_ATTR_USERDATA];
377 const uint8_t *userdata = nl_attr_get(userdata_attr);
378 size_t userdata_len = nl_attr_get_size(userdata_attr);
379 bool userdata_unspec = true;
380 union user_action_cookie cookie;
/* Only try to decode the cookie when the userdata is at least big
 * enough to hold the type field and no bigger than the union. */
382 if (userdata_len >= sizeof cookie.type
383 && userdata_len <= sizeof cookie) {
/* Zero-fill so that short userdata leaves trailing bytes defined. */
385 memset(&cookie, 0, sizeof cookie);
386 memcpy(&cookie, userdata, userdata_len);
388 userdata_unspec = false;
/* Dispatch on exact cookie size plus type tag. */
390 if (userdata_len == sizeof cookie.sflow
391 && cookie.type == USER_ACTION_COOKIE_SFLOW) {
392 ds_put_format(ds, ",sFlow("
393 "vid=%"PRIu16",pcp=%"PRIu8",output=%"PRIu32")",
394 vlan_tci_to_vid(cookie.sflow.vlan_tci),
395 vlan_tci_to_pcp(cookie.sflow.vlan_tci),
396 cookie.sflow.output);
397 } else if (userdata_len == sizeof cookie.slow_path
398 && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
399 ds_put_cstr(ds, ",slow_path(");
400 format_flags(ds, slow_path_reason_to_string,
401 cookie.slow_path.reason, ',');
402 ds_put_format(ds, ")");
403 } else if (userdata_len == sizeof cookie.flow_sample
404 && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
405 ds_put_format(ds, ",flow_sample(probability=%"PRIu16
406 ",collector_set_id=%"PRIu32
407 ",obs_domain_id=%"PRIu32
408 ",obs_point_id=%"PRIu32")",
409 cookie.flow_sample.probability,
410 cookie.flow_sample.collector_set_id,
411 cookie.flow_sample.obs_domain_id,
412 cookie.flow_sample.obs_point_id);
413 } else if (userdata_len >= sizeof cookie.ipfix
414 && cookie.type == USER_ACTION_COOKIE_IPFIX) {
415 ds_put_format(ds, ",ipfix(output_port=%"PRIu32")",
416 cookie.ipfix.output_odp_port);
/* Unrecognized cookie: fall back to a raw hex dump below. */
418 userdata_unspec = true;
422 if (userdata_unspec) {
424 ds_put_format(ds, ",userdata(");
425 for (i = 0; i < userdata_len; i++) {
426 ds_put_format(ds, "%02x", userdata[i]);
428 ds_put_char(ds, ')');
432 tunnel_out_port_attr = a[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT];
433 if (tunnel_out_port_attr) {
434 ds_put_format(ds, ",tunnel_out_port=%"PRIu32,
435 nl_attr_get_u32(tunnel_out_port_attr));
438 ds_put_char(ds, ')');
/* Formats VLAN TCI fields ("vid=", "pcp=", "cfi=0") into 'ds'.  Fully-masked
 * fields print plainly; partially-masked fields append "/0xMASK".  With
 * 'verbose' set, fields are printed even when both value and mask are 0. */
442 format_vlan_tci(struct ds *ds, ovs_be16 tci, ovs_be16 mask, bool verbose)
444 if (verbose || vlan_tci_to_vid(tci) || vlan_tci_to_vid(mask)) {
445 ds_put_format(ds, "vid=%"PRIu16, vlan_tci_to_vid(tci));
446 if (vlan_tci_to_vid(mask) != VLAN_VID_MASK) { /* Partially masked. */
447 ds_put_format(ds, "/0x%"PRIx16, vlan_tci_to_vid(mask));
449 ds_put_char(ds, ',');
451 if (verbose || vlan_tci_to_pcp(tci) || vlan_tci_to_pcp(mask)) {
452 ds_put_format(ds, "pcp=%d", vlan_tci_to_pcp(tci));
453 if (vlan_tci_to_pcp(mask) != (VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) {
454 ds_put_format(ds, "/0x%x", vlan_tci_to_pcp(mask));
456 ds_put_char(ds, ',');
/* CFI is normally set; only mention it when it is clear. */
458 if (!(tci & htons(VLAN_CFI))) {
459 ds_put_cstr(ds, "cfi=0");
460 ds_put_char(ds, ',');
/* Formats a single MPLS label stack entry's fields into 'ds'. */
466 format_mpls_lse(struct ds *ds, ovs_be32 mpls_lse)
468 ds_put_format(ds, "label=%"PRIu32",tc=%d,ttl=%d,bos=%d",
469 mpls_lse_to_label(mpls_lse),
470 mpls_lse_to_tc(mpls_lse),
471 mpls_lse_to_ttl(mpls_lse),
472 mpls_lse_to_bos(mpls_lse));
/* Formats 'n' MPLS label stack entries from 'mpls_key' into 'ds'.
 * With a single entry, prints the decoded label/tc/ttl/bos fields
 * (value-only or value/mask depending on 'mpls_mask'); with multiple
 * entries, prints each raw LSE word as "lseN=0x...". */
476 format_mpls(struct ds *ds, const struct ovs_key_mpls *mpls_key,
477 const struct ovs_key_mpls *mpls_mask, int n)
480 ovs_be32 key = mpls_key->mpls_lse;
482 if (mpls_mask == NULL) {
483 format_mpls_lse(ds, key);
485 ovs_be32 mask = mpls_mask->mpls_lse;
487 ds_put_format(ds, "label=%"PRIu32"/0x%x,tc=%d/%x,ttl=%d/0x%x,bos=%d/%x",
488 mpls_lse_to_label(key), mpls_lse_to_label(mask),
489 mpls_lse_to_tc(key), mpls_lse_to_tc(mask),
490 mpls_lse_to_ttl(key), mpls_lse_to_ttl(mask),
491 mpls_lse_to_bos(key), mpls_lse_to_bos(mask));
/* Multiple entries: dump each LSE as a raw 32-bit word. */
496 for (i = 0; i < n; i++) {
497 ds_put_format(ds, "lse%d=%#"PRIx32,
498 i, ntohl(mpls_key[i].mpls_lse));
500 ds_put_format(ds, "/%#"PRIx32, ntohl(mpls_mask[i].mpls_lse));
502 ds_put_char(ds, ',');
/* Formats an OVS_ACTION_ATTR_RECIRC action as "recirc(0x...)". */
509 format_odp_recirc_action(struct ds *ds, uint32_t recirc_id)
511 ds_put_format(ds, "recirc(%#"PRIx32")", recirc_id);
/* Formats an OVS_ACTION_ATTR_HASH action, naming the algorithm when it is
 * one we know (currently only OVS_HASH_ALG_L4). */
515 format_odp_hash_action(struct ds *ds, const struct ovs_action_hash *hash_act)
517 ds_put_format(ds, "hash(");
519 if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
520 ds_put_format(ds, "hash_l4(%"PRIu32")", hash_act->hash_basis);
522 ds_put_format(ds, "Unknown hash algorithm(%"PRIu32")",
525 ds_put_format(ds, ")");
/* Formats the UDP header that immediately follows 'ip' in a tnl_push
 * header; used for the UDP-based tunnel types (VXLAN, Geneve). */
529 format_udp_tnl_push_header(struct ds *ds, const struct ip_header *ip)
531 const struct udp_header *udp;
/* The UDP header sits directly after the (option-less) IPv4 header. */
533 udp = (const struct udp_header *) (ip + 1);
534 ds_put_format(ds, "udp(src=%"PRIu16",dst=%"PRIu16",csum=0x%"PRIx16"),",
535 ntohs(udp->udp_src), ntohs(udp->udp_dst),
536 ntohs(udp->udp_csum));
/* Formats the prebuilt tunnel header carried in a tnl_push action:
 * Ethernet, IPv4, then the tunnel-type-specific part (VXLAN, Geneve,
 * or GRE with optional csum/key/seq fields). */
542 format_odp_tnl_push_header(struct ds *ds, struct ovs_action_push_tnl *data)
544 const struct eth_header *eth;
545 const struct ip_header *ip;
548 eth = (const struct eth_header *)data->header;
551 ip = (const struct ip_header *)l3;
554 ds_put_format(ds, "header(size=%"PRIu8",type=%"PRIu8",eth(dst=",
555 data->header_len, data->tnl_type);
556 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_dst));
557 ds_put_format(ds, ",src=");
558 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_src));
559 ds_put_format(ds, ",dl_type=0x%04"PRIx16"),", ntohs(eth->eth_type));
/* IPv4 addresses are only 16-bit aligned inside the packed header. */
562 ds_put_format(ds, "ipv4(src="IP_FMT",dst="IP_FMT",proto=%"PRIu8
563 ",tos=%#"PRIx8",ttl=%"PRIu8",frag=0x%"PRIx16"),",
564 IP_ARGS(get_16aligned_be32(&ip->ip_src)),
565 IP_ARGS(get_16aligned_be32(&ip->ip_dst)),
566 ip->ip_proto, ip->ip_tos,
/* Dispatch on the tunnel type for the post-IP portion of the header. */
570 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
571 const struct vxlanhdr *vxh;
/* format_udp_tnl_push_header() prints the UDP header and (per its
 * definition elsewhere) yields the position after it. */
573 vxh = format_udp_tnl_push_header(ds, ip);
/* VNI occupies the top 24 bits of the 32-bit field, hence >> 8. */
575 ds_put_format(ds, "vxlan(flags=0x%"PRIx32",vni=0x%"PRIx32")",
576 ntohl(get_16aligned_be32(&vxh->vx_flags)),
577 ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
578 } else if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
579 const struct genevehdr *gnh;
581 gnh = format_udp_tnl_push_header(ds, ip);
583 ds_put_format(ds, "geneve(%svni=0x%"PRIx32")",
584 gnh->oam ? "oam," : "",
585 ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
586 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE) {
587 const struct gre_base_hdr *greh;
588 ovs_16aligned_be32 *options;
591 l4 = ((uint8_t *)l3 + sizeof(struct ip_header));
592 greh = (const struct gre_base_hdr *) l4;
594 ds_put_format(ds, "gre((flags=0x%"PRIx16",proto=0x%"PRIx16")",
595 ntohs(greh->flags), ntohs(greh->protocol));
/* GRE options (csum, key, seq) follow the base header in order,
 * each present only when its flag bit is set. */
596 options = (ovs_16aligned_be32 *)(greh + 1);
597 if (greh->flags & htons(GRE_CSUM)) {
598 ds_put_format(ds, ",csum=0x%"PRIx16, ntohs(*((ovs_be16 *)options)));
601 if (greh->flags & htons(GRE_KEY)) {
602 ds_put_format(ds, ",key=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
605 if (greh->flags & htons(GRE_SEQ)) {
606 ds_put_format(ds, ",seq=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
609 ds_put_format(ds, ")");
611 ds_put_format(ds, ")");
/* Formats an OVS_ACTION_ATTR_TUNNEL_PUSH action:
 * "tnl_push(tnl_port(N),header(...),out_port(M))". */
615 format_odp_tnl_push_action(struct ds *ds, const struct nlattr *attr)
617 struct ovs_action_push_tnl *data;
619 data = (struct ovs_action_push_tnl *) nl_attr_get(attr);
621 ds_put_format(ds, "tnl_push(tnl_port(%"PRIu32"),", data->tnl_port);
622 format_odp_tnl_push_header(ds, data);
623 ds_put_format(ds, ",out_port(%"PRIu32"))", data->out_port);
/* Formats a single datapath action 'a' into 'ds', dispatching on the
 * OVS_ACTION_ATTR_* type.  Malformed (wrong-length) actions are printed
 * via format_generic_odp_action() with a "bad length" prefix. */
627 format_odp_action(struct ds *ds, const struct nlattr *a)
630 enum ovs_action_attr type = nl_attr_type(a);
631 const struct ovs_action_push_vlan *vlan;
/* Validate the payload length against odp_action_len() before decoding. */
634 expected_len = odp_action_len(nl_attr_type(a));
635 if (expected_len != ATTR_LEN_VARIABLE &&
636 nl_attr_get_size(a) != expected_len) {
637 ds_put_format(ds, "bad length %"PRIuSIZE", expected %d for: ",
638 nl_attr_get_size(a), expected_len);
639 format_generic_odp_action(ds, a);
644 case OVS_ACTION_ATTR_OUTPUT:
645 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
647 case OVS_ACTION_ATTR_TUNNEL_POP:
648 ds_put_format(ds, "tnl_pop(%"PRIu32")", nl_attr_get_u32(a));
650 case OVS_ACTION_ATTR_TUNNEL_PUSH:
651 format_odp_tnl_push_action(ds, a);
653 case OVS_ACTION_ATTR_USERSPACE:
654 format_odp_userspace_action(ds, a);
656 case OVS_ACTION_ATTR_RECIRC:
657 format_odp_recirc_action(ds, nl_attr_get_u32(a));
659 case OVS_ACTION_ATTR_HASH:
660 format_odp_hash_action(ds, nl_attr_get(a));
662 case OVS_ACTION_ATTR_SET_MASKED:
/* Payload is key followed by mask, each 'size' bytes. */
664 size = nl_attr_get_size(a) / 2;
665 ds_put_cstr(ds, "set(");
667 /* Masked set action not supported for tunnel key, which is bigger. */
668 if (size <= sizeof(struct ovs_key_ipv6)) {
/* Build temporary, properly-headed nlattrs for the key and mask
 * halves on the stack so format_odp_key_attr() can print them. */
669 struct nlattr attr[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
670 sizeof(struct nlattr))];
671 struct nlattr mask[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
672 sizeof(struct nlattr))];
674 mask->nla_type = attr->nla_type = nl_attr_type(a);
675 mask->nla_len = attr->nla_len = NLA_HDRLEN + size;
676 memcpy(attr + 1, (char *)(a + 1), size);
677 memcpy(mask + 1, (char *)(a + 1) + size, size);
678 format_odp_key_attr(attr, mask, NULL, ds, false);
680 format_odp_key_attr(a, NULL, NULL, ds, false);
682 ds_put_cstr(ds, ")");
684 case OVS_ACTION_ATTR_SET:
685 ds_put_cstr(ds, "set(");
686 format_odp_key_attr(nl_attr_get(a), NULL, NULL, ds, true);
687 ds_put_cstr(ds, ")");
689 case OVS_ACTION_ATTR_PUSH_VLAN:
690 vlan = nl_attr_get(a);
691 ds_put_cstr(ds, "push_vlan(");
/* Only print the TPID when it is not the default 802.1Q value. */
692 if (vlan->vlan_tpid != htons(ETH_TYPE_VLAN)) {
693 ds_put_format(ds, "tpid=0x%04"PRIx16",", ntohs(vlan->vlan_tpid));
695 format_vlan_tci(ds, vlan->vlan_tci, OVS_BE16_MAX, false);
696 ds_put_char(ds, ')');
698 case OVS_ACTION_ATTR_POP_VLAN:
699 ds_put_cstr(ds, "pop_vlan");
701 case OVS_ACTION_ATTR_PUSH_MPLS: {
702 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
703 ds_put_cstr(ds, "push_mpls(");
704 format_mpls_lse(ds, mpls->mpls_lse);
705 ds_put_format(ds, ",eth_type=0x%"PRIx16")", ntohs(mpls->mpls_ethertype));
708 case OVS_ACTION_ATTR_POP_MPLS: {
709 ovs_be16 ethertype = nl_attr_get_be16(a);
710 ds_put_format(ds, "pop_mpls(eth_type=0x%"PRIx16")", ntohs(ethertype));
713 case OVS_ACTION_ATTR_SAMPLE:
714 format_odp_sample_action(ds, a);
716 case OVS_ACTION_ATTR_UNSPEC:
717 case __OVS_ACTION_ATTR_MAX:
/* Unknown or reserved types fall back to a generic hex dump. */
719 format_generic_odp_action(ds, a);
/* Formats the 'actions_len' bytes of datapath actions in 'actions' into
 * 'ds', comma-separating them.  An empty list prints as "drop"; trailing
 * bytes that do not form a whole attribute are hex-dumped as leftovers. */
725 format_odp_actions(struct ds *ds, const struct nlattr *actions,
729 const struct nlattr *a;
732 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
734 ds_put_char(ds, ',');
736 format_odp_action(ds, a);
/* 'left' == 'actions_len' means the list was empty to begin with. */
741 if (left == actions_len) {
742 ds_put_cstr(ds, "<empty>");
744 ds_put_format(ds, ",***%u leftover bytes*** (", left);
745 for (i = 0; i < left; i++) {
746 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
748 ds_put_char(ds, ')');
751 ds_put_cstr(ds, "drop");
755 /* Separate out parse_odp_userspace_action() function. */
/* Parses the string form of a userspace() action (the inverse of
 * format_odp_userspace_action()) and appends the resulting Netlink
 * attributes to 'actions'.  Recognizes the sFlow, slow_path, flow_sample,
 * ipfix, and raw userdata() cookie syntaxes plus an optional
 * tunnel_out_port.  Returns characters consumed or a negative error. */
757 parse_odp_userspace_action(const char *s, struct ofpbuf *actions)
760 union user_action_cookie cookie;
762 odp_port_t tunnel_out_port;
764 void *user_data = NULL;
765 size_t user_data_size = 0;
767 if (!ovs_scan(s, "userspace(pid=%"SCNi32"%n", &pid, &n)) {
773 uint32_t probability;
774 uint32_t collector_set_id;
775 uint32_t obs_domain_id;
776 uint32_t obs_point_id;
/* Try each cookie syntax in turn; the first that scans wins. */
779 if (ovs_scan(&s[n], ",sFlow(vid=%i,"
780 "pcp=%i,output=%"SCNi32")%n",
781 &vid, &pcp, &output, &n1)) {
785 tci = vid | (pcp << VLAN_PCP_SHIFT);
790 cookie.type = USER_ACTION_COOKIE_SFLOW;
791 cookie.sflow.vlan_tci = htons(tci);
792 cookie.sflow.output = output;
794 user_data_size = sizeof cookie.sflow;
795 } else if (ovs_scan(&s[n], ",slow_path(%n",
800 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
801 cookie.slow_path.unused = 0;
802 cookie.slow_path.reason = 0;
/* Reuse the generic flag parser for the slow-path reason names. */
804 res = parse_flags(&s[n], slow_path_reason_to_string,
805 &cookie.slow_path.reason,
806 SLOW_PATH_REASON_MASK, NULL);
807 if (res < 0 || s[n + res] != ')') {
813 user_data_size = sizeof cookie.slow_path;
814 } else if (ovs_scan(&s[n], ",flow_sample(probability=%"SCNi32","
815 "collector_set_id=%"SCNi32","
816 "obs_domain_id=%"SCNi32","
817 "obs_point_id=%"SCNi32")%n",
818 &probability, &collector_set_id,
819 &obs_domain_id, &obs_point_id, &n1)) {
822 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
823 cookie.flow_sample.probability = probability;
824 cookie.flow_sample.collector_set_id = collector_set_id;
825 cookie.flow_sample.obs_domain_id = obs_domain_id;
826 cookie.flow_sample.obs_point_id = obs_point_id;
828 user_data_size = sizeof cookie.flow_sample;
829 } else if (ovs_scan(&s[n], ",ipfix(output_port=%"SCNi32")%n",
832 cookie.type = USER_ACTION_COOKIE_IPFIX;
833 cookie.ipfix.output_odp_port = u32_to_odp(output);
835 user_data_size = sizeof cookie.ipfix;
836 } else if (ovs_scan(&s[n], ",userdata(%n",
/* Raw hex userdata: accumulate the bytes into a temporary buffer. */
841 ofpbuf_init(&buf, 16);
842 end = ofpbuf_put_hex(&buf, &s[n], NULL);
846 user_data = buf.data;
847 user_data_size = buf.size;
/* An optional tunnel_out_port follows the cookie, else a plain ')'. */
854 if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n",
855 &tunnel_out_port, &n1)) {
856 odp_put_userspace_action(pid, user_data, user_data_size, tunnel_out_port, actions);
858 } else if (s[n] == ')') {
859 odp_put_userspace_action(pid, user_data, user_data_size, ODPP_NONE, actions);
/* Parses the string form of a tnl_push() action into 'data', building the
 * raw Ethernet/IPv4/tunnel header bytes in data->header and filling in
 * tnl_port, tnl_type, header_len, and out_port.  Supports VXLAN, Geneve
 * (both over UDP), and GRE with optional csum/key/seq.  The parsed size
 * and type are cross-checked against what was actually built. */
868 ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
870 struct eth_header *eth;
871 struct ip_header *ip;
872 struct udp_header *udp;
873 struct gre_base_hdr *greh;
874 uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, csum;
876 uint32_t tnl_type = 0, header_len = 0;
880 if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
/* Lay out pointers into data->header for each protocol layer. */
883 eth = (struct eth_header *) data->header;
884 l3 = (data->header + sizeof *eth);
885 l4 = ((uint8_t *) l3 + sizeof (struct ip_header));
886 ip = (struct ip_header *) l3;
887 if (!ovs_scan_len(s, &n, "header(size=%"SCNi32",type=%"SCNi32","
888 "eth(dst="ETH_ADDR_SCAN_FMT",",
891 ETH_ADDR_SCAN_ARGS(eth->eth_dst))) {
895 if (!ovs_scan_len(s, &n, "src="ETH_ADDR_SCAN_FMT",",
896 ETH_ADDR_SCAN_ARGS(eth->eth_src))) {
899 if (!ovs_scan_len(s, &n, "dl_type=0x%"SCNx16"),", &dl_type)) {
902 eth->eth_type = htons(dl_type);
905 if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
906 ",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
909 &ip->ip_proto, &ip->ip_tos,
910 &ip->ip_ttl, &ip->ip_frag_off)) {
/* Addresses are stored via 16-bit-aligned accessors into the packed
 * header. */
913 put_16aligned_be32(&ip->ip_src, sip);
914 put_16aligned_be32(&ip->ip_dst, dip);
/* UDP-based tunnels (VXLAN, Geneve) and GRE share the same l4 offset. */
917 udp = (struct udp_header *) l4;
918 greh = (struct gre_base_hdr *) l4;
919 if (ovs_scan_len(s, &n, "udp(src=%"SCNi16",dst=%"SCNi16",csum=0x%"SCNx16"),",
920 &udp_src, &udp_dst, &csum)) {
921 uint32_t vx_flags, vni;
923 udp->udp_src = htons(udp_src);
924 udp->udp_dst = htons(udp_dst);
926 udp->udp_csum = htons(csum);
928 if (ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
930 struct vxlanhdr *vxh = (struct vxlanhdr *) (udp + 1);
/* VNI occupies the upper 24 bits of its field, hence << 8. */
932 put_16aligned_be32(&vxh->vx_flags, htonl(vx_flags));
933 put_16aligned_be32(&vxh->vx_vni, htonl(vni << 8));
934 tnl_type = OVS_VPORT_TYPE_VXLAN;
935 header_len = sizeof *eth + sizeof *ip +
936 sizeof *udp + sizeof *vxh;
937 } else if (ovs_scan_len(s, &n, "geneve(")) {
938 struct genevehdr *gnh = (struct genevehdr *) (udp + 1);
940 memset(gnh, 0, sizeof *gnh);
941 if (ovs_scan_len(s, &n, "oam,")) {
944 if (!ovs_scan_len(s, &n, "vni=0x%"SCNx32"))", &vni)) {
947 gnh->proto_type = htons(ETH_TYPE_TEB);
948 put_16aligned_be32(&gnh->vni, htonl(vni << 8));
949 tnl_type = OVS_VPORT_TYPE_GENEVE;
950 header_len = sizeof *eth + sizeof *ip +
951 sizeof *udp + sizeof *gnh;
955 } else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
956 &gre_flags, &gre_proto)){
958 tnl_type = OVS_VPORT_TYPE_GRE;
959 greh->flags = htons(gre_flags);
960 greh->protocol = htons(gre_proto);
/* GRE options (csum, key, seq) follow the base header in order, each
 * present only when its flag bit was scanned above. */
961 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
963 if (greh->flags & htons(GRE_CSUM)) {
964 if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx16, &csum)) {
/* The csum option word is 16 bits of checksum + 16 reserved bits. */
968 memset(options, 0, sizeof *options);
969 *((ovs_be16 *)options) = htons(csum);
972 if (greh->flags & htons(GRE_KEY)) {
975 if (!ovs_scan_len(s, &n, ",key=0x%"SCNx32, &key)) {
979 put_16aligned_be32(options, htonl(key));
982 if (greh->flags & htons(GRE_SEQ)) {
985 if (!ovs_scan_len(s, &n, ",seq=0x%"SCNx32, &seq)) {
988 put_16aligned_be32(options, htonl(seq));
992 if (!ovs_scan_len(s, &n, "))")) {
/* GRE header length depends on how many option words were consumed. */
996 header_len = sizeof *eth + sizeof *ip +
997 ((uint8_t *) options - (uint8_t *) greh);
1002 /* check tunnel meta data. */
1003 if (data->tnl_type != tnl_type) {
1006 if (data->header_len != header_len) {
1011 if (!ovs_scan_len(s, &n, ",out_port(%"SCNi32"))", &data->out_port)) {
/* Parses one datapath action from the string 's' (the inverse of
 * format_odp_action()) and appends it to 'actions' as Netlink attributes.
 * 'port_names', when nonnull, lets bare port names stand in for output
 * port numbers.  Returns the number of characters consumed on success,
 * otherwise a negative errno value (e.g. the -EINVAL below). */
1019 parse_odp_action(const char *s, const struct simap *port_names,
1020 struct ofpbuf *actions)
/* A bare number is an output action. */
1026 if (ovs_scan(s, "%"SCNi32"%n", &port, &n)) {
1027 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, port);
/* Otherwise try to resolve a symbolic port name to its number. */
1033 int len = strcspn(s, delimiters);
1034 struct simap_node *node;
1036 node = simap_find_len(port_names, s, len);
1038 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, node->data);
1047 if (ovs_scan(s, "recirc(%"PRIu32")%n", &recirc_id, &n)) {
1048 nl_msg_put_u32(actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
1053 if (!strncmp(s, "userspace(", 10)) {
1054 return parse_odp_userspace_action(s, actions);
1057 if (!strncmp(s, "set(", 4)) {
1060 struct nlattr mask[128 / sizeof(struct nlattr)];
1061 struct ofpbuf maskbuf;
1062 struct nlattr *nested, *key;
1065 /* 'mask' is big enough to hold any key. */
1066 ofpbuf_use_stack(&maskbuf, mask, sizeof mask);
1068 start_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SET);
1069 retval = parse_odp_key_mask_attr(s + 4, port_names, actions, &maskbuf);
1073 if (s[retval + 4] != ')') {
1077 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
1080 size = nl_attr_get_size(mask);
1081 if (size == nl_attr_get_size(key)) {
1082 /* Change to masked set action if not fully masked. */
1083 if (!is_all_ones(mask + 1, size)) {
/* Append the mask payload after the key, doubling nla_len. */
1084 key->nla_len += size;
1085 ofpbuf_put(actions, mask + 1, size);
1086 /* 'actions' may have been reallocated by ofpbuf_put(). */
1087 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
1088 nested->nla_type = OVS_ACTION_ATTR_SET_MASKED;
1092 nl_msg_end_nested(actions, start_ofs);
1097 struct ovs_action_push_vlan push;
/* TPID defaults to 802.1Q; CFI defaults to set (see the htons below). */
1098 int tpid = ETH_TYPE_VLAN;
/* Accept all four push_vlan spellings: with/without tpid and cfi. */
1103 if (ovs_scan(s, "push_vlan(vid=%i,pcp=%i)%n", &vid, &pcp, &n)
1104 || ovs_scan(s, "push_vlan(vid=%i,pcp=%i,cfi=%i)%n",
1105 &vid, &pcp, &cfi, &n)
1106 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i)%n",
1107 &tpid, &vid, &pcp, &n)
1108 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i,cfi=%i)%n",
1109 &tpid, &vid, &pcp, &cfi, &n)) {
1110 push.vlan_tpid = htons(tpid);
1111 push.vlan_tci = htons((vid << VLAN_VID_SHIFT)
1112 | (pcp << VLAN_PCP_SHIFT)
1113 | (cfi ? VLAN_CFI : 0));
1114 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_VLAN,
1115 &push, sizeof push);
1121 if (!strncmp(s, "pop_vlan", 8)) {
1122 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_VLAN);
1130 if (ovs_scan(s, "sample(sample=%lf%%,actions(%n", &percentage, &n)
1131 && percentage >= 0. && percentage <= 100.0) {
1132 size_t sample_ofs, actions_ofs;
/* Convert the percentage back to a u32 probability, rounding and
 * clamping to [0, UINT32_MAX]. */
1135 probability = floor(UINT32_MAX * (percentage / 100.0) + .5);
1136 sample_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SAMPLE);
1137 nl_msg_put_u32(actions, OVS_SAMPLE_ATTR_PROBABILITY,
1138 (probability <= 0 ? 0
1139 : probability >= UINT32_MAX ? UINT32_MAX
1142 actions_ofs = nl_msg_start_nested(actions,
1143 OVS_SAMPLE_ATTR_ACTIONS);
1147 n += strspn(s + n, delimiters);
/* Recursively parse each nested action in the sample's action list. */
1152 retval = parse_odp_action(s + n, port_names, actions);
1158 nl_msg_end_nested(actions, actions_ofs);
1159 nl_msg_end_nested(actions, sample_ofs);
/* Expect the closing "))" of sample(...actions(...)). */
1161 return s[n + 1] == ')' ? n + 2 : -EINVAL;
1169 if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
1170 nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
1176 struct ovs_action_push_tnl data;
1179 n = ovs_parse_tnl_push(s, &data);
1181 odp_put_tnl_push_action(actions, &data);
1190 /* Parses the string representation of datapath actions, in the format output
1191 * by format_odp_action(). Returns 0 if successful, otherwise a positive errno
1192 * value. On success, the ODP actions are appended to 'actions' as a series of
1193 * Netlink attributes. On failure, no data is appended to 'actions'. Either
1194 * way, 'actions''s data might be reallocated. */
1196 odp_actions_from_string(const char *s, const struct simap *port_names,
1197 struct ofpbuf *actions)
1201 if (!strcasecmp(s, "drop")) {
/* Remember the original size so a parse failure can roll back. */
1205 old_size = actions->size;
1209 s += strspn(s, delimiters);
1214 retval = parse_odp_action(s, port_names, actions);
/* On error, truncate 'actions' back to its pre-call contents. */
1215 if (retval < 0 || !strchr(delimiters, s[retval])) {
1216 actions->size = old_size;
/* Payload lengths for the nested OVS_VXLAN_EXT_* attributes. */
1225 static const struct attr_len_tbl ovs_vxlan_ext_attr_lens[OVS_VXLAN_EXT_MAX + 1] = {
1226 [OVS_VXLAN_EXT_GBP] = { .len = 4 },
/* Payload lengths for the OVS_TUNNEL_KEY_ATTR_* attributes nested inside
 * OVS_KEY_ATTR_TUNNEL.  Indexed and sized by OVS_TUNNEL_KEY_ATTR_MAX. */
1229 static const struct attr_len_tbl ovs_tun_key_attr_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
1230 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = 8 },
1231 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = 4 },
1232 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = 4 },
1233 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
1234 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
1235 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
1236 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
1237 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = 2 },
1238 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = 2 },
1239 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
1240 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = ATTR_LEN_VARIABLE },
1241 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = ATTR_LEN_NESTED,
1242 .next = ovs_vxlan_ext_attr_lens ,
1243 .next_max = OVS_VXLAN_EXT_MAX},
/* Payload lengths for the top-level OVS_KEY_ATTR_* flow key attributes. */
1246 static const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
1247 [OVS_KEY_ATTR_ENCAP] = { .len = ATTR_LEN_NESTED },
1248 [OVS_KEY_ATTR_PRIORITY] = { .len = 4 },
1249 [OVS_KEY_ATTR_SKB_MARK] = { .len = 4 },
1250 [OVS_KEY_ATTR_DP_HASH] = { .len = 4 },
1251 [OVS_KEY_ATTR_RECIRC_ID] = { .len = 4 },
1252 [OVS_KEY_ATTR_TUNNEL] = { .len = ATTR_LEN_NESTED,
1253 .next = ovs_tun_key_attr_lens,
1254 .next_max = OVS_TUNNEL_KEY_ATTR_MAX },
1255 [OVS_KEY_ATTR_IN_PORT] = { .len = 4 },
1256 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
1257 [OVS_KEY_ATTR_VLAN] = { .len = 2 },
1258 [OVS_KEY_ATTR_ETHERTYPE] = { .len = 2 },
1259 [OVS_KEY_ATTR_MPLS] = { .len = ATTR_LEN_VARIABLE },
1260 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
1261 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
1262 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
1263 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = 2 },
1264 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
1265 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
1266 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
1267 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
1268 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
1269 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
1272 /* Returns the correct length of the payload for a flow key attribute of the
1273 * specified 'type', ATTR_LEN_INVALID if 'type' is unknown, ATTR_LEN_VARIABLE
1274 * if the attribute's payload is variable length, or ATTR_LEN_NESTED if the
1275 * payload is a nested type. */
1277 odp_key_attr_len(const struct attr_len_tbl tbl[], int max_len, uint16_t type)
/* 'max_len' is the highest valid index into 'tbl'. */
1279 if (type > max_len) {
1280 return ATTR_LEN_INVALID;
1283 return tbl[type].len;
/* Hex-dumps the payload of a flow key attribute of unknown type into 'ds',
 * space-separating the bytes. */
1287 format_generic_odp_key(const struct nlattr *a, struct ds *ds)
1289 size_t len = nl_attr_get_size(a);
1291 const uint8_t *unspec;
1294 unspec = nl_attr_get(a);
1295 for (i = 0; i < len; i++) {
1297 ds_put_char(ds, ' ');
1299 ds_put_format(ds, "%02x", unspec[i]);
/* Maps an ovs_frag_type to its string name. */
1305 ovs_frag_type_to_string(enum ovs_frag_type type)
1308 case OVS_FRAG_TYPE_NONE:
1310 case OVS_FRAG_TYPE_FIRST:
1312 case OVS_FRAG_TYPE_LATER:
1314 case __OVS_FRAG_TYPE_MAX:
/* Combines a Geneve option class and type into a single value for
 * switch dispatch. */
1320 #define GENEVE_OPT(class, type) ((OVS_FORCE uint32_t)(class) << 8 | (type))
/* Walks the Geneve options carried in 'attr', validating that each option
 * header and its 4-byte-multiple body fit within the attribute.  Rejects
 * options we do not understand when their critical bit is set. */
1322 parse_geneve_opts(const struct nlattr *attr)
1324 int opts_len = nl_attr_get_size(attr);
1325 const struct geneve_opt *opt = nl_attr_get(attr);
1327 while (opts_len > 0) {
/* Not even room for an option header: malformed. */
1330 if (opts_len < sizeof(*opt)) {
/* opt->length counts 4-byte words of option body. */
1334 len = sizeof(*opt) + opt->length * 4;
1335 if (len > opts_len) {
1339 switch (GENEVE_OPT(opt->opt_class, opt->type)) {
/* Unknown critical options must cause a parse failure. */
1341 if (opt->type & GENEVE_CRIT_OPT_TYPE) {
/* Advance to the next option. */
1346 opt = opt + len / sizeof(*opt);
/* Parses the nested OVS_TUNNEL_KEY_ATTR_* attributes in 'attr' into the
 * tunnel metadata structure 'tun'.  Returns ODP_FIT_PERFECT when every
 * attribute was recognized, ODP_FIT_TOO_MUCH when unknown attributes were
 * skipped, or ODP_FIT_ERROR on malformed input.
 *
 * NOTE(review): this chunk is an excerpt; 'break;' statements and closing
 * braces between the cases are not visible here. */
enum odp_key_fitness
odp_tun_key_from_attr(const struct nlattr *attr, struct flow_tnl *tun)
    const struct nlattr *a;
    bool unknown = false;  /* Set when an unrecognized attribute is seen. */
    NL_NESTED_FOR_EACH(a, left, attr) {
        uint16_t type = nl_attr_type(a);
        size_t len = nl_attr_get_size(a);
        int expected_len = odp_key_attr_len(ovs_tun_key_attr_lens,
                                            OVS_TUNNEL_ATTR_MAX, type);
        /* expected_len < 0 means variable-length/nested/unknown; only
         * fixed-length attributes get a strict size check here. */
        if (len != expected_len && expected_len >= 0) {
            return ODP_FIT_ERROR;
        case OVS_TUNNEL_KEY_ATTR_ID:
            tun->tun_id = nl_attr_get_be64(a);
            tun->flags |= FLOW_TNL_F_KEY;
        case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
            tun->ip_src = nl_attr_get_be32(a);
        case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
            tun->ip_dst = nl_attr_get_be32(a);
        case OVS_TUNNEL_KEY_ATTR_TOS:
            tun->ip_tos = nl_attr_get_u8(a);
        case OVS_TUNNEL_KEY_ATTR_TTL:
            tun->ip_ttl = nl_attr_get_u8(a);
        /* The following attributes are flags: presence alone is the value. */
        case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
            tun->flags |= FLOW_TNL_F_DONT_FRAGMENT;
        case OVS_TUNNEL_KEY_ATTR_CSUM:
            tun->flags |= FLOW_TNL_F_CSUM;
        case OVS_TUNNEL_KEY_ATTR_TP_SRC:
            tun->tp_src = nl_attr_get_be16(a);
        case OVS_TUNNEL_KEY_ATTR_TP_DST:
            tun->tp_dst = nl_attr_get_be16(a);
        case OVS_TUNNEL_KEY_ATTR_OAM:
            tun->flags |= FLOW_TNL_F_OAM;
        case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: {
            /* VXLAN-GBP extension: a nested attribute carrying one u32 with
             * the group policy ID in the low 16 bits and flags above it. */
            static const struct nl_policy vxlan_opts_policy[] = {
                [OVS_VXLAN_EXT_GBP] = { .type = NL_A_U32 },
            struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
            if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
                return ODP_FIT_ERROR;
            if (ext[OVS_VXLAN_EXT_GBP]) {
                uint32_t gbp = nl_attr_get_u32(ext[OVS_VXLAN_EXT_GBP]);
                tun->gbp_id = htons(gbp & 0xFFFF);
                tun->gbp_flags = (gbp >> 16) & 0xFF;
        case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: {
            if (parse_geneve_opts(a)) {
                return ODP_FIT_ERROR;
            /* It is necessary to reproduce options exactly (including order)
             * so it's easiest to just echo them back. */
        /* Allow this to show up as unexpected, if there are unknown
         * tunnel attribute, eventually resulting in ODP_FIT_TOO_MUCH. */
        return ODP_FIT_ERROR;
    return ODP_FIT_TOO_MUCH;
    return ODP_FIT_PERFECT;
/* Serializes 'tun_key' as a nested OVS_KEY_ATTR_TUNNEL attribute appended to
 * 'a'.  Attributes with zero values are omitted (except TTL, which is always
 * emitted), so the emission order below defines the canonical layout.
 *
 * NOTE(review): this chunk is an excerpt; closing braces of the 'if' bodies
 * are not visible here. */
tun_key_to_attr(struct ofpbuf *a, const struct flow_tnl *tun_key)
    tun_key_ofs = nl_msg_start_nested(a, OVS_KEY_ATTR_TUNNEL);
    /* tun_id != 0 without FLOW_TNL_F_KEY is valid if tun_key is a mask. */
    if (tun_key->tun_id || tun_key->flags & FLOW_TNL_F_KEY) {
        nl_msg_put_be64(a, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id);
    if (tun_key->ip_src) {
        nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ip_src);
    if (tun_key->ip_dst) {
        nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ip_dst);
    if (tun_key->ip_tos) {
        nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ip_tos);
    /* TTL is emitted unconditionally. */
    nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ip_ttl);
    if (tun_key->flags & FLOW_TNL_F_DONT_FRAGMENT) {
        nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
    if (tun_key->flags & FLOW_TNL_F_CSUM) {
        nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
    if (tun_key->tp_src) {
        nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src);
    if (tun_key->tp_dst) {
        nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst);
    if (tun_key->flags & FLOW_TNL_F_OAM) {
        nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
    if (tun_key->gbp_flags || tun_key->gbp_id) {
        size_t vxlan_opts_ofs;
        /* GBP id/flags travel packed in a single nested u32. */
        vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
        nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP,
                       (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
        nl_msg_end_nested(a, vxlan_opts_ofs);
    nl_msg_end_nested(a, tun_key_ofs);
/* Returns true if the payload of mask attribute 'ma' is all zeros, i.e. it
 * wildcards the corresponding key attribute entirely. */
static bool
odp_mask_attr_is_wildcard(const struct nlattr *ma)
{
    const void *payload = nl_attr_get(ma);
    size_t payload_len = nl_attr_get_size(ma);

    return is_all_zeros(payload, payload_len);
}
/* Returns true if the 'size'-byte mask in 'mask' for key attribute 'attr'
 * matches its field exactly (no wildcarded bits).  A few attributes need
 * special treatment because not all of their bits are meaningful.
 *
 * NOTE(review): this chunk is an excerpt; some 'return' statements and
 * closing braces are not visible here. */
odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
    /* Only the defined TCP flag bits are significant. */
    if (attr == OVS_KEY_ATTR_TCP_FLAGS) {
        return TCP_FLAGS(*(ovs_be16 *)mask) == TCP_FLAGS(OVS_BE16_MAX);
    if (attr == OVS_KEY_ATTR_IPV6) {
        const struct ovs_key_ipv6 *ipv6_mask = mask;
        /* Only the 20-bit flow label portion of ipv6_label is significant. */
        ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
         == htonl(IPV6_LABEL_MASK))
        && ipv6_mask->ipv6_proto == UINT8_MAX
        && ipv6_mask->ipv6_tclass == UINT8_MAX
        && ipv6_mask->ipv6_hlimit == UINT8_MAX
        && ipv6_mask->ipv6_frag == UINT8_MAX
        && ipv6_mask_is_exact((const struct in6_addr *)ipv6_mask->ipv6_src)
        && ipv6_mask_is_exact((const struct in6_addr *)ipv6_mask->ipv6_dst);
    if (attr == OVS_KEY_ATTR_TUNNEL) {
    if (attr == OVS_KEY_ATTR_ARP) {
        /* ARP key has padding, ignore it. */
        BUILD_ASSERT_DECL(sizeof(struct ovs_key_arp) == 24);
        BUILD_ASSERT_DECL(offsetof(struct ovs_key_arp, arp_tha) == 10 + 6);
        /* Shrink 'size' to cover only the meaningful bytes; the padding
         * word that follows must be zero in a well-formed mask. */
        size = offsetof(struct ovs_key_arp, arp_tha) + ETH_ADDR_LEN;
        ovs_assert(((uint16_t *)mask)[size/2] == 0);
    /* Default: exact only if every bit of the mask is set. */
    return is_all_ones(mask, size);
1538 odp_mask_attr_is_exact(const struct nlattr *ma)
1540 enum ovs_key_attr attr = nl_attr_type(ma);
1544 if (attr == OVS_KEY_ATTR_TUNNEL) {
1547 mask = nl_attr_get(ma);
1548 size = nl_attr_get_size(ma);
1551 return odp_mask_is_exact(attr, mask, size);
1555 odp_portno_names_set(struct hmap *portno_names, odp_port_t port_no,
1558 struct odp_portno_names *odp_portno_names;
1560 odp_portno_names = xmalloc(sizeof *odp_portno_names);
1561 odp_portno_names->port_no = port_no;
1562 odp_portno_names->name = xstrdup(port_name);
1563 hmap_insert(portno_names, &odp_portno_names->hmap_node,
1564 hash_odp_port(port_no));
1568 odp_portno_names_get(const struct hmap *portno_names, odp_port_t port_no)
1570 struct odp_portno_names *odp_portno_names;
1572 HMAP_FOR_EACH_IN_BUCKET (odp_portno_names, hmap_node,
1573 hash_odp_port(port_no), portno_names) {
1574 if (odp_portno_names->port_no == port_no) {
1575 return odp_portno_names->name;
1582 odp_portno_names_destroy(struct hmap *portno_names)
1584 struct odp_portno_names *odp_portno_names, *odp_portno_names_next;
1585 HMAP_FOR_EACH_SAFE (odp_portno_names, odp_portno_names_next,
1586 hmap_node, portno_names) {
1587 hmap_remove(portno_names, &odp_portno_names->hmap_node);
1588 free(odp_portno_names->name);
1589 free(odp_portno_names);
1593 /* Format helpers. */
1596 format_eth(struct ds *ds, const char *name, const uint8_t key[ETH_ADDR_LEN],
1597 const uint8_t (*mask)[ETH_ADDR_LEN], bool verbose)
1599 bool mask_empty = mask && eth_addr_is_zero(*mask);
1601 if (verbose || !mask_empty) {
1602 bool mask_full = !mask || eth_mask_is_exact(*mask);
1605 ds_put_format(ds, "%s="ETH_ADDR_FMT",", name, ETH_ADDR_ARGS(key));
1607 ds_put_format(ds, "%s=", name);
1608 eth_format_masked(key, *mask, ds);
1609 ds_put_char(ds, ',');
1615 format_be64(struct ds *ds, const char *name, ovs_be64 key,
1616 const ovs_be64 *mask, bool verbose)
1618 bool mask_empty = mask && !*mask;
1620 if (verbose || !mask_empty) {
1621 bool mask_full = !mask || *mask == OVS_BE64_MAX;
1623 ds_put_format(ds, "%s=0x%"PRIx64, name, ntohll(key));
1624 if (!mask_full) { /* Partially masked. */
1625 ds_put_format(ds, "/%#"PRIx64, ntohll(*mask));
1627 ds_put_char(ds, ',');
1632 format_ipv4(struct ds *ds, const char *name, ovs_be32 key,
1633 const ovs_be32 *mask, bool verbose)
1635 bool mask_empty = mask && !*mask;
1637 if (verbose || !mask_empty) {
1638 bool mask_full = !mask || *mask == OVS_BE32_MAX;
1640 ds_put_format(ds, "%s="IP_FMT, name, IP_ARGS(key));
1641 if (!mask_full) { /* Partially masked. */
1642 ds_put_format(ds, "/"IP_FMT, IP_ARGS(*mask));
1644 ds_put_char(ds, ',');
1649 format_ipv6(struct ds *ds, const char *name, const ovs_be32 key_[4],
1650 const ovs_be32 (*mask_)[4], bool verbose)
1652 char buf[INET6_ADDRSTRLEN];
1653 const struct in6_addr *key = (const struct in6_addr *)key_;
1654 const struct in6_addr *mask = mask_ ? (const struct in6_addr *)*mask_
1656 bool mask_empty = mask && ipv6_mask_is_any(mask);
1658 if (verbose || !mask_empty) {
1659 bool mask_full = !mask || ipv6_mask_is_exact(mask);
1661 inet_ntop(AF_INET6, key, buf, sizeof buf);
1662 ds_put_format(ds, "%s=%s", name, buf);
1663 if (!mask_full) { /* Partially masked. */
1664 inet_ntop(AF_INET6, mask, buf, sizeof buf);
1665 ds_put_format(ds, "/%s", buf);
1667 ds_put_char(ds, ',');
1672 format_ipv6_label(struct ds *ds, const char *name, ovs_be32 key,
1673 const ovs_be32 *mask, bool verbose)
1675 bool mask_empty = mask && !*mask;
1677 if (verbose || !mask_empty) {
1678 bool mask_full = !mask
1679 || (*mask & htonl(IPV6_LABEL_MASK)) == htonl(IPV6_LABEL_MASK);
1681 ds_put_format(ds, "%s=%#"PRIx32, name, ntohl(key));
1682 if (!mask_full) { /* Partially masked. */
1683 ds_put_format(ds, "/%#"PRIx32, ntohl(*mask));
1685 ds_put_char(ds, ',');
/* Appends "name=0xKEY" (with "/MASK" when partially masked) and a trailing
 * comma to 'ds', in hexadecimal.  An all-zeros mask suppresses output
 * unless 'verbose'. */
static void
format_u8x(struct ds *ds, const char *name, uint8_t key,
           const uint8_t *mask, bool verbose)
{
    bool all_wild = mask && !*mask;

    if (!verbose && all_wild) {
        return;
    }
    ds_put_format(ds, "%s=%#"PRIx8, name, key);
    if (mask && *mask != UINT8_MAX) {
        /* Partially masked. */
        ds_put_format(ds, "/%#"PRIx8, *mask);
    }
    ds_put_char(ds, ',');
}
/* Appends "name=KEY" (decimal key, hexadecimal "/MASK" when partially
 * masked) and a trailing comma to 'ds'.  An all-zeros mask suppresses
 * output unless 'verbose'. */
static void
format_u8u(struct ds *ds, const char *name, uint8_t key,
           const uint8_t *mask, bool verbose)
{
    bool all_wild = mask && !*mask;

    if (!verbose && all_wild) {
        return;
    }
    ds_put_format(ds, "%s=%"PRIu8, name, key);
    if (mask && *mask != UINT8_MAX) {
        /* Partially masked. */
        ds_put_format(ds, "/%#"PRIx8, *mask);
    }
    ds_put_char(ds, ',');
}
1724 format_be16(struct ds *ds, const char *name, ovs_be16 key,
1725 const ovs_be16 *mask, bool verbose)
1727 bool mask_empty = mask && !*mask;
1729 if (verbose || !mask_empty) {
1730 bool mask_full = !mask || *mask == OVS_BE16_MAX;
1732 ds_put_format(ds, "%s=%"PRIu16, name, ntohs(key));
1733 if (!mask_full) { /* Partially masked. */
1734 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
1736 ds_put_char(ds, ',');
1741 format_be16x(struct ds *ds, const char *name, ovs_be16 key,
1742 const ovs_be16 *mask, bool verbose)
1744 bool mask_empty = mask && !*mask;
1746 if (verbose || !mask_empty) {
1747 bool mask_full = !mask || *mask == OVS_BE16_MAX;
1749 ds_put_format(ds, "%s=%#"PRIx16, name, ntohs(key));
1750 if (!mask_full) { /* Partially masked. */
1751 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
1753 ds_put_char(ds, ',');
1758 format_tun_flags(struct ds *ds, const char *name, uint16_t key,
1759 const uint16_t *mask, bool verbose)
1761 bool mask_empty = mask && !*mask;
1763 if (verbose || !mask_empty) {
1764 bool mask_full = !mask || (*mask & FLOW_TNL_F_MASK) == FLOW_TNL_F_MASK;
1766 ds_put_cstr(ds, name);
1767 ds_put_char(ds, '(');
1768 if (!mask_full) { /* Partially masked. */
1769 format_flags_masked(ds, NULL, flow_tun_flag_to_string, key, *mask);
1770 } else { /* Fully masked. */
1771 format_flags(ds, flow_tun_flag_to_string, key, ',');
1773 ds_put_cstr(ds, "),");
/* Verifies that attribute 'a' (and, when nonnull, its mask 'ma') has the
 * payload length that the table 'tbl' (indexed up to 'max_len') prescribes.
 * On a length mismatch it appends a diagnostic plus a hex dump of the bad
 * attribute(s) to 'ds'; with 'need_key' it also prints the key number.
 *
 * NOTE(review): this chunk is an excerpt; the 'if' conditions guarding the
 * key/mask branches and the return statements are not visible here. */
check_attr_len(struct ds *ds, const struct nlattr *a, const struct nlattr *ma,
               const struct attr_len_tbl tbl[], int max_len, bool need_key)
    expected_len = odp_key_attr_len(tbl, max_len, nl_attr_type(a));
    /* Variable-length and nested attributes have no fixed length to check. */
    if (expected_len != ATTR_LEN_VARIABLE &&
        expected_len != ATTR_LEN_NESTED) {
        bool bad_key_len = nl_attr_get_size(a) != expected_len;
        bool bad_mask_len = ma && nl_attr_get_size(ma) != expected_len;
        if (bad_key_len || bad_mask_len) {
            ds_put_format(ds, "key%u", nl_attr_type(a));
            ds_put_format(ds, "(bad key length %"PRIuSIZE", expected %d)(",
                          nl_attr_get_size(a), expected_len);
            /* Dump the raw bytes so the malformed attribute is visible. */
            format_generic_odp_key(a, ds);
            ds_put_char(ds, '/');
            ds_put_format(ds, "(bad mask length %"PRIuSIZE", expected %d)(",
                          nl_attr_get_size(ma), expected_len);
            format_generic_odp_key(ma, ds);
            ds_put_char(ds, ')');
/* Appends "keyN(HEX...)," to 'ds' for an attribute type with no dedicated
 * formatter, including a "/HEX..." mask dump when 'ma' is not exact. */
static void
format_unknown_key(struct ds *ds, const struct nlattr *a,
                   const struct nlattr *ma)
{
    ds_put_format(ds, "key%u(", nl_attr_type(a));
    format_generic_odp_key(a, ds);
    if (ma && !odp_mask_attr_is_exact(ma)) {
        ds_put_char(ds, '/');
        format_generic_odp_key(ma, ds);
    }
    ds_put_cstr(ds, "),");
}
/* Formats the nested OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS attribute 'attr' into
 * 'ds'.  For each sub-attribute a matching mask is looked up in 'mask_attr'
 * (or an all-wildcard mask is synthesized into a scratch ofpbuf).
 *
 * NOTE(review): this chunk is an excerpt; 'break;' statements and closing
 * braces between the cases are not visible here. */
format_odp_tun_vxlan_opt(const struct nlattr *attr,
                         const struct nlattr *mask_attr, struct ds *ds,
    const struct nlattr *a;
    /* Scratch buffer for synthesized all-wildcard masks. */
    ofpbuf_init(&ofp, 100);
    NL_NESTED_FOR_EACH(a, left, attr) {
        uint16_t type = nl_attr_type(a);
        const struct nlattr *ma = NULL;
        ma = nl_attr_find__(nl_attr_get(mask_attr),
                            nl_attr_get_size(mask_attr), type);
        ma = generate_all_wildcard_mask(ovs_vxlan_ext_attr_lens,
        /* Skip attributes whose length does not match the table. */
        if (!check_attr_len(ds, a, ma, ovs_vxlan_ext_attr_lens,
                            OVS_VXLAN_EXT_MAX, true)) {
        case OVS_VXLAN_EXT_GBP: {
            /* GBP: policy id in low 16 bits, flags in bits 16..23. */
            uint32_t key = nl_attr_get_u32(a);
            ovs_be16 id, id_mask;
            uint8_t flags, flags_mask;
            id = htons(key & 0xFFFF);
            flags = (key >> 16) & 0xFF;
            uint32_t mask = nl_attr_get_u32(ma);
            id_mask = htons(mask & 0xFFFF);
            flags_mask = (mask >> 16) & 0xFF;
            ds_put_cstr(ds, "gbp(");
            format_be16(ds, "id", id, ma ? &id_mask : NULL, verbose);
            format_u8x(ds, "flags", flags, ma ? &flags_mask : NULL, verbose);
            ds_put_cstr(ds, "),");
            format_unknown_key(ds, a, ma);
    ofpbuf_uninit(&ofp);
/* Evaluates to &PTR->FIELD when PTR is nonnull, else NULL; used to forward
 * an optional mask field to the format_*() helpers.  NOTE(review): the
 * expansion is deliberately unparenthesized so it can be spelled inside a
 * larger expression at the call sites below; only use it as a full function
 * argument. */
#define MASK(PTR, FIELD) PTR ? &PTR->FIELD : NULL
/* Formats the Geneve options carried in 'attr' (and the parallel mask bytes
 * in 'mask_attr', which must be the same length) into 'ds' as a sequence of
 * "{class=..,type=..,len=..,DATA[/MASK]}" groups.
 *
 * NOTE(review): this chunk is an excerpt; some braces, 'return's and the
 * loop tail are not visible here. */
format_odp_tun_geneve(const struct nlattr *attr,
                      const struct nlattr *mask_attr, struct ds *ds,
    int opts_len = nl_attr_get_size(attr);
    const struct geneve_opt *opt = nl_attr_get(attr);
    const struct geneve_opt *mask = mask_attr ?
                                    nl_attr_get(mask_attr) : NULL;
    /* Key and mask walk in lockstep, so their lengths must agree. */
    if (mask && nl_attr_get_size(attr) != nl_attr_get_size(mask_attr)) {
        ds_put_format(ds, "value len %"PRIuSIZE" different from mask len %"PRIuSIZE,
                      nl_attr_get_size(attr), nl_attr_get_size(mask_attr));
    while (opts_len > 0) {
        uint8_t data_len, data_len_mask;
        if (opts_len < sizeof *opt) {
            ds_put_format(ds, "opt len %u less than minimum %"PRIuSIZE,
                          opts_len, sizeof *opt);
        /* opt->length is in 4-byte words. */
        data_len = opt->length * 4;
        /* A fully-set 5-bit length field means "len fully masked". */
        if (mask->length == 0x1f) {
            data_len_mask = UINT8_MAX;
            data_len_mask = mask->length;
        len = sizeof *opt + data_len;
        if (len > opts_len) {
            ds_put_format(ds, "opt len %u greater than remaining %u",
        ds_put_char(ds, '{');
        format_be16x(ds, "class", opt->opt_class, MASK(mask, opt_class),
        format_u8x(ds, "type", opt->type, MASK(mask, type), verbose);
        format_u8u(ds, "len", data_len, mask ? &data_len_mask : NULL, verbose);
        /* Option payload follows the header; dump it with mask when the
         * mask is not all-ones. */
        if (verbose || !mask || !is_all_zeros(mask + 1, data_len)) {
            ds_put_hex(ds, opt + 1, data_len);
            if (mask && !is_all_ones(mask + 1, data_len)) {
                ds_put_char(ds, '/');
                ds_put_hex(ds, mask + 1, data_len);
        ds_put_char(ds, '}');
        /* Advance both cursors by whole options. */
        opt += len / sizeof(*opt);
        mask += len / sizeof(*opt);
/* Formats the nested OVS_KEY_ATTR_TUNNEL attribute 'attr' (with optional
 * nested mask 'mask_attr') into 'ds'.  Presence-only flag attributes are
 * accumulated and printed once at the end via format_tun_flags().
 *
 * NOTE(review): this chunk is an excerpt; 'break;' statements and closing
 * braces between the cases are not visible here. */
format_odp_tun_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
                    struct ds *ds, bool verbose)
    const struct nlattr *a;
    uint16_t mask_flags = 0;
    /* Scratch buffer for synthesized all-wildcard masks. */
    ofpbuf_init(&ofp, 100);
    NL_NESTED_FOR_EACH(a, left, attr) {
        enum ovs_tunnel_key_attr type = nl_attr_type(a);
        const struct nlattr *ma = NULL;
        ma = nl_attr_find__(nl_attr_get(mask_attr),
                            nl_attr_get_size(mask_attr), type);
        ma = generate_all_wildcard_mask(ovs_tun_key_attr_lens,
                                        OVS_TUNNEL_KEY_ATTR_MAX,
        if (!check_attr_len(ds, a, ma, ovs_tun_key_attr_lens,
                            OVS_TUNNEL_KEY_ATTR_MAX, true)) {
        case OVS_TUNNEL_KEY_ATTR_ID:
            format_be64(ds, "tun_id", nl_attr_get_be64(a),
                        ma ? nl_attr_get(ma) : NULL, verbose);
            flags |= FLOW_TNL_F_KEY;
            mask_flags |= FLOW_TNL_F_KEY;
        case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
            format_ipv4(ds, "src", nl_attr_get_be32(a),
                        ma ? nl_attr_get(ma) : NULL, verbose);
        case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
            format_ipv4(ds, "dst", nl_attr_get_be32(a),
                        ma ? nl_attr_get(ma) : NULL, verbose);
        case OVS_TUNNEL_KEY_ATTR_TOS:
            format_u8x(ds, "tos", nl_attr_get_u8(a),
                       ma ? nl_attr_get(ma) : NULL, verbose);
        case OVS_TUNNEL_KEY_ATTR_TTL:
            format_u8u(ds, "ttl", nl_attr_get_u8(a),
                       ma ? nl_attr_get(ma) : NULL, verbose);
        /* Flag attributes: remember them, printed together below. */
        case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
            flags |= FLOW_TNL_F_DONT_FRAGMENT;
        case OVS_TUNNEL_KEY_ATTR_CSUM:
            flags |= FLOW_TNL_F_CSUM;
        case OVS_TUNNEL_KEY_ATTR_TP_SRC:
            format_be16(ds, "tp_src", nl_attr_get_be16(a),
                        ma ? nl_attr_get(ma) : NULL, verbose);
        case OVS_TUNNEL_KEY_ATTR_TP_DST:
            format_be16(ds, "tp_dst", nl_attr_get_be16(a),
                        ma ? nl_attr_get(ma) : NULL, verbose);
        case OVS_TUNNEL_KEY_ATTR_OAM:
            flags |= FLOW_TNL_F_OAM;
        case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
            ds_put_cstr(ds, "vxlan(");
            format_odp_tun_vxlan_opt(a, ma, ds, verbose);
            ds_put_cstr(ds, "),");
        case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
            ds_put_cstr(ds, "geneve(");
            format_odp_tun_geneve(a, ma, ds, verbose);
            ds_put_cstr(ds, "),");
        case __OVS_TUNNEL_KEY_ATTR_MAX:
            format_unknown_key(ds, a, ma);
    /* Flags can have a valid mask even if the attribute is not set, so
     * we need to collect these separately. */
    NL_NESTED_FOR_EACH(a, left, mask_attr) {
        switch (nl_attr_type(a)) {
        case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
            mask_flags |= FLOW_TNL_F_DONT_FRAGMENT;
        case OVS_TUNNEL_KEY_ATTR_CSUM:
            mask_flags |= FLOW_TNL_F_CSUM;
        case OVS_TUNNEL_KEY_ATTR_OAM:
            mask_flags |= FLOW_TNL_F_OAM;
    format_tun_flags(ds, "flags", flags, mask_attr ? &mask_flags : NULL,
    ofpbuf_uninit(&ofp);
/* Appends "name=no|first|later," to 'ds'.  A fragment type is an enumerated
 * value, so a partial mask is meaningless and reported as an error; an
 * all-zeros mask suppresses output unless 'verbose'. */
static void
format_frag(struct ds *ds, const char *name, uint8_t key,
            const uint8_t *mask, bool verbose)
{
    bool all_wild = mask && !*mask;

    /* ODP frag is an enumeration field; partial masks are not meaningful. */
    if (!verbose && all_wild) {
        return;
    }
    if (mask && *mask != UINT8_MAX) {
        ds_put_format(ds, "error: partial mask not supported for frag (%#"
                      PRIx8"),", *mask);
    } else {
        ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
    }
}
/* Formats one OVS_KEY_ATTR_* attribute 'a' (with optional mask 'ma') into
 * 'ds' as "name(contents)".  'portno_names', when nonnull and 'verbose' is
 * set, is used to translate IN_PORT numbers to port names.
 *
 * NOTE(review): this chunk is an excerpt; 'break;' statements, the switch
 * header and closing braces between the cases are not visible here. */
format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
                    const struct hmap *portno_names, struct ds *ds,
    enum ovs_key_attr attr = nl_attr_type(a);
    char namebuf[OVS_KEY_ATTR_BUFSIZE];
    is_exact = ma ? odp_mask_attr_is_exact(ma) : true;
    ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
    /* Bail out early on attributes with bad lengths. */
    if (!check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
                        OVS_KEY_ATTR_MAX, false)) {
    ds_put_char(ds, '(');
    case OVS_KEY_ATTR_ENCAP:
        /* ENCAP payload is itself a flow key; recurse. */
        if (ma && nl_attr_get_size(ma) && nl_attr_get_size(a)) {
            odp_flow_format(nl_attr_get(a), nl_attr_get_size(a),
                            nl_attr_get(ma), nl_attr_get_size(ma), NULL, ds,
        } else if (nl_attr_get_size(a)) {
            odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, NULL,
    /* Plain u32 attributes, printed in hex with optional "/mask". */
    case OVS_KEY_ATTR_PRIORITY:
    case OVS_KEY_ATTR_SKB_MARK:
    case OVS_KEY_ATTR_DP_HASH:
    case OVS_KEY_ATTR_RECIRC_ID:
        ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
        ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
    case OVS_KEY_ATTR_TUNNEL:
        format_odp_tun_attr(a, ma, ds, verbose);
    case OVS_KEY_ATTR_IN_PORT:
        /* Prefer a symbolic port name when available and exact. */
        if (portno_names && verbose && is_exact) {
            char *name = odp_portno_names_get(portno_names,
                                              u32_to_odp(nl_attr_get_u32(a)));
            ds_put_format(ds, "%s", name);
            ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
        ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
        ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
    case OVS_KEY_ATTR_ETHERNET: {
        const struct ovs_key_ethernet *mask = ma ? nl_attr_get(ma) : NULL;
        const struct ovs_key_ethernet *key = nl_attr_get(a);
        format_eth(ds, "src", key->eth_src, MASK(mask, eth_src), verbose);
        format_eth(ds, "dst", key->eth_dst, MASK(mask, eth_dst), verbose);
    case OVS_KEY_ATTR_VLAN:
        format_vlan_tci(ds, nl_attr_get_be16(a),
                        ma ? nl_attr_get_be16(ma) : OVS_BE16_MAX, verbose);
    case OVS_KEY_ATTR_MPLS: {
        /* MPLS is variable length: one or more label stack entries. */
        const struct ovs_key_mpls *mpls_key = nl_attr_get(a);
        const struct ovs_key_mpls *mpls_mask = NULL;
        size_t size = nl_attr_get_size(a);
        if (!size || size % sizeof *mpls_key) {
            ds_put_format(ds, "(bad key length %"PRIuSIZE")", size);
        mpls_mask = nl_attr_get(ma);
        if (size != nl_attr_get_size(ma)) {
            ds_put_format(ds, "(key length %"PRIuSIZE" != "
                          "mask length %"PRIuSIZE")",
                          size, nl_attr_get_size(ma));
        format_mpls(ds, mpls_key, mpls_mask, size / sizeof *mpls_key);
    case OVS_KEY_ATTR_ETHERTYPE:
        ds_put_format(ds, "0x%04"PRIx16, ntohs(nl_attr_get_be16(a)));
        ds_put_format(ds, "/0x%04"PRIx16, ntohs(nl_attr_get_be16(ma)));
    case OVS_KEY_ATTR_IPV4: {
        const struct ovs_key_ipv4 *key = nl_attr_get(a);
        const struct ovs_key_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
        format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
        format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
        format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
        format_u8x(ds, "tos", key->ipv4_tos, MASK(mask, ipv4_tos), verbose);
        format_u8u(ds, "ttl", key->ipv4_ttl, MASK(mask, ipv4_ttl), verbose);
        format_frag(ds, "frag", key->ipv4_frag, MASK(mask, ipv4_frag),
    case OVS_KEY_ATTR_IPV6: {
        const struct ovs_key_ipv6 *key = nl_attr_get(a);
        const struct ovs_key_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
        format_ipv6(ds, "src", key->ipv6_src, MASK(mask, ipv6_src), verbose);
        format_ipv6(ds, "dst", key->ipv6_dst, MASK(mask, ipv6_dst), verbose);
        format_ipv6_label(ds, "label", key->ipv6_label, MASK(mask, ipv6_label),
        format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
        format_u8x(ds, "tclass", key->ipv6_tclass, MASK(mask, ipv6_tclass),
        format_u8u(ds, "hlimit", key->ipv6_hlimit, MASK(mask, ipv6_hlimit),
        format_frag(ds, "frag", key->ipv6_frag, MASK(mask, ipv6_frag),
    /* These have the same structure and format. */
    case OVS_KEY_ATTR_TCP:
    case OVS_KEY_ATTR_UDP:
    case OVS_KEY_ATTR_SCTP: {
        const struct ovs_key_tcp *key = nl_attr_get(a);
        const struct ovs_key_tcp *mask = ma ? nl_attr_get(ma) : NULL;
        format_be16(ds, "src", key->tcp_src, MASK(mask, tcp_src), verbose);
        format_be16(ds, "dst", key->tcp_dst, MASK(mask, tcp_dst), verbose);
    case OVS_KEY_ATTR_TCP_FLAGS:
        format_flags_masked(ds, NULL, packet_tcp_flag_to_string,
                            ntohs(nl_attr_get_be16(a)),
                            ntohs(nl_attr_get_be16(ma)));
        format_flags(ds, packet_tcp_flag_to_string,
                     ntohs(nl_attr_get_be16(a)), ',');
    case OVS_KEY_ATTR_ICMP: {
        const struct ovs_key_icmp *key = nl_attr_get(a);
        const struct ovs_key_icmp *mask = ma ? nl_attr_get(ma) : NULL;
        format_u8u(ds, "type", key->icmp_type, MASK(mask, icmp_type), verbose);
        format_u8u(ds, "code", key->icmp_code, MASK(mask, icmp_code), verbose);
    case OVS_KEY_ATTR_ICMPV6: {
        const struct ovs_key_icmpv6 *key = nl_attr_get(a);
        const struct ovs_key_icmpv6 *mask = ma ? nl_attr_get(ma) : NULL;
        format_u8u(ds, "type", key->icmpv6_type, MASK(mask, icmpv6_type),
        format_u8u(ds, "code", key->icmpv6_code, MASK(mask, icmpv6_code),
    case OVS_KEY_ATTR_ARP: {
        const struct ovs_key_arp *mask = ma ? nl_attr_get(ma) : NULL;
        const struct ovs_key_arp *key = nl_attr_get(a);
        format_ipv4(ds, "sip", key->arp_sip, MASK(mask, arp_sip), verbose);
        format_ipv4(ds, "tip", key->arp_tip, MASK(mask, arp_tip), verbose);
        format_be16(ds, "op", key->arp_op, MASK(mask, arp_op), verbose);
        format_eth(ds, "sha", key->arp_sha, MASK(mask, arp_sha), verbose);
        format_eth(ds, "tha", key->arp_tha, MASK(mask, arp_tha), verbose);
    case OVS_KEY_ATTR_ND: {
        const struct ovs_key_nd *mask = ma ? nl_attr_get(ma) : NULL;
        const struct ovs_key_nd *key = nl_attr_get(a);
        format_ipv6(ds, "target", key->nd_target, MASK(mask, nd_target),
        format_eth(ds, "sll", key->nd_sll, MASK(mask, nd_sll), verbose);
        format_eth(ds, "tll", key->nd_tll, MASK(mask, nd_tll), verbose);
    case OVS_KEY_ATTR_UNSPEC:
    case __OVS_KEY_ATTR_MAX:
        /* Fallback: raw hex dump of key (and mask, if inexact). */
        format_generic_odp_key(a, ds);
        ds_put_char(ds, '/');
        format_generic_odp_key(ma, ds);
    ds_put_char(ds, ')');
2306 static struct nlattr *
2307 generate_all_wildcard_mask(const struct attr_len_tbl tbl[], int max,
2308 struct ofpbuf *ofp, const struct nlattr *key)
2310 const struct nlattr *a;
2312 int type = nl_attr_type(key);
2313 int size = nl_attr_get_size(key);
2315 if (odp_key_attr_len(tbl, max, type) != ATTR_LEN_NESTED) {
2316 nl_msg_put_unspec_zero(ofp, type, size);
2320 if (tbl[type].next) {
2321 tbl = tbl[type].next;
2322 max = tbl[type].next_max;
2325 nested_mask = nl_msg_start_nested(ofp, type);
2326 NL_ATTR_FOR_EACH(a, left, key, nl_attr_get_size(key)) {
2327 generate_all_wildcard_mask(tbl, max, ofp, nl_attr_get(a));
2329 nl_msg_end_nested(ofp, nested_mask);
/* Parses a flow UFID of the form "ufid:UUID" at the start of 's_' into
 * 'ufid'.
 *
 * NOTE(review): this chunk is an excerpt; the declaration of 's', the
 * pointer advancement and the return statements are not visible here, so
 * the exact return contract (presumably bytes consumed, 0 when no "ufid:"
 * prefix, negative on a malformed UUID) should be confirmed against the
 * full source. */
odp_ufid_from_string(const char *s_, ovs_u128 *ufid)
    if (ovs_scan(s, "ufid:")) {
        /* Reinterprets the ovs_u128 as a struct uuid for parsing. */
        if (!uuid_from_string_prefix((struct uuid *)ufid, s)) {
2355 odp_format_ufid(const ovs_u128 *ufid, struct ds *ds)
2357 ds_put_format(ds, "ufid:"UUID_FMT, UUID_ARGS((struct uuid *)ufid));
/* Appends to 'ds' a string representation of the 'key_len' bytes of
 * OVS_KEY_ATTR_* attributes in 'key'.  If non-null, additionally formats the
 * 'mask_len' bytes of 'mask' which apply to 'key'.  If 'portno_names' is
 * non-null and 'verbose' is true, translates odp port number to its name.
 *
 * NOTE(review): this chunk is an excerpt; several braces and the 'if'
 * framing of the leftover-bytes / empty-key handling are not visible. */
odp_flow_format(const struct nlattr *key, size_t key_len,
                const struct nlattr *mask, size_t mask_len,
                const struct hmap *portno_names, struct ds *ds, bool verbose)
    const struct nlattr *a;
    bool has_ethtype_key = false;
    const struct nlattr *ma = NULL;
    bool first_field = true;
    /* Scratch buffer for synthesized all-wildcard masks. */
    ofpbuf_init(&ofp, 100);
    NL_ATTR_FOR_EACH (a, left, key, key_len) {
        bool is_nested_attr;
        bool is_wildcard = false;
        int attr_type = nl_attr_type(a);
        if (attr_type == OVS_KEY_ATTR_ETHERTYPE) {
            has_ethtype_key = true;
        is_nested_attr = odp_key_attr_len(ovs_flow_key_attr_lens,
                                          OVS_KEY_ATTR_MAX, attr_type) ==
        if (mask && mask_len) {
            ma = nl_attr_find__(mask, mask_len, nl_attr_type(a));
            /* An absent mask attribute means fully wildcarded. */
            is_wildcard = ma ? odp_mask_attr_is_wildcard(ma) : true;
        /* Fully-wildcarded fields are hidden unless verbose or nested. */
        if (verbose || !is_wildcard || is_nested_attr) {
            if (is_wildcard && !ma) {
                ma = generate_all_wildcard_mask(ovs_flow_key_attr_lens,
            ds_put_char(ds, ',');
            format_odp_key_attr(a, ma, portno_names, ds, verbose);
            first_field = false;
    ofpbuf_uninit(&ofp);
    /* 'left' != 0 here means trailing bytes that are not a whole attribute. */
    if (left == key_len) {
        ds_put_cstr(ds, "<empty>");
        ds_put_format(ds, ",***%u leftover bytes*** (", left);
        for (i = 0; i < left; i++) {
            ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
        ds_put_char(ds, ')');
    /* An eth_type mask without a corresponding key is still shown. */
    if (!has_ethtype_key) {
        ma = nl_attr_find__(mask, mask_len, OVS_KEY_ATTR_ETHERTYPE);
        ds_put_format(ds, ",eth_type(0/0x%04"PRIx16")",
                      ntohs(nl_attr_get_be16(ma)));
    ds_put_cstr(ds, "<empty>");
2436 /* Appends to 'ds' a string representation of the 'key_len' bytes of
2437 * OVS_KEY_ATTR_* attributes in 'key'. */
2439 odp_flow_key_format(const struct nlattr *key,
2440 size_t key_len, struct ds *ds)
2442 odp_flow_format(key, key_len, NULL, 0, NULL, ds, true);
2446 ovs_frag_type_from_string(const char *s, enum ovs_frag_type *type)
2448 if (!strcasecmp(s, "no")) {
2449 *type = OVS_FRAG_TYPE_NONE;
2450 } else if (!strcasecmp(s, "first")) {
2451 *type = OVS_FRAG_TYPE_FIRST;
2452 } else if (!strcasecmp(s, "later")) {
2453 *type = OVS_FRAG_TYPE_LATER;
2463 scan_eth(const char *s, uint8_t (*key)[ETH_ADDR_LEN],
2464 uint8_t (*mask)[ETH_ADDR_LEN])
2468 if (ovs_scan(s, ETH_ADDR_SCAN_FMT"%n", ETH_ADDR_SCAN_ARGS(*key), &n)) {
2472 if (ovs_scan(s + len, "/"ETH_ADDR_SCAN_FMT"%n",
2473 ETH_ADDR_SCAN_ARGS(*mask), &n)) {
2476 memset(mask, 0xff, sizeof *mask);
2485 scan_ipv4(const char *s, ovs_be32 *key, ovs_be32 *mask)
2489 if (ovs_scan(s, IP_SCAN_FMT"%n", IP_SCAN_ARGS(key), &n)) {
2493 if (ovs_scan(s + len, "/"IP_SCAN_FMT"%n",
2494 IP_SCAN_ARGS(mask), &n)) {
2497 *mask = OVS_BE32_MAX;
2506 scan_ipv6(const char *s, ovs_be32 (*key)[4], ovs_be32 (*mask)[4])
2509 char ipv6_s[IPV6_SCAN_LEN + 1];
2511 if (ovs_scan(s, IPV6_SCAN_FMT"%n", ipv6_s, &n)
2512 && inet_pton(AF_INET6, ipv6_s, key) == 1) {
2516 if (ovs_scan(s + len, "/"IPV6_SCAN_FMT"%n", ipv6_s, &n)
2517 && inet_pton(AF_INET6, ipv6_s, mask) == 1) {
2520 memset(mask, 0xff, sizeof *mask);
2529 scan_ipv6_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
2534 if (ovs_scan(s, "%i%n", &key_, &n)
2535 && (key_ & ~IPV6_LABEL_MASK) == 0) {
2540 if (ovs_scan(s + len, "/%i%n", &mask_, &n)
2541 && (mask_ & ~IPV6_LABEL_MASK) == 0) {
2543 *mask = htonl(mask_);
2545 *mask = htonl(IPV6_LABEL_MASK);
/* Parses an 8-bit integer, optionally followed by "/mask", from 's' into
 * '*key' and (when nonnull) '*mask'.  With no explicit mask, '*mask' becomes
 * UINT8_MAX.  Returns the number of bytes consumed, or 0 on failure. */
static int
scan_u8(const char *s, uint8_t *key, uint8_t *mask)
{
    int n = -1;

    if (!ovs_scan(s, "%"SCNi8"%n", key, &n)) {
        return 0;
    }

    int len = n;
    if (mask) {
        if (ovs_scan(s + len, "/%"SCNi8"%n", mask, &n)) {
            len += n;
        } else {
            *mask = UINT8_MAX;
        }
    }
    return len;
}
/* Parses a 32-bit integer, optionally followed by "/mask", from 's' into
 * '*key' and (when nonnull) '*mask'.  With no explicit mask, '*mask' becomes
 * UINT32_MAX.  Returns the number of bytes consumed, or 0 on failure. */
static int
scan_u32(const char *s, uint32_t *key, uint32_t *mask)
{
    int n = -1;

    if (!ovs_scan(s, "%"SCNi32"%n", key, &n)) {
        return 0;
    }

    int len = n;
    if (mask) {
        if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
            len += n;
        } else {
            *mask = UINT32_MAX;
        }
    }
    return len;
}
2594 scan_be16(const char *s, ovs_be16 *key, ovs_be16 *mask)
2596 uint16_t key_, mask_;
2599 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
2604 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
2606 *mask = htons(mask_);
2608 *mask = OVS_BE16_MAX;
2617 scan_be64(const char *s, ovs_be64 *key, ovs_be64 *mask)
2619 uint64_t key_, mask_;
2622 if (ovs_scan(s, "%"SCNi64"%n", &key_, &n)) {
2625 *key = htonll(key_);
2627 if (ovs_scan(s + len, "/%"SCNi64"%n", &mask_, &n)) {
2629 *mask = htonll(mask_);
2631 *mask = OVS_BE64_MAX;
2640 scan_tun_flags(const char *s, uint16_t *key, uint16_t *mask)
2642 uint32_t flags, fmask;
2645 n = parse_flags(s, flow_tun_flag_to_string, &flags,
2646 FLOW_TNL_F_MASK, mask ? &fmask : NULL);
2647 if (n >= 0 && s[n] == ')') {
2658 scan_tcp_flags(const char *s, ovs_be16 *key, ovs_be16 *mask)
2660 uint32_t flags, fmask;
2663 n = parse_flags(s, packet_tcp_flag_to_string, &flags,
2664 TCP_FLAGS(OVS_BE16_MAX), mask ? &fmask : NULL);
2666 *key = htons(flags);
2668 *mask = htons(fmask);
2676 scan_frag(const char *s, uint8_t *key, uint8_t *mask)
2680 enum ovs_frag_type frag_type;
2682 if (ovs_scan(s, "%7[a-z]%n", frag, &n)
2683 && ovs_frag_type_from_string(frag, &frag_type)) {
2696 scan_port(const char *s, uint32_t *key, uint32_t *mask,
2697 const struct simap *port_names)
2701 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
2705 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
2712 } else if (port_names) {
2713 const struct simap_node *node;
2716 len = strcspn(s, ")");
2717 node = simap_find_len(port_names, s, len);
/* Helper for vlan parsing. */
/* NOTE(review): this chunk is an excerpt; the struct members and closing
 * brace are not visible here. */
struct ovs_key_vlan__ {
2736 set_be16_bf(ovs_be16 *bf, uint8_t bits, uint8_t offset, uint16_t value)
2738 const uint16_t mask = ((1U << bits) - 1) << offset;
2740 if (value >> bits) {
2744 *bf = htons((ntohs(*bf) & ~mask) | (value << offset));
2749 scan_be16_bf(const char *s, ovs_be16 *key, ovs_be16 *mask, uint8_t bits,
2752 uint16_t key_, mask_;
2755 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
2758 if (set_be16_bf(key, bits, offset, key_)) {
2760 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
2763 if (!set_be16_bf(mask, bits, offset, mask_)) {
2767 *mask |= htons(((1U << bits) - 1) << offset);
2777 scan_vid(const char *s, ovs_be16 *key, ovs_be16 *mask)
2779 return scan_be16_bf(s, key, mask, 12, VLAN_VID_SHIFT);
2783 scan_pcp(const char *s, ovs_be16 *key, ovs_be16 *mask)
2785 return scan_be16_bf(s, key, mask, 3, VLAN_PCP_SHIFT);
2789 scan_cfi(const char *s, ovs_be16 *key, ovs_be16 *mask)
2791 return scan_be16_bf(s, key, mask, 1, VLAN_CFI_SHIFT);
2796 set_be32_bf(ovs_be32 *bf, uint8_t bits, uint8_t offset, uint32_t value)
2798 const uint32_t mask = ((1U << bits) - 1) << offset;
2800 if (value >> bits) {
2804 *bf = htonl((ntohl(*bf) & ~mask) | (value << offset));
2809 scan_be32_bf(const char *s, ovs_be32 *key, ovs_be32 *mask, uint8_t bits,
2812 uint32_t key_, mask_;
2815 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
2818 if (set_be32_bf(key, bits, offset, key_)) {
2820 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
2823 if (!set_be32_bf(mask, bits, offset, mask_)) {
2827 *mask |= htonl(((1U << bits) - 1) << offset);
/* Wrappers over scan_be32_bf() for the four MPLS LSE subfields:
 * 20-bit label, 3-bit TC, 8-bit TTL, 1-bit bottom-of-stack. */
2837 scan_mpls_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
2839 return scan_be32_bf(s, key, mask, 20, MPLS_LABEL_SHIFT);
2843 scan_mpls_tc(const char *s, ovs_be32 *key, ovs_be32 *mask)
2845 return scan_be32_bf(s, key, mask, 3, MPLS_TC_SHIFT);
2849 scan_mpls_ttl(const char *s, ovs_be32 *key, ovs_be32 *mask)
2851 return scan_be32_bf(s, key, mask, 8, MPLS_TTL_SHIFT);
2855 scan_mpls_bos(const char *s, ovs_be32 *key, ovs_be32 *mask)
2857 return scan_be32_bf(s, key, mask, 1, MPLS_BOS_SHIFT);
/* Parses a VXLAN Group Based Policy clause of the form
 * "id=...,flags=..." terminated by "))", packing 8-bit flags and the
 * 16-bit policy id into '*key' (and '*mask' when masks are being
 * scanned).  Interior lines (error paths, return value) are elided in
 * this excerpt. */
2861 scan_vxlan_gbp(const char *s, uint32_t *key, uint32_t *mask)
2863 const char *s_base = s;
2864 ovs_be16 id = 0, id_mask = 0;
2865 uint8_t flags = 0, flags_mask = 0;
2867 if (!strncmp(s, "id=", 3)) {
/* Scan the mask half only when the caller wants masks. */
2869 s += scan_be16(s, &id, mask ? &id_mask : NULL);
2875 if (!strncmp(s, "flags=", 6)) {
2877 s += scan_u8(s, &flags, mask ? &flags_mask : NULL);
2880 if (!strncmp(s, "))", 2)) {
/* Pack flags into the high half, id (host order) into the low half. */
2883 *key = (flags << 16) | ntohs(id);
2885 *mask = (flags_mask << 16) | ntohs(id_mask);
/* Scratch buffer for parsed Geneve option TLVs; 63 entries bounds the
 * total option data (len field is elided in this excerpt). */
2894 struct geneve_scan {
2895 struct geneve_opt d[63];
/* Parses a sequence of "{class=...,type=...,len=...,<data>}" Geneve
 * option clauses from 's' into 'key' (and 'mask' when non-null),
 * tracking remaining buffer space in 'len_remain'.  Many interior lines
 * (error exits, closing-brace handling, the final return) are elided. */
2900 scan_geneve(const char *s, struct geneve_scan *key, struct geneve_scan *mask)
2902 const char *s_base = s;
2903 struct geneve_opt *opt = key->d;
2904 struct geneve_opt *opt_mask = mask ? mask->d : NULL;
2905 int len_remain = sizeof key->d;
2907 while (s[0] == '{' && len_remain >= sizeof *opt) {
2911 len_remain -= sizeof *opt;
2913 if (!strncmp(s, "class=", 6)) {
2915 s += scan_be16(s, &opt->opt_class,
2916 mask ? &opt_mask->opt_class : NULL);
/* Field omitted: zero its mask (wildcard). */
2918 memset(&opt_mask->opt_class, 0, sizeof opt_mask->opt_class);
2924 if (!strncmp(s, "type=", 5)) {
2926 s += scan_u8(s, &opt->type, mask ? &opt_mask->type : NULL);
2928 memset(&opt_mask->type, 0, sizeof opt_mask->type);
2934 if (!strncmp(s, "len=", 4)) {
2935 uint8_t opt_len, opt_len_mask;
2937 s += scan_u8(s, &opt_len, mask ? &opt_len_mask : NULL);
/* Option data must be a multiple of 4 bytes, at most 124, and fit. */
2939 if (opt_len > 124 || opt_len % 4 || opt_len > len_remain) {
/* Geneve 'length' field counts 4-byte words. */
2942 opt->length = opt_len / 4;
2944 opt_mask->length = opt_len_mask;
2948 memset(&opt_mask->type, 0, sizeof opt_mask->type);
/* Raw option payload follows the TLV header in memory. */
2954 if (parse_int_string(s, (uint8_t *)(opt + 1), data_len, (char **)&s)) {
2961 if (parse_int_string(s, (uint8_t *)(opt_mask + 1),
2962 data_len, (char **)&s)) {
/* Advance past header + payload (both in geneve_opt units). */
2973 opt += 1 + data_len / 4;
2975 opt_mask += 1 + data_len / 4;
2977 len_remain -= data_len;
2982 int len = sizeof key->d - len_remain;
/* Emits one flag-only Netlink attribute into 'a' for each FLOW_TNL_F_*
 * bit set in the uint16_t pointed to by 'data_'.  Used as a SCAN_PUT
 * emit callback, hence the void-pointer signature. */
2996 tun_flags_to_attr(struct ofpbuf *a, const void *data_)
2998 const uint16_t *flags = data_;
3000 if (*flags & FLOW_TNL_F_DONT_FRAGMENT) {
3001 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
3003 if (*flags & FLOW_TNL_F_CSUM) {
3004 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
3006 if (*flags & FLOW_TNL_F_OAM) {
3007 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
/* Wraps the packed GBP word produced by scan_vxlan_gbp() in a nested
 * OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS attribute containing one
 * OVS_VXLAN_EXT_GBP u32. */
3012 vxlan_gbp_to_attr(struct ofpbuf *a, const void *data_)
3014 const uint32_t *gbp = data_;
3017 size_t vxlan_opts_ofs;
3019 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
3020 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP, *gbp);
3021 nl_msg_end_nested(a, vxlan_opts_ofs);
/* Emits the scanned Geneve option TLVs as a single
 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS blob (length argument elided here). */
3026 geneve_to_attr(struct ofpbuf *a, const void *data_)
3028 const struct geneve_scan *geneve = data_;
3030 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, geneve->d,
/* Macro toolkit used by parse_odp_key_mask_attr() below.  The general
 * shape (from the visible lines; interior lines are elided) is:
 * SCAN_BEGIN*/ /*...SCAN_FIELD...SCAN_END bracket the parse of one
 * "name(...)" clause into local 'skey'/'smask' structs, which SCAN_PUT
 * then serializes into the 'key'/'mask' ofpbufs, either verbatim or via
 * a custom emit callback (FUNC). */
3034 #define SCAN_PUT_ATTR(BUF, ATTR, DATA, FUNC) \
3036 unsigned long call_fn = (unsigned long)FUNC; \
3038 typedef void (*fn)(struct ofpbuf *, const void *); \
3040 func(BUF, &(DATA)); \
3042 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)); \
3046 #define SCAN_IF(NAME) \
3047 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
3048 const char *start = s; \
3053 /* Usually no special initialization is needed. */
3054 #define SCAN_BEGIN(NAME, TYPE) \
3057 memset(&skey, 0, sizeof skey); \
3058 memset(&smask, 0, sizeof smask); \
3062 /* Init as fully-masked as mask will not be scanned. */
3063 #define SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) \
3066 memset(&skey, 0, sizeof skey); \
3067 memset(&smask, 0xff, sizeof smask); \
3071 /* VLAN needs special initialization. */
3072 #define SCAN_BEGIN_INIT(NAME, TYPE, KEY_INIT, MASK_INIT) \
3074 TYPE skey = KEY_INIT; \
3075 TYPE smask = MASK_INIT; \
3079 /* Scan unnamed entry as 'TYPE' */
3080 #define SCAN_TYPE(TYPE, KEY, MASK) \
3081 len = scan_##TYPE(s, KEY, MASK); \
3087 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
3088 #define SCAN_FIELD(NAME, TYPE, FIELD) \
3089 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
3090 s += strlen(NAME); \
3091 SCAN_TYPE(TYPE, &skey.FIELD, mask ? &smask.FIELD : NULL); \
/* SCAN_FINISH closes a multi-field clause: fields are comma-separated
 * and the clause must end with ')'. */
3095 #define SCAN_FINISH() \
3096 } while (*s++ == ',' && len != 0); \
3097 if (s[-1] != ')') { \
3101 #define SCAN_FINISH_SINGLE() \
3103 if (*s++ != ')') { \
3107 /* Beginning of nested attribute. */
3108 #define SCAN_BEGIN_NESTED(NAME, ATTR) \
3110 size_t key_offset, mask_offset; \
3111 key_offset = nl_msg_start_nested(key, ATTR); \
3113 mask_offset = nl_msg_start_nested(mask, ATTR); \
3118 #define SCAN_END_NESTED() \
3120 nl_msg_end_nested(key, key_offset); \
3122 nl_msg_end_nested(mask, mask_offset); \
/* Nested fields start fully masked (0xff) before scanning. */
3127 #define SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, FUNC) \
3128 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
3130 memset(&skey, 0, sizeof skey); \
3131 memset(&smask, 0xff, sizeof smask); \
3132 s += strlen(NAME); \
3133 SCAN_TYPE(SCAN_AS, &skey, &smask); \
3134 SCAN_PUT(ATTR, FUNC); \
3138 #define SCAN_FIELD_NESTED(NAME, TYPE, SCAN_AS, ATTR) \
3139 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, NULL)
3141 #define SCAN_FIELD_NESTED_FUNC(NAME, TYPE, SCAN_AS, FUNC) \
3142 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, 0, FUNC)
/* Skip emitting entirely-wildcarded fields when a mask is in play. */
3144 #define SCAN_PUT(ATTR, FUNC) \
3145 if (!mask || !is_all_zeros(&smask, sizeof smask)) { \
3146 SCAN_PUT_ATTR(key, ATTR, skey, FUNC); \
3148 SCAN_PUT_ATTR(mask, ATTR, smask, FUNC); \
3152 #define SCAN_END(ATTR) \
3154 SCAN_PUT(ATTR, NULL); \
3158 #define SCAN_END_SINGLE(ATTR) \
3159 SCAN_FINISH_SINGLE(); \
3160 SCAN_PUT(ATTR, NULL); \
3164 #define SCAN_SINGLE(NAME, TYPE, SCAN_AS, ATTR) \
3165 SCAN_BEGIN(NAME, TYPE) { \
3166 SCAN_TYPE(SCAN_AS, &skey, &smask); \
3167 } SCAN_END_SINGLE(ATTR)
3169 #define SCAN_SINGLE_FULLY_MASKED(NAME, TYPE, SCAN_AS, ATTR) \
3170 SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) { \
3171 SCAN_TYPE(SCAN_AS, &skey, NULL); \
3172 } SCAN_END_SINGLE(ATTR)
3174 /* scan_port needs one extra argument. */
3175 #define SCAN_SINGLE_PORT(NAME, TYPE, ATTR) \
3176 SCAN_BEGIN(NAME, TYPE) { \
3177 len = scan_port(s, &skey, &smask, port_names); \
3182 } SCAN_END_SINGLE(ATTR)
/* Parses one textual flow-key attribute ("name(...)") from 's' and
 * appends the corresponding OVS_KEY_ATTR_* Netlink attribute(s) to
 * 'key' (and 'mask' when non-null), using the SCAN_* macro family
 * above.  Tries each known attribute name in turn; also recognizes a
 * UFID prefix and an open-coded nested "encap(" clause.  Interior lines
 * (returns, error paths) are elided in this excerpt. */
3185 parse_odp_key_mask_attr(const char *s, const struct simap *port_names,
3186 struct ofpbuf *key, struct ofpbuf *mask)
/* A leading flow UFID is accepted and consumed first. */
3192 len = odp_ufid_from_string(s, &ufid);
3197 SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
3198 SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
/* recirc_id is always exact-match, hence FULLY_MASKED. */
3199 SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
3200 OVS_KEY_ATTR_RECIRC_ID);
3201 SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH);
/* Tunnel metadata is a nested attribute with per-field sub-attributes. */
3203 SCAN_BEGIN_NESTED("tunnel(", OVS_KEY_ATTR_TUNNEL) {
3204 SCAN_FIELD_NESTED("tun_id=", ovs_be64, be64, OVS_TUNNEL_KEY_ATTR_ID);
3205 SCAN_FIELD_NESTED("src=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_SRC);
3206 SCAN_FIELD_NESTED("dst=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_DST);
3207 SCAN_FIELD_NESTED("tos=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TOS);
3208 SCAN_FIELD_NESTED("ttl=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TTL);
3209 SCAN_FIELD_NESTED("tp_src=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_SRC);
3210 SCAN_FIELD_NESTED("tp_dst=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_DST);
3211 SCAN_FIELD_NESTED_FUNC("vxlan(gbp(", uint32_t, vxlan_gbp, vxlan_gbp_to_attr);
3212 SCAN_FIELD_NESTED_FUNC("geneve(", struct geneve_scan, geneve,
3214 SCAN_FIELD_NESTED_FUNC("flags(", uint16_t, tun_flags, tun_flags_to_attr);
3215 } SCAN_END_NESTED();
3217 SCAN_SINGLE_PORT("in_port(", uint32_t, OVS_KEY_ATTR_IN_PORT);
3219 SCAN_BEGIN("eth(", struct ovs_key_ethernet) {
3220 SCAN_FIELD("src=", eth, eth_src);
3221 SCAN_FIELD("dst=", eth, eth_dst);
3222 } SCAN_END(OVS_KEY_ATTR_ETHERNET);
/* VLAN key and mask both default to CFI set (see SCAN_BEGIN_INIT). */
3224 SCAN_BEGIN_INIT("vlan(", struct ovs_key_vlan__,
3225 { htons(VLAN_CFI) }, { htons(VLAN_CFI) }) {
3226 SCAN_FIELD("vid=", vid, tci);
3227 SCAN_FIELD("pcp=", pcp, tci);
3228 SCAN_FIELD("cfi=", cfi, tci);
3229 } SCAN_END(OVS_KEY_ATTR_VLAN);
3231 SCAN_SINGLE("eth_type(", ovs_be16, be16, OVS_KEY_ATTR_ETHERTYPE);
3233 SCAN_BEGIN("mpls(", struct ovs_key_mpls) {
3234 SCAN_FIELD("label=", mpls_label, mpls_lse);
3235 SCAN_FIELD("tc=", mpls_tc, mpls_lse);
3236 SCAN_FIELD("ttl=", mpls_ttl, mpls_lse);
3237 SCAN_FIELD("bos=", mpls_bos, mpls_lse);
3238 } SCAN_END(OVS_KEY_ATTR_MPLS);
3240 SCAN_BEGIN("ipv4(", struct ovs_key_ipv4) {
3241 SCAN_FIELD("src=", ipv4, ipv4_src);
3242 SCAN_FIELD("dst=", ipv4, ipv4_dst);
3243 SCAN_FIELD("proto=", u8, ipv4_proto);
3244 SCAN_FIELD("tos=", u8, ipv4_tos);
3245 SCAN_FIELD("ttl=", u8, ipv4_ttl);
3246 SCAN_FIELD("frag=", frag, ipv4_frag);
3247 } SCAN_END(OVS_KEY_ATTR_IPV4);
3249 SCAN_BEGIN("ipv6(", struct ovs_key_ipv6) {
3250 SCAN_FIELD("src=", ipv6, ipv6_src);
3251 SCAN_FIELD("dst=", ipv6, ipv6_dst);
3252 SCAN_FIELD("label=", ipv6_label, ipv6_label);
3253 SCAN_FIELD("proto=", u8, ipv6_proto);
3254 SCAN_FIELD("tclass=", u8, ipv6_tclass);
3255 SCAN_FIELD("hlimit=", u8, ipv6_hlimit);
3256 SCAN_FIELD("frag=", frag, ipv6_frag);
3257 } SCAN_END(OVS_KEY_ATTR_IPV6);
3259 SCAN_BEGIN("tcp(", struct ovs_key_tcp) {
3260 SCAN_FIELD("src=", be16, tcp_src);
3261 SCAN_FIELD("dst=", be16, tcp_dst);
3262 } SCAN_END(OVS_KEY_ATTR_TCP);
3264 SCAN_SINGLE("tcp_flags(", ovs_be16, tcp_flags, OVS_KEY_ATTR_TCP_FLAGS);
3266 SCAN_BEGIN("udp(", struct ovs_key_udp) {
3267 SCAN_FIELD("src=", be16, udp_src);
3268 SCAN_FIELD("dst=", be16, udp_dst);
3269 } SCAN_END(OVS_KEY_ATTR_UDP);
3271 SCAN_BEGIN("sctp(", struct ovs_key_sctp) {
3272 SCAN_FIELD("src=", be16, sctp_src);
3273 SCAN_FIELD("dst=", be16, sctp_dst);
3274 } SCAN_END(OVS_KEY_ATTR_SCTP);
3276 SCAN_BEGIN("icmp(", struct ovs_key_icmp) {
3277 SCAN_FIELD("type=", u8, icmp_type);
3278 SCAN_FIELD("code=", u8, icmp_code);
3279 } SCAN_END(OVS_KEY_ATTR_ICMP);
3281 SCAN_BEGIN("icmpv6(", struct ovs_key_icmpv6) {
3282 SCAN_FIELD("type=", u8, icmpv6_type);
3283 SCAN_FIELD("code=", u8, icmpv6_code);
3284 } SCAN_END(OVS_KEY_ATTR_ICMPV6);
3286 SCAN_BEGIN("arp(", struct ovs_key_arp) {
3287 SCAN_FIELD("sip=", ipv4, arp_sip);
3288 SCAN_FIELD("tip=", ipv4, arp_tip);
3289 SCAN_FIELD("op=", be16, arp_op);
3290 SCAN_FIELD("sha=", eth, arp_sha);
3291 SCAN_FIELD("tha=", eth, arp_tha);
3292 } SCAN_END(OVS_KEY_ATTR_ARP);
3294 SCAN_BEGIN("nd(", struct ovs_key_nd) {
3295 SCAN_FIELD("target=", ipv6, nd_target);
3296 SCAN_FIELD("sll=", eth, nd_sll);
3297 SCAN_FIELD("tll=", eth, nd_tll);
3298 } SCAN_END(OVS_KEY_ATTR_ND);
3300 /* Encap open-coded. */
/* "encap(...)" recurses into this same parser for each inner attribute,
 * wrapping the results in nested OVS_KEY_ATTR_ENCAP attributes. */
3301 if (!strncmp(s, "encap(", 6)) {
3302 const char *start = s;
3303 size_t encap, encap_mask = 0;
3305 encap = nl_msg_start_nested(key, OVS_KEY_ATTR_ENCAP);
3307 encap_mask = nl_msg_start_nested(mask, OVS_KEY_ATTR_ENCAP);
3314 s += strspn(s, delimiters);
3317 } else if (*s == ')') {
3321 retval = parse_odp_key_mask_attr(s, port_names, key, mask);
3329 nl_msg_end_nested(key, encap);
3331 nl_msg_end_nested(mask, encap_mask);
3340 /* Parses the string representation of a datapath flow key, in the
3341 * format output by odp_flow_key_format(). Returns 0 if successful,
3342 * otherwise a positive errno value. On success, the flow key is
3343 * appended to 'key' as a series of Netlink attributes. On failure, no
3344 * data is appended to 'key'. Either way, 'key''s data might be
3347 * If 'port_names' is nonnull, it points to an simap that maps from a port name
3348 * to a port number. (Port names may be used instead of port numbers in
3351 * On success, the attributes appended to 'key' are individually syntactically
3352 * valid, but they may not be valid as a sequence. 'key' might, for example,
3353 * have duplicated keys. odp_flow_key_to_flow() will detect those errors. */
3355 odp_flow_from_string(const char *s, const struct simap *port_names,
3356 struct ofpbuf *key, struct ofpbuf *mask)
/* Remember the starting size so a failed parse can be rolled back. */
3358 const size_t old_size = key->size;
3362 s += strspn(s, delimiters);
3367 retval = parse_odp_key_mask_attr(s, port_names, key, mask);
/* On error, truncate 'key' back to its original contents. */
3369 key->size = old_size;
/* Converts a struct-flow nw_frag value (bitmask) to the datapath's
 * enum ovs_frag_type encoding; for masks ('is_mask'), maps any-frag
 * coverage to an all-ones or all-zeros byte as explained below. */
3379 ovs_to_odp_frag(uint8_t nw_frag, bool is_mask)
3382 /* Netlink interface 'enum ovs_frag_type' is an 8-bit enumeration type,
3383 * not a set of flags or bitfields. Hence, if the struct flow nw_frag
3384 * mask, which is a set of bits, has the FLOW_NW_FRAG_ANY as zero, we
3385 * must use a zero mask for the netlink frag field, and all ones mask
3387 return (nw_frag & FLOW_NW_FRAG_ANY) ? UINT8_MAX : 0;
3389 return !(nw_frag & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_NONE
3390 : nw_frag & FLOW_NW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
3391 : OVS_FRAG_TYPE_FIRST;
/* Forward declarations for the get_*/put_* helpers that translate
 * between struct flow fields and the fixed-layout ovs_key_* structs. */
3394 static void get_ethernet_key(const struct flow *, struct ovs_key_ethernet *);
3395 static void put_ethernet_key(const struct ovs_key_ethernet *, struct flow *);
3396 static void get_ipv4_key(const struct flow *, struct ovs_key_ipv4 *,
3398 static void put_ipv4_key(const struct ovs_key_ipv4 *, struct flow *,
3400 static void get_ipv6_key(const struct flow *, struct ovs_key_ipv6 *,
3402 static void put_ipv6_key(const struct ovs_key_ipv6 *, struct flow *,
3404 static void get_arp_key(const struct flow *, struct ovs_key_arp *);
3405 static void put_arp_key(const struct ovs_key_arp *, struct flow *);
3406 static void get_nd_key(const struct flow *, struct ovs_key_nd *);
3407 static void put_nd_key(const struct ovs_key_nd *, struct flow *);
3409 /* These share the same layout. */
3411 struct ovs_key_tcp tcp;
3412 struct ovs_key_udp udp;
3413 struct ovs_key_sctp sctp;
3416 static void get_tp_key(const struct flow *, union ovs_key_tp *);
3417 static void put_tp_key(const union ovs_key_tp *, struct flow *);
/* Common worker for odp_flow_key_from_flow() and
 * odp_flow_key_from_mask(): serializes either the flow ('export_mask'
 * false) or the mask ('export_mask' true) into OVS_KEY_ATTR_* Netlink
 * attributes in 'buf'.  'flow' always drives the *structure* of the
 * output (which attributes appear); 'data' below selects which values
 * are written.  Interior lines are elided in this excerpt. */
3420 odp_flow_key_from_flow__(struct ofpbuf *buf, const struct flow *flow,
3421 const struct flow *mask, odp_port_t odp_in_port,
3422 size_t max_mpls_depth, bool recirc, bool export_mask)
3424 struct ovs_key_ethernet *eth_key;
3426 const struct flow *data = export_mask ? mask : flow;
3428 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
/* Tunnel attribute is emitted when the flow has one, or always for masks. */
3430 if (flow->tunnel.ip_dst || export_mask) {
3431 tun_key_to_attr(buf, &data->tunnel);
3434 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, data->pkt_mark);
3437 nl_msg_put_u32(buf, OVS_KEY_ATTR_RECIRC_ID, data->recirc_id);
3438 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, data->dp_hash);
3441 /* Add an ingress port attribute if this is a mask or 'odp_in_port'
3442 * is not the magical value "ODPP_NONE". */
3443 if (export_mask || odp_in_port != ODPP_NONE) {
3444 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, odp_in_port);
3447 eth_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ETHERNET,
3449 get_ethernet_key(data, eth_key);
/* 802.1Q: emit VLAN ethertype + TCI + a nested ENCAP for the inner key. */
3451 if (flow->vlan_tci != htons(0) || flow->dl_type == htons(ETH_TYPE_VLAN)) {
3453 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
3455 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_TYPE_VLAN));
3457 nl_msg_put_be16(buf, OVS_KEY_ATTR_VLAN, data->vlan_tci);
3458 encap = nl_msg_start_nested(buf, OVS_KEY_ATTR_ENCAP);
3459 if (flow->vlan_tci == htons(0)) {
3466 if (ntohs(flow->dl_type) < ETH_TYPE_MIN) {
3467 /* For backwards compatibility with kernels that don't support
3468 * wildcarding, the following convention is used to encode the
3469 * OVS_KEY_ATTR_ETHERTYPE for key and mask:
3472 * -------- -------- -------
3473 * >0x5ff 0xffff Specified Ethernet II Ethertype.
3474 * >0x5ff 0 Any Ethernet II or non-Ethernet II frame.
3475 * <none> 0xffff Any non-Ethernet II frame (except valid
3476 * 802.3 SNAP packet with valid eth_type).
3479 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
3484 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, data->dl_type);
/* L3: choose the network-layer attribute by the flow's ethertype. */
3486 if (flow->dl_type == htons(ETH_TYPE_IP)) {
3487 struct ovs_key_ipv4 *ipv4_key;
3489 ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
3491 get_ipv4_key(data, ipv4_key, export_mask);
3492 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
3493 struct ovs_key_ipv6 *ipv6_key;
3495 ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
3497 get_ipv6_key(data, ipv6_key, export_mask);
3498 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
3499 flow->dl_type == htons(ETH_TYPE_RARP)) {
3500 struct ovs_key_arp *arp_key;
3502 arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
3504 get_arp_key(data, arp_key);
3505 } else if (eth_type_mpls(flow->dl_type)) {
3506 struct ovs_key_mpls *mpls_key;
/* Serialize at most 'max_mpls_depth' MPLS labels. */
3509 n = flow_count_mpls_labels(flow, NULL);
3510 n = MIN(n, max_mpls_depth);
3511 mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
3512 n * sizeof *mpls_key);
3513 for (i = 0; i < n; i++) {
3514 mpls_key[i].mpls_lse = data->mpls_lse[i];
/* L4: only for unfragmented-or-first-fragment IP packets. */
3518 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3519 if (flow->nw_proto == IPPROTO_TCP) {
3520 union ovs_key_tp *tcp_key;
3522 tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
3524 get_tp_key(data, tcp_key);
3525 if (data->tcp_flags) {
3526 nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
3528 } else if (flow->nw_proto == IPPROTO_UDP) {
3529 union ovs_key_tp *udp_key;
3531 udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
3533 get_tp_key(data, udp_key);
3534 } else if (flow->nw_proto == IPPROTO_SCTP) {
3535 union ovs_key_tp *sctp_key;
3537 sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
3539 get_tp_key(data, sctp_key);
3540 } else if (flow->dl_type == htons(ETH_TYPE_IP)
3541 && flow->nw_proto == IPPROTO_ICMP) {
3542 struct ovs_key_icmp *icmp_key;
3544 icmp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMP,
/* ICMP type/code travel in tp_src/tp_dst inside struct flow. */
3546 icmp_key->icmp_type = ntohs(data->tp_src);
3547 icmp_key->icmp_code = ntohs(data->tp_dst);
3548 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)
3549 && flow->nw_proto == IPPROTO_ICMPV6) {
3550 struct ovs_key_icmpv6 *icmpv6_key;
3552 icmpv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMPV6,
3553 sizeof *icmpv6_key);
3554 icmpv6_key->icmpv6_type = ntohs(data->tp_src);
3555 icmpv6_key->icmpv6_code = ntohs(data->tp_dst);
/* Neighbor Discovery: emit ND attribute only for NS/NA with zero code,
 * and for masks only when type/code are fully exact-matched. */
3557 if (flow->tp_dst == htons(0)
3558 && (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)
3559 || flow->tp_src == htons(ND_NEIGHBOR_ADVERT))
3560 && (!export_mask || (data->tp_src == htons(0xffff)
3561 && data->tp_dst == htons(0xffff)))) {
3563 struct ovs_key_nd *nd_key;
3565 nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
3567 memcpy(nd_key->nd_target, &data->nd_target,
3568 sizeof nd_key->nd_target);
3569 memcpy(nd_key->nd_sll, data->arp_sha, ETH_ADDR_LEN);
3570 memcpy(nd_key->nd_tll, data->arp_tha, ETH_ADDR_LEN);
3577 nl_msg_end_nested(buf, encap);
3581 /* Appends a representation of 'flow' as OVS_KEY_ATTR_* attributes to 'buf'.
3582 * 'flow->in_port' is ignored (since it is likely to be an OpenFlow port
3583 * number rather than a datapath port number). Instead, if 'odp_in_port'
3584 * is anything other than ODPP_NONE, it is included in 'buf' as the input
3587 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
3588 * capable of being expanded to allow for that much space.
3590 * 'recirc' indicates support for recirculation fields. If this is true, then
3591 * these fields will always be serialised. */
3593 odp_flow_key_from_flow(struct ofpbuf *buf, const struct flow *flow,
3594 const struct flow *mask, odp_port_t odp_in_port,
/* Unlimited MPLS depth; export_mask=false selects flow values. */
3597 odp_flow_key_from_flow__(buf, flow, mask, odp_in_port, SIZE_MAX, recirc,
3601 /* Appends a representation of 'mask' as OVS_KEY_ATTR_* attributes to
3602 * 'buf'. 'flow' is used as a template to determine how to interpret
3603 * 'mask'. For example, the 'dl_type' of 'mask' describes the mask, but
3604 * it doesn't indicate whether the other fields should be interpreted as
3605 * ARP, IPv4, IPv6, etc.
3607 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
3608 * capable of being expanded to allow for that much space.
3610 * 'recirc' indicates support for recirculation fields. If this is true, then
3611 * these fields will always be serialised. */
3613 odp_flow_key_from_mask(struct ofpbuf *buf, const struct flow *mask,
3614 const struct flow *flow, uint32_t odp_in_port_mask,
3615 size_t max_mpls_depth, bool recirc)
/* export_mask=true: serialize mask values, flow drives the structure. */
3617 odp_flow_key_from_flow__(buf, flow, mask, u32_to_odp(odp_in_port_mask),
3618 max_mpls_depth, recirc, true);
3621 /* Generate ODP flow key from the given packet metadata */
3623 odp_key_from_pkt_metadata(struct ofpbuf *buf, const struct pkt_metadata *md)
3625 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
/* Tunnel attribute only when the metadata actually carries a tunnel. */
3627 if (md->tunnel.ip_dst) {
3628 tun_key_to_attr(buf, &md->tunnel);
3631 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, md->pkt_mark);
3633 /* Add an ingress port attribute if 'odp_in_port' is not the magical
3634 * value "ODPP_NONE". */
3635 if (md->in_port.odp_port != ODPP_NONE) {
3636 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, md->in_port.odp_port);
3640 /* Generate packet metadata from the given ODP flow key. */
3642 odp_key_to_pkt_metadata(const struct nlattr *key, size_t key_len,
3643 struct pkt_metadata *md)
3645 const struct nlattr *nla;
/* Bitmap of the metadata attributes we still want; scanning stops
 * early once all of them have been seen. */
3647 uint32_t wanted_attrs = 1u << OVS_KEY_ATTR_PRIORITY |
3648 1u << OVS_KEY_ATTR_SKB_MARK | 1u << OVS_KEY_ATTR_TUNNEL |
3649 1u << OVS_KEY_ATTR_IN_PORT;
3651 *md = PKT_METADATA_INITIALIZER(ODPP_NONE);
3653 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
3654 uint16_t type = nl_attr_type(nla);
3655 size_t len = nl_attr_get_size(nla);
3656 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
3657 OVS_KEY_ATTR_MAX, type);
/* Skip attributes with the wrong length (elided branch body). */
3659 if (len != expected_len && expected_len >= 0) {
3664 case OVS_KEY_ATTR_RECIRC_ID:
3665 md->recirc_id = nl_attr_get_u32(nla);
3666 wanted_attrs &= ~(1u << OVS_KEY_ATTR_RECIRC_ID);
3668 case OVS_KEY_ATTR_DP_HASH:
3669 md->dp_hash = nl_attr_get_u32(nla);
3670 wanted_attrs &= ~(1u << OVS_KEY_ATTR_DP_HASH);
3672 case OVS_KEY_ATTR_PRIORITY:
3673 md->skb_priority = nl_attr_get_u32(nla);
3674 wanted_attrs &= ~(1u << OVS_KEY_ATTR_PRIORITY);
3676 case OVS_KEY_ATTR_SKB_MARK:
3677 md->pkt_mark = nl_attr_get_u32(nla);
3678 wanted_attrs &= ~(1u << OVS_KEY_ATTR_SKB_MARK);
3680 case OVS_KEY_ATTR_TUNNEL: {
3681 enum odp_key_fitness res;
3683 res = odp_tun_key_from_attr(nla, &md->tunnel);
/* A malformed tunnel attribute zeroes the tunnel rather than failing. */
3684 if (res == ODP_FIT_ERROR) {
3685 memset(&md->tunnel, 0, sizeof md->tunnel);
3686 } else if (res == ODP_FIT_PERFECT) {
3687 wanted_attrs &= ~(1u << OVS_KEY_ATTR_TUNNEL);
3691 case OVS_KEY_ATTR_IN_PORT:
3692 md->in_port.odp_port = nl_attr_get_odp_port(nla);
3693 wanted_attrs &= ~(1u << OVS_KEY_ATTR_IN_PORT);
3699 if (!wanted_attrs) {
3700 return; /* Have everything. */
/* Hashes 'key_len' bytes of Netlink-encoded flow key at 'key'.  Relies
 * on Netlink attribute alignment being a multiple of 4 so the buffer
 * can be hashed as an array of uint32_t. */
3706 odp_flow_key_hash(const struct nlattr *key, size_t key_len)
3708 BUILD_ASSERT_DECL(!(NLA_ALIGNTO % sizeof(uint32_t)));
3709 return hash_words(ALIGNED_CAST(const uint32_t *, key),
3710 key_len / sizeof(uint32_t), 0);
/* Rate-limited debug logging helper: logs 'title' followed by the names
 * of the attribute types set in the 'attrs' bitmap (plus any
 * out-of-range attribute number) and the formatted flow key. */
3714 log_odp_key_attributes(struct vlog_rate_limit *rl, const char *title,
3715 uint64_t attrs, int out_of_range_attr,
3716 const struct nlattr *key, size_t key_len)
/* Bail out early when the rate limiter suppresses this message. */
3721 if (VLOG_DROP_DBG(rl)) {
3726 for (i = 0; i < 64; i++) {
3727 if (attrs & (UINT64_C(1) << i)) {
3728 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3730 ds_put_format(&s, " %s",
3731 ovs_key_attr_to_string(i, namebuf, sizeof namebuf));
3734 if (out_of_range_attr) {
3735 ds_put_format(&s, " %d (and possibly others)", out_of_range_attr);
3738 ds_put_cstr(&s, ": ");
3739 odp_flow_key_format(key, key_len, &s);
3741 VLOG_DBG("%s:%s", title, ds_cstr(&s));
/* Inverse of ovs_to_odp_frag(): converts a datapath enum ovs_frag_type
 * byte back to struct-flow nw_frag bits.  Returns 0xff to signal an
 * invalid input value. */
3746 odp_to_ovs_frag(uint8_t odp_frag, bool is_mask)
3748 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Mask semantics: any nonzero frag mask means "frag bits matter". */
3751 return odp_frag ? FLOW_NW_FRAG_MASK : 0;
3754 if (odp_frag > OVS_FRAG_TYPE_LATER) {
3755 VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
3756 return 0xff; /* Error. */
3759 return (odp_frag == OVS_FRAG_TYPE_NONE) ? 0
3760 : (odp_frag == OVS_FRAG_TYPE_FIRST) ? FLOW_NW_FRAG_ANY
3761 : FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
/* Walks the Netlink attributes in 'key', indexing each in-range
 * attribute into 'attrs[]', setting the corresponding bit in
 * '*present_attrsp', and recording any type beyond OVS_KEY_ATTR_MAX in
 * '*out_of_range_attrp'.  Rejects wrong-length and duplicate
 * attributes (error returns elided in this excerpt). */
3765 parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
3766 const struct nlattr *attrs[], uint64_t *present_attrsp,
3767 int *out_of_range_attrp)
3769 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3770 const struct nlattr *nla;
3771 uint64_t present_attrs;
/* The presence bitmap must be wide enough for every attribute type. */
3774 BUILD_ASSERT(OVS_KEY_ATTR_MAX < CHAR_BIT * sizeof present_attrs);
3776 *out_of_range_attrp = 0;
3777 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
3778 uint16_t type = nl_attr_type(nla);
3779 size_t len = nl_attr_get_size(nla);
3780 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
3781 OVS_KEY_ATTR_MAX, type);
3783 if (len != expected_len && expected_len >= 0) {
3784 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3786 VLOG_ERR_RL(&rl, "attribute %s has length %"PRIuSIZE" but should have "
3787 "length %d", ovs_key_attr_to_string(type, namebuf,
3793 if (type > OVS_KEY_ATTR_MAX) {
3794 *out_of_range_attrp = type;
3796 if (present_attrs & (UINT64_C(1) << type)) {
3797 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3799 VLOG_ERR_RL(&rl, "duplicate %s attribute in flow key",
3800 ovs_key_attr_to_string(type,
3801 namebuf, sizeof namebuf));
3805 present_attrs |= UINT64_C(1) << type;
/* Leftover bytes after the last attribute indicate a malformed key. */
3810 VLOG_ERR_RL(&rl, "trailing garbage in flow key");
3814 *present_attrsp = present_attrs;
/* Compares the attributes actually present against those expected and
 * classifies the fit: TOO_LITTLE when expected attributes are missing,
 * TOO_MUCH when extra or out-of-range attributes appeared, PERFECT
 * otherwise.  Logs the differing attribute sets for debugging. */
3818 static enum odp_key_fitness
3819 check_expectations(uint64_t present_attrs, int out_of_range_attr,
3820 uint64_t expected_attrs,
3821 const struct nlattr *key, size_t key_len)
3823 uint64_t missing_attrs;
3824 uint64_t extra_attrs;
3826 missing_attrs = expected_attrs & ~present_attrs;
3827 if (missing_attrs) {
3828 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3829 log_odp_key_attributes(&rl, "expected but not present",
3830 missing_attrs, 0, key, key_len);
3831 return ODP_FIT_TOO_LITTLE;
3834 extra_attrs = present_attrs & ~expected_attrs;
3835 if (extra_attrs || out_of_range_attr) {
3836 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3837 log_odp_key_attributes(&rl, "present but not expected",
3838 extra_attrs, out_of_range_attr, key, key_len);
3839 return ODP_FIT_TOO_MUCH;
3842 return ODP_FIT_PERFECT;
/* Extracts OVS_KEY_ATTR_ETHERTYPE into 'flow->dl_type', validating it
 * per the key/mask convention documented in odp_flow_key_from_flow__():
 * keys must carry a real Ethernet II ethertype; non-Ethernet-II frames
 * are represented by FLOW_DL_TYPE_NONE with an exact-match mask.
 * 'flow != src_flow' signals that a mask is being parsed.  Return
 * statements are elided in this excerpt. */
3846 parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
3847 uint64_t present_attrs, uint64_t *expected_attrs,
3848 struct flow *flow, const struct flow *src_flow)
3850 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3851 bool is_mask = flow != src_flow;
3853 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
3854 flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
/* A key (not mask) must carry a valid Ethernet II ethertype. */
3855 if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
3856 VLOG_ERR_RL(&rl, "invalid Ethertype %"PRIu16" in flow key",
3857 ntohs(flow->dl_type));
/* For a non-Ethernet-II key, only an exact-match mask is meaningful. */
3860 if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
3861 flow->dl_type != htons(0xffff)) {
3864 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
/* No ethertype attribute: the frame has no Ethernet II type. */
3867 flow->dl_type = htons(FLOW_DL_TYPE_NONE);
3868 } else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
3869 /* See comments in odp_flow_key_from_flow__(). */
3870 VLOG_ERR_RL(&rl, "mask expected for non-Ethernet II frame");
/* Parses the L2.5-and-above portion of a flow key (MPLS, IPv4/IPv6/ARP,
 * then TCP/UDP/SCTP/ICMP/ICMPv6/ND) out of 'attrs' into 'flow',
 * accumulating 'expected_attrs' as protocol layers imply further
 * attributes, and finishing with check_expectations().  'src_flow'
 * drives interpretation; it differs from 'flow' when parsing a mask.
 * Interior lines are elided in this excerpt. */
3877 static enum odp_key_fitness
3878 parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
3879 uint64_t present_attrs, int out_of_range_attr,
3880 uint64_t expected_attrs, struct flow *flow,
3881 const struct nlattr *key, size_t key_len,
3882 const struct flow *src_flow)
3884 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3885 bool is_mask = src_flow != flow;
/* When parsing a mask, a nonzero L3 struct demands an exact-match
 * dl_type; 'check_start'/'check_len' record what to verify. */
3886 const void *check_start = NULL;
3887 size_t check_len = 0;
3888 enum ovs_key_attr expected_bit = 0xff;
3890 if (eth_type_mpls(src_flow->dl_type)) {
3891 if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
3892 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
3894 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
3895 size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
3896 const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
3897 int n = size / sizeof(ovs_be32);
/* MPLS attribute must hold a whole, nonzero number of 32-bit LSEs. */
3900 if (!size || size % sizeof(ovs_be32)) {
3901 return ODP_FIT_ERROR;
3903 if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
3904 return ODP_FIT_ERROR;
3907 for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
3908 flow->mpls_lse[i] = mpls_lse[i];
3910 if (n > FLOW_MAX_MPLS_LABELS) {
3911 return ODP_FIT_TOO_MUCH;
3915 /* BOS may be set only in the innermost label. */
3916 for (i = 0; i < n - 1; i++) {
3917 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
3918 return ODP_FIT_ERROR;
3922 /* BOS must be set in the innermost label. */
3923 if (n < FLOW_MAX_MPLS_LABELS
3924 && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
3925 return ODP_FIT_TOO_LITTLE;
3931 } else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
3933 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
3935 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
3936 const struct ovs_key_ipv4 *ipv4_key;
3938 ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
3939 put_ipv4_key(ipv4_key, flow, is_mask);
/* put_ipv4_key() translates the frag byte; >MASK means it was bad. */
3940 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
3941 return ODP_FIT_ERROR;
3944 check_start = ipv4_key;
3945 check_len = sizeof *ipv4_key;
3946 expected_bit = OVS_KEY_ATTR_IPV4;
3949 } else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
3951 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
3953 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
3954 const struct ovs_key_ipv6 *ipv6_key;
3956 ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
3957 put_ipv6_key(ipv6_key, flow, is_mask);
3958 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
3959 return ODP_FIT_ERROR;
3962 check_start = ipv6_key;
3963 check_len = sizeof *ipv6_key;
3964 expected_bit = OVS_KEY_ATTR_IPV6;
3967 } else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
3968 src_flow->dl_type == htons(ETH_TYPE_RARP)) {
3970 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
3972 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
3973 const struct ovs_key_arp *arp_key;
3975 arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
/* ARP opcodes above 255 do not fit the flow representation. */
3976 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
3977 VLOG_ERR_RL(&rl, "unsupported ARP opcode %"PRIu16" in flow "
3978 "key", ntohs(arp_key->arp_op));
3979 return ODP_FIT_ERROR;
3981 put_arp_key(arp_key, flow);
3983 check_start = arp_key;
3984 check_len = sizeof *arp_key;
3985 expected_bit = OVS_KEY_ATTR_ARP;
3991 if (check_len > 0) { /* Happens only when 'is_mask'. */
3992 if (!is_all_zeros(check_start, check_len) &&
3993 flow->dl_type != htons(0xffff)) {
3994 return ODP_FIT_ERROR;
3996 expected_attrs |= UINT64_C(1) << expected_bit;
/* L4 parsing: only for unfragmented-or-first-fragment packets. */
4000 expected_bit = OVS_KEY_ATTR_UNSPEC;
4001 if (src_flow->nw_proto == IPPROTO_TCP
4002 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
4003 src_flow->dl_type == htons(ETH_TYPE_IPV6))
4004 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
4006 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
4008 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
4009 const union ovs_key_tp *tcp_key;
4011 tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
4012 put_tp_key(tcp_key, flow);
4013 expected_bit = OVS_KEY_ATTR_TCP;
4015 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
4016 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
4017 flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
4019 } else if (src_flow->nw_proto == IPPROTO_UDP
4020 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
4021 src_flow->dl_type == htons(ETH_TYPE_IPV6))
4022 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
4024 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
4026 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
4027 const union ovs_key_tp *udp_key;
4029 udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
4030 put_tp_key(udp_key, flow);
4031 expected_bit = OVS_KEY_ATTR_UDP;
4033 } else if (src_flow->nw_proto == IPPROTO_SCTP
4034 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
4035 src_flow->dl_type == htons(ETH_TYPE_IPV6))
4036 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
4038 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
4040 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
4041 const union ovs_key_tp *sctp_key;
4043 sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
4044 put_tp_key(sctp_key, flow);
4045 expected_bit = OVS_KEY_ATTR_SCTP;
4047 } else if (src_flow->nw_proto == IPPROTO_ICMP
4048 && src_flow->dl_type == htons(ETH_TYPE_IP)
4049 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
4051 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
4053 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
4054 const struct ovs_key_icmp *icmp_key;
4056 icmp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMP]);
/* ICMP type/code are stored in tp_src/tp_dst of struct flow. */
4057 flow->tp_src = htons(icmp_key->icmp_type);
4058 flow->tp_dst = htons(icmp_key->icmp_code);
4059 expected_bit = OVS_KEY_ATTR_ICMP;
4061 } else if (src_flow->nw_proto == IPPROTO_ICMPV6
4062 && src_flow->dl_type == htons(ETH_TYPE_IPV6)
4063 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
4065 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
4067 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
4068 const struct ovs_key_icmpv6 *icmpv6_key;
4070 icmpv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMPV6]);
4071 flow->tp_src = htons(icmpv6_key->icmpv6_type);
4072 flow->tp_dst = htons(icmpv6_key->icmpv6_code);
4073 expected_bit = OVS_KEY_ATTR_ICMPV6;
/* Neighbor Discovery follows only for NS/NA messages with zero code. */
4074 if (src_flow->tp_dst == htons(0) &&
4075 (src_flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
4076 src_flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
4078 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
4080 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
4081 const struct ovs_key_nd *nd_key;
4083 nd_key = nl_attr_get(attrs[OVS_KEY_ATTR_ND]);
4084 memcpy(&flow->nd_target, nd_key->nd_target,
4085 sizeof flow->nd_target);
4086 memcpy(flow->arp_sha, nd_key->nd_sll, ETH_ADDR_LEN);
4087 memcpy(flow->arp_tha, nd_key->nd_tll, ETH_ADDR_LEN);
/* A nonzero ND mask requires exact-match ICMPv6 type/code. */
4089 if (!is_all_zeros(nd_key, sizeof *nd_key) &&
4090 (flow->tp_src != htons(0xffff) ||
4091 flow->tp_dst != htons(0xffff))) {
4092 return ODP_FIT_ERROR;
4094 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
/* Nonzero L4 port masks require an exact-match nw_proto. */
4101 if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
4102 if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
4103 return ODP_FIT_ERROR;
4105 expected_attrs |= UINT64_C(1) << expected_bit;
4110 return check_expectations(present_attrs, out_of_range_attr, expected_attrs,
4114 /* Parse 802.1Q header then encapsulated L3 attributes. */
/* Parses the outer OVS_KEY_ATTR_VLAN / OVS_KEY_ATTR_ENCAP pair of a flow key,
 * then recurses into the encapsulated attributes for the inner protocol
 * layers.  The overall fitness returned is the worse (MAX) of the outer and
 * inner fitness values.
 * NOTE(review): this listing is elided (some brace/else lines are missing);
 * comments describe only what the visible lines establish. */
4115 static enum odp_key_fitness
4116 parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
4117                    uint64_t present_attrs, int out_of_range_attr,
4118                    uint64_t expected_attrs, struct flow *flow,
4119                    const struct nlattr *key, size_t key_len,
4120                    const struct flow *src_flow)
4122     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
     /* Mask translation is signaled by 'src_flow' differing from 'flow'. */
4123     bool is_mask = src_flow != flow;
4125     const struct nlattr *encap
4126         = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
4127            ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
4128     enum odp_key_fitness encap_fitness;
4129     enum odp_key_fitness fitness;
4131     /* Calculate fitness of outer attributes. */
4133         expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
4134                            (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
4136         if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
4137             expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
4139         if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
4140             expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
4143     fitness = check_expectations(present_attrs, out_of_range_attr,
4144                                  expected_attrs, key, key_len);
4147      * Remove the TPID from dl_type since it's not the real Ethertype.  */
4148     flow->dl_type = htons(0);
4149     flow->vlan_tci = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)
4150                       ? nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN])
4153         if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN))) {
4154             return ODP_FIT_TOO_LITTLE;
4155         } else if (flow->vlan_tci == htons(0)) {
4156             /* Corner case for a truncated 802.1Q header. */
4157             if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
4158                 return ODP_FIT_TOO_MUCH;
         /* A present but zero TCI means a truncated 802.1Q header; any
          * non-empty encap payload is then surplus information. */
4161         } else if (!(flow->vlan_tci & htons(VLAN_CFI))) {
         /* Datapath convention: a nonzero VLAN TCI must carry the CFI bit
          * to mark the tag as actually present. */
4162             VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
4163                         "but CFI bit is not set", ntohs(flow->vlan_tci));
4164             return ODP_FIT_ERROR;
4167         if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
4172     /* Now parse the encapsulated attributes. */
4173     if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
4174                             attrs, &present_attrs, &out_of_range_attr)) {
4175         return ODP_FIT_ERROR;
4179     if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow, src_flow)) {
4180         return ODP_FIT_ERROR;
4182     encap_fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
4183                                       expected_attrs, flow, key, key_len,
4186     /* The overall fitness is the worse of the outer and inner attributes. */
4187     return MAX(fitness, encap_fitness);
/* Common worker for odp_flow_key_to_flow() and odp_flow_key_to_mask():
 * translates the netlink attributes in 'key' into 'flow'.  When
 * 'src_flow' != 'flow' the translation is for a mask, with 'src_flow' being
 * the previously translated flow that gives the mask its context.
 * NOTE(review): listing is elided; several brace lines are missing. */
4190 static enum odp_key_fitness
4191 odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
4192                        struct flow *flow, const struct flow *src_flow)
4194     const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
4195     uint64_t expected_attrs;
4196     uint64_t present_attrs;
4197     int out_of_range_attr;
4198     bool is_mask = src_flow != flow;
4200     memset(flow, 0, sizeof *flow);
4202     /* Parse attributes. */
4203     if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
4204                             &out_of_range_attr)) {
4205         return ODP_FIT_ERROR;
4210     if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) {
4211         flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]);
4212         expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID;
4213     } else if (is_mask) {
4214         /* Always exact match recirc_id if it is not specified. */
4215         flow->recirc_id = UINT32_MAX;
4218     if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_DP_HASH)) {
4219         flow->dp_hash = nl_attr_get_u32(attrs[OVS_KEY_ATTR_DP_HASH]);
4220         expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_DP_HASH;
4222     if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
4223         flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
4224         expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
4227     if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
4228         flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
4229         expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
4232     if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
4233         enum odp_key_fitness res;
4235         res = odp_tun_key_from_attr(attrs[OVS_KEY_ATTR_TUNNEL], &flow->tunnel);
4236         if (res == ODP_FIT_ERROR) {
4237             return ODP_FIT_ERROR;
4238         } else if (res == ODP_FIT_PERFECT) {
             /* Only a perfectly understood tunnel key counts as expected;
              * a partial fit is tolerated but degrades the overall fitness
              * via check_expectations() downstream. */
4239             expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
4243     if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
4244         flow->in_port.odp_port
4245             = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
4246         expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
4247     } else if (!is_mask) {
         /* A flow (not mask) without an input port gets the sentinel. */
4248         flow->in_port.odp_port = ODPP_NONE;
4251     /* Ethernet header. */
4252     if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
4253         const struct ovs_key_ethernet *eth_key;
4255         eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
4256         put_ethernet_key(eth_key, flow);
4258             expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
4262         expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
4265     /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
4266     if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
4268         return ODP_FIT_ERROR;
     /* For a mask, a set CFI bit selects the 802.1Q path; for a flow, the
      * 0x8100 TPID does. */
4272         ? (src_flow->vlan_tci & htons(VLAN_CFI)) != 0
4273         : src_flow->dl_type == htons(ETH_TYPE_VLAN)) {
4274         return parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
4275                                   expected_attrs, flow, key, key_len, src_flow);
4278         flow->vlan_tci = htons(0xffff);
4279         if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
4280             flow->vlan_tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
4281             expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
4284     return parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
4285                              expected_attrs, flow, key, key_len, src_flow);
4288 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
4289 * structure in 'flow'. Returns an ODP_FIT_* value that indicates how well
4290 * 'key' fits our expectations for what a flow key should contain.
4292 * The 'in_port' will be the datapath's understanding of the port. The
4293 * caller will need to translate with odp_port_to_ofp_port() if the
4294 * OpenFlow port is needed.
4296 * This function doesn't take the packet itself as an argument because none of
4297 * the currently understood OVS_KEY_ATTR_* attributes require it. Currently,
4298 * it is always possible to infer which additional attribute(s) should appear
4299 * by looking at the attributes for lower-level protocols, e.g. if the network
4300 * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
4301 * know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
4302 * must be absent. */
4303 enum odp_key_fitness
4304 odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
     /* (Listing elided: the 'struct flow *flow' parameter line is missing
      * here.)  Passing 'flow' as both output and 'src_flow' tells the worker
      * this is a flow, not a mask, translation. */
4307     return odp_flow_key_to_flow__(key, key_len, flow, flow);
4310 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a mask
4311 * structure in 'mask'. 'flow' must be a previously translated flow
4312 * corresponding to 'mask'. Returns an ODP_FIT_* value that indicates how well
4313 * 'key' fits our expectations for what a flow key should contain. */
4314 enum odp_key_fitness
4315 odp_flow_key_to_mask(const struct nlattr *key, size_t key_len,
4316                      struct flow *mask, const struct flow *flow)
     /* Distinct 'src_flow' (the flow) marks this as a mask translation. */
4318     return odp_flow_key_to_flow__(key, key_len, mask, flow);
4321 /* Returns 'fitness' as a string, for use in debug messages. */
/* NOTE(review): listing elided — the return type line, the "perfect" and
 * "too_much" return statements, and the default/error arm are missing from
 * this view; only the visible case labels are documented. */
4323 odp_key_fitness_to_string(enum odp_key_fitness fitness)
4326     case ODP_FIT_PERFECT:
4328     case ODP_FIT_TOO_MUCH:
4330     case ODP_FIT_TOO_LITTLE:
4331         return "too_little";
4339 /* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
4340 * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
4341 * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
4342 * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
4343 * null, then the return value is not meaningful.) */
4345 odp_put_userspace_action(uint32_t pid,
4346                          const void *userdata, size_t userdata_size,
4347                          odp_port_t tunnel_out_port,
4348                          struct ofpbuf *odp_actions)
4350     size_t userdata_ofs;
4353     offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
4354     nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
     /* Userdata payload begins just past the attribute header that
      * nl_msg_put_unspec_zero() below will emit. */
4356         userdata_ofs = odp_actions->size + NLA_HDRLEN;
4358         /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
4359          * module before Linux 3.10 required the userdata to be exactly 8 bytes
4362          *   - The kernel rejected shorter userdata with -ERANGE.
4364          *   - The kernel silently dropped userdata beyond the first 8 bytes.
4366          * Thus, for maximum compatibility, always put at least 8 bytes. (We
4367          * separately disable features that required more than 8 bytes.) */
4368         memcpy(nl_msg_put_unspec_zero(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
4369                                       MAX(8, userdata_size)),
4370                userdata, userdata_size);
4374     if (tunnel_out_port != ODPP_NONE) {
4375         nl_msg_put_odp_port(odp_actions, OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,
4378     nl_msg_end_nested(odp_actions, offset);
4380     return userdata_ofs;
4384 odp_put_tunnel_action(const struct flow_tnl *tunnel,
4385 struct ofpbuf *odp_actions)
4387 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
4388 tun_key_to_attr(odp_actions, tunnel);
4389 nl_msg_end_nested(odp_actions, offset);
4393 odp_put_tnl_push_action(struct ofpbuf *odp_actions,
4394 struct ovs_action_push_tnl *data)
4396 int size = offsetof(struct ovs_action_push_tnl, header);
4398 size += data->header_len;
4399 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_TUNNEL_PUSH, data, size);
4403 /* The commit_odp_actions() function and its helpers. */
4406 commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
4407 const void *key, size_t key_size)
4409 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
4410 nl_msg_put_unspec(odp_actions, key_type, key, key_size);
4411 nl_msg_end_nested(odp_actions, offset);
4414 /* Masked set actions have a mask following the data within the netlink
4415 * attribute. The unmasked bits in the data will be cleared as the data
4416 * is copied to the action. */
4418 commit_masked_set_action(struct ofpbuf *odp_actions,
4419 enum ovs_key_attr key_type,
4420 const void *key_, const void *mask_, size_t key_size)
4422 size_t offset = nl_msg_start_nested(odp_actions,
4423 OVS_ACTION_ATTR_SET_MASKED);
4424 char *data = nl_msg_put_unspec_uninit(odp_actions, key_type, key_size * 2);
4425 const char *key = key_, *mask = mask_;
4427 memcpy(data + key_size, mask, key_size);
4428 /* Clear unmasked bits while copying. */
4429 while (key_size--) {
4430 *data++ = *key++ & *mask++;
4432 nl_msg_end_nested(odp_actions, offset);
4435 /* If any of the flow key data that ODP actions can modify are different in
4436 * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
4437 * 'odp_actions' that change the flow tunneling information in key from
4438 * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
4439 * same way. In other words, operates the same as commit_odp_actions(), but
4440 * only on tunneling information. */
4442 commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
4443                          struct ofpbuf *odp_actions)
4445     /* A valid IPV4_TUNNEL must have non-zero ip_dst. */
4446     if (flow->tunnel.ip_dst) {
         /* If the tunnel metadata is already identical, nothing to emit.
          * (Listing elided: the early 'return' line is missing here.) */
4447         if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
4450         memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
4451         odp_put_tunnel_action(&base->tunnel, odp_actions);
/* Commits a set action for attribute 'attr' if 'key' differs from 'base'.
 * With 'use_masked_set' and a non-exact mask, emits a masked set; otherwise
 * widens the mask to all-ones and emits a plain set.  On commit, 'base' is
 * updated to 'key'.  NOTE(review): listing elided — the else branch
 * structure and the return statements are missing from this view. */
4456 commit(enum ovs_key_attr attr, bool use_masked_set,
4457        const void *key, void *base, void *mask, size_t size,
4458        struct ofpbuf *odp_actions)
4460     if (memcmp(key, base, size)) {
4461         bool fully_masked = odp_mask_is_exact(attr, mask, size);
4463         if (use_masked_set && !fully_masked) {
4464             commit_masked_set_action(odp_actions, attr, key, mask, size);
4466             if (!fully_masked) {
                 /* Plain set actions overwrite the whole field, so record
                  * an exact-match mask for the caller. */
4467                 memset(mask, 0xff, size);
4469             commit_set_action(odp_actions, attr, key, size);
4471         memcpy(base, key, size);
4474     /* Mask bits are set when we have either read or set the corresponding
4475      * values. Masked bits will be exact-matched, no need to set them
4476      * if the value did not actually change. */
4482 get_ethernet_key(const struct flow *flow, struct ovs_key_ethernet *eth)
4484 memcpy(eth->eth_src, flow->dl_src, ETH_ADDR_LEN);
4485 memcpy(eth->eth_dst, flow->dl_dst, ETH_ADDR_LEN);
4489 put_ethernet_key(const struct ovs_key_ethernet *eth, struct flow *flow)
4491 memcpy(flow->dl_src, eth->eth_src, ETH_ADDR_LEN);
4492 memcpy(flow->dl_dst, eth->eth_dst, ETH_ADDR_LEN);
/* Emits a set action for the Ethernet addresses if 'flow' and 'base_flow'
 * differ, updating 'base_flow' and 'wc' on commit.  NOTE(review): listing
 * elided — the trailing 'bool use_masked' parameter line is missing here. */
4496 commit_set_ether_addr_action(const struct flow *flow, struct flow *base_flow,
4497                              struct ofpbuf *odp_actions,
4498                              struct flow_wildcards *wc,
4501     struct ovs_key_ethernet key, base, mask;
4503     get_ethernet_key(flow, &key);
4504     get_ethernet_key(base_flow, &base);
4505     get_ethernet_key(&wc->masks, &mask);
4507     if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
4508                &key, &base, &mask, sizeof key, odp_actions)) {
         /* commit() may have updated 'base' and widened 'mask'; write the
          * results back into the flow structures. */
4509         put_ethernet_key(&base, base_flow);
4510         put_ethernet_key(&mask, &wc->masks);
/* Emits an OVS_ACTION_ATTR_POP_VLAN if 'base' currently carries a VLAN tag
 * (CFI bit set), and exact-matches vlan_tci in 'wc'.  NOTE(review): listing
 * elided — the line clearing base->vlan_tci is missing from this view. */
4515 pop_vlan(struct flow *base,
4516          struct ofpbuf *odp_actions, struct flow_wildcards *wc)
4518     memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
4520     if (base->vlan_tci & htons(VLAN_CFI)) {
4521         nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
/* Transitions 'base' from its current VLAN tag to 'vlan_tci': pops any
 * existing tag, then pushes the new one if 'vlan_tci' has the CFI bit set.
 * NOTE(review): listing elided — the early 'return' after the equality
 * check is missing from this view. */
4527 commit_vlan_action(ovs_be16 vlan_tci, struct flow *base,
4528                    struct ofpbuf *odp_actions, struct flow_wildcards *wc)
4530     if (base->vlan_tci == vlan_tci) {
4534     pop_vlan(base, odp_actions, wc);
4535     if (vlan_tci & htons(VLAN_CFI)) {
4536         struct ovs_action_push_vlan vlan;
4538         vlan.vlan_tpid = htons(ETH_TYPE_VLAN);
4539         vlan.vlan_tci = vlan_tci;
4540         nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
4541                           &vlan, sizeof vlan);
4543     base->vlan_tci = vlan_tci;
4546 /* Wildcarding already done at action translation time. */
/* Emits the MPLS actions (set, pop, push) needed to transform the MPLS label
 * stack of 'base' into that of 'flow', updating 'base' as it goes.  First
 * pops (or rewrites the topmost of) the LSEs in 'base' beyond those common
 * with 'flow', then pushes the missing LSEs.  NOTE(review): listing elided —
 * several brace/else lines and some declarations are missing here. */
4548 commit_mpls_action(const struct flow *flow, struct flow *base,
4549                    struct ofpbuf *odp_actions)
4551     int base_n = flow_count_mpls_labels(base, NULL);
4552     int flow_n = flow_count_mpls_labels(flow, NULL);
4553     int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
4556     while (base_n > common_n) {
4557         if (base_n - 1 == common_n && flow_n > common_n) {
4558             /* If there is only one more LSE in base than there are common
4559              * between base and flow; and flow has at least one more LSE than
4560              * is common then the topmost LSE of base may be updated using
4562             struct ovs_key_mpls mpls_key;
4564             mpls_key.mpls_lse = flow->mpls_lse[flow_n - base_n];
4565             commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
4566                               &mpls_key, sizeof mpls_key);
4567             flow_set_mpls_lse(base, 0, mpls_key.mpls_lse);
4570             /* Otherwise, if there more LSEs in base than are common between
4571              * base and flow then pop the topmost one. */
4575             /* If all the LSEs are to be popped and this is not the outermost
4576              * LSE then use ETH_TYPE_MPLS as the ethertype parameter of the
4577              * POP_MPLS action instead of flow->dl_type.
4579              * This is because the POP_MPLS action requires its ethertype
4580              * argument to be an MPLS ethernet type but in this case
4581              * flow->dl_type will be a non-MPLS ethernet type.
4583              * When the final POP_MPLS action occurs it use flow->dl_type and
4584              * the and the resulting packet will have the desired dl_type. */
4585             if ((!eth_type_mpls(flow->dl_type)) && base_n > 1) {
4586                 dl_type = htons(ETH_TYPE_MPLS);
4588                 dl_type = flow->dl_type;
4590             nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
4591             popped = flow_pop_mpls(base, base_n, flow->dl_type, NULL);
4597     /* If, after the above popping and setting, there are more LSEs in flow
4598      * than base then some LSEs need to be pushed. */
4599     while (base_n < flow_n) {
4600         struct ovs_action_push_mpls *mpls;
4602         mpls = nl_msg_put_unspec_zero(odp_actions,
4603                                       OVS_ACTION_ATTR_PUSH_MPLS,
4605         mpls->mpls_ethertype = flow->dl_type;
         /* Push inner-most missing LSE first: index counts down as base_n
          * grows toward flow_n. */
4606         mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
4607         flow_push_mpls(base, base_n, mpls->mpls_ethertype, NULL);
4608         flow_set_mpls_lse(base, 0, mpls->mpls_lse);
4614 get_ipv4_key(const struct flow *flow, struct ovs_key_ipv4 *ipv4, bool is_mask)
4616 ipv4->ipv4_src = flow->nw_src;
4617 ipv4->ipv4_dst = flow->nw_dst;
4618 ipv4->ipv4_proto = flow->nw_proto;
4619 ipv4->ipv4_tos = flow->nw_tos;
4620 ipv4->ipv4_ttl = flow->nw_ttl;
4621 ipv4->ipv4_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
4625 put_ipv4_key(const struct ovs_key_ipv4 *ipv4, struct flow *flow, bool is_mask)
4627 flow->nw_src = ipv4->ipv4_src;
4628 flow->nw_dst = ipv4->ipv4_dst;
4629 flow->nw_proto = ipv4->ipv4_proto;
4630 flow->nw_tos = ipv4->ipv4_tos;
4631 flow->nw_ttl = ipv4->ipv4_ttl;
4632 flow->nw_frag = odp_to_ovs_frag(ipv4->ipv4_frag, is_mask);
/* Emits a set action for the IPv4 header fields if 'flow' and 'base_flow'
 * differ, updating 'base_flow' and 'wc' on commit.  nw_proto and nw_frag are
 * not writable through this action.  NOTE(review): listing elided — the
 * trailing 'bool use_masked' parameter line is missing here. */
4636 commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow,
4637                        struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4640     struct ovs_key_ipv4 key, mask, base;
4642     /* Check that nw_proto and nw_frag remain unchanged. */
4643     ovs_assert(flow->nw_proto == base_flow->nw_proto &&
4644                flow->nw_frag == base_flow->nw_frag);
4646     get_ipv4_key(flow, &key, false);
4647     get_ipv4_key(base_flow, &base, false);
4648     get_ipv4_key(&wc->masks, &mask, true);
4649     mask.ipv4_proto = 0;        /* Not writeable. */
4650     mask.ipv4_frag = 0;         /* Not writable. */
4652     if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
4654         put_ipv4_key(&base, base_flow, false);
         /* commit() widens the mask to all-ones for a plain (non-masked)
          * set, which flips ipv4_proto from the 0 we forced above. */
4655         if (mask.ipv4_proto != 0) { /* Mask was changed by commit(). */
4656             put_ipv4_key(&mask, &wc->masks, true);
4662 get_ipv6_key(const struct flow *flow, struct ovs_key_ipv6 *ipv6, bool is_mask)
4664 memcpy(ipv6->ipv6_src, &flow->ipv6_src, sizeof ipv6->ipv6_src);
4665 memcpy(ipv6->ipv6_dst, &flow->ipv6_dst, sizeof ipv6->ipv6_dst);
4666 ipv6->ipv6_label = flow->ipv6_label;
4667 ipv6->ipv6_proto = flow->nw_proto;
4668 ipv6->ipv6_tclass = flow->nw_tos;
4669 ipv6->ipv6_hlimit = flow->nw_ttl;
4670 ipv6->ipv6_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
4674 put_ipv6_key(const struct ovs_key_ipv6 *ipv6, struct flow *flow, bool is_mask)
4676 memcpy(&flow->ipv6_src, ipv6->ipv6_src, sizeof flow->ipv6_src);
4677 memcpy(&flow->ipv6_dst, ipv6->ipv6_dst, sizeof flow->ipv6_dst);
4678 flow->ipv6_label = ipv6->ipv6_label;
4679 flow->nw_proto = ipv6->ipv6_proto;
4680 flow->nw_tos = ipv6->ipv6_tclass;
4681 flow->nw_ttl = ipv6->ipv6_hlimit;
4682 flow->nw_frag = odp_to_ovs_frag(ipv6->ipv6_frag, is_mask);
/* Emits a set action for the IPv6 header fields if 'flow' and 'base_flow'
 * differ, updating 'base_flow' and 'wc' on commit.  nw_proto and nw_frag are
 * not writable through this action.  NOTE(review): listing elided — the
 * trailing 'bool use_masked' parameter line is missing here. */
4686 commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow,
4687                        struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4690     struct ovs_key_ipv6 key, mask, base;
4692     /* Check that nw_proto and nw_frag remain unchanged. */
4693     ovs_assert(flow->nw_proto == base_flow->nw_proto &&
4694                flow->nw_frag == base_flow->nw_frag);
4696     get_ipv6_key(flow, &key, false);
4697     get_ipv6_key(base_flow, &base, false);
4698     get_ipv6_key(&wc->masks, &mask, true);
4699     mask.ipv6_proto = 0;        /* Not writeable. */
4700     mask.ipv6_frag = 0;         /* Not writable. */
4702     if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
4704         put_ipv6_key(&base, base_flow, false);
         /* A nonzero ipv6_proto means commit() widened the mask (plain set). */
4705         if (mask.ipv6_proto != 0) { /* Mask was changed by commit(). */
4706             put_ipv6_key(&mask, &wc->masks, true);
4712 get_arp_key(const struct flow *flow, struct ovs_key_arp *arp)
4714 /* ARP key has padding, clear it. */
4715 memset(arp, 0, sizeof *arp);
4717 arp->arp_sip = flow->nw_src;
4718 arp->arp_tip = flow->nw_dst;
4719 arp->arp_op = htons(flow->nw_proto);
4720 memcpy(arp->arp_sha, flow->arp_sha, ETH_ADDR_LEN);
4721 memcpy(arp->arp_tha, flow->arp_tha, ETH_ADDR_LEN);
4725 put_arp_key(const struct ovs_key_arp *arp, struct flow *flow)
4727 flow->nw_src = arp->arp_sip;
4728 flow->nw_dst = arp->arp_tip;
4729 flow->nw_proto = ntohs(arp->arp_op);
4730 memcpy(flow->arp_sha, arp->arp_sha, ETH_ADDR_LEN);
4731 memcpy(flow->arp_tha, arp->arp_tha, ETH_ADDR_LEN);
/* Emits a (masked) set action for the ARP fields if 'flow' and 'base_flow'
 * differ, updating 'base_flow' and 'wc' on commit.  NOTE(review): listing
 * elided — the return statement(s) for the slow_path_reason are missing
 * from this view. */
4734 static enum slow_path_reason
4735 commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
4736                       struct ofpbuf *odp_actions, struct flow_wildcards *wc)
4738     struct ovs_key_arp key, mask, base;
4740     get_arp_key(flow, &key);
4741     get_arp_key(base_flow, &base);
4742     get_arp_key(&wc->masks, &mask);
     /* ARP sets always use the masked form ('use_masked_set' == true). */
4744     if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
4746         put_arp_key(&base, base_flow);
4747         put_arp_key(&mask, &wc->masks);
4754 get_nd_key(const struct flow *flow, struct ovs_key_nd *nd)
4756 memcpy(nd->nd_target, &flow->nd_target, sizeof flow->nd_target);
4757 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
4758 memcpy(nd->nd_sll, flow->arp_sha, ETH_ADDR_LEN);
4759 memcpy(nd->nd_tll, flow->arp_tha, ETH_ADDR_LEN);
4763 put_nd_key(const struct ovs_key_nd *nd, struct flow *flow)
4765 memcpy(&flow->nd_target, &flow->nd_target, sizeof flow->nd_target);
4766 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
4767 memcpy(flow->arp_sha, nd->nd_sll, ETH_ADDR_LEN);
4768 memcpy(flow->arp_tha, nd->nd_tll, ETH_ADDR_LEN);
/* Emits a set action for the IPv6 ND fields if 'flow' and 'base_flow'
 * differ, updating 'base_flow' and 'wc' on commit.  NOTE(review): listing
 * elided — the return statement(s) for the slow_path_reason are missing
 * from this view. */
4772 commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
4773                      struct ofpbuf *odp_actions,
4774                      struct flow_wildcards *wc, bool use_masked)
4776     struct ovs_key_nd key, mask, base;
4778     get_nd_key(flow, &key);
4779     get_nd_key(base_flow, &base);
4780     get_nd_key(&wc->masks, &mask);
4782     if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
4784         put_nd_key(&base, base_flow);
4785         put_nd_key(&mask, &wc->masks);
/* Dispatches on the committed dl_type to the IPv4, IPv6(+ND), or ARP set
 * helpers.  NOTE(review): listing elided — the 'case ETH_TYPE_*' labels and
 * the early/default return statements are missing from this view. */
4792 static enum slow_path_reason
4793 commit_set_nw_action(const struct flow *flow, struct flow *base,
4794                      struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4797     /* Check if 'flow' really has an L3 header. */
4798     if (!flow->nw_proto) {
4802     switch (ntohs(base->dl_type)) {
4804         commit_set_ipv4_action(flow, base, odp_actions, wc, use_masked);
4808         commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
         /* Only the ND commit can force slow-path handling here. */
4809         return commit_set_nd_action(flow, base, odp_actions, wc, use_masked);
4812         return commit_set_arp_action(flow, base, odp_actions, wc);
4818 /* TCP, UDP, and SCTP keys have the same layout. */
/* This guarantee lets get_tp_key()/put_tp_key() treat union ovs_key_tp
 * uniformly via its 'tcp' member regardless of the actual protocol. */
4819 BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
4820                   sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
4823 get_tp_key(const struct flow *flow, union ovs_key_tp *tp)
4825 tp->tcp.tcp_src = flow->tp_src;
4826 tp->tcp.tcp_dst = flow->tp_dst;
4830 put_tp_key(const union ovs_key_tp *tp, struct flow *flow)
4832 flow->tp_src = tp->tcp.tcp_src;
4833 flow->tp_dst = tp->tcp.tcp_dst;
/* Emits a set action for the transport ports if 'flow' and 'base_flow'
 * differ, choosing the TCP/UDP/SCTP attribute from nw_proto, and updating
 * 'base_flow' and 'wc' on commit.  NOTE(review): listing elided — the early
 * return statements and the final 'else' (unsupported protocol) arm are
 * missing from this view. */
4837 commit_set_port_action(const struct flow *flow, struct flow *base_flow,
4838                        struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4841     enum ovs_key_attr key_type;
4842     union ovs_key_tp key, mask, base;
4844     /* Check if 'flow' really has an L3 header. */
4845     if (!flow->nw_proto) {
     /* Port sets only make sense on top of an IPv4/IPv6 base flow. */
4849     if (!is_ip_any(base_flow)) {
4853     if (flow->nw_proto == IPPROTO_TCP) {
4854         key_type = OVS_KEY_ATTR_TCP;
4855     } else if (flow->nw_proto == IPPROTO_UDP) {
4856         key_type = OVS_KEY_ATTR_UDP;
4857     } else if (flow->nw_proto == IPPROTO_SCTP) {
4858         key_type = OVS_KEY_ATTR_SCTP;
4863     get_tp_key(flow, &key);
4864     get_tp_key(base_flow, &base);
4865     get_tp_key(&wc->masks, &mask);
4867     if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
4869         put_tp_key(&base, base_flow);
4870         put_tp_key(&mask, &wc->masks);
/* Emits a set action for skb_priority if it differs between 'flow' and
 * 'base_flow', updating 'base_flow' and 'wc' on commit.  NOTE(review):
 * listing elided — the trailing 'bool use_masked' parameter line is
 * missing here. */
4875 commit_set_priority_action(const struct flow *flow, struct flow *base_flow,
4876                            struct ofpbuf *odp_actions,
4877                            struct flow_wildcards *wc,
4880     uint32_t key, mask, base;
4882     key = flow->skb_priority;
4883     base = base_flow->skb_priority;
4884     mask = wc->masks.skb_priority;
4886     if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
4887                sizeof key, odp_actions)) {
4888         base_flow->skb_priority = base;
4889         wc->masks.skb_priority = mask;
/* Emits a set action for the packet mark (SKB_MARK) if it differs between
 * 'flow' and 'base_flow', updating 'base_flow' and 'wc' on commit.
 * NOTE(review): listing elided — the trailing 'bool use_masked' parameter
 * line is missing here. */
4894 commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow,
4895                            struct ofpbuf *odp_actions,
4896                            struct flow_wildcards *wc,
4899     uint32_t key, mask, base;
4901     key = flow->pkt_mark;
4902     base = base_flow->pkt_mark;
4903     mask = wc->masks.pkt_mark;
4905     if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
4906                sizeof key, odp_actions)) {
4907         base_flow->pkt_mark = base;
4908         wc->masks.pkt_mark = mask;
4912 /* If any of the flow key data that ODP actions can modify are different in
4913 * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
4914 * key from 'base' into 'flow', and then changes 'base' the same way. Does not
4915 * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
4916 * in addition to this function if needed. Sets fields in 'wc' that are
4917 * used as part of the action.
4919 * Returns a reason to force processing the flow's packets into the userspace
4920 * slow path, if there is one, otherwise 0. */
4921 enum slow_path_reason
4922 commit_odp_actions(const struct flow *flow, struct flow *base,
4923 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4926 enum slow_path_reason slow;
4928 commit_set_ether_addr_action(flow, base, odp_actions, wc, use_masked);
4929 slow = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
4930 commit_set_port_action(flow, base, odp_actions, wc, use_masked);
4931 commit_mpls_action(flow, base, odp_actions);
4932 commit_vlan_action(flow->vlan_tci, base, odp_actions, wc);
4933 commit_set_priority_action(flow, base, odp_actions, wc, use_masked);
4934 commit_set_pkt_mark_action(flow, base, odp_actions, wc, use_masked);