2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include <arpa/inet.h>
23 #include <netinet/in.h>
24 #include <netinet/icmp6.h>
28 #include "byte-order.h"
31 #include "dynamic-string.h"
38 #include "tun-metadata.h"
39 #include "unaligned.h"
42 #include "openvswitch/vlog.h"
/* Module registration for OVS's vlog logging facility. */
44 VLOG_DEFINE_THIS_MODULE(odp_util);
46 /* The interface between userspace and kernel uses an "OVS_*" prefix.
47 * Since this is fairly non-specific for the OVS userspace components,
48 * "ODP_*" (Open vSwitch Datapath) is used as the prefix for
49 * interactions with the datapath.
52 /* The set of characters that may separate one action or one key attribute
54 static const char *delimiters = ", \t\r\n";
/* NOTE(review): this listing is missing lines; 'next' below is a member of a
 * struct whose opening declaration is not visible here. */
58 const struct attr_len_tbl *next;
/* Sentinel attribute-length codes used by odp_action_len() and the
 * attr_len_tbl tables below: invalid type, variable-length payload, and
 * nested-attribute payload respectively. */
61 #define ATTR_LEN_INVALID -1
62 #define ATTR_LEN_VARIABLE -2
63 #define ATTR_LEN_NESTED -3
/* Forward declarations for the key parsing/formatting helpers defined later
 * in this file. */
65 static int parse_odp_key_mask_attr(const char *, const struct simap *port_names,
66 struct ofpbuf *, struct ofpbuf *);
67 static void format_odp_key_attr(const struct nlattr *a,
68 const struct nlattr *ma,
69 const struct hmap *portno_names, struct ds *ds,
/* Scratch buffer for scanned Geneve options (member of an enclosing struct
 * whose declaration is not fully visible in this listing). */
73 struct geneve_opt d[63];
77 static int scan_geneve(const char *s, struct geneve_scan *key,
78 struct geneve_scan *mask);
79 static void format_geneve_opts(const struct geneve_opt *opt,
80 const struct geneve_opt *mask, int opts_len,
81 struct ds *, bool verbose);
83 static struct nlattr *generate_all_wildcard_mask(const struct attr_len_tbl tbl[],
84 int max, struct ofpbuf *,
85 const struct nlattr *key);
86 /* Returns one the following for the action with the given OVS_ACTION_ATTR_*
89 * - For an action whose argument has a fixed length, returned that
90 * nonnegative length in bytes.
92 * - For an action with a variable-length argument, returns ATTR_LEN_VARIABLE.
94 * - For an invalid 'type', returns ATTR_LEN_INVALID. */
/* Maps an OVS_ACTION_ATTR_* 'type' to its expected payload length (see the
 * contract in the comment block above).  Casting to the enum lets the
 * compiler warn about unhandled action types. */
96 odp_action_len(uint16_t type)
98 if (type > OVS_ACTION_ATTR_MAX) {
102 switch ((enum ovs_action_attr) type) {
103 case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
104 case OVS_ACTION_ATTR_TUNNEL_PUSH: return ATTR_LEN_VARIABLE;
105 case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
106 case OVS_ACTION_ATTR_USERSPACE: return ATTR_LEN_VARIABLE;
107 case OVS_ACTION_ATTR_PUSH_VLAN: return sizeof(struct ovs_action_push_vlan);
108 case OVS_ACTION_ATTR_POP_VLAN: return 0;
109 case OVS_ACTION_ATTR_PUSH_MPLS: return sizeof(struct ovs_action_push_mpls);
110 case OVS_ACTION_ATTR_POP_MPLS: return sizeof(ovs_be16);
111 case OVS_ACTION_ATTR_RECIRC: return sizeof(uint32_t);
112 case OVS_ACTION_ATTR_HASH: return sizeof(struct ovs_action_hash);
113 case OVS_ACTION_ATTR_SET: return ATTR_LEN_VARIABLE;
114 case OVS_ACTION_ATTR_SET_MASKED: return ATTR_LEN_VARIABLE;
115 case OVS_ACTION_ATTR_SAMPLE: return ATTR_LEN_VARIABLE;
/* UNSPEC and the end-of-enum marker are never valid on the wire. */
117 case OVS_ACTION_ATTR_UNSPEC:
118 case __OVS_ACTION_ATTR_MAX:
119 return ATTR_LEN_INVALID;
122 return ATTR_LEN_INVALID;
125 /* Returns a string form of 'attr'. The return value is either a statically
126 * allocated constant string or the 'bufsize'-byte buffer 'namebuf'. 'bufsize'
127 * should be at least OVS_KEY_ATTR_BUFSIZE. */
/* "key" prefix (3 chars) + widest unsigned int + NUL terminator. */
128 enum { OVS_KEY_ATTR_BUFSIZE = 3 + INT_STRLEN(unsigned int) + 1 };
/* Maps each known OVS_KEY_ATTR_* to its human-readable flow-key name; unknown
 * values fall through to a generated "key<N>" string in 'namebuf'. */
130 ovs_key_attr_to_string(enum ovs_key_attr attr, char *namebuf, size_t bufsize)
133 case OVS_KEY_ATTR_UNSPEC: return "unspec";
134 case OVS_KEY_ATTR_ENCAP: return "encap";
135 case OVS_KEY_ATTR_PRIORITY: return "skb_priority";
136 case OVS_KEY_ATTR_SKB_MARK: return "skb_mark";
137 case OVS_KEY_ATTR_TUNNEL: return "tunnel";
138 case OVS_KEY_ATTR_IN_PORT: return "in_port";
139 case OVS_KEY_ATTR_ETHERNET: return "eth";
140 case OVS_KEY_ATTR_VLAN: return "vlan";
141 case OVS_KEY_ATTR_ETHERTYPE: return "eth_type";
142 case OVS_KEY_ATTR_IPV4: return "ipv4";
143 case OVS_KEY_ATTR_IPV6: return "ipv6";
144 case OVS_KEY_ATTR_TCP: return "tcp";
145 case OVS_KEY_ATTR_TCP_FLAGS: return "tcp_flags";
146 case OVS_KEY_ATTR_UDP: return "udp";
147 case OVS_KEY_ATTR_SCTP: return "sctp";
148 case OVS_KEY_ATTR_ICMP: return "icmp";
149 case OVS_KEY_ATTR_ICMPV6: return "icmpv6";
150 case OVS_KEY_ATTR_ARP: return "arp";
151 case OVS_KEY_ATTR_ND: return "nd";
152 case OVS_KEY_ATTR_MPLS: return "mpls";
153 case OVS_KEY_ATTR_DP_HASH: return "dp_hash";
154 case OVS_KEY_ATTR_RECIRC_ID: return "recirc_id";
156 case __OVS_KEY_ATTR_MAX:
/* Fallback for attributes this build does not know by name. */
158 snprintf(namebuf, bufsize, "key%u", (unsigned int) attr);
/* Appends a generic "actionN(<hex bytes>)" rendering of 'a' to 'ds', used
 * when the action type is unknown or its length is wrong. */
164 format_generic_odp_action(struct ds *ds, const struct nlattr *a)
166 size_t len = nl_attr_get_size(a);
168 ds_put_format(ds, "action%"PRId16, nl_attr_type(a));
170 const uint8_t *unspec;
173 unspec = nl_attr_get(a);
174 for (i = 0; i < len; i++) {
/* '(' before the first byte, space between subsequent bytes. */
175 ds_put_char(ds, i ? ' ': '(');
176 ds_put_format(ds, "%02x", unspec[i]);
178 ds_put_char(ds, ')');
/* Formats an OVS_ACTION_ATTR_SAMPLE action as
 * "sample(sample=P%,actions(...))", recursing into the nested action list.
 * Emits "sample(error)" if the nested attributes fail policy parsing. */
183 format_odp_sample_action(struct ds *ds, const struct nlattr *attr)
185 static const struct nl_policy ovs_sample_policy[] = {
186 [OVS_SAMPLE_ATTR_PROBABILITY] = { .type = NL_A_U32 },
187 [OVS_SAMPLE_ATTR_ACTIONS] = { .type = NL_A_NESTED }
189 struct nlattr *a[ARRAY_SIZE(ovs_sample_policy)];
191 const struct nlattr *nla_acts;
194 ds_put_cstr(ds, "sample");
196 if (!nl_parse_nested(attr, ovs_sample_policy, a, ARRAY_SIZE(a))) {
197 ds_put_cstr(ds, "(error)");
/* Probability is stored as a fraction of UINT32_MAX (divisor on the
 * elided continuation line); shown as a percentage. */
201 percentage = (100.0 * nl_attr_get_u32(a[OVS_SAMPLE_ATTR_PROBABILITY])) /
204 ds_put_format(ds, "(sample=%.1f%%,", percentage);
206 ds_put_cstr(ds, "actions(");
207 nla_acts = nl_attr_get(a[OVS_SAMPLE_ATTR_ACTIONS]);
208 len = nl_attr_get_size(a[OVS_SAMPLE_ATTR_ACTIONS]);
209 format_odp_actions(ds, nla_acts, len);
210 ds_put_format(ds, "))");
/* Maps a slow-path reason bit to its short name via the SPR X-macro table
 * (the #include that expands SPR is elided from this listing). */
214 slow_path_reason_to_string(uint32_t reason)
216 switch ((enum slow_path_reason) reason) {
217 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
/* Like slow_path_reason_to_string() but returns the longer human-readable
 * explanation column of the SPR table. */
226 slow_path_reason_to_explanation(enum slow_path_reason reason)
229 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
/* Thin wrapper over parse_flags() that parses a ')'-terminated flag list in
 * 's' using 'bit_to_string' for names; results land in *res_flags (and
 * *res_mask if non-NULL), restricted to bits in 'allowed'. */
238 parse_odp_flags(const char *s, const char *(*bit_to_string)(uint32_t),
239 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
241 return parse_flags(s, bit_to_string, ')', NULL, NULL,
242 res_flags, allowed, res_mask);
/* Formats an OVS_ACTION_ATTR_USERSPACE action as "userspace(pid=N,...)".
 * Recognized userdata cookies (sFlow, slow_path, flow_sample, ipfix) are
 * pretty-printed; anything else is dumped as raw "userdata(<hex>)".  Also
 * appends ",actions" and ",tunnel_out_port=N" when those attributes are
 * present. */
246 format_odp_userspace_action(struct ds *ds, const struct nlattr *attr)
248 static const struct nl_policy ovs_userspace_policy[] = {
249 [OVS_USERSPACE_ATTR_PID] = { .type = NL_A_U32 },
250 [OVS_USERSPACE_ATTR_USERDATA] = { .type = NL_A_UNSPEC,
252 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = { .type = NL_A_U32,
254 [OVS_USERSPACE_ATTR_ACTIONS] = { .type = NL_A_UNSPEC,
257 struct nlattr *a[ARRAY_SIZE(ovs_userspace_policy)];
258 const struct nlattr *userdata_attr;
259 const struct nlattr *tunnel_out_port_attr;
261 if (!nl_parse_nested(attr, ovs_userspace_policy, a, ARRAY_SIZE(a))) {
262 ds_put_cstr(ds, "userspace(error)");
266 ds_put_format(ds, "userspace(pid=%"PRIu32,
267 nl_attr_get_u32(a[OVS_USERSPACE_ATTR_PID]));
269 userdata_attr = a[OVS_USERSPACE_ATTR_USERDATA];
272 const uint8_t *userdata = nl_attr_get(userdata_attr);
273 size_t userdata_len = nl_attr_get_size(userdata_attr);
274 bool userdata_unspec = true;
275 union user_action_cookie cookie;
/* Only try cookie interpretation when the userdata length is plausible:
 * at least the type field, at most the whole union. */
277 if (userdata_len >= sizeof cookie.type
278 && userdata_len <= sizeof cookie) {
280 memset(&cookie, 0, sizeof cookie);
281 memcpy(&cookie, userdata, userdata_len);
283 userdata_unspec = false;
285 if (userdata_len == sizeof cookie.sflow
286 && cookie.type == USER_ACTION_COOKIE_SFLOW) {
287 ds_put_format(ds, ",sFlow("
288 "vid=%"PRIu16",pcp=%"PRIu8",output=%"PRIu32")",
289 vlan_tci_to_vid(cookie.sflow.vlan_tci),
290 vlan_tci_to_pcp(cookie.sflow.vlan_tci),
291 cookie.sflow.output);
292 } else if (userdata_len == sizeof cookie.slow_path
293 && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
294 ds_put_cstr(ds, ",slow_path(");
295 format_flags(ds, slow_path_reason_to_string,
296 cookie.slow_path.reason, ',');
297 ds_put_format(ds, ")");
298 } else if (userdata_len == sizeof cookie.flow_sample
299 && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
300 ds_put_format(ds, ",flow_sample(probability=%"PRIu16
301 ",collector_set_id=%"PRIu32
302 ",obs_domain_id=%"PRIu32
303 ",obs_point_id=%"PRIu32")",
304 cookie.flow_sample.probability,
305 cookie.flow_sample.collector_set_id,
306 cookie.flow_sample.obs_domain_id,
307 cookie.flow_sample.obs_point_id);
/* NOTE(review): ipfix uses '>=' rather than '==' unlike the other
 * cookie checks — presumably to tolerate trailing data; confirm. */
308 } else if (userdata_len >= sizeof cookie.ipfix
309 && cookie.type == USER_ACTION_COOKIE_IPFIX) {
310 ds_put_format(ds, ",ipfix(output_port=%"PRIu32")",
311 cookie.ipfix.output_odp_port);
/* Unrecognized cookie type: fall back to the raw hex dump below. */
313 userdata_unspec = true;
317 if (userdata_unspec) {
319 ds_put_format(ds, ",userdata(");
320 for (i = 0; i < userdata_len; i++) {
321 ds_put_format(ds, "%02x", userdata[i]);
323 ds_put_char(ds, ')');
327 if (a[OVS_USERSPACE_ATTR_ACTIONS]) {
328 ds_put_cstr(ds, ",actions");
331 tunnel_out_port_attr = a[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT];
332 if (tunnel_out_port_attr) {
333 ds_put_format(ds, ",tunnel_out_port=%"PRIu32,
334 nl_attr_get_u32(tunnel_out_port_attr));
337 ds_put_char(ds, ')');
/* Formats VLAN TCI fields ("vid=", "pcp=", "cfi=0") into 'ds'.  Each field is
 * printed only when non-zero in key or mask, unless 'verbose'; a "/0xNNN"
 * suffix is added for partially-masked fields.  Each printed field ends with
 * a trailing ','. */
341 format_vlan_tci(struct ds *ds, ovs_be16 tci, ovs_be16 mask, bool verbose)
343 if (verbose || vlan_tci_to_vid(tci) || vlan_tci_to_vid(mask)) {
344 ds_put_format(ds, "vid=%"PRIu16, vlan_tci_to_vid(tci));
345 if (vlan_tci_to_vid(mask) != VLAN_VID_MASK) { /* Partially masked. */
346 ds_put_format(ds, "/0x%"PRIx16, vlan_tci_to_vid(mask));
348 ds_put_char(ds, ',');
350 if (verbose || vlan_tci_to_pcp(tci) || vlan_tci_to_pcp(mask)) {
351 ds_put_format(ds, "pcp=%d", vlan_tci_to_pcp(tci));
352 if (vlan_tci_to_pcp(mask) != (VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) {
353 ds_put_format(ds, "/0x%x", vlan_tci_to_pcp(mask));
355 ds_put_char(ds, ',');
/* CFI is normally set; only its absence is worth printing. */
357 if (!(tci & htons(VLAN_CFI))) {
358 ds_put_cstr(ds, "cfi=0");
359 ds_put_char(ds, ',');
/* Formats one MPLS label-stack entry as "label=L,tc=T,ttl=N,bos=B". */
365 format_mpls_lse(struct ds *ds, ovs_be32 mpls_lse)
367 ds_put_format(ds, "label=%"PRIu32",tc=%d,ttl=%d,bos=%d",
368 mpls_lse_to_label(mpls_lse),
369 mpls_lse_to_tc(mpls_lse),
370 mpls_lse_to_ttl(mpls_lse),
371 mpls_lse_to_bos(mpls_lse));
/* Formats 'n' MPLS LSEs with an optional mask.  A single entry is printed
 * field-by-field (with "/mask" suffixes when masked); multiple entries are
 * printed compactly as "lseN=0x..[/0x..]," per entry. */
375 format_mpls(struct ds *ds, const struct ovs_key_mpls *mpls_key,
376 const struct ovs_key_mpls *mpls_mask, int n)
379 ovs_be32 key = mpls_key->mpls_lse;
381 if (mpls_mask == NULL) {
382 format_mpls_lse(ds, key);
384 ovs_be32 mask = mpls_mask->mpls_lse;
386 ds_put_format(ds, "label=%"PRIu32"/0x%x,tc=%d/%x,ttl=%d/0x%x,bos=%d/%x",
387 mpls_lse_to_label(key), mpls_lse_to_label(mask),
388 mpls_lse_to_tc(key), mpls_lse_to_tc(mask),
389 mpls_lse_to_ttl(key), mpls_lse_to_ttl(mask),
390 mpls_lse_to_bos(key), mpls_lse_to_bos(mask));
/* Multi-entry case: one "lseN=..." per stack entry. */
395 for (i = 0; i < n; i++) {
396 ds_put_format(ds, "lse%d=%#"PRIx32,
397 i, ntohl(mpls_key[i].mpls_lse));
399 ds_put_format(ds, "/%#"PRIx32, ntohl(mpls_mask[i].mpls_lse));
401 ds_put_char(ds, ',');
/* Formats a recirculation action as "recirc(0xNN)". */
408 format_odp_recirc_action(struct ds *ds, uint32_t recirc_id)
410 ds_put_format(ds, "recirc(%#"PRIx32")", recirc_id);
414 format_odp_hash_action(struct ds *ds, const struct ovs_action_hash *hash_act)
416 ds_put_format(ds, "hash(");
418 if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
419 ds_put_format(ds, "hash_l4(%"PRIu32")", hash_act->hash_basis);
421 ds_put_format(ds, "Unknown hash algorithm(%"PRIu32")",
424 ds_put_format(ds, ")");
/* Prints the UDP header that immediately follows 'ip' in a tnl_push header
 * blob as "udp(src=..,dst=..,csum=0x..),"; callers use the return value (on
 * an elided line) to continue past the UDP header. */
428 format_udp_tnl_push_header(struct ds *ds, const struct ip_header *ip)
430 const struct udp_header *udp;
/* UDP header sits directly after the (option-less) IPv4 header. */
432 udp = (const struct udp_header *) (ip + 1);
433 ds_put_format(ds, "udp(src=%"PRIu16",dst=%"PRIu16",csum=0x%"PRIx16"),",
434 ntohs(udp->udp_src), ntohs(udp->udp_dst),
435 ntohs(udp->udp_csum));
/* Formats the prebuilt outer header carried by a tnl_push action:
 * Ethernet, IPv4, then the tunnel-specific part (VXLAN, Geneve over UDP, or
 * GRE directly over IP), walking raw bytes in data->header. */
441 format_odp_tnl_push_header(struct ds *ds, struct ovs_action_push_tnl *data)
443 const struct eth_header *eth;
444 const struct ip_header *ip;
447 eth = (const struct eth_header *)data->header;
450 ip = (const struct ip_header *)l3;
453 ds_put_format(ds, "header(size=%"PRIu8",type=%"PRIu8",eth(dst=",
454 data->header_len, data->tnl_type);
455 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_dst));
456 ds_put_format(ds, ",src=");
457 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_src));
458 ds_put_format(ds, ",dl_type=0x%04"PRIx16"),", ntohs(eth->eth_type));
/* 16-bit-aligned accessors: the header blob is only 2-byte aligned. */
461 ds_put_format(ds, "ipv4(src="IP_FMT",dst="IP_FMT",proto=%"PRIu8
462 ",tos=%#"PRIx8",ttl=%"PRIu8",frag=0x%"PRIx16"),",
463 IP_ARGS(get_16aligned_be32(&ip->ip_src)),
464 IP_ARGS(get_16aligned_be32(&ip->ip_dst)),
465 ip->ip_proto, ip->ip_tos,
469 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
470 const struct vxlanhdr *vxh;
/* format_udp_tnl_push_header() prints the UDP part and returns a
 * pointer just past it (assignment visible here). */
472 vxh = format_udp_tnl_push_header(ds, ip);
/* VNI occupies the top 24 bits of the vni word. */
474 ds_put_format(ds, "vxlan(flags=0x%"PRIx32",vni=0x%"PRIx32")",
475 ntohl(get_16aligned_be32(&vxh->vx_flags)),
476 ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
477 } else if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
478 const struct genevehdr *gnh;
480 gnh = format_udp_tnl_push_header(ds, ip);
482 ds_put_format(ds, "geneve(%s%svni=0x%"PRIx32,
483 gnh->oam ? "oam," : "",
484 gnh->critical ? "crit," : "",
485 ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
/* opt_len is in 4-byte words. */
488 ds_put_cstr(ds, ",options(");
489 format_geneve_opts(gnh->options, NULL, gnh->opt_len * 4,
491 ds_put_char(ds, ')');
494 ds_put_char(ds, ')');
495 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE) {
496 const struct gre_base_hdr *greh;
497 ovs_16aligned_be32 *options;
/* GRE sits directly on IPv4; no UDP header in between. */
500 l4 = ((uint8_t *)l3 + sizeof(struct ip_header));
501 greh = (const struct gre_base_hdr *) l4;
/* The doubled "gre((" matches the syntax ovs_parse_tnl_push() scans. */
503 ds_put_format(ds, "gre((flags=0x%"PRIx16",proto=0x%"PRIx16")",
504 ntohs(greh->flags), ntohs(greh->protocol));
/* Optional fields follow the base header in CSUM, KEY, SEQ order;
 * 'options' advances past each one on elided lines. */
505 options = (ovs_16aligned_be32 *)(greh + 1);
506 if (greh->flags & htons(GRE_CSUM)) {
507 ds_put_format(ds, ",csum=0x%"PRIx16, ntohs(*((ovs_be16 *)options)));
510 if (greh->flags & htons(GRE_KEY)) {
511 ds_put_format(ds, ",key=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
514 if (greh->flags & htons(GRE_SEQ)) {
515 ds_put_format(ds, ",seq=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
518 ds_put_format(ds, ")");
520 ds_put_format(ds, ")");
/* Formats a whole OVS_ACTION_ATTR_TUNNEL_PUSH action:
 * "tnl_push(tnl_port(N),<header>,out_port(N))". */
524 format_odp_tnl_push_action(struct ds *ds, const struct nlattr *attr)
526 struct ovs_action_push_tnl *data;
528 data = (struct ovs_action_push_tnl *) nl_attr_get(attr);
530 ds_put_format(ds, "tnl_push(tnl_port(%"PRIu32"),", data->tnl_port);
531 format_odp_tnl_push_header(ds, data);
532 ds_put_format(ds, ",out_port(%"PRIu32"))", data->out_port);
/* Formats a single datapath action 'a' into 'ds', dispatching on its
 * OVS_ACTION_ATTR_* type.  Actions with the wrong payload length, and
 * unknown types, fall back to format_generic_odp_action(). */
536 format_odp_action(struct ds *ds, const struct nlattr *a)
539 enum ovs_action_attr type = nl_attr_type(a);
540 const struct ovs_action_push_vlan *vlan;
/* Length sanity check before interpreting the payload. */
543 expected_len = odp_action_len(nl_attr_type(a));
544 if (expected_len != ATTR_LEN_VARIABLE &&
545 nl_attr_get_size(a) != expected_len) {
546 ds_put_format(ds, "bad length %"PRIuSIZE", expected %d for: ",
547 nl_attr_get_size(a), expected_len);
548 format_generic_odp_action(ds, a);
553 case OVS_ACTION_ATTR_OUTPUT:
554 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
556 case OVS_ACTION_ATTR_TUNNEL_POP:
557 ds_put_format(ds, "tnl_pop(%"PRIu32")", nl_attr_get_u32(a));
559 case OVS_ACTION_ATTR_TUNNEL_PUSH:
560 format_odp_tnl_push_action(ds, a);
562 case OVS_ACTION_ATTR_USERSPACE:
563 format_odp_userspace_action(ds, a);
565 case OVS_ACTION_ATTR_RECIRC:
566 format_odp_recirc_action(ds, nl_attr_get_u32(a));
568 case OVS_ACTION_ATTR_HASH:
569 format_odp_hash_action(ds, nl_attr_get(a));
/* SET_MASKED payload is key bytes followed by mask bytes of equal
 * size; reconstruct two separate nlattrs to reuse the key formatter. */
571 case OVS_ACTION_ATTR_SET_MASKED:
573 size = nl_attr_get_size(a) / 2;
574 ds_put_cstr(ds, "set(");
576 /* Masked set action not supported for tunnel key, which is bigger. */
577 if (size <= sizeof(struct ovs_key_ipv6)) {
/* Stack buffers sized for the largest supported (non-tunnel) key. */
578 struct nlattr attr[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
579 sizeof(struct nlattr))];
580 struct nlattr mask[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
581 sizeof(struct nlattr))];
583 mask->nla_type = attr->nla_type = nl_attr_type(a);
584 mask->nla_len = attr->nla_len = NLA_HDRLEN + size;
585 memcpy(attr + 1, (char *)(a + 1), size);
586 memcpy(mask + 1, (char *)(a + 1) + size, size);
587 format_odp_key_attr(attr, mask, NULL, ds, false);
589 format_odp_key_attr(a, NULL, NULL, ds, false);
591 ds_put_cstr(ds, ")");
593 case OVS_ACTION_ATTR_SET:
594 ds_put_cstr(ds, "set(");
595 format_odp_key_attr(nl_attr_get(a), NULL, NULL, ds, true);
596 ds_put_cstr(ds, ")");
598 case OVS_ACTION_ATTR_PUSH_VLAN:
599 vlan = nl_attr_get(a);
600 ds_put_cstr(ds, "push_vlan(");
/* Only print the TPID when it is not the default 802.1Q value. */
601 if (vlan->vlan_tpid != htons(ETH_TYPE_VLAN)) {
602 ds_put_format(ds, "tpid=0x%04"PRIx16",", ntohs(vlan->vlan_tpid));
604 format_vlan_tci(ds, vlan->vlan_tci, OVS_BE16_MAX, false);
605 ds_put_char(ds, ')');
607 case OVS_ACTION_ATTR_POP_VLAN:
608 ds_put_cstr(ds, "pop_vlan");
610 case OVS_ACTION_ATTR_PUSH_MPLS: {
611 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
612 ds_put_cstr(ds, "push_mpls(");
613 format_mpls_lse(ds, mpls->mpls_lse);
614 ds_put_format(ds, ",eth_type=0x%"PRIx16")", ntohs(mpls->mpls_ethertype));
617 case OVS_ACTION_ATTR_POP_MPLS: {
618 ovs_be16 ethertype = nl_attr_get_be16(a);
619 ds_put_format(ds, "pop_mpls(eth_type=0x%"PRIx16")", ntohs(ethertype));
622 case OVS_ACTION_ATTR_SAMPLE:
623 format_odp_sample_action(ds, a);
625 case OVS_ACTION_ATTR_UNSPEC:
626 case __OVS_ACTION_ATTR_MAX:
628 format_generic_odp_action(ds, a);
/* Formats a comma-separated list of datapath actions.  An empty list prints
 * "drop"; trailing bytes that do not form a whole attribute are dumped as
 * "***N leftover bytes*** (<hex>)". */
634 format_odp_actions(struct ds *ds, const struct nlattr *actions,
638 const struct nlattr *a;
641 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
643 ds_put_char(ds, ',');
645 format_odp_action(ds, a);
/* left == actions_len means the loop consumed nothing at all. */
650 if (left == actions_len) {
651 ds_put_cstr(ds, "<empty>");
653 ds_put_format(ds, ",***%u leftover bytes*** (", left);
654 for (i = 0; i < left; i++) {
655 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
657 ds_put_char(ds, ')');
660 ds_put_cstr(ds, "drop");
664 /* Separate out parse_odp_userspace_action() function. */
/* Parses the textual "userspace(pid=N,...)" form produced by
 * format_odp_userspace_action() back into a Netlink action appended to
 * 'actions'.  Recognizes the same cookie variants (sFlow, slow_path,
 * flow_sample, ipfix), raw "userdata(<hex>)", ",actions", and
 * ",tunnel_out_port=N".  Returns the number of characters consumed (error
 * paths are on elided lines). */
666 parse_odp_userspace_action(const char *s, struct ofpbuf *actions)
669 union user_action_cookie cookie;
671 odp_port_t tunnel_out_port;
673 void *user_data = NULL;
674 size_t user_data_size = 0;
675 bool include_actions = false;
677 if (!ovs_scan(s, "userspace(pid=%"SCNi32"%n", &pid, &n)) {
683 uint32_t probability;
684 uint32_t collector_set_id;
685 uint32_t obs_domain_id;
686 uint32_t obs_point_id;
689 if (ovs_scan(&s[n], ",sFlow(vid=%i,"
690 "pcp=%i,output=%"SCNi32")%n",
691 &vid, &pcp, &output, &n1)) {
/* Rebuild the TCI from its parsed vid/pcp components. */
695 tci = vid | (pcp << VLAN_PCP_SHIFT);
700 cookie.type = USER_ACTION_COOKIE_SFLOW;
701 cookie.sflow.vlan_tci = htons(tci);
702 cookie.sflow.output = output;
704 user_data_size = sizeof cookie.sflow;
705 } else if (ovs_scan(&s[n], ",slow_path(%n",
710 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
711 cookie.slow_path.unused = 0;
712 cookie.slow_path.reason = 0;
714 res = parse_odp_flags(&s[n], slow_path_reason_to_string,
715 &cookie.slow_path.reason,
716 SLOW_PATH_REASON_MASK, NULL);
717 if (res < 0 || s[n + res] != ')') {
723 user_data_size = sizeof cookie.slow_path;
724 } else if (ovs_scan(&s[n], ",flow_sample(probability=%"SCNi32","
725 "collector_set_id=%"SCNi32","
726 "obs_domain_id=%"SCNi32","
727 "obs_point_id=%"SCNi32")%n",
728 &probability, &collector_set_id,
729 &obs_domain_id, &obs_point_id, &n1)) {
732 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
733 cookie.flow_sample.probability = probability;
734 cookie.flow_sample.collector_set_id = collector_set_id;
735 cookie.flow_sample.obs_domain_id = obs_domain_id;
736 cookie.flow_sample.obs_point_id = obs_point_id;
738 user_data_size = sizeof cookie.flow_sample;
739 } else if (ovs_scan(&s[n], ",ipfix(output_port=%"SCNi32")%n",
742 cookie.type = USER_ACTION_COOKIE_IPFIX;
743 cookie.ipfix.output_odp_port = u32_to_odp(output);
745 user_data_size = sizeof cookie.ipfix;
746 } else if (ovs_scan(&s[n], ",userdata(%n",
/* Raw hex userdata: collected into a temporary ofpbuf. */
751 ofpbuf_init(&buf, 16);
752 end = ofpbuf_put_hex(&buf, &s[n], NULL);
756 user_data = buf.data;
757 user_data_size = buf.size;
764 if (ovs_scan(&s[n], ",actions%n", &n1)) {
766 include_actions = true;
772 if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n",
773 &tunnel_out_port, &n1)) {
774 odp_put_userspace_action(pid, user_data, user_data_size,
775 tunnel_out_port, include_actions, actions);
777 } else if (s[n] == ')') {
/* No egress tunnel port specified. */
778 odp_put_userspace_action(pid, user_data, user_data_size,
779 ODPP_NONE, include_actions, actions);
/* Parses the textual "tnl_push(...)" form (inverse of
 * format_odp_tnl_push_action()) into 'data', building the raw outer header
 * bytes in data->header for VXLAN, Geneve, or GRE.  Validates that the
 * tunnel type and header length scanned from the text match what was
 * reconstructed.  Returns characters consumed (error paths elided). */
788 ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
790 struct eth_header *eth;
791 struct ip_header *ip;
792 struct udp_header *udp;
793 struct gre_base_hdr *greh;
794 uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, csum;
796 uint32_t tnl_type = 0, header_len = 0;
800 if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
/* Lay out pointers into the header blob: Ethernet, then IPv4, then L4. */
803 eth = (struct eth_header *) data->header;
804 l3 = (data->header + sizeof *eth);
805 l4 = ((uint8_t *) l3 + sizeof (struct ip_header));
806 ip = (struct ip_header *) l3;
807 if (!ovs_scan_len(s, &n, "header(size=%"SCNi32",type=%"SCNi32","
808 "eth(dst="ETH_ADDR_SCAN_FMT",",
811 ETH_ADDR_SCAN_ARGS(eth->eth_dst))) {
815 if (!ovs_scan_len(s, &n, "src="ETH_ADDR_SCAN_FMT",",
816 ETH_ADDR_SCAN_ARGS(eth->eth_src))) {
819 if (!ovs_scan_len(s, &n, "dl_type=0x%"SCNx16"),", &dl_type)) {
822 eth->eth_type = htons(dl_type);
825 if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
826 ",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
829 &ip->ip_proto, &ip->ip_tos,
830 &ip->ip_ttl, &ip->ip_frag_off)) {
/* Addresses stored via 16-bit-aligned helpers; blob is 2-byte aligned. */
833 put_16aligned_be32(&ip->ip_src, sip);
834 put_16aligned_be32(&ip->ip_dst, dip);
/* L4 may be UDP (VXLAN/Geneve) or GRE; both aliases point at the same
 * offset and the scan below disambiguates. */
837 udp = (struct udp_header *) l4;
838 greh = (struct gre_base_hdr *) l4;
839 if (ovs_scan_len(s, &n, "udp(src=%"SCNi16",dst=%"SCNi16",csum=0x%"SCNx16"),",
840 &udp_src, &udp_dst, &csum)) {
841 uint32_t vx_flags, vni;
843 udp->udp_src = htons(udp_src);
844 udp->udp_dst = htons(udp_dst);
846 udp->udp_csum = htons(csum);
848 if (ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
850 struct vxlanhdr *vxh = (struct vxlanhdr *) (udp + 1);
/* VNI occupies the top 24 bits on the wire. */
852 put_16aligned_be32(&vxh->vx_flags, htonl(vx_flags));
853 put_16aligned_be32(&vxh->vx_vni, htonl(vni << 8));
854 tnl_type = OVS_VPORT_TYPE_VXLAN;
855 header_len = sizeof *eth + sizeof *ip +
856 sizeof *udp + sizeof *vxh;
857 } else if (ovs_scan_len(s, &n, "geneve(")) {
858 struct genevehdr *gnh = (struct genevehdr *) (udp + 1);
860 memset(gnh, 0, sizeof *gnh);
861 header_len = sizeof *eth + sizeof *ip +
862 sizeof *udp + sizeof *gnh;
/* Optional flags in the same order the formatter emits them. */
864 if (ovs_scan_len(s, &n, "oam,")) {
867 if (ovs_scan_len(s, &n, "crit,")) {
870 if (!ovs_scan_len(s, &n, "vni=%"SCNi32, &vni)) {
873 if (ovs_scan_len(s, &n, ",options(")) {
874 struct geneve_scan options;
877 memset(&options, 0, sizeof options);
878 len = scan_geneve(s + n, &options, NULL);
883 memcpy(gnh->options, options.d, options.len);
/* opt_len is expressed in 4-byte words. */
884 gnh->opt_len = options.len / 4;
885 header_len += options.len;
889 if (!ovs_scan_len(s, &n, "))")) {
893 gnh->proto_type = htons(ETH_TYPE_TEB);
894 put_16aligned_be32(&gnh->vni, htonl(vni << 8));
895 tnl_type = OVS_VPORT_TYPE_GENEVE;
/* GRE: matches the "gre((flags=..,proto=..)" syntax the formatter
 * emits (note the doubled opening parenthesis). */
899 } else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
900 &gre_flags, &gre_proto)){
902 tnl_type = OVS_VPORT_TYPE_GRE;
903 greh->flags = htons(gre_flags);
904 greh->protocol = htons(gre_proto);
/* Optional fields appended in CSUM, KEY, SEQ order; 'options'
 * advances past each (increments on elided lines). */
905 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
907 if (greh->flags & htons(GRE_CSUM)) {
908 if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx16, &csum)) {
912 memset(options, 0, sizeof *options);
913 *((ovs_be16 *)options) = htons(csum);
916 if (greh->flags & htons(GRE_KEY)) {
919 if (!ovs_scan_len(s, &n, ",key=0x%"SCNx32, &key)) {
923 put_16aligned_be32(options, htonl(key));
926 if (greh->flags & htons(GRE_SEQ)) {
929 if (!ovs_scan_len(s, &n, ",seq=0x%"SCNx32, &seq)) {
932 put_16aligned_be32(options, htonl(seq));
936 if (!ovs_scan_len(s, &n, "))")) {
940 header_len = sizeof *eth + sizeof *ip +
941 ((uint8_t *) options - (uint8_t *) greh);
946 /* check tunnel meta data. */
/* The size/type scanned from "header(size=..,type=..)" must agree with
 * what was actually reconstructed above. */
947 if (data->tnl_type != tnl_type) {
950 if (data->header_len != header_len) {
955 if (!ovs_scan_len(s, &n, ",out_port(%"SCNi32"))", &data->out_port)) {
/* Parses one textual datapath action from 's' and appends the Netlink
 * encoding to 'actions'.  'port_names' (may be used for symbolic output
 * ports) maps names to port numbers.  Tries each syntax in turn: numeric
 * output, named output, recirc, userspace, set/set-masked, push_vlan,
 * pop_vlan, sample, tnl_pop, tnl_push.  Returns characters consumed
 * (negative-errno returns are on elided lines). */
963 parse_odp_action(const char *s, const struct simap *port_names,
964 struct ofpbuf *actions)
970 if (ovs_scan(s, "%"SCNi32"%n", &port, &n)) {
971 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, port);
/* Symbolic port name lookup, bounded by the next delimiter. */
977 int len = strcspn(s, delimiters);
978 struct simap_node *node;
980 node = simap_find_len(port_names, s, len);
982 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, node->data);
991 if (ovs_scan(s, "recirc(%"PRIu32")%n", &recirc_id, &n)) {
992 nl_msg_put_u32(actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
997 if (!strncmp(s, "userspace(", 10)) {
998 return parse_odp_userspace_action(s, actions);
1001 if (!strncmp(s, "set(", 4)) {
1004 struct nlattr mask[128 / sizeof(struct nlattr)];
1005 struct ofpbuf maskbuf;
1006 struct nlattr *nested, *key;
1009 /* 'mask' is big enough to hold any key. */
1010 ofpbuf_use_stack(&maskbuf, mask, sizeof mask);
1012 start_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SET);
1013 retval = parse_odp_key_mask_attr(s + 4, port_names, actions, &maskbuf);
1017 if (s[retval + 4] != ')') {
1021 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
1024 size = nl_attr_get_size(mask);
1025 if (size == nl_attr_get_size(key)) {
1026 /* Change to masked set action if not fully masked. */
1027 if (!is_all_ones(mask + 1, size)) {
/* Append mask bytes after the key and retag as SET_MASKED. */
1028 key->nla_len += size;
1029 ofpbuf_put(actions, mask + 1, size);
1030 /* 'actions' may have been reallocated by ofpbuf_put(). */
1031 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
1032 nested->nla_type = OVS_ACTION_ATTR_SET_MASKED;
1036 nl_msg_end_nested(actions, start_ofs);
1041 struct ovs_action_push_vlan push;
/* TPID defaults to 802.1Q and CFI to set unless the text overrides. */
1042 int tpid = ETH_TYPE_VLAN;
1047 if (ovs_scan(s, "push_vlan(vid=%i,pcp=%i)%n", &vid, &pcp, &n)
1048 || ovs_scan(s, "push_vlan(vid=%i,pcp=%i,cfi=%i)%n",
1049 &vid, &pcp, &cfi, &n)
1050 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i)%n",
1051 &tpid, &vid, &pcp, &n)
1052 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i,cfi=%i)%n",
1053 &tpid, &vid, &pcp, &cfi, &n)) {
1054 push.vlan_tpid = htons(tpid);
1055 push.vlan_tci = htons((vid << VLAN_VID_SHIFT)
1056 | (pcp << VLAN_PCP_SHIFT)
1057 | (cfi ? VLAN_CFI : 0));
1058 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_VLAN,
1059 &push, sizeof push);
1065 if (!strncmp(s, "pop_vlan", 8)) {
1066 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_VLAN);
1074 if (ovs_scan(s, "sample(sample=%lf%%,actions(%n", &percentage, &n)
1075 && percentage >= 0. && percentage <= 100.0) {
1076 size_t sample_ofs, actions_ofs;
/* Convert the percentage back into a fraction of UINT32_MAX,
 * rounding to nearest and clamping to the valid range. */
1079 probability = floor(UINT32_MAX * (percentage / 100.0) + .5);
1080 sample_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SAMPLE);
1081 nl_msg_put_u32(actions, OVS_SAMPLE_ATTR_PROBABILITY,
1082 (probability <= 0 ? 0
1083 : probability >= UINT32_MAX ? UINT32_MAX
1086 actions_ofs = nl_msg_start_nested(actions,
1087 OVS_SAMPLE_ATTR_ACTIONS);
/* Recursively parse the nested action list (loop structure partly
 * elided in this listing). */
1091 n += strspn(s + n, delimiters);
1096 retval = parse_odp_action(s + n, port_names, actions);
1102 nl_msg_end_nested(actions, actions_ofs);
1103 nl_msg_end_nested(actions, sample_ofs);
1105 return s[n + 1] == ')' ? n + 2 : -EINVAL;
1113 if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
1114 nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
1120 struct ovs_action_push_tnl data;
1123 n = ovs_parse_tnl_push(s, &data);
1125 odp_put_tnl_push_action(actions, &data);
1134 /* Parses the string representation of datapath actions, in the format output
1135 * by format_odp_action(). Returns 0 if successful, otherwise a positive errno
1136 * value. On success, the ODP actions are appended to 'actions' as a series of
1137 * Netlink attributes. On failure, no data is appended to 'actions'. Either
1138 * way, 'actions''s data might be reallocated. */
1140 odp_actions_from_string(const char *s, const struct simap *port_names,
1141 struct ofpbuf *actions)
/* "drop" parses to an empty action list. */
1145 if (!strcasecmp(s, "drop")) {
/* Remember the starting size so partial output can be rolled back. */
1149 old_size = actions->size;
1153 s += strspn(s, delimiters);
1158 retval = parse_odp_action(s, port_names, actions);
/* On error, truncate back to the original size (all-or-nothing). */
1159 if (retval < 0 || !strchr(delimiters, s[retval])) {
1160 actions->size = old_size;
/* Payload lengths for nested VXLAN extension attributes (GBP is a u32). */
1169 static const struct attr_len_tbl ovs_vxlan_ext_attr_lens[OVS_VXLAN_EXT_MAX + 1] = {
1170 [OVS_VXLAN_EXT_GBP] = { .len = 4 },
/* Payload lengths for OVS_TUNNEL_KEY_ATTR_* attributes.  Zero-length entries
 * are flag attributes; GENEVE_OPTS is variable-length and VXLAN_OPTS nests
 * into ovs_vxlan_ext_attr_lens above. */
1173 static const struct attr_len_tbl ovs_tun_key_attr_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
1174 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = 8 },
1175 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = 4 },
1176 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = 4 },
1177 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
1178 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
1179 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
1180 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
1181 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = 2 },
1182 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = 2 },
1183 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
1184 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = ATTR_LEN_VARIABLE },
1185 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = ATTR_LEN_NESTED,
1186 .next = ovs_vxlan_ext_attr_lens ,
1187 .next_max = OVS_VXLAN_EXT_MAX},
/* Payload lengths for top-level OVS_KEY_ATTR_* flow-key attributes.  TUNNEL
 * nests into ovs_tun_key_attr_lens above; MPLS is variable (a stack of
 * LSEs). */
1190 static const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
1191 [OVS_KEY_ATTR_ENCAP] = { .len = ATTR_LEN_NESTED },
1192 [OVS_KEY_ATTR_PRIORITY] = { .len = 4 },
1193 [OVS_KEY_ATTR_SKB_MARK] = { .len = 4 },
1194 [OVS_KEY_ATTR_DP_HASH] = { .len = 4 },
1195 [OVS_KEY_ATTR_RECIRC_ID] = { .len = 4 },
1196 [OVS_KEY_ATTR_TUNNEL] = { .len = ATTR_LEN_NESTED,
1197 .next = ovs_tun_key_attr_lens,
1198 .next_max = OVS_TUNNEL_KEY_ATTR_MAX },
1199 [OVS_KEY_ATTR_IN_PORT] = { .len = 4 },
1200 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
1201 [OVS_KEY_ATTR_VLAN] = { .len = 2 },
1202 [OVS_KEY_ATTR_ETHERTYPE] = { .len = 2 },
1203 [OVS_KEY_ATTR_MPLS] = { .len = ATTR_LEN_VARIABLE },
1204 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
1205 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
1206 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
1207 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = 2 },
1208 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
1209 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
1210 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
1211 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
1212 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
1213 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
1216 /* Returns the correct length of the payload for a flow key attribute of the
1217 * specified 'type', ATTR_LEN_INVALID if 'type' is unknown, ATTR_LEN_VARIABLE
1218 * if the attribute's payload is variable length, or ATTR_LEN_NESTED if the
1219 * payload is a nested type. */
1221 odp_key_attr_len(const struct attr_len_tbl tbl[], int max_len, uint16_t type)
1223 if (type > max_len) {
1224 return ATTR_LEN_INVALID;
1227 return tbl[type].len;
/* Appends the payload of key attribute 'a' to 'ds' as space-separated hex
 * bytes; used for key types with no dedicated formatter. */
1231 format_generic_odp_key(const struct nlattr *a, struct ds *ds)
1233 size_t len = nl_attr_get_size(a);
1235 const uint8_t *unspec;
1238 unspec = nl_attr_get(a);
1239 for (i = 0; i < len; i++) {
1241 ds_put_char(ds, ' ');
1243 ds_put_format(ds, "%02x", unspec[i]);
/* Maps an ovs_frag_type value to its string name (return values for each
 * case are on elided lines of this listing). */
1249 ovs_frag_type_to_string(enum ovs_frag_type type)
1252 case OVS_FRAG_TYPE_NONE:
1254 case OVS_FRAG_TYPE_FIRST:
1256 case OVS_FRAG_TYPE_LATER:
1258 case __OVS_FRAG_TYPE_MAX:
1264 static enum odp_key_fitness
/* Converts a nested OVS_KEY_ATTR_TUNNEL attribute into 'tun', validating
 * each sub-attribute's length against ovs_tun_key_attr_lens.  Returns
 * ODP_FIT_ERROR on malformed input, ODP_FIT_TOO_MUCH when unknown tunnel
 * sub-attributes were seen, ODP_FIT_PERFECT otherwise.  'flow_attrs'/
 * 'flow_attr_len'/'src_tun' feed Geneve option translation. */
1265 odp_tun_key_from_attr__(const struct nlattr *attr,
1266 const struct nlattr *flow_attrs, size_t flow_attr_len,
1267 const struct flow_tnl *src_tun, struct flow_tnl *tun,
1271 const struct nlattr *a;
1273 bool unknown = false;
1275 NL_NESTED_FOR_EACH(a, left, attr) {
1276 uint16_t type = nl_attr_type(a);
1277 size_t len = nl_attr_get_size(a);
/* NOTE(review): the bound passed here is OVS_TUNNEL_ATTR_MAX, but the
 * table is declared with OVS_TUNNEL_KEY_ATTR_MAX + 1 entries — confirm
 * these constants are equal, otherwise a type between the two bounds
 * could index past the table. */
1278 int expected_len = odp_key_attr_len(ovs_tun_key_attr_lens,
1279 OVS_TUNNEL_ATTR_MAX, type);
/* expected_len < 0 means variable/nested/invalid; skip the check. */
1281 if (len != expected_len && expected_len >= 0) {
1282 return ODP_FIT_ERROR;
1286 case OVS_TUNNEL_KEY_ATTR_ID:
1287 tun->tun_id = nl_attr_get_be64(a);
1288 tun->flags |= FLOW_TNL_F_KEY;
1290 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
1291 tun->ip_src = nl_attr_get_be32(a);
1293 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
1294 tun->ip_dst = nl_attr_get_be32(a);
1296 case OVS_TUNNEL_KEY_ATTR_TOS:
1297 tun->ip_tos = nl_attr_get_u8(a);
1299 case OVS_TUNNEL_KEY_ATTR_TTL:
1300 tun->ip_ttl = nl_attr_get_u8(a);
1303 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
1304 tun->flags |= FLOW_TNL_F_DONT_FRAGMENT;
1306 case OVS_TUNNEL_KEY_ATTR_CSUM:
1307 tun->flags |= FLOW_TNL_F_CSUM;
1309 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
1310 tun->tp_src = nl_attr_get_be16(a);
1312 case OVS_TUNNEL_KEY_ATTR_TP_DST:
1313 tun->tp_dst = nl_attr_get_be16(a);
1315 case OVS_TUNNEL_KEY_ATTR_OAM:
1316 tun->flags |= FLOW_TNL_F_OAM;
1318 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: {
1319 static const struct nl_policy vxlan_opts_policy[] = {
1320 [OVS_VXLAN_EXT_GBP] = { .type = NL_A_U32 },
1322 struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
1324 if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
1325 return ODP_FIT_ERROR;
1328 if (ext[OVS_VXLAN_EXT_GBP]) {
1329 uint32_t gbp = nl_attr_get_u32(ext[OVS_VXLAN_EXT_GBP]);
/* GBP packs the policy ID in the low 16 bits and the flags in
 * bits 16-23. */
1331 tun->gbp_id = htons(gbp & 0xFFFF);
1332 tun->gbp_flags = (gbp >> 16) & 0xFF;
1337 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
1338 if (tun_metadata_from_geneve_nlattr(a, flow_attrs, flow_attr_len,
1339 src_tun, udpif, tun)) {
1340 return ODP_FIT_ERROR;
1345 /* Allow this to show up as unexpected, if there are unknown
1346 * tunnel attribute, eventually resulting in ODP_FIT_TOO_MUCH. */
1353 return ODP_FIT_ERROR;
1356 return ODP_FIT_TOO_MUCH;
1358 return ODP_FIT_PERFECT;
1361 enum odp_key_fitness
1362 odp_tun_key_from_attr(const struct nlattr *attr, bool udpif,
1363 struct flow_tnl *tun)
1365 memset(tun, 0, sizeof *tun);
1366 return odp_tun_key_from_attr__(attr, NULL, 0, NULL, tun, udpif);
/* Serializes 'tun_key' into 'a' as a nested OVS_KEY_ATTR_TUNNEL attribute.
 * Zero-valued fields are omitted (except TTL, always emitted), so this works
 * for both keys and masks.  'tun_flow_key' and 'key_buf' feed the Geneve
 * metadata serialization at the end. */
1370 tun_key_to_attr(struct ofpbuf *a, const struct flow_tnl *tun_key,
1371                 const struct flow_tnl *tun_flow_key,
1372                 const struct ofpbuf *key_buf)
1376     tun_key_ofs = nl_msg_start_nested(a, OVS_KEY_ATTR_TUNNEL);
1378     /* tun_id != 0 without FLOW_TNL_F_KEY is valid if tun_key is a mask. */
1379     if (tun_key->tun_id || tun_key->flags & FLOW_TNL_F_KEY) {
1380         nl_msg_put_be64(a, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id);
1382     if (tun_key->ip_src) {
1383         nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ip_src);
1385     if (tun_key->ip_dst) {
1386         nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ip_dst);
1388     if (tun_key->ip_tos) {
1389         nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ip_tos);
/* TTL is emitted unconditionally: a zero TTL is meaningful. */
1391     nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ip_ttl);
1392     if (tun_key->flags & FLOW_TNL_F_DONT_FRAGMENT) {
1393         nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
1395     if (tun_key->flags & FLOW_TNL_F_CSUM) {
1396         nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
1398     if (tun_key->tp_src) {
1399         nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src);
1401     if (tun_key->tp_dst) {
1402         nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst);
1404     if (tun_key->flags & FLOW_TNL_F_OAM) {
1405         nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
/* GBP id/flags are packed into one u32 inside a nested VXLAN_OPTS attr,
 * mirroring the unpacking in odp_tun_key_from_attr__(). */
1407     if (tun_key->gbp_flags || tun_key->gbp_id) {
1408         size_t vxlan_opts_ofs;
1410         vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
1411         nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP,
1412                        (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
1413         nl_msg_end_nested(a, vxlan_opts_ofs);
1415     tun_metadata_to_geneve_nlattr(tun_key, tun_flow_key, key_buf, a);
1417     nl_msg_end_nested(a, tun_key_ofs);
/* Returns true if mask attribute 'ma' is all-zero bytes, i.e. it wildcards
 * the whole field. */
1421 odp_mask_attr_is_wildcard(const struct nlattr *ma)
1423     return is_all_zeros(nl_attr_get(ma), nl_attr_get_size(ma));
/* Returns true if the 'size'-byte mask in 'mask' is an exact match for
 * attribute type 'attr'.  Some attributes need special treatment because
 * not every bit of the struct is significant. */
1427 odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
/* Only the defined TCP flag bits matter, not the full 16-bit field. */
1429     if (attr == OVS_KEY_ATTR_TCP_FLAGS) {
1430         return TCP_FLAGS(*(ovs_be16 *)mask) == TCP_FLAGS(OVS_BE16_MAX);
/* IPv6: the flow label is only 20 bits wide, so compare it under
 * IPV6_LABEL_MASK rather than requiring all 32 bits set. */
1432     if (attr == OVS_KEY_ATTR_IPV6) {
1433         const struct ovs_key_ipv6 *ipv6_mask = mask;
1436             ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
1437              == htonl(IPV6_LABEL_MASK))
1438             && ipv6_mask->ipv6_proto == UINT8_MAX
1439             && ipv6_mask->ipv6_tclass == UINT8_MAX
1440             && ipv6_mask->ipv6_hlimit == UINT8_MAX
1441             && ipv6_mask->ipv6_frag == UINT8_MAX
1442             && ipv6_mask_is_exact((const struct in6_addr *)ipv6_mask->ipv6_src)
1443             && ipv6_mask_is_exact((const struct in6_addr *)ipv6_mask->ipv6_dst);
1445     if (attr == OVS_KEY_ATTR_TUNNEL) {
1449     if (attr == OVS_KEY_ATTR_ARP) {
1450         /* ARP key has padding, ignore it. */
1451         BUILD_ASSERT_DECL(sizeof(struct ovs_key_arp) == 24);
1452         BUILD_ASSERT_DECL(offsetof(struct ovs_key_arp, arp_tha) == 10 + 6);
1453         size = offsetof(struct ovs_key_arp, arp_tha) + ETH_ADDR_LEN;
/* The padding word after arp_tha must already be zero in a valid mask. */
1454         ovs_assert(((uint16_t *)mask)[size/2] == 0);
/* Default: exact means every byte of the mask is 0xff. */
1457     return is_all_ones(mask, size);
1461 odp_mask_attr_is_exact(const struct nlattr *ma)
1463 enum ovs_key_attr attr = nl_attr_type(ma);
1467 if (attr == OVS_KEY_ATTR_TUNNEL) {
1470 mask = nl_attr_get(ma);
1471 size = nl_attr_get_size(ma);
1474 return odp_mask_is_exact(attr, mask, size);
1478 odp_portno_names_set(struct hmap *portno_names, odp_port_t port_no,
1481 struct odp_portno_names *odp_portno_names;
1483 odp_portno_names = xmalloc(sizeof *odp_portno_names);
1484 odp_portno_names->port_no = port_no;
1485 odp_portno_names->name = xstrdup(port_name);
1486 hmap_insert(portno_names, &odp_portno_names->hmap_node,
1487 hash_odp_port(port_no));
1491 odp_portno_names_get(const struct hmap *portno_names, odp_port_t port_no)
1493 struct odp_portno_names *odp_portno_names;
1495 HMAP_FOR_EACH_IN_BUCKET (odp_portno_names, hmap_node,
1496 hash_odp_port(port_no), portno_names) {
1497 if (odp_portno_names->port_no == port_no) {
1498 return odp_portno_names->name;
1505 odp_portno_names_destroy(struct hmap *portno_names)
1507 struct odp_portno_names *odp_portno_names, *odp_portno_names_next;
1508 HMAP_FOR_EACH_SAFE (odp_portno_names, odp_portno_names_next,
1509 hmap_node, portno_names) {
1510 hmap_remove(portno_names, &odp_portno_names->hmap_node);
1511 free(odp_portno_names->name);
1512 free(odp_portno_names);
1516 /* Format helpers. */
1519 format_eth(struct ds *ds, const char *name, const struct eth_addr key,
1520 const struct eth_addr *mask, bool verbose)
1522 bool mask_empty = mask && eth_addr_is_zero(*mask);
1524 if (verbose || !mask_empty) {
1525 bool mask_full = !mask || eth_mask_is_exact(*mask);
1528 ds_put_format(ds, "%s="ETH_ADDR_FMT",", name, ETH_ADDR_ARGS(key));
1530 ds_put_format(ds, "%s=", name);
1531 eth_format_masked(key, mask, ds);
1532 ds_put_char(ds, ',');
1538 format_be64(struct ds *ds, const char *name, ovs_be64 key,
1539 const ovs_be64 *mask, bool verbose)
1541 bool mask_empty = mask && !*mask;
1543 if (verbose || !mask_empty) {
1544 bool mask_full = !mask || *mask == OVS_BE64_MAX;
1546 ds_put_format(ds, "%s=0x%"PRIx64, name, ntohll(key));
1547 if (!mask_full) { /* Partially masked. */
1548 ds_put_format(ds, "/%#"PRIx64, ntohll(*mask));
1550 ds_put_char(ds, ',');
1555 format_ipv4(struct ds *ds, const char *name, ovs_be32 key,
1556 const ovs_be32 *mask, bool verbose)
1558 bool mask_empty = mask && !*mask;
1560 if (verbose || !mask_empty) {
1561 bool mask_full = !mask || *mask == OVS_BE32_MAX;
1563 ds_put_format(ds, "%s="IP_FMT, name, IP_ARGS(key));
1564 if (!mask_full) { /* Partially masked. */
1565 ds_put_format(ds, "/"IP_FMT, IP_ARGS(*mask));
1567 ds_put_char(ds, ',');
1572 format_ipv6(struct ds *ds, const char *name, const ovs_be32 key_[4],
1573 const ovs_be32 (*mask_)[4], bool verbose)
1575 char buf[INET6_ADDRSTRLEN];
1576 const struct in6_addr *key = (const struct in6_addr *)key_;
1577 const struct in6_addr *mask = mask_ ? (const struct in6_addr *)*mask_
1579 bool mask_empty = mask && ipv6_mask_is_any(mask);
1581 if (verbose || !mask_empty) {
1582 bool mask_full = !mask || ipv6_mask_is_exact(mask);
1584 inet_ntop(AF_INET6, key, buf, sizeof buf);
1585 ds_put_format(ds, "%s=%s", name, buf);
1586 if (!mask_full) { /* Partially masked. */
1587 inet_ntop(AF_INET6, mask, buf, sizeof buf);
1588 ds_put_format(ds, "/%s", buf);
1590 ds_put_char(ds, ',');
1595 format_ipv6_label(struct ds *ds, const char *name, ovs_be32 key,
1596 const ovs_be32 *mask, bool verbose)
1598 bool mask_empty = mask && !*mask;
1600 if (verbose || !mask_empty) {
1601 bool mask_full = !mask
1602 || (*mask & htonl(IPV6_LABEL_MASK)) == htonl(IPV6_LABEL_MASK);
1604 ds_put_format(ds, "%s=%#"PRIx32, name, ntohl(key));
1605 if (!mask_full) { /* Partially masked. */
1606 ds_put_format(ds, "/%#"PRIx32, ntohl(*mask));
1608 ds_put_char(ds, ',');
1613 format_u8x(struct ds *ds, const char *name, uint8_t key,
1614 const uint8_t *mask, bool verbose)
1616 bool mask_empty = mask && !*mask;
1618 if (verbose || !mask_empty) {
1619 bool mask_full = !mask || *mask == UINT8_MAX;
1621 ds_put_format(ds, "%s=%#"PRIx8, name, key);
1622 if (!mask_full) { /* Partially masked. */
1623 ds_put_format(ds, "/%#"PRIx8, *mask);
1625 ds_put_char(ds, ',');
1630 format_u8u(struct ds *ds, const char *name, uint8_t key,
1631 const uint8_t *mask, bool verbose)
1633 bool mask_empty = mask && !*mask;
1635 if (verbose || !mask_empty) {
1636 bool mask_full = !mask || *mask == UINT8_MAX;
1638 ds_put_format(ds, "%s=%"PRIu8, name, key);
1639 if (!mask_full) { /* Partially masked. */
1640 ds_put_format(ds, "/%#"PRIx8, *mask);
1642 ds_put_char(ds, ',');
1647 format_be16(struct ds *ds, const char *name, ovs_be16 key,
1648 const ovs_be16 *mask, bool verbose)
1650 bool mask_empty = mask && !*mask;
1652 if (verbose || !mask_empty) {
1653 bool mask_full = !mask || *mask == OVS_BE16_MAX;
1655 ds_put_format(ds, "%s=%"PRIu16, name, ntohs(key));
1656 if (!mask_full) { /* Partially masked. */
1657 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
1659 ds_put_char(ds, ',');
1664 format_be16x(struct ds *ds, const char *name, ovs_be16 key,
1665 const ovs_be16 *mask, bool verbose)
1667 bool mask_empty = mask && !*mask;
1669 if (verbose || !mask_empty) {
1670 bool mask_full = !mask || *mask == OVS_BE16_MAX;
1672 ds_put_format(ds, "%s=%#"PRIx16, name, ntohs(key));
1673 if (!mask_full) { /* Partially masked. */
1674 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
1676 ds_put_char(ds, ',');
/* Appends "name(flag1|flag2...)," for the tunnel flag bits in 'key' under
 * 'mask'.  Suppressed when the mask is all-zero and 'verbose' is false. */
1681 format_tun_flags(struct ds *ds, const char *name, uint16_t key,
1682                  const uint16_t *mask, bool verbose)
1684     bool mask_empty = mask && !*mask;
1686     if (verbose || !mask_empty) {
1687         ds_put_cstr(ds, name);
1688         ds_put_char(ds, '(');
/* NOTE(review): the branch condition on the next line was lost in this
 * extract; presumably it tests for a partial mask — confirm upstream. */
1690             format_flags_masked(ds, NULL, flow_tun_flag_to_string, key,
1691                                 *mask & FLOW_TNL_F_MASK, FLOW_TNL_F_MASK);
1692         } else { /* Fully masked. */
1693             format_flags(ds, flow_tun_flag_to_string, key, '|');
1695         ds_put_cstr(ds, "),");
/* Validates the payload lengths of key attribute 'a' and optional mask
 * attribute 'ma' against the expected fixed length from 'tbl'.  On a length
 * mismatch it formats a diagnostic plus a hex dump into 'ds' (prefixed with
 * "keyN" when 'need_key' is set) and reports the failure to the caller so
 * normal formatting is skipped. */
1700 check_attr_len(struct ds *ds, const struct nlattr *a, const struct nlattr *ma,
1701                const struct attr_len_tbl tbl[], int max_len, bool need_key)
1705     expected_len = odp_key_attr_len(tbl, max_len, nl_attr_type(a));
/* Variable-length and nested attributes cannot be length-checked here. */
1706     if (expected_len != ATTR_LEN_VARIABLE &&
1707         expected_len != ATTR_LEN_NESTED) {
1709         bool bad_key_len = nl_attr_get_size(a) != expected_len;
1710         bool bad_mask_len = ma && nl_attr_get_size(ma) != expected_len;
1712         if (bad_key_len || bad_mask_len) {
1714             ds_put_format(ds, "key%u", nl_attr_type(a));
1717                 ds_put_format(ds, "(bad key length %"PRIuSIZE", expected %d)(",
1718                               nl_attr_get_size(a), expected_len);
1720             format_generic_odp_key(a, ds);
1722                 ds_put_char(ds, '/');
1724                 ds_put_format(ds, "(bad mask length %"PRIuSIZE", expected %d)(",
1725                               nl_attr_get_size(ma), expected_len);
1727                 format_generic_odp_key(ma, ds);
1729             ds_put_char(ds, ')');
/* Appends "keyN(<hex>[/<hex>])," for an attribute of unknown type, dumping
 * the raw payload (and mask payload when it is not an exact match). */
static void
format_unknown_key(struct ds *ds, const struct nlattr *a,
                   const struct nlattr *ma)
{
    bool partial_mask = ma && !odp_mask_attr_is_exact(ma);

    ds_put_format(ds, "key%u(", nl_attr_type(a));
    format_generic_odp_key(a, ds);
    if (partial_mask) {
        ds_put_char(ds, '/');
        format_generic_odp_key(ma, ds);
    }
    ds_put_cstr(ds, "),");
}
/* Formats the nested OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS attribute 'attr' (with
 * optional mask 'mask_attr') into 'ds'.  When no mask is supplied, a
 * synthetic all-wildcard mask is generated in scratch buffer 'ofp'. */
1751 format_odp_tun_vxlan_opt(const struct nlattr *attr,
1752                          const struct nlattr *mask_attr, struct ds *ds,
1756     const struct nlattr *a;
1759     ofpbuf_init(&ofp, 100);
1760     NL_NESTED_FOR_EACH(a, left, attr) {
1761         uint16_t type = nl_attr_type(a);
1762         const struct nlattr *ma = NULL;
/* Prefer the real mask attribute; otherwise synthesize an all-zero one
 * so the formatting helpers can treat both cases uniformly. */
1765             ma = nl_attr_find__(nl_attr_get(mask_attr),
1766                                 nl_attr_get_size(mask_attr), type);
1768             ma = generate_all_wildcard_mask(ovs_vxlan_ext_attr_lens,
1774         if (!check_attr_len(ds, a, ma, ovs_vxlan_ext_attr_lens,
1775                             OVS_VXLAN_EXT_MAX, true)) {
1780         case OVS_VXLAN_EXT_GBP: {
1781             uint32_t key = nl_attr_get_u32(a);
1782             ovs_be16 id, id_mask;
1783             uint8_t flags, flags_mask;
/* GBP id lives in the low 16 bits, flags in bits 16..23. */
1785             id = htons(key & 0xFFFF);
1786             flags = (key >> 16) & 0xFF;
1788                 uint32_t mask = nl_attr_get_u32(ma);
1789                 id_mask = htons(mask & 0xFFFF);
1790                 flags_mask = (mask >> 16) & 0xFF;
1793             ds_put_cstr(ds, "gbp(");
1794             format_be16(ds, "id", id, ma ? &id_mask : NULL, verbose);
1795             format_u8x(ds, "flags", flags, ma ? &flags_mask : NULL, verbose);
1797             ds_put_cstr(ds, "),");
1802             format_unknown_key(ds, a, ma);
1808     ofpbuf_uninit(&ofp);
/* Yields a pointer to FIELD inside *PTR, or NULL when PTR itself is null.
 * Used to pass optional per-field masks to the format_*() helpers.
 * Arguments and the expansion are fully parenthesized so the macro composes
 * safely inside larger expressions (the original unparenthesized ternary
 * mis-parses when the invocation is an operand of ==, &&, etc.). */
#define MASK(PTR, FIELD) ((PTR) ? &(PTR)->FIELD : NULL)
/* Formats 'opts_len' bytes of Geneve options starting at 'opt' (with a
 * parallel 'mask' array, or NULL) into 'ds' as "{class=..,type=..,len=..}"
 * entries.  Emits an error string and stops on malformed lengths. */
1814 format_geneve_opts(const struct geneve_opt *opt,
1815                    const struct geneve_opt *mask, int opts_len,
1816                    struct ds *ds, bool verbose)
1818     while (opts_len > 0) {
1820         uint8_t data_len, data_len_mask;
1822         if (opts_len < sizeof *opt) {
1823             ds_put_format(ds, "opt len %u less than minimum %"PRIuSIZE,
1824                           opts_len, sizeof *opt);
/* 'length' counts 4-byte words of option payload. */
1828         data_len = opt->length * 4;
/* A fully-set 5-bit length field is treated as an exact length mask. */
1830             if (mask->length == 0x1f) {
1831                 data_len_mask = UINT8_MAX;
1833                 data_len_mask = mask->length;
1836         len = sizeof *opt + data_len;
1837         if (len > opts_len) {
1838             ds_put_format(ds, "opt len %u greater than remaining %u",
1843         ds_put_char(ds, '{');
1844         format_be16x(ds, "class", opt->opt_class, MASK(mask, opt_class),
1846         format_u8x(ds, "type", opt->type, MASK(mask, type), verbose);
1847         format_u8u(ds, "len", data_len, mask ? &data_len_mask : NULL, verbose);
/* The option payload follows the header immediately (opt + 1). */
1849             (verbose || !mask || !is_all_zeros(mask + 1, data_len))) {
1850             ds_put_hex(ds, opt + 1, data_len);
1851             if (mask && !is_all_ones(mask + 1, data_len)) {
1852                 ds_put_char(ds, '/');
1853                 ds_put_hex(ds, mask + 1, data_len);
1858         ds_put_char(ds, '}');
/* Advance key and mask cursors by whole geneve_opt units. */
1860         opt += len / sizeof(*opt);
1862             mask += len / sizeof(*opt);
1869 format_odp_tun_geneve(const struct nlattr *attr,
1870 const struct nlattr *mask_attr, struct ds *ds,
1873 int opts_len = nl_attr_get_size(attr);
1874 const struct geneve_opt *opt = nl_attr_get(attr);
1875 const struct geneve_opt *mask = mask_attr ?
1876 nl_attr_get(mask_attr) : NULL;
1878 if (mask && nl_attr_get_size(attr) != nl_attr_get_size(mask_attr)) {
1879 ds_put_format(ds, "value len %"PRIuSIZE" different from mask len %"PRIuSIZE,
1880 nl_attr_get_size(attr), nl_attr_get_size(mask_attr));
1884 format_geneve_opts(opt, mask, opts_len, ds, verbose);
/* Formats the nested OVS_KEY_ATTR_TUNNEL attribute 'attr' (with optional
 * mask 'mask_attr') into 'ds'.  Flag-type sub-attributes are accumulated
 * and printed once at the end via format_tun_flags(). */
1888 format_odp_tun_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
1889                     struct ds *ds, bool verbose)
1892     const struct nlattr *a;
1894     uint16_t mask_flags = 0;
1897     ofpbuf_init(&ofp, 100);
1898     NL_NESTED_FOR_EACH(a, left, attr) {
1899         enum ovs_tunnel_key_attr type = nl_attr_type(a);
1900         const struct nlattr *ma = NULL;
/* Use the real mask when present, else synthesize an all-wildcard one. */
1903             ma = nl_attr_find__(nl_attr_get(mask_attr),
1904                                 nl_attr_get_size(mask_attr), type);
1906             ma = generate_all_wildcard_mask(ovs_tun_key_attr_lens,
1907                                             OVS_TUNNEL_KEY_ATTR_MAX,
1912         if (!check_attr_len(ds, a, ma, ovs_tun_key_attr_lens,
1913                             OVS_TUNNEL_KEY_ATTR_MAX, true)) {
1918         case OVS_TUNNEL_KEY_ATTR_ID:
1919             format_be64(ds, "tun_id", nl_attr_get_be64(a),
1920                         ma ? nl_attr_get(ma) : NULL, verbose);
1921             flags |= FLOW_TNL_F_KEY;
1923                 mask_flags |= FLOW_TNL_F_KEY;
1926         case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
1927             format_ipv4(ds, "src", nl_attr_get_be32(a),
1928                         ma ? nl_attr_get(ma) : NULL, verbose);
1930         case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
1931             format_ipv4(ds, "dst", nl_attr_get_be32(a),
1932                         ma ? nl_attr_get(ma) : NULL, verbose);
1934         case OVS_TUNNEL_KEY_ATTR_TOS:
1935             format_u8x(ds, "tos", nl_attr_get_u8(a),
1936                        ma ? nl_attr_get(ma) : NULL, verbose);
1938         case OVS_TUNNEL_KEY_ATTR_TTL:
1939             format_u8u(ds, "ttl", nl_attr_get_u8(a),
1940                        ma ? nl_attr_get(ma) : NULL, verbose);
1942         case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
1943             flags |= FLOW_TNL_F_DONT_FRAGMENT;
1945         case OVS_TUNNEL_KEY_ATTR_CSUM:
1946             flags |= FLOW_TNL_F_CSUM;
1948         case OVS_TUNNEL_KEY_ATTR_TP_SRC:
1949             format_be16(ds, "tp_src", nl_attr_get_be16(a),
1950                         ma ? nl_attr_get(ma) : NULL, verbose);
1952         case OVS_TUNNEL_KEY_ATTR_TP_DST:
1953             format_be16(ds, "tp_dst", nl_attr_get_be16(a),
1954                         ma ? nl_attr_get(ma) : NULL, verbose);
1956         case OVS_TUNNEL_KEY_ATTR_OAM:
1957             flags |= FLOW_TNL_F_OAM;
1959         case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
1960             ds_put_cstr(ds, "vxlan(");
1961             format_odp_tun_vxlan_opt(a, ma, ds, verbose);
1962             ds_put_cstr(ds, "),");
1964         case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
1965             ds_put_cstr(ds, "geneve(");
1966             format_odp_tun_geneve(a, ma, ds, verbose);
1967             ds_put_cstr(ds, "),");
1969         case __OVS_TUNNEL_KEY_ATTR_MAX:
1971             format_unknown_key(ds, a, ma);
1976     /* Flags can have a valid mask even if the attribute is not set, so
1977      * we need to collect these separately. */
1979         NL_NESTED_FOR_EACH(a, left, mask_attr) {
1980             switch (nl_attr_type(a)) {
1981             case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
1982                 mask_flags |= FLOW_TNL_F_DONT_FRAGMENT;
1984             case OVS_TUNNEL_KEY_ATTR_CSUM:
1985                 mask_flags |= FLOW_TNL_F_CSUM;
1987             case OVS_TUNNEL_KEY_ATTR_OAM:
1988                 mask_flags |= FLOW_TNL_F_OAM;
1994     format_tun_flags(ds, "flags", flags, mask_attr ? &mask_flags : NULL,
1997     ofpbuf_uninit(&ofp);
/* Appends "name=<no|first|later>," for IP fragment type 'key'.  A partial
 * mask is reported as an error because frag is an enumeration, not a
 * bitfield.  Suppressed when the mask is all-zero and 'verbose' is false. */
2001 format_frag(struct ds *ds, const char *name, uint8_t key,
2002             const uint8_t *mask, bool verbose)
2004     bool mask_empty = mask && !*mask;
2006     /* ODP frag is an enumeration field; partial masks are not meaningful. */
2007     if (verbose || !mask_empty) {
2008         bool mask_full = !mask || *mask == UINT8_MAX;
2010         if (!mask_full) { /* Partially masked. */
2011             ds_put_format(ds, "error: partial mask not supported for frag (%#"
2014             ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
/* Formats a single flow key attribute 'a' (with optional mask attribute
 * 'ma') into 'ds' as "attr_name(contents)".  'portno_names', when non-null
 * and 'verbose', translates IN_PORT numbers to names.  Dispatches on the
 * attribute type; unknown types fall back to a raw hex dump. */
2020 format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
2021                     const struct hmap *portno_names, struct ds *ds,
2024     enum ovs_key_attr attr = nl_attr_type(a);
2025     char namebuf[OVS_KEY_ATTR_BUFSIZE];
2028     is_exact = ma ? odp_mask_attr_is_exact(ma) : true;
2030     ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
/* Bail out early (raw dump already emitted) on a bad attribute length. */
2032     if (!check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
2033                         OVS_KEY_ATTR_MAX, false)) {
2037     ds_put_char(ds, '(');
2039     case OVS_KEY_ATTR_ENCAP:
/* ENCAP nests a complete flow key; recurse via odp_flow_format(). */
2040         if (ma && nl_attr_get_size(ma) && nl_attr_get_size(a)) {
2041             odp_flow_format(nl_attr_get(a), nl_attr_get_size(a),
2042                             nl_attr_get(ma), nl_attr_get_size(ma), NULL, ds,
2044         } else if (nl_attr_get_size(a)) {
2045             odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, NULL,
2050     case OVS_KEY_ATTR_PRIORITY:
2051     case OVS_KEY_ATTR_SKB_MARK:
2052     case OVS_KEY_ATTR_DP_HASH:
2053     case OVS_KEY_ATTR_RECIRC_ID:
2054         ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
2056             ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
2060     case OVS_KEY_ATTR_TUNNEL:
2061         format_odp_tun_attr(a, ma, ds, verbose);
2064     case OVS_KEY_ATTR_IN_PORT:
/* Port name substitution only when the match is exact: a masked port
 * number cannot be mapped back to a single name. */
2065         if (portno_names && verbose && is_exact) {
2066             char *name = odp_portno_names_get(portno_names,
2067                                               u32_to_odp(nl_attr_get_u32(a)));
2069                 ds_put_format(ds, "%s", name);
2071                 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
2074             ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
2076                 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
2081     case OVS_KEY_ATTR_ETHERNET: {
2082         const struct ovs_key_ethernet *mask = ma ? nl_attr_get(ma) : NULL;
2083         const struct ovs_key_ethernet *key = nl_attr_get(a);
2085         format_eth(ds, "src", key->eth_src, MASK(mask, eth_src), verbose);
2086         format_eth(ds, "dst", key->eth_dst, MASK(mask, eth_dst), verbose);
2090     case OVS_KEY_ATTR_VLAN:
2091         format_vlan_tci(ds, nl_attr_get_be16(a),
2092                         ma ? nl_attr_get_be16(ma) : OVS_BE16_MAX, verbose);
2095     case OVS_KEY_ATTR_MPLS: {
2096         const struct ovs_key_mpls *mpls_key = nl_attr_get(a);
2097         const struct ovs_key_mpls *mpls_mask = NULL;
2098         size_t size = nl_attr_get_size(a);
/* MPLS is a stack: the payload is a whole number of label entries. */
2100         if (!size || size % sizeof *mpls_key) {
2101             ds_put_format(ds, "(bad key length %"PRIuSIZE")", size);
2105             mpls_mask = nl_attr_get(ma);
2106             if (size != nl_attr_get_size(ma)) {
2107                 ds_put_format(ds, "(key length %"PRIuSIZE" != "
2108                               "mask length %"PRIuSIZE")",
2109                               size, nl_attr_get_size(ma));
2113         format_mpls(ds, mpls_key, mpls_mask, size / sizeof *mpls_key);
2116     case OVS_KEY_ATTR_ETHERTYPE:
2117         ds_put_format(ds, "0x%04"PRIx16, ntohs(nl_attr_get_be16(a)));
2119             ds_put_format(ds, "/0x%04"PRIx16, ntohs(nl_attr_get_be16(ma)));
2123     case OVS_KEY_ATTR_IPV4: {
2124         const struct ovs_key_ipv4 *key = nl_attr_get(a);
2125         const struct ovs_key_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
2127         format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
2128         format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
2129         format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
2131         format_u8x(ds, "tos", key->ipv4_tos, MASK(mask, ipv4_tos), verbose);
2132         format_u8u(ds, "ttl", key->ipv4_ttl, MASK(mask, ipv4_ttl), verbose);
2133         format_frag(ds, "frag", key->ipv4_frag, MASK(mask, ipv4_frag),
2138     case OVS_KEY_ATTR_IPV6: {
2139         const struct ovs_key_ipv6 *key = nl_attr_get(a);
2140         const struct ovs_key_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
2142         format_ipv6(ds, "src", key->ipv6_src, MASK(mask, ipv6_src), verbose);
2143         format_ipv6(ds, "dst", key->ipv6_dst, MASK(mask, ipv6_dst), verbose);
2144         format_ipv6_label(ds, "label", key->ipv6_label, MASK(mask, ipv6_label),
2146         format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
2148         format_u8x(ds, "tclass", key->ipv6_tclass, MASK(mask, ipv6_tclass),
2150         format_u8u(ds, "hlimit", key->ipv6_hlimit, MASK(mask, ipv6_hlimit),
2152         format_frag(ds, "frag", key->ipv6_frag, MASK(mask, ipv6_frag),
2157         /* These have the same structure and format. */
2158     case OVS_KEY_ATTR_TCP:
2159     case OVS_KEY_ATTR_UDP:
2160     case OVS_KEY_ATTR_SCTP: {
2161         const struct ovs_key_tcp *key = nl_attr_get(a);
2162         const struct ovs_key_tcp *mask = ma ? nl_attr_get(ma) : NULL;
2164         format_be16(ds, "src", key->tcp_src, MASK(mask, tcp_src), verbose);
2165         format_be16(ds, "dst", key->tcp_dst, MASK(mask, tcp_dst), verbose);
2169     case OVS_KEY_ATTR_TCP_FLAGS:
2171             format_flags_masked(ds, NULL, packet_tcp_flag_to_string,
2172                                 ntohs(nl_attr_get_be16(a)),
2173                                 TCP_FLAGS(nl_attr_get_be16(ma)),
2174                                 TCP_FLAGS(OVS_BE16_MAX));
2176             format_flags(ds, packet_tcp_flag_to_string,
2177                          ntohs(nl_attr_get_be16(a)), '|');
2181     case OVS_KEY_ATTR_ICMP: {
2182         const struct ovs_key_icmp *key = nl_attr_get(a);
2183         const struct ovs_key_icmp *mask = ma ? nl_attr_get(ma) : NULL;
2185         format_u8u(ds, "type", key->icmp_type, MASK(mask, icmp_type), verbose);
2186         format_u8u(ds, "code", key->icmp_code, MASK(mask, icmp_code), verbose);
2190     case OVS_KEY_ATTR_ICMPV6: {
2191         const struct ovs_key_icmpv6 *key = nl_attr_get(a);
2192         const struct ovs_key_icmpv6 *mask = ma ? nl_attr_get(ma) : NULL;
2194         format_u8u(ds, "type", key->icmpv6_type, MASK(mask, icmpv6_type),
2196         format_u8u(ds, "code", key->icmpv6_code, MASK(mask, icmpv6_code),
2201     case OVS_KEY_ATTR_ARP: {
2202         const struct ovs_key_arp *mask = ma ? nl_attr_get(ma) : NULL;
2203         const struct ovs_key_arp *key = nl_attr_get(a);
2205         format_ipv4(ds, "sip", key->arp_sip, MASK(mask, arp_sip), verbose);
2206         format_ipv4(ds, "tip", key->arp_tip, MASK(mask, arp_tip), verbose);
2207         format_be16(ds, "op", key->arp_op, MASK(mask, arp_op), verbose);
2208         format_eth(ds, "sha", key->arp_sha, MASK(mask, arp_sha), verbose);
2209         format_eth(ds, "tha", key->arp_tha, MASK(mask, arp_tha), verbose);
2213     case OVS_KEY_ATTR_ND: {
2214         const struct ovs_key_nd *mask = ma ? nl_attr_get(ma) : NULL;
2215         const struct ovs_key_nd *key = nl_attr_get(a);
2217         format_ipv6(ds, "target", key->nd_target, MASK(mask, nd_target),
2219         format_eth(ds, "sll", key->nd_sll, MASK(mask, nd_sll), verbose);
2220         format_eth(ds, "tll", key->nd_tll, MASK(mask, nd_tll), verbose);
2225     case OVS_KEY_ATTR_UNSPEC:
2226     case __OVS_KEY_ATTR_MAX:
/* Unknown/unspecified attribute: raw hex dump of key (and mask). */
2228         format_generic_odp_key(a, ds);
2230             ds_put_char(ds, '/');
2231             format_generic_odp_key(ma, ds);
2235     ds_put_char(ds, ')');
2238 static struct nlattr *
2239 generate_all_wildcard_mask(const struct attr_len_tbl tbl[], int max,
2240 struct ofpbuf *ofp, const struct nlattr *key)
2242 const struct nlattr *a;
2244 int type = nl_attr_type(key);
2245 int size = nl_attr_get_size(key);
2247 if (odp_key_attr_len(tbl, max, type) != ATTR_LEN_NESTED) {
2248 nl_msg_put_unspec_zero(ofp, type, size);
2252 if (tbl[type].next) {
2253 tbl = tbl[type].next;
2254 max = tbl[type].next_max;
2257 nested_mask = nl_msg_start_nested(ofp, type);
2258 NL_ATTR_FOR_EACH(a, left, key, nl_attr_get_size(key)) {
2259 generate_all_wildcard_mask(tbl, max, ofp, nl_attr_get(a));
2261 nl_msg_end_nested(ofp, nested_mask);
2268 odp_ufid_from_string(const char *s_, ovs_u128 *ufid)
2272 if (ovs_scan(s, "ufid:")) {
2275 if (!uuid_from_string_prefix((struct uuid *)ufid, s)) {
/* Appends "ufid:<uuid>" for '*ufid' to 'ds'. */
2287 odp_format_ufid(const ovs_u128 *ufid, struct ds *ds)
2289     ds_put_format(ds, "ufid:"UUID_FMT, UUID_ARGS((struct uuid *)ufid));
2292 /* Appends to 'ds' a string representation of the 'key_len' bytes of
2293  * OVS_KEY_ATTR_* attributes in 'key'. If non-null, additionally formats the
2294  * 'mask_len' bytes of 'mask' which apply to 'key'. If 'portno_names' is
2295  * non-null and 'verbose' is true, translates odp port number to its name. */
2297 odp_flow_format(const struct nlattr *key, size_t key_len,
2298                 const struct nlattr *mask, size_t mask_len,
2299                 const struct hmap *portno_names, struct ds *ds, bool verbose)
2302     const struct nlattr *a;
2304     bool has_ethtype_key = false;
2305     const struct nlattr *ma = NULL;
2307     bool first_field = true;
2309     ofpbuf_init(&ofp, 100);
2310     NL_ATTR_FOR_EACH (a, left, key, key_len) {
2311         bool is_nested_attr;
2312         bool is_wildcard = false;
2313         int attr_type = nl_attr_type(a);
/* Remember whether an ethertype appeared so a mask-only ethertype can
 * still be reported after the loop. */
2315         if (attr_type == OVS_KEY_ATTR_ETHERTYPE) {
2316             has_ethtype_key = true;
2319         is_nested_attr = odp_key_attr_len(ovs_flow_key_attr_lens,
2320                                           OVS_KEY_ATTR_MAX, attr_type) ==
2323         if (mask && mask_len) {
2324             ma = nl_attr_find__(mask, mask_len, nl_attr_type(a));
2325             is_wildcard = ma ? odp_mask_attr_is_wildcard(ma) : true;
/* Fully wildcarded fields are skipped unless verbose or nested. */
2328         if (verbose || !is_wildcard  || is_nested_attr) {
2329             if (is_wildcard && !ma) {
2330                 ma = generate_all_wildcard_mask(ovs_flow_key_attr_lens,
2335                 ds_put_char(ds, ',');
2337             format_odp_key_attr(a, ma, portno_names, ds, verbose);
2338             first_field = false;
2342     ofpbuf_uninit(&ofp);
/* Report trailing bytes that did not form a complete attribute. */
2347         if (left == key_len) {
2348             ds_put_cstr(ds, "<empty>");
2350         ds_put_format(ds, ",***%u leftover bytes*** (", left);
2351         for (i = 0; i < left; i++) {
2352             ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
2354         ds_put_char(ds, ')');
/* A mask-only ethertype still matters: print it as eth_type(0/xxxx). */
2356     if (!has_ethtype_key) {
2357         ma = nl_attr_find__(mask, mask_len, OVS_KEY_ATTR_ETHERTYPE);
2359             ds_put_format(ds, ",eth_type(0/0x%04"PRIx16")",
2360                           ntohs(nl_attr_get_be16(ma)));
2364         ds_put_cstr(ds, "<empty>");
2368 /* Appends to 'ds' a string representation of the 'key_len' bytes of
2369  * OVS_KEY_ATTR_* attributes in 'key'. */
2371 odp_flow_key_format(const struct nlattr *key,
2372                     size_t key_len, struct ds *ds)
/* Convenience wrapper: no mask, no port names, verbose formatting. */
2374     odp_flow_format(key, key_len, NULL, 0, NULL, ds, true);
2378 ovs_frag_type_from_string(const char *s, enum ovs_frag_type *type)
2380 if (!strcasecmp(s, "no")) {
2381 *type = OVS_FRAG_TYPE_NONE;
2382 } else if (!strcasecmp(s, "first")) {
2383 *type = OVS_FRAG_TYPE_FIRST;
2384 } else if (!strcasecmp(s, "later")) {
2385 *type = OVS_FRAG_TYPE_LATER;
2395 scan_eth(const char *s, struct eth_addr *key, struct eth_addr *mask)
2399 if (ovs_scan(s, ETH_ADDR_SCAN_FMT"%n",
2400 ETH_ADDR_SCAN_ARGS(*key), &n)) {
2404 if (ovs_scan(s + len, "/"ETH_ADDR_SCAN_FMT"%n",
2405 ETH_ADDR_SCAN_ARGS(*mask), &n)) {
2408 memset(mask, 0xff, sizeof *mask);
2417 scan_ipv4(const char *s, ovs_be32 *key, ovs_be32 *mask)
2421 if (ovs_scan(s, IP_SCAN_FMT"%n", IP_SCAN_ARGS(key), &n)) {
2425 if (ovs_scan(s + len, "/"IP_SCAN_FMT"%n",
2426 IP_SCAN_ARGS(mask), &n)) {
2429 *mask = OVS_BE32_MAX;
2438 scan_ipv6(const char *s, ovs_be32 (*key)[4], ovs_be32 (*mask)[4])
2441 char ipv6_s[IPV6_SCAN_LEN + 1];
2443 if (ovs_scan(s, IPV6_SCAN_FMT"%n", ipv6_s, &n)
2444 && inet_pton(AF_INET6, ipv6_s, key) == 1) {
2448 if (ovs_scan(s + len, "/"IPV6_SCAN_FMT"%n", ipv6_s, &n)
2449 && inet_pton(AF_INET6, ipv6_s, mask) == 1) {
2452 memset(mask, 0xff, sizeof *mask);
2461 scan_ipv6_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
2466 if (ovs_scan(s, "%i%n", &key_, &n)
2467 && (key_ & ~IPV6_LABEL_MASK) == 0) {
2472 if (ovs_scan(s + len, "/%i%n", &mask_, &n)
2473 && (mask_ & ~IPV6_LABEL_MASK) == 0) {
2475 *mask = htonl(mask_);
2477 *mask = htonl(IPV6_LABEL_MASK);
/* Scans "key[/mask]" (any C integer syntax) into 8-bit '*key' (and '*mask'
 * when non-null; an omitted mask becomes UINT8_MAX).  Returns characters
 * consumed or 0. */
static int
scan_u8(const char *s, uint8_t *key, uint8_t *mask)
{
    int n;

    if (!ovs_scan(s, "%"SCNi8"%n", key, &n)) {
        return 0;
    }

    {
        int len = n;

        if (mask) {
            if (ovs_scan(s + len, "/%"SCNi8"%n", mask, &n)) {
                len += n;
            } else {
                *mask = UINT8_MAX;
            }
        }
        return len;
    }
}
/* Scans "key[/mask]" into 32-bit '*key' (and '*mask' when non-null; an
 * omitted mask becomes UINT32_MAX).  Returns characters consumed or 0. */
static int
scan_u32(const char *s, uint32_t *key, uint32_t *mask)
{
    int n;

    if (!ovs_scan(s, "%"SCNi32"%n", key, &n)) {
        return 0;
    }

    {
        int len = n;

        if (mask) {
            if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
                len += n;
            } else {
                *mask = UINT32_MAX;
            }
        }
        return len;
    }
}
2526 scan_be16(const char *s, ovs_be16 *key, ovs_be16 *mask)
2528 uint16_t key_, mask_;
2531 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
2536 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
2538 *mask = htons(mask_);
2540 *mask = OVS_BE16_MAX;
2549 scan_be64(const char *s, ovs_be64 *key, ovs_be64 *mask)
2551 uint64_t key_, mask_;
2554 if (ovs_scan(s, "%"SCNi64"%n", &key_, &n)) {
2557 *key = htonll(key_);
2559 if (ovs_scan(s + len, "/%"SCNi64"%n", &mask_, &n)) {
2561 *mask = htonll(mask_);
2563 *mask = OVS_BE64_MAX;
2572 scan_tun_flags(const char *s, uint16_t *key, uint16_t *mask)
2574 uint32_t flags, fmask;
2577 n = parse_odp_flags(s, flow_tun_flag_to_string, &flags,
2578 FLOW_TNL_F_MASK, mask ? &fmask : NULL);
2579 if (n >= 0 && s[n] == ')') {
2590 scan_tcp_flags(const char *s, ovs_be16 *key, ovs_be16 *mask)
2592 uint32_t flags, fmask;
2595 n = parse_odp_flags(s, packet_tcp_flag_to_string, &flags,
2596 TCP_FLAGS(OVS_BE16_MAX), mask ? &fmask : NULL);
2598 *key = htons(flags);
2600 *mask = htons(fmask);
2608 scan_frag(const char *s, uint8_t *key, uint8_t *mask)
2612 enum ovs_frag_type frag_type;
2614 if (ovs_scan(s, "%7[a-z]%n", frag, &n)
2615 && ovs_frag_type_from_string(frag, &frag_type)) {
2628 scan_port(const char *s, uint32_t *key, uint32_t *mask,
2629 const struct simap *port_names)
2633 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
2637 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
2644 } else if (port_names) {
2645 const struct simap_node *node;
2648 len = strcspn(s, ")");
2649 node = simap_find_len(port_names, s, len);
2662 /* Helper for vlan parsing. */
/* NOTE(review): the struct members were lost in this extract; presumably a
 * single ovs_be16 TCI field — confirm against the full source. */
2663 struct ovs_key_vlan__ {
2668 set_be16_bf(ovs_be16 *bf, uint8_t bits, uint8_t offset, uint16_t value)
2670 const uint16_t mask = ((1U << bits) - 1) << offset;
2672 if (value >> bits) {
2676 *bf = htons((ntohs(*bf) & ~mask) | (value << offset));
2681 scan_be16_bf(const char *s, ovs_be16 *key, ovs_be16 *mask, uint8_t bits,
2684 uint16_t key_, mask_;
2687 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
2690 if (set_be16_bf(key, bits, offset, key_)) {
2692 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
2695 if (!set_be16_bf(mask, bits, offset, mask_)) {
2699 *mask |= htons(((1U << bits) - 1) << offset);
/* Scans a 12-bit VLAN VID "vid[/mask]" into the VID bits of '*key'/'*mask'. */
2709 scan_vid(const char *s, ovs_be16 *key, ovs_be16 *mask)
2711     return scan_be16_bf(s, key, mask, 12, VLAN_VID_SHIFT);
/* Scans a 3-bit VLAN PCP "pcp[/mask]" into the PCP bits of '*key'/'*mask'. */
2715 scan_pcp(const char *s, ovs_be16 *key, ovs_be16 *mask)
2717     return scan_be16_bf(s, key, mask, 3, VLAN_PCP_SHIFT);
/* Scans the 1-bit VLAN CFI "cfi[/mask]" into the CFI bit of '*key'/'*mask'. */
2721 scan_cfi(const char *s, ovs_be16 *key, ovs_be16 *mask)
2723     return scan_be16_bf(s, key, mask, 1, VLAN_CFI_SHIFT);
2728 set_be32_bf(ovs_be32 *bf, uint8_t bits, uint8_t offset, uint32_t value)
2730 const uint32_t mask = ((1U << bits) - 1) << offset;
2732 if (value >> bits) {
2736 *bf = htonl((ntohl(*bf) & ~mask) | (value << offset));
2741 scan_be32_bf(const char *s, ovs_be32 *key, ovs_be32 *mask, uint8_t bits,
2744 uint32_t key_, mask_;
2747 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
2750 if (set_be32_bf(key, bits, offset, key_)) {
2752 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
2755 if (!set_be32_bf(mask, bits, offset, mask_)) {
2759 *mask |= htonl(((1U << bits) - 1) << offset);
2769 scan_mpls_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
2771 return scan_be32_bf(s, key, mask, 20, MPLS_LABEL_SHIFT);
2775 scan_mpls_tc(const char *s, ovs_be32 *key, ovs_be32 *mask)
2777 return scan_be32_bf(s, key, mask, 3, MPLS_TC_SHIFT);
2781 scan_mpls_ttl(const char *s, ovs_be32 *key, ovs_be32 *mask)
2783 return scan_be32_bf(s, key, mask, 8, MPLS_TTL_SHIFT);
2787 scan_mpls_bos(const char *s, ovs_be32 *key, ovs_be32 *mask)
2789 return scan_be32_bf(s, key, mask, 1, MPLS_BOS_SHIFT);
/* Parses a VXLAN Group Based Policy extension of the form
 * "id=N,flags=F))" into a packed 32-bit value: flags in the upper half,
 * network-order id in the lower half.  NOTE(review): interior lines are
 * elided in this extract; only comments were added. */
2793 scan_vxlan_gbp(const char *s, uint32_t *key, uint32_t *mask)
2795 const char *s_base = s;
2796 ovs_be16 id = 0, id_mask = 0;
2797 uint8_t flags = 0, flags_mask = 0;
2799 if (!strncmp(s, "id=", 3)) {
/* Mask sub-scan only when the caller wants a mask at all. */
2801 s += scan_be16(s, &id, mask ? &id_mask : NULL);
2807 if (!strncmp(s, "flags=", 6)) {
2809 s += scan_u8(s, &flags, mask ? &flags_mask : NULL);
/* "))" closes both the gbp( and vxlan( groups. */
2812 if (!strncmp(s, "))", 2)) {
2815 *key = (flags << 16) | ntohs(id);
2817 *mask = (flags_mask << 16) | ntohs(id_mask);
/* Parses a sequence of "{class=...,type=...,len=N,<data>}" Geneve options
 * from 's' into 'key' (and 'mask' if nonnull).  Options are packed back to
 * back into the fixed-size buffers; 'len_remain' guards against overflow.
 * NOTE(review): interior lines are elided in this extract. */
2827 scan_geneve(const char *s, struct geneve_scan *key, struct geneve_scan *mask)
2829 const char *s_base = s;
2830 struct geneve_opt *opt = key->d;
2831 struct geneve_opt *opt_mask = mask ? mask->d : NULL;
2832 int len_remain = sizeof key->d;
/* One loop iteration per "{...}" option; stop when the buffer is full. */
2834 while (s[0] == '{' && len_remain >= sizeof *opt) {
2838 len_remain -= sizeof *opt;
2840 if (!strncmp(s, "class=", 6)) {
2842 s += scan_be16(s, &opt->opt_class,
2843 mask ? &opt_mask->opt_class : NULL);
/* Field omitted: wildcard it in the mask. */
2845 memset(&opt_mask->opt_class, 0, sizeof opt_mask->opt_class);
2851 if (!strncmp(s, "type=", 5)) {
2853 s += scan_u8(s, &opt->type, mask ? &opt_mask->type : NULL);
2855 memset(&opt_mask->type, 0, sizeof opt_mask->type);
2861 if (!strncmp(s, "len=", 4)) {
2862 uint8_t opt_len, opt_len_mask;
2864 s += scan_u8(s, &opt_len, mask ? &opt_len_mask : NULL);
/* Option length is in bytes here, 4-byte words on the wire; it must be a
 * multiple of 4, fit the 5-bit wire field (<= 124), and fit the buffer. */
2866 if (opt_len > 124 || opt_len % 4 || opt_len > len_remain) {
2869 opt->length = opt_len / 4;
2871 opt_mask->length = opt_len_mask;
2875 memset(&opt_mask->type, 0, sizeof opt_mask->type);
/* Option payload parses as a hex/int string directly after the header. */
2881 if (parse_int_string(s, (uint8_t *)(opt + 1), data_len, (char **)&s)) {
2888 if (parse_int_string(s, (uint8_t *)(opt_mask + 1),
2889 data_len, (char **)&s)) {
/* Advance past header + payload (both expressed in geneve_opt units). */
2900 opt += 1 + data_len / 4;
2902 opt_mask += 1 + data_len / 4;
2904 len_remain -= data_len;
2909 int len = sizeof key->d - len_remain;
/* Callbacks used by SCAN_PUT_ATTR() to serialize scanned tunnel sub-fields
 * as Netlink attributes instead of a plain memcpy.  Each takes the scanned
 * value through an opaque 'data_' pointer. */
/* Emits one flag attribute per tunnel-flag bit set in '*data_'. */
2923 tun_flags_to_attr(struct ofpbuf *a, const void *data_)
2925 const uint16_t *flags = data_;
2927 if (*flags & FLOW_TNL_F_DONT_FRAGMENT) {
2928 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
2930 if (*flags & FLOW_TNL_F_CSUM) {
2931 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
2933 if (*flags & FLOW_TNL_F_OAM) {
2934 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
/* Wraps the packed GBP word in a nested OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS. */
2939 vxlan_gbp_to_attr(struct ofpbuf *a, const void *data_)
2941 const uint32_t *gbp = data_;
2944 size_t vxlan_opts_ofs;
2946 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
2947 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP, *gbp);
2948 nl_msg_end_nested(a, vxlan_opts_ofs);
/* Copies the scanned Geneve option block into a GENEVE_OPTS attribute. */
2953 geneve_to_attr(struct ofpbuf *a, const void *data_)
2955 const struct geneve_scan *geneve = data_;
2957 nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, geneve->d,
/* Macro toolkit for parse_odp_key_mask_attr() below.  The general shape is:
 *     SCAN_BEGIN("name(", type) { SCAN_FIELD(...); ... } SCAN_END(attr);
 * which scans "name(field=value,...)" text into a local key/mask pair and
 * appends them to the 'key'/'mask' ofpbufs as Netlink attributes.
 * NOTE(review): many macro continuation lines are elided in this extract;
 * comments were added only between complete macro definitions. */
2961 #define SCAN_PUT_ATTR(BUF, ATTR, DATA, FUNC) \
2963 unsigned long call_fn = (unsigned long)FUNC; \
2965 typedef void (*fn)(struct ofpbuf *, const void *); \
2967 func(BUF, &(DATA)); \
2969 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)); \
2973 #define SCAN_IF(NAME) \
2974 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
2975 const char *start = s; \
2980 /* Usually no special initialization is needed. */
2981 #define SCAN_BEGIN(NAME, TYPE) \
2984 memset(&skey, 0, sizeof skey); \
2985 memset(&smask, 0, sizeof smask); \
2989 /* Init as fully-masked as mask will not be scanned. */
2990 #define SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) \
2993 memset(&skey, 0, sizeof skey); \
2994 memset(&smask, 0xff, sizeof smask); \
2998 /* VLAN needs special initialization. */
2999 #define SCAN_BEGIN_INIT(NAME, TYPE, KEY_INIT, MASK_INIT) \
3001 TYPE skey = KEY_INIT; \
3002 TYPE smask = MASK_INIT; \
3006 /* Scan unnamed entry as 'TYPE' */
3007 #define SCAN_TYPE(TYPE, KEY, MASK) \
3008 len = scan_##TYPE(s, KEY, MASK); \
3014 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
3015 #define SCAN_FIELD(NAME, TYPE, FIELD) \
3016 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
3017 s += strlen(NAME); \
3018 SCAN_TYPE(TYPE, &skey.FIELD, mask ? &smask.FIELD : NULL); \
3022 #define SCAN_FINISH() \
3023 } while (*s++ == ',' && len != 0); \
3024 if (s[-1] != ')') { \
3028 #define SCAN_FINISH_SINGLE() \
3030 if (*s++ != ')') { \
3034 /* Beginning of nested attribute. */
3035 #define SCAN_BEGIN_NESTED(NAME, ATTR) \
3037 size_t key_offset, mask_offset; \
3038 key_offset = nl_msg_start_nested(key, ATTR); \
3040 mask_offset = nl_msg_start_nested(mask, ATTR); \
3045 #define SCAN_END_NESTED() \
3047 nl_msg_end_nested(key, key_offset); \
3049 nl_msg_end_nested(mask, mask_offset); \
3054 #define SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, FUNC) \
3055 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
3057 memset(&skey, 0, sizeof skey); \
3058 memset(&smask, 0xff, sizeof smask); \
3059 s += strlen(NAME); \
3060 SCAN_TYPE(SCAN_AS, &skey, &smask); \
3061 SCAN_PUT(ATTR, FUNC); \
3065 #define SCAN_FIELD_NESTED(NAME, TYPE, SCAN_AS, ATTR) \
3066 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, NULL)
/* FUNC variant: a callback serializes the scanned value (see *_to_attr). */
3068 #define SCAN_FIELD_NESTED_FUNC(NAME, TYPE, SCAN_AS, FUNC) \
3069 SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, 0, FUNC)
3071 #define SCAN_PUT(ATTR, FUNC) \
3072 if (!mask || !is_all_zeros(&smask, sizeof smask)) { \
3073 SCAN_PUT_ATTR(key, ATTR, skey, FUNC); \
3075 SCAN_PUT_ATTR(mask, ATTR, smask, FUNC); \
3079 #define SCAN_END(ATTR) \
3081 SCAN_PUT(ATTR, NULL); \
3085 #define SCAN_END_SINGLE(ATTR) \
3086 SCAN_FINISH_SINGLE(); \
3087 SCAN_PUT(ATTR, NULL); \
3091 #define SCAN_SINGLE(NAME, TYPE, SCAN_AS, ATTR) \
3092 SCAN_BEGIN(NAME, TYPE) { \
3093 SCAN_TYPE(SCAN_AS, &skey, &smask); \
3094 } SCAN_END_SINGLE(ATTR)
/* Fully-masked single-value attribute (no "/mask" accepted). */
3096 #define SCAN_SINGLE_FULLY_MASKED(NAME, TYPE, SCAN_AS, ATTR) \
3097 SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) { \
3098 SCAN_TYPE(SCAN_AS, &skey, NULL); \
3099 } SCAN_END_SINGLE(ATTR)
3101 /* scan_port needs one extra argument. */
3102 #define SCAN_SINGLE_PORT(NAME, TYPE, ATTR) \
3103 SCAN_BEGIN(NAME, TYPE) { \
3104 len = scan_port(s, &skey, &smask, port_names); \
3109 } SCAN_END_SINGLE(ATTR)
/* Parses one key[/mask] attribute from the textual flow representation in
 * 's', appending the binary Netlink form to 'key' (and 'mask' if nonnull).
 * Returns the number of characters consumed (see SCAN_* macros above).
 * NOTE(review): interior lines are elided in this extract; only comments
 * were added here. */
3112 parse_odp_key_mask_attr(const char *s, const struct simap *port_names,
3113 struct ofpbuf *key, struct ofpbuf *mask)
/* "ufid:..." prefix handled first, outside the SCAN_* machinery. */
3119 len = odp_ufid_from_string(s, &ufid);
3124 SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
3125 SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
3126 SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
3127 OVS_KEY_ATTR_RECIRC_ID);
3128 SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH);
/* Tunnel metadata is a nested attribute with many sub-fields. */
3130 SCAN_BEGIN_NESTED("tunnel(", OVS_KEY_ATTR_TUNNEL) {
3131 SCAN_FIELD_NESTED("tun_id=", ovs_be64, be64, OVS_TUNNEL_KEY_ATTR_ID);
3132 SCAN_FIELD_NESTED("src=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_SRC);
3133 SCAN_FIELD_NESTED("dst=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_DST);
3134 SCAN_FIELD_NESTED("tos=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TOS);
3135 SCAN_FIELD_NESTED("ttl=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TTL);
3136 SCAN_FIELD_NESTED("tp_src=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_SRC);
3137 SCAN_FIELD_NESTED("tp_dst=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_DST);
3138 SCAN_FIELD_NESTED_FUNC("vxlan(gbp(", uint32_t, vxlan_gbp, vxlan_gbp_to_attr);
3139 SCAN_FIELD_NESTED_FUNC("geneve(", struct geneve_scan, geneve,
3141 SCAN_FIELD_NESTED_FUNC("flags(", uint16_t, tun_flags, tun_flags_to_attr);
3142 } SCAN_END_NESTED();
3144 SCAN_SINGLE_PORT("in_port(", uint32_t, OVS_KEY_ATTR_IN_PORT);
3146 SCAN_BEGIN("eth(", struct ovs_key_ethernet) {
3147 SCAN_FIELD("src=", eth, eth_src);
3148 SCAN_FIELD("dst=", eth, eth_dst);
3149 } SCAN_END(OVS_KEY_ATTR_ETHERNET);
/* VLAN defaults both key and mask to CFI set (see SCAN_BEGIN_INIT). */
3151 SCAN_BEGIN_INIT("vlan(", struct ovs_key_vlan__,
3152 { htons(VLAN_CFI) }, { htons(VLAN_CFI) }) {
3153 SCAN_FIELD("vid=", vid, tci);
3154 SCAN_FIELD("pcp=", pcp, tci);
3155 SCAN_FIELD("cfi=", cfi, tci);
3156 } SCAN_END(OVS_KEY_ATTR_VLAN);
3158 SCAN_SINGLE("eth_type(", ovs_be16, be16, OVS_KEY_ATTR_ETHERTYPE);
3160 SCAN_BEGIN("mpls(", struct ovs_key_mpls) {
3161 SCAN_FIELD("label=", mpls_label, mpls_lse);
3162 SCAN_FIELD("tc=", mpls_tc, mpls_lse);
3163 SCAN_FIELD("ttl=", mpls_ttl, mpls_lse);
3164 SCAN_FIELD("bos=", mpls_bos, mpls_lse);
3165 } SCAN_END(OVS_KEY_ATTR_MPLS);
3167 SCAN_BEGIN("ipv4(", struct ovs_key_ipv4) {
3168 SCAN_FIELD("src=", ipv4, ipv4_src);
3169 SCAN_FIELD("dst=", ipv4, ipv4_dst);
3170 SCAN_FIELD("proto=", u8, ipv4_proto);
3171 SCAN_FIELD("tos=", u8, ipv4_tos);
3172 SCAN_FIELD("ttl=", u8, ipv4_ttl);
3173 SCAN_FIELD("frag=", frag, ipv4_frag);
3174 } SCAN_END(OVS_KEY_ATTR_IPV4);
3176 SCAN_BEGIN("ipv6(", struct ovs_key_ipv6) {
3177 SCAN_FIELD("src=", ipv6, ipv6_src);
3178 SCAN_FIELD("dst=", ipv6, ipv6_dst);
3179 SCAN_FIELD("label=", ipv6_label, ipv6_label);
3180 SCAN_FIELD("proto=", u8, ipv6_proto);
3181 SCAN_FIELD("tclass=", u8, ipv6_tclass);
3182 SCAN_FIELD("hlimit=", u8, ipv6_hlimit);
3183 SCAN_FIELD("frag=", frag, ipv6_frag);
3184 } SCAN_END(OVS_KEY_ATTR_IPV6);
3186 SCAN_BEGIN("tcp(", struct ovs_key_tcp) {
3187 SCAN_FIELD("src=", be16, tcp_src);
3188 SCAN_FIELD("dst=", be16, tcp_dst);
3189 } SCAN_END(OVS_KEY_ATTR_TCP);
3191 SCAN_SINGLE("tcp_flags(", ovs_be16, tcp_flags, OVS_KEY_ATTR_TCP_FLAGS);
3193 SCAN_BEGIN("udp(", struct ovs_key_udp) {
3194 SCAN_FIELD("src=", be16, udp_src);
3195 SCAN_FIELD("dst=", be16, udp_dst);
3196 } SCAN_END(OVS_KEY_ATTR_UDP);
3198 SCAN_BEGIN("sctp(", struct ovs_key_sctp) {
3199 SCAN_FIELD("src=", be16, sctp_src);
3200 SCAN_FIELD("dst=", be16, sctp_dst);
3201 } SCAN_END(OVS_KEY_ATTR_SCTP);
3203 SCAN_BEGIN("icmp(", struct ovs_key_icmp) {
3204 SCAN_FIELD("type=", u8, icmp_type);
3205 SCAN_FIELD("code=", u8, icmp_code);
3206 } SCAN_END(OVS_KEY_ATTR_ICMP);
3208 SCAN_BEGIN("icmpv6(", struct ovs_key_icmpv6) {
3209 SCAN_FIELD("type=", u8, icmpv6_type);
3210 SCAN_FIELD("code=", u8, icmpv6_code);
3211 } SCAN_END(OVS_KEY_ATTR_ICMPV6);
3213 SCAN_BEGIN("arp(", struct ovs_key_arp) {
3214 SCAN_FIELD("sip=", ipv4, arp_sip);
3215 SCAN_FIELD("tip=", ipv4, arp_tip);
3216 SCAN_FIELD("op=", be16, arp_op);
3217 SCAN_FIELD("sha=", eth, arp_sha);
3218 SCAN_FIELD("tha=", eth, arp_tha);
3219 } SCAN_END(OVS_KEY_ATTR_ARP);
3221 SCAN_BEGIN("nd(", struct ovs_key_nd) {
3222 SCAN_FIELD("target=", ipv6, nd_target);
3223 SCAN_FIELD("sll=", eth, nd_sll);
3224 SCAN_FIELD("tll=", eth, nd_tll);
3225 } SCAN_END(OVS_KEY_ATTR_ND);
3227 /* Encap open-coded. */
3228 if (!strncmp(s, "encap(", 6)) {
3229 const char *start = s;
3230 size_t encap, encap_mask = 0;
3232 encap = nl_msg_start_nested(key, OVS_KEY_ATTR_ENCAP);
3234 encap_mask = nl_msg_start_nested(mask, OVS_KEY_ATTR_ENCAP);
/* Recurse for each attribute inside encap(...), until ')'. */
3241 s += strspn(s, delimiters);
3244 } else if (*s == ')') {
3248 retval = parse_odp_key_mask_attr(s, port_names, key, mask);
3256 nl_msg_end_nested(key, encap);
3258 nl_msg_end_nested(mask, encap_mask);
3267 /* Parses the string representation of a datapath flow key, in the
3268 * format output by odp_flow_key_format(). Returns 0 if successful,
3269 * otherwise a positive errno value. On success, the flow key is
3270 * appended to 'key' as a series of Netlink attributes. On failure, no
3271 * data is appended to 'key'. Either way, 'key''s data might be
3274 * If 'port_names' is nonnull, it points to an simap that maps from a port name
3275 * to a port number. (Port names may be used instead of port numbers in
3278 * On success, the attributes appended to 'key' are individually syntactically
3279 * valid, but they may not be valid as a sequence. 'key' might, for example,
3280 * have duplicated keys. odp_flow_key_to_flow() will detect those errors. */
/* See the comment block above: parses a whole textual flow key, restoring
 * 'key' to its original size on any parse error so no partial data leaks
 * out.  NOTE(review): interior loop lines are elided in this extract. */
3282 odp_flow_from_string(const char *s, const struct simap *port_names,
3283 struct ofpbuf *key, struct ofpbuf *mask)
3285 const size_t old_size = key->size;
3289 s += strspn(s, delimiters);
3294 retval = parse_odp_key_mask_attr(s, port_names, key, mask);
/* Error path: discard anything appended so far. */
3296 key->size = old_size;
/* Translates a struct flow nw_frag value (bit set) into the datapath's
 * scalar 'enum ovs_frag_type' encoding; for masks, collapses to all-ones or
 * all-zeros as explained below.  NOTE(review): the 'is_mask' branch lines
 * are elided in this extract. */
3306 ovs_to_odp_frag(uint8_t nw_frag, bool is_mask)
3309 /* Netlink interface 'enum ovs_frag_type' is an 8-bit enumeration type,
3310 * not a set of flags or bitfields. Hence, if the struct flow nw_frag
3311 * mask, which is a set of bits, has the FLOW_NW_FRAG_ANY as zero, we
3312 * must use a zero mask for the netlink frag field, and all ones mask
3314 return (nw_frag & FLOW_NW_FRAG_ANY) ? UINT8_MAX : 0;
3316 return !(nw_frag & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_NONE
3317 : nw_frag & FLOW_NW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
3318 : OVS_FRAG_TYPE_FIRST;
3321 static void get_ethernet_key(const struct flow *, struct ovs_key_ethernet *);
3322 static void put_ethernet_key(const struct ovs_key_ethernet *, struct flow *);
3323 static void get_ipv4_key(const struct flow *, struct ovs_key_ipv4 *,
3325 static void put_ipv4_key(const struct ovs_key_ipv4 *, struct flow *,
3327 static void get_ipv6_key(const struct flow *, struct ovs_key_ipv6 *,
3329 static void put_ipv6_key(const struct ovs_key_ipv6 *, struct flow *,
3331 static void get_arp_key(const struct flow *, struct ovs_key_arp *);
3332 static void put_arp_key(const struct ovs_key_arp *, struct flow *);
3333 static void get_nd_key(const struct flow *, struct ovs_key_nd *);
3334 static void put_nd_key(const struct ovs_key_nd *, struct flow *);
3336 /* These share the same layout. */
/* TCP, UDP and SCTP keys are each a pair of be16 ports, so one union lets
 * get_tp_key()/put_tp_key() serve all three protocols. */
3338 struct ovs_key_tcp tcp;
3339 struct ovs_key_udp udp;
3340 struct ovs_key_sctp sctp;
3343 static void get_tp_key(const struct flow *, union ovs_key_tp *);
3344 static void put_tp_key(const union ovs_key_tp *, struct flow *);
/* Core serializer shared by odp_flow_key_from_flow() and _from_mask():
 * appends OVS_KEY_ATTR_* Netlink attributes for 'parms' to 'buf'.  When
 * 'export_mask' is true the values come from parms->mask but the structure
 * (which attributes appear) is still driven by parms->flow.
 * NOTE(review): interior lines are elided in this extract; only comments
 * were added here. */
3347 odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms,
3348 bool export_mask, struct ofpbuf *buf)
3350 struct ovs_key_ethernet *eth_key;
3352 const struct flow *flow = parms->flow;
/* 'data' is the value source; 'flow' always drives the attribute layout. */
3353 const struct flow *data = export_mask ? parms->mask : parms->flow;
3355 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
3357 if (flow->tunnel.ip_dst || export_mask) {
3358 tun_key_to_attr(buf, &data->tunnel, &parms->flow->tunnel,
3362 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, data->pkt_mark);
/* Recirculation attrs only if the datapath advertises support. */
3364 if (parms->support.recirc) {
3365 nl_msg_put_u32(buf, OVS_KEY_ATTR_RECIRC_ID, data->recirc_id);
3366 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, data->dp_hash);
3369 /* Add an ingress port attribute if this is a mask or 'odp_in_port'
3370 * is not the magical value "ODPP_NONE". */
3371 if (export_mask || parms->odp_in_port != ODPP_NONE) {
3372 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, parms->odp_in_port);
3375 eth_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ETHERNET,
3377 get_ethernet_key(data, eth_key);
/* 802.1Q: emit VLAN + nested ENCAP when a tag is present (or implied). */
3379 if (flow->vlan_tci != htons(0) || flow->dl_type == htons(ETH_TYPE_VLAN)) {
3381 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
3383 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_TYPE_VLAN));
3385 nl_msg_put_be16(buf, OVS_KEY_ATTR_VLAN, data->vlan_tci);
3386 encap = nl_msg_start_nested(buf, OVS_KEY_ATTR_ENCAP);
3387 if (flow->vlan_tci == htons(0)) {
3394 if (ntohs(flow->dl_type) < ETH_TYPE_MIN) {
3395 /* For backwards compatibility with kernels that don't support
3396 * wildcarding, the following convention is used to encode the
3397 * OVS_KEY_ATTR_ETHERTYPE for key and mask:
3400 * -------- -------- -------
3401 * >0x5ff 0xffff Specified Ethernet II Ethertype.
3402 * >0x5ff 0 Any Ethernet II or non-Ethernet II frame.
3403 * <none> 0xffff Any non-Ethernet II frame (except valid
3404 * 802.3 SNAP packet with valid eth_type).
3407 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
3412 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, data->dl_type);
/* L3: exactly one of IPv4 / IPv6 / ARP-RARP / MPLS per the ethertype. */
3414 if (flow->dl_type == htons(ETH_TYPE_IP)) {
3415 struct ovs_key_ipv4 *ipv4_key;
3417 ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
3419 get_ipv4_key(data, ipv4_key, export_mask);
3420 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
3421 struct ovs_key_ipv6 *ipv6_key;
3423 ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
3425 get_ipv6_key(data, ipv6_key, export_mask);
3426 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
3427 flow->dl_type == htons(ETH_TYPE_RARP)) {
3428 struct ovs_key_arp *arp_key;
3430 arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
3432 get_arp_key(data, arp_key);
3433 } else if (eth_type_mpls(flow->dl_type)) {
3434 struct ovs_key_mpls *mpls_key;
/* Emit at most as many labels as the datapath supports. */
3437 n = flow_count_mpls_labels(flow, NULL);
3439 n = MIN(n, parms->support.max_mpls_depth);
3441 mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
3442 n * sizeof *mpls_key);
3443 for (i = 0; i < n; i++) {
3444 mpls_key[i].mpls_lse = data->mpls_lse[i];
/* L4: skipped entirely for later IP fragments (no transport header). */
3448 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3449 if (flow->nw_proto == IPPROTO_TCP) {
3450 union ovs_key_tp *tcp_key;
3452 tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
3454 get_tp_key(data, tcp_key);
3455 if (data->tcp_flags) {
3456 nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
3458 } else if (flow->nw_proto == IPPROTO_UDP) {
3459 union ovs_key_tp *udp_key;
3461 udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
3463 get_tp_key(data, udp_key);
3464 } else if (flow->nw_proto == IPPROTO_SCTP) {
3465 union ovs_key_tp *sctp_key;
3467 sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
3469 get_tp_key(data, sctp_key);
3470 } else if (flow->dl_type == htons(ETH_TYPE_IP)
3471 && flow->nw_proto == IPPROTO_ICMP) {
3472 struct ovs_key_icmp *icmp_key;
3474 icmp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMP,
/* ICMP type/code travel in tp_src/tp_dst inside struct flow. */
3476 icmp_key->icmp_type = ntohs(data->tp_src);
3477 icmp_key->icmp_code = ntohs(data->tp_dst);
3478 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)
3479 && flow->nw_proto == IPPROTO_ICMPV6) {
3480 struct ovs_key_icmpv6 *icmpv6_key;
3482 icmpv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMPV6,
3483 sizeof *icmpv6_key);
3484 icmpv6_key->icmpv6_type = ntohs(data->tp_src);
3485 icmpv6_key->icmpv6_code = ntohs(data->tp_dst);
/* Neighbor Discovery: only for NS/NA, and for masks only when the
 * type/code are exact-matched. */
3487 if (flow->tp_dst == htons(0)
3488 && (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)
3489 || flow->tp_src == htons(ND_NEIGHBOR_ADVERT))
3490 && (!export_mask || (data->tp_src == htons(0xffff)
3491 && data->tp_dst == htons(0xffff)))) {
3493 struct ovs_key_nd *nd_key;
3495 nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
3497 memcpy(nd_key->nd_target, &data->nd_target,
3498 sizeof nd_key->nd_target);
3499 nd_key->nd_sll = data->arp_sha;
3500 nd_key->nd_tll = data->arp_tha;
3507 nl_msg_end_nested(buf, encap);
3511 /* Appends a representation of 'flow' as OVS_KEY_ATTR_* attributes to 'buf'.
3513 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
3514 * capable of being expanded to allow for that much space. */
/* Public wrapper: serialize the flow key (export_mask == false). */
3516 odp_flow_key_from_flow(const struct odp_flow_key_parms *parms,
3519 odp_flow_key_from_flow__(parms, false, buf);
3522 /* Appends a representation of 'mask' as OVS_KEY_ATTR_* attributes to
3525 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
3526 * capable of being expanded to allow for that much space. */
/* Public wrapper: serialize the flow mask (export_mask == true). */
3528 odp_flow_key_from_mask(const struct odp_flow_key_parms *parms,
3531 odp_flow_key_from_flow__(parms, true, buf);
3534 /* Generate ODP flow key from the given packet metadata */
/* Serializes only the metadata-derived attributes (priority, tunnel, mark,
 * in_port) -- no packet-header fields. */
3536 odp_key_from_pkt_metadata(struct ofpbuf *buf, const struct pkt_metadata *md)
3538 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
3540 if (md->tunnel.ip_dst) {
3541 tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL);
3544 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, md->pkt_mark);
3546 /* Add an ingress port attribute if 'odp_in_port' is not the magical
3547 * value "ODPP_NONE". */
3548 if (md->in_port.odp_port != ODPP_NONE) {
3549 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, md->in_port.odp_port);
3553 /* Generate packet metadata from the given ODP flow key. */
3555 odp_key_to_pkt_metadata(const struct nlattr *key, size_t key_len,
3556 struct pkt_metadata *md)
3558 const struct nlattr *nla;
/* Bitmap of the attribute types still being looked for; lets the loop
 * terminate early once all interesting metadata has been seen. */
3560 uint32_t wanted_attrs = 1u << OVS_KEY_ATTR_PRIORITY |
3561 1u << OVS_KEY_ATTR_SKB_MARK | 1u << OVS_KEY_ATTR_TUNNEL |
3562 1u << OVS_KEY_ATTR_IN_PORT;
3564 pkt_metadata_init(md, ODPP_NONE);
3566 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
3567 uint16_t type = nl_attr_type(nla);
3568 size_t len = nl_attr_get_size(nla);
3569 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
3570 OVS_KEY_ATTR_MAX, type);
/* Skip malformed attributes (negative expected_len means variable or
 * nested, which is not length-checked here). */
3572 if (len != expected_len && expected_len >= 0) {
3577 case OVS_KEY_ATTR_RECIRC_ID:
3578 md->recirc_id = nl_attr_get_u32(nla);
3579 wanted_attrs &= ~(1u << OVS_KEY_ATTR_RECIRC_ID);
3581 case OVS_KEY_ATTR_DP_HASH:
3582 md->dp_hash = nl_attr_get_u32(nla);
3583 wanted_attrs &= ~(1u << OVS_KEY_ATTR_DP_HASH);
3585 case OVS_KEY_ATTR_PRIORITY:
3586 md->skb_priority = nl_attr_get_u32(nla);
3587 wanted_attrs &= ~(1u << OVS_KEY_ATTR_PRIORITY);
3589 case OVS_KEY_ATTR_SKB_MARK:
3590 md->pkt_mark = nl_attr_get_u32(nla);
3591 wanted_attrs &= ~(1u << OVS_KEY_ATTR_SKB_MARK);
3593 case OVS_KEY_ATTR_TUNNEL: {
3594 enum odp_key_fitness res;
3596 res = odp_tun_key_from_attr(nla, true, &md->tunnel);
/* On error zero the tunnel; an imperfect-but-valid fit leaves
 * OVS_KEY_ATTR_TUNNEL wanted so a later attr may still satisfy it. */
3597 if (res == ODP_FIT_ERROR) {
3598 memset(&md->tunnel, 0, sizeof md->tunnel);
3599 } else if (res == ODP_FIT_PERFECT) {
3600 wanted_attrs &= ~(1u << OVS_KEY_ATTR_TUNNEL);
3604 case OVS_KEY_ATTR_IN_PORT:
3605 md->in_port.odp_port = nl_attr_get_odp_port(nla);
3606 wanted_attrs &= ~(1u << OVS_KEY_ATTR_IN_PORT);
3612 if (!wanted_attrs) {
3613 return; /* Have everything. */
/* Hashes a binary flow key as an array of 32-bit words; the build assert
 * guarantees Netlink alignment makes the word-wise cast safe. */
3619 odp_flow_key_hash(const struct nlattr *key, size_t key_len)
3621 BUILD_ASSERT_DECL(!(NLA_ALIGNTO % sizeof(uint32_t)));
3622 return hash_words(ALIGNED_CAST(const uint32_t *, key),
3623 key_len / sizeof(uint32_t), 0);
/* Rate-limited debug logging of an attribute bitmap ('attrs') plus the flow
 * key itself; 'out_of_range_attr', if nonzero, is an attribute type above
 * OVS_KEY_ATTR_MAX that could not be represented in the bitmap. */
3627 log_odp_key_attributes(struct vlog_rate_limit *rl, const char *title,
3628 uint64_t attrs, int out_of_range_attr,
3629 const struct nlattr *key, size_t key_len)
/* Bail out cheaply when this message would be rate-limited anyway. */
3634 if (VLOG_DROP_DBG(rl)) {
3639 for (i = 0; i < 64; i++) {
3640 if (attrs & (UINT64_C(1) << i)) {
3641 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3643 ds_put_format(&s, " %s",
3644 ovs_key_attr_to_string(i, namebuf, sizeof namebuf));
3647 if (out_of_range_attr) {
3648 ds_put_format(&s, " %d (and possibly others)", out_of_range_attr);
3651 ds_put_cstr(&s, ": ");
3652 odp_flow_key_format(key, key_len, &s);
3654 VLOG_DBG("%s:%s", title, ds_cstr(&s));
/* Inverse of ovs_to_odp_frag(): converts the datapath's scalar frag type
 * back into struct flow nw_frag bits.  Returns 0xff on an invalid value.
 * NOTE(review): the 'is_mask' branch lines are elided in this extract. */
3659 odp_to_ovs_frag(uint8_t odp_frag, bool is_mask)
3661 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Mask: any nonzero datapath mask means "all frag bits matter". */
3664 return odp_frag ? FLOW_NW_FRAG_MASK : 0;
3667 if (odp_frag > OVS_FRAG_TYPE_LATER) {
3668 VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
3669 return 0xff; /* Error. */
3672 return (odp_frag == OVS_FRAG_TYPE_NONE) ? 0
3673 : (odp_frag == OVS_FRAG_TYPE_FIRST) ? FLOW_NW_FRAG_ANY
3674 : FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
/* First pass over a binary flow key: indexes each attribute into 'attrs[]',
 * records the set of present types in '*present_attrsp', and notes any type
 * above OVS_KEY_ATTR_MAX in '*out_of_range_attrp'.  Rejects bad lengths,
 * duplicates, and trailing bytes.  NOTE(review): interior lines are elided
 * in this extract. */
3678 parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
3679 const struct nlattr *attrs[], uint64_t *present_attrsp,
3680 int *out_of_range_attrp)
3682 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3683 const struct nlattr *nla;
3684 uint64_t present_attrs;
/* The present-bitmap must be wide enough for every attribute type. */
3687 BUILD_ASSERT(OVS_KEY_ATTR_MAX < CHAR_BIT * sizeof present_attrs);
3689 *out_of_range_attrp = 0;
3690 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
3691 uint16_t type = nl_attr_type(nla);
3692 size_t len = nl_attr_get_size(nla);
3693 int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
3694 OVS_KEY_ATTR_MAX, type);
3696 if (len != expected_len && expected_len >= 0) {
3697 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3699 VLOG_ERR_RL(&rl, "attribute %s has length %"PRIuSIZE" but should have "
3700 "length %d", ovs_key_attr_to_string(type, namebuf,
/* Unknown high attribute types are remembered, not fatal here. */
3706 if (type > OVS_KEY_ATTR_MAX) {
3707 *out_of_range_attrp = type;
3709 if (present_attrs & (UINT64_C(1) << type)) {
3710 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3712 VLOG_ERR_RL(&rl, "duplicate %s attribute in flow key",
3713 ovs_key_attr_to_string(type,
3714 namebuf, sizeof namebuf));
3718 present_attrs |= UINT64_C(1) << type;
3723 VLOG_ERR_RL(&rl, "trailing garbage in flow key");
3727 *present_attrsp = present_attrs;
/* Compares the attributes actually present against the set the parser
 * expected, classifying the result: missing attributes -> TOO_LITTLE,
 * unexpected or out-of-range ones -> TOO_MUCH, otherwise PERFECT.
 * NOTE(review): closing-brace lines are elided in this extract. */
3731 static enum odp_key_fitness
3732 check_expectations(uint64_t present_attrs, int out_of_range_attr,
3733 uint64_t expected_attrs,
3734 const struct nlattr *key, size_t key_len)
3736 uint64_t missing_attrs;
3737 uint64_t extra_attrs;
3739 missing_attrs = expected_attrs & ~present_attrs;
3740 if (missing_attrs) {
3741 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3742 log_odp_key_attributes(&rl, "expected but not present",
3743 missing_attrs, 0, key, key_len);
3744 return ODP_FIT_TOO_LITTLE;
3747 extra_attrs = present_attrs & ~expected_attrs;
3748 if (extra_attrs || out_of_range_attr) {
3749 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3750 log_odp_key_attributes(&rl, "present but not expected",
3751 extra_attrs, out_of_range_attr, key, key_len);
3752 return ODP_FIT_TOO_MUCH;
3755 return ODP_FIT_PERFECT;
/* Extracts and validates the Ethertype from 'attrs' into 'flow->dl_type'.
 * 'flow != src_flow' signals that this call parses a mask rather than a key
 * (same convention as the other parse_* helpers).  NOTE(review): interior
 * lines are elided in this extract. */
3759 parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
3760 uint64_t present_attrs, uint64_t *expected_attrs,
3761 struct flow *flow, const struct flow *src_flow)
3763 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3764 bool is_mask = flow != src_flow;
3766 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
3767 flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
/* A key's Ethertype below ETH_TYPE_MIN is not a valid Ethernet II type. */
3768 if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
3769 VLOG_ERR_RL(&rl, "invalid Ethertype %"PRIu16" in flow key",
3770 ntohs(flow->dl_type));
/* Non-Ethernet-II frames must have an exact-match (all-ones) Ethertype
 * mask; see the encoding table in odp_flow_key_from_flow__(). */
3773 if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
3774 flow->dl_type != htons(0xffff)) {
3777 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
/* No Ethertype attribute: a keyed flow gets the NONE sentinel. */
3780 flow->dl_type = htons(FLOW_DL_TYPE_NONE);
3781 } else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
3782 /* See comments in odp_flow_key_from_flow__(). */
3783 VLOG_ERR_RL(&rl, "mask expected for non-Ethernet II frame");
/* Parses everything above L2 (MPLS, IPv4/IPv6/ARP, then TCP/UDP/SCTP/
 * ICMP/ICMPv6/ND) out of 'attrs' into 'flow', returning how well the
 * attribute set fit.  'flow != src_flow' again means mask parsing: for a
 * mask, an L3/L4 field may only be nonzero when the selecting field in the
 * key ('dl_type'/'nw_proto') is exact-matched.  NOTE(review): interior
 * lines are elided in this extract; only comments were added here. */
3790 static enum odp_key_fitness
3791 parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
3792 uint64_t present_attrs, int out_of_range_attr,
3793 uint64_t expected_attrs, struct flow *flow,
3794 const struct nlattr *key, size_t key_len,
3795 const struct flow *src_flow)
3797 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3798 bool is_mask = src_flow != flow;
/* 'check_start'/'check_len' remember the raw L3 key so the all-zeros
 * mask-consistency check can run after the if/else ladder. */
3799 const void *check_start = NULL;
3800 size_t check_len = 0;
3801 enum ovs_key_attr expected_bit = 0xff;
3803 if (eth_type_mpls(src_flow->dl_type)) {
3804 if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
3805 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
3807 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
3808 size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
3809 const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
3810 int n = size / sizeof(ovs_be32);
/* The MPLS attribute is a variable-length stack of 4-byte LSEs. */
3813 if (!size || size % sizeof(ovs_be32)) {
3814 return ODP_FIT_ERROR;
3816 if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
3817 return ODP_FIT_ERROR;
3820 for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
3821 flow->mpls_lse[i] = mpls_lse[i];
3823 if (n > FLOW_MAX_MPLS_LABELS) {
3824 return ODP_FIT_TOO_MUCH;
3828 /* BOS may be set only in the innermost label. */
3829 for (i = 0; i < n - 1; i++) {
3830 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
3831 return ODP_FIT_ERROR;
3835 /* BOS must be set in the innermost label. */
3836 if (n < FLOW_MAX_MPLS_LABELS
3837 && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
3838 return ODP_FIT_TOO_LITTLE;
3844 } else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
3846 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
3848 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
3849 const struct ovs_key_ipv4 *ipv4_key;
3851 ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
3852 put_ipv4_key(ipv4_key, flow, is_mask);
/* nw_frag carries only FLOW_NW_FRAG_MASK bits; anything else is bogus. */
3853 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
3854 return ODP_FIT_ERROR;
3857 check_start = ipv4_key;
3858 check_len = sizeof *ipv4_key;
3859 expected_bit = OVS_KEY_ATTR_IPV4;
3862 } else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
3864 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
3866 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
3867 const struct ovs_key_ipv6 *ipv6_key;
3869 ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
3870 put_ipv6_key(ipv6_key, flow, is_mask);
3871 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
3872 return ODP_FIT_ERROR;
3875 check_start = ipv6_key;
3876 check_len = sizeof *ipv6_key;
3877 expected_bit = OVS_KEY_ATTR_IPV6;
3880 } else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
3881 src_flow->dl_type == htons(ETH_TYPE_RARP)) {
3883 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
3885 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
3886 const struct ovs_key_arp *arp_key;
3888 arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
/* struct flow stores the ARP opcode in 8 bits, so reject wider values. */
3889 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
3890 VLOG_ERR_RL(&rl, "unsupported ARP opcode %"PRIu16" in flow "
3891 "key", ntohs(arp_key->arp_op));
3892 return ODP_FIT_ERROR;
3894 put_arp_key(arp_key, flow);
3896 check_start = arp_key;
3897 check_len = sizeof *arp_key;
3898 expected_bit = OVS_KEY_ATTR_ARP;
/* Mask consistency: a nonzero L3 mask requires an exact-match dl_type. */
3904 if (check_len > 0) { /* Happens only when 'is_mask'. */
3905 if (!is_all_zeros(check_start, check_len) &&
3906 flow->dl_type != htons(0xffff)) {
3907 return ODP_FIT_ERROR;
3909 expected_attrs |= UINT64_C(1) << expected_bit;
/* L4 dispatch on the key's nw_proto; later fragments carry no L4 attrs. */
3913 expected_bit = OVS_KEY_ATTR_UNSPEC;
3914 if (src_flow->nw_proto == IPPROTO_TCP
3915 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
3916 src_flow->dl_type == htons(ETH_TYPE_IPV6))
3917 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3919 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
3921 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
3922 const union ovs_key_tp *tcp_key;
3924 tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
3925 put_tp_key(tcp_key, flow);
3926 expected_bit = OVS_KEY_ATTR_TCP;
3928 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
3929 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
3930 flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
3932 } else if (src_flow->nw_proto == IPPROTO_UDP
3933 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
3934 src_flow->dl_type == htons(ETH_TYPE_IPV6))
3935 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3937 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
3939 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
3940 const union ovs_key_tp *udp_key;
3942 udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
3943 put_tp_key(udp_key, flow);
3944 expected_bit = OVS_KEY_ATTR_UDP;
3946 } else if (src_flow->nw_proto == IPPROTO_SCTP
3947 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
3948 src_flow->dl_type == htons(ETH_TYPE_IPV6))
3949 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3951 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
3953 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
3954 const union ovs_key_tp *sctp_key;
3956 sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
3957 put_tp_key(sctp_key, flow);
3958 expected_bit = OVS_KEY_ATTR_SCTP;
3960 } else if (src_flow->nw_proto == IPPROTO_ICMP
3961 && src_flow->dl_type == htons(ETH_TYPE_IP)
3962 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3964 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
3966 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
3967 const struct ovs_key_icmp *icmp_key;
3969 icmp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMP]);
/* ICMP type/code are stored in tp_src/tp_dst inside struct flow. */
3970 flow->tp_src = htons(icmp_key->icmp_type);
3971 flow->tp_dst = htons(icmp_key->icmp_code);
3972 expected_bit = OVS_KEY_ATTR_ICMP;
3974 } else if (src_flow->nw_proto == IPPROTO_ICMPV6
3975 && src_flow->dl_type == htons(ETH_TYPE_IPV6)
3976 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3978 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
3980 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
3981 const struct ovs_key_icmpv6 *icmpv6_key;
3983 icmpv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMPV6]);
3984 flow->tp_src = htons(icmpv6_key->icmpv6_type);
3985 flow->tp_dst = htons(icmpv6_key->icmpv6_code);
3986 expected_bit = OVS_KEY_ATTR_ICMPV6;
/* Neighbor Discovery payload only for NS/NA messages. */
3987 if (src_flow->tp_dst == htons(0) &&
3988 (src_flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
3989 src_flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
3991 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
3993 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
3994 const struct ovs_key_nd *nd_key;
3996 nd_key = nl_attr_get(attrs[OVS_KEY_ATTR_ND]);
3997 memcpy(&flow->nd_target, nd_key->nd_target,
3998 sizeof flow->nd_target);
3999 flow->arp_sha = nd_key->nd_sll;
4000 flow->arp_tha = nd_key->nd_tll;
/* A nonzero ND mask requires exact-match ICMPv6 type and code. */
4002 if (!is_all_zeros(nd_key, sizeof *nd_key) &&
4003 (flow->tp_src != htons(0xffff) ||
4004 flow->tp_dst != htons(0xffff))) {
4005 return ODP_FIT_ERROR;
4007 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
/* Same consistency rule at L4: nonzero port mask needs exact nw_proto. */
4014 if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
4015 if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
4016 return ODP_FIT_ERROR;
4018 expected_attrs |= UINT64_C(1) << expected_bit;
4023 return check_expectations(present_attrs, out_of_range_attr, expected_attrs,
4027 /* Parse 802.1Q header then encapsulated L3 attributes. */
/* Fills in the VLAN fields of 'flow' from 'attrs' (indexed by OVS_KEY_ATTR_*
 * type) and then parses the attributes nested inside OVS_KEY_ATTR_ENCAP.
 *
 * 'present_attrs' is a bitmap of the attribute types present in 'attrs';
 * 'out_of_range_attr' is nonzero if an attribute beyond OVS_KEY_ATTR_MAX was
 * seen; 'expected_attrs' accumulates the attributes a well-formed key should
 * carry; 'key'/'key_len' are the raw attributes, used by check_expectations()
 * for reporting.  When 'src_flow' != 'flow', 'flow' being built is a mask and
 * the mask-specific rules in the is_mask branches apply.
 *
 * Returns an ODP_FIT_* fitness value; the overall result is the worse of the
 * outer (VLAN) fitness and the fitness of the encapsulated attributes. */
4028 static enum odp_key_fitness
4029 parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
4030 uint64_t present_attrs, int out_of_range_attr,
4031 uint64_t expected_attrs, struct flow *flow,
4032 const struct nlattr *key, size_t key_len,
4033 const struct flow *src_flow)
4035 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
4036 bool is_mask = src_flow != flow;
4038 const struct nlattr *encap
4039 = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
4040 ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
4041 enum odp_key_fitness encap_fitness;
4042 enum odp_key_fitness fitness;
4044 /* Calculate fitness of outer attributes. */
/* For a flow, both VLAN and ENCAP are unconditionally expected; for a mask,
 * each is expected only if actually present. */
4046 expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
4047 (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
4049 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
4050 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
4052 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
4053 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
4056 fitness = check_expectations(present_attrs, out_of_range_attr,
4057 expected_attrs, key, key_len);
4060 * Remove the TPID from dl_type since it's not the real Ethertype. */
4061 flow->dl_type = htons(0);
4062 flow->vlan_tci = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)
4063 ? nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN])
/* Flow (non-mask) case: a VLAN TPID without a VLAN attribute means the key
 * is incomplete; a zero TCI means a truncated 802.1Q header; a nonzero TCI
 * must have the CFI bit set to be valid. */
4066 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN))) {
4067 return ODP_FIT_TOO_LITTLE;
4068 } else if (flow->vlan_tci == htons(0)) {
4069 /* Corner case for a truncated 802.1Q header. */
4070 if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
4071 return ODP_FIT_TOO_MUCH;
4074 } else if (!(flow->vlan_tci & htons(VLAN_CFI))) {
4075 VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
4076 "but CFI bit is not set", ntohs(flow->vlan_tci));
4077 return ODP_FIT_ERROR;
/* Mask case: without an ENCAP attribute there is nothing more to parse. */
4080 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
4085 /* Now parse the encapsulated attributes. */
/* From here on, 'attrs'/'present_attrs'/'out_of_range_attr' describe the
 * nested attributes, replacing the outer ones. */
4086 if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
4087 attrs, &present_attrs, &out_of_range_attr)) {
4088 return ODP_FIT_ERROR;
4092 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow, src_flow)) {
4093 return ODP_FIT_ERROR;
4095 encap_fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
4096 expected_attrs, flow, key, key_len,
4099 /* The overall fitness is the worse of the outer and inner attributes. */
4100 return MAX(fitness, encap_fitness);
/* Core translator shared by odp_flow_key_to_flow{,_udpif}() and
 * odp_flow_key_to_mask{,_udpif}().  Converts the 'key_len' bytes of
 * OVS_KEY_ATTR_* attributes in 'key' into 'flow'.  When translating a mask,
 * 'src_flow' points to the previously translated flow and
 * 'src_key'/'src_key_len' are that flow's attributes; for a plain flow
 * translation, 'src_flow' == 'flow' and 'src_key' is unused.  The final
 * parameter ('udpif'; used in the tunnel call below) selects the tunnel
 * metadata representation.  Returns an ODP_FIT_* value. */
4103 static enum odp_key_fitness
4104 odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
4105 const struct nlattr *src_key, size_t src_key_len,
4106 struct flow *flow, const struct flow *src_flow,
4109 const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
4110 uint64_t expected_attrs;
4111 uint64_t present_attrs;
4112 int out_of_range_attr;
4113 bool is_mask = src_flow != flow;
4115 memset(flow, 0, sizeof *flow);
4117 /* Parse attributes. */
4118 if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
4119 &out_of_range_attr)) {
4120 return ODP_FIT_ERROR;
/* Metadata attributes: recirc id, datapath hash, priority, mark. */
4125 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) {
4126 flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]);
4127 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID;
4128 } else if (is_mask) {
4129 /* Always exact match recirc_id if it is not specified. */
4130 flow->recirc_id = UINT32_MAX;
4133 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_DP_HASH)) {
4134 flow->dp_hash = nl_attr_get_u32(attrs[OVS_KEY_ATTR_DP_HASH]);
4135 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_DP_HASH;
4137 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
4138 flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
4139 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
4142 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
4143 flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
4144 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
/* Tunnel key: delegated to odp_tun_key_from_attr__(); only counted as
 * expected when it parsed perfectly. */
4147 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
4148 enum odp_key_fitness res;
4150 res = odp_tun_key_from_attr__(attrs[OVS_KEY_ATTR_TUNNEL],
4151 is_mask ? src_key : NULL,
4152 src_key_len, &src_flow->tunnel,
4153 &flow->tunnel, udpif);
4154 if (res == ODP_FIT_ERROR) {
4155 return ODP_FIT_ERROR;
4156 } else if (res == ODP_FIT_PERFECT) {
4157 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
4161 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
4162 flow->in_port.odp_port
4163 = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
4164 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
4165 } else if (!is_mask) {
4166 flow->in_port.odp_port = ODPP_NONE;
4169 /* Ethernet header. */
4170 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
4171 const struct ovs_key_ethernet *eth_key;
4173 eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
4174 put_ethernet_key(eth_key, flow);
4176 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
4180 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
4183 /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
4184 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
4186 return ODP_FIT_ERROR;
/* For a mask, the CFI bit in the source flow's vlan_tci decides whether the
 * 802.1Q path is taken; for a flow, the TPID in dl_type does. */
4190 ? (src_flow->vlan_tci & htons(VLAN_CFI)) != 0
4191 : src_flow->dl_type == htons(ETH_TYPE_VLAN)) {
4192 return parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
4193 expected_attrs, flow, key, key_len, src_flow);
4196 /* A missing VLAN mask means exact match on vlan_tci 0 (== no VLAN). */
4197 flow->vlan_tci = htons(0xffff);
4198 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
4199 flow->vlan_tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
4200 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
4203 return parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
4204 expected_attrs, flow, key, key_len, src_flow);
4207 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
4208 * structure in 'flow'. Returns an ODP_FIT_* value that indicates how well
4209 * 'key' fits our expectations for what a flow key should contain.
4211 * The 'in_port' will be the datapath's understanding of the port. The
4212 * caller will need to translate with odp_port_to_ofp_port() if the
4213 * OpenFlow port is needed.
4215 * This function doesn't take the packet itself as an argument because none of
4216 * the currently understood OVS_KEY_ATTR_* attributes require it. Currently,
4217 * it is always possible to infer which additional attribute(s) should appear
4218 * by looking at the attributes for lower-level protocols, e.g. if the network
4219 * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
4220 * know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
4221 * must be absent. */
/* Thin wrapper: passing 'flow' as both destination and source marks this as a
 * flow (not mask) translation, and the trailing 'false' selects the per-flow
 * tunnel-metadata format (contrast odp_flow_key_to_flow_udpif()). */
4222 enum odp_key_fitness
4223 odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
4226 return odp_flow_key_to_flow__(key, key_len, NULL, 0, flow, flow, false);
4229 /* Converts the 'mask_key_len' bytes of OVS_KEY_ATTR_* attributes in 'mask_key'
4230 * to a mask structure in 'mask'. 'flow' must be a previously translated flow
4231 * corresponding to 'mask' and similarly flow_key/flow_key_len must be the
4232 * attributes from that flow. Returns an ODP_FIT_* value that indicates how
4233 * well 'key' fits our expectations for what a flow key should contain. */
/* Thin wrapper: passing distinct 'mask' and 'flow' marks this as a mask
 * translation for odp_flow_key_to_flow__(); the flow's own attributes are
 * forwarded so mask attributes can be interpreted relative to it. */
4234 enum odp_key_fitness
4235 odp_flow_key_to_mask(const struct nlattr *mask_key, size_t mask_key_len,
4236 const struct nlattr *flow_key, size_t flow_key_len,
4237 struct flow *mask, const struct flow *flow)
4239 return odp_flow_key_to_flow__(mask_key, mask_key_len, flow_key, flow_key_len,
4243 /* These functions are similar to their non-"_udpif" variants but output a
4244 * 'flow' that is suitable for fast-path packet processing.
4246 * Some fields have different representation for flow setup and per-
4247 * packet processing (i.e. different between ofproto-dpif and userspace
4248 * datapath). In particular, with the non-"_udpif" functions, struct
4249 * tun_metadata is in the per-flow format (using 'present.map' and 'opts.u8');
4250 * with these functions, struct tun_metadata is in the per-packet format
4251 * (using 'present.len' and 'opts.gnv'). */
/* Identical to odp_flow_key_to_flow() except that the trailing 'true'
 * selects the per-packet ("udpif") tunnel metadata format. */
4252 enum odp_key_fitness
4253 odp_flow_key_to_flow_udpif(const struct nlattr *key, size_t key_len,
4256 return odp_flow_key_to_flow__(key, key_len, NULL, 0, flow, flow, true);
/* Like odp_flow_key_to_mask(), but produces the mask in the per-packet
 * ("udpif") tunnel metadata format (see the comment above
 * odp_flow_key_to_flow_udpif()). */
4259 enum odp_key_fitness
4260 odp_flow_key_to_mask_udpif(const struct nlattr *mask_key, size_t mask_key_len,
4261 const struct nlattr *flow_key, size_t flow_key_len,
4262 struct flow *mask, const struct flow *flow)
4264 return odp_flow_key_to_flow__(mask_key, mask_key_len, flow_key, flow_key_len,
4268 /* Returns 'fitness' as a string, for use in debug messages. */
/* The strings returned (e.g. "too_little") are string literals, so the
 * caller must not modify or free them. */
4270 odp_key_fitness_to_string(enum odp_key_fitness fitness)
4273 case ODP_FIT_PERFECT:
4275 case ODP_FIT_TOO_MUCH:
4277 case ODP_FIT_TOO_LITTLE:
4278 return "too_little";
4286 /* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
4287 * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
4288 * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
4289 * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
4290 * null, then the return value is not meaningful.) */
/* Also appends OVS_USERSPACE_ATTR_EGRESS_TUN_PORT when 'tunnel_out_port' is
 * not ODPP_NONE, and the OVS_USERSPACE_ATTR_ACTIONS flag when
 * 'include_actions' is true. */
4292 odp_put_userspace_action(uint32_t pid,
4293 const void *userdata, size_t userdata_size,
4294 odp_port_t tunnel_out_port,
4295 bool include_actions,
4296 struct ofpbuf *odp_actions)
4298 size_t userdata_ofs;
4301 offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
4302 nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
/* The userdata will begin just past the attribute header that the
 * nl_msg_put_unspec_zero() call below adds, hence the NLA_HDRLEN offset. */
4304 userdata_ofs = odp_actions->size + NLA_HDRLEN;
4306 /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
4307 * module before Linux 3.10 required the userdata to be exactly 8 bytes
4310 * - The kernel rejected shorter userdata with -ERANGE.
4312 * - The kernel silently dropped userdata beyond the first 8 bytes.
4314 * Thus, for maximum compatibility, always put at least 8 bytes. (We
4315 * separately disable features that required more than 8 bytes.) */
4316 memcpy(nl_msg_put_unspec_zero(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
4317 MAX(8, userdata_size)),
4318 userdata, userdata_size);
4322 if (tunnel_out_port != ODPP_NONE) {
4323 nl_msg_put_odp_port(odp_actions, OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,
4326 if (include_actions) {
4327 nl_msg_put_flag(odp_actions, OVS_USERSPACE_ATTR_ACTIONS);
4329 nl_msg_end_nested(odp_actions, offset);
4331 return userdata_ofs;
4335 odp_put_tunnel_action(const struct flow_tnl *tunnel,
4336 struct ofpbuf *odp_actions)
4338 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
4339 tun_key_to_attr(odp_actions, tunnel, tunnel, NULL);
4340 nl_msg_end_nested(odp_actions, offset);
4344 odp_put_tnl_push_action(struct ofpbuf *odp_actions,
4345 struct ovs_action_push_tnl *data)
4347 int size = offsetof(struct ovs_action_push_tnl, header);
4349 size += data->header_len;
4350 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_TUNNEL_PUSH, data, size);
4354 /* The commit_odp_actions() function and its helpers. */
4357 commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
4358 const void *key, size_t key_size)
4360 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
4361 nl_msg_put_unspec(odp_actions, key_type, key, key_size);
4362 nl_msg_end_nested(odp_actions, offset);
4365 /* Masked set actions have a mask following the data within the netlink
4366 * attribute. The unmasked bits in the data will be cleared as the data
4367 * is copied to the action. */
4369 commit_masked_set_action(struct ofpbuf *odp_actions,
4370 enum ovs_key_attr key_type,
4371 const void *key_, const void *mask_, size_t key_size)
4373 size_t offset = nl_msg_start_nested(odp_actions,
4374 OVS_ACTION_ATTR_SET_MASKED);
4375 char *data = nl_msg_put_unspec_uninit(odp_actions, key_type, key_size * 2);
4376 const char *key = key_, *mask = mask_;
4378 memcpy(data + key_size, mask, key_size);
4379 /* Clear unmasked bits while copying. */
4380 while (key_size--) {
4381 *data++ = *key++ & *mask++;
4383 nl_msg_end_nested(odp_actions, offset);
4386 /* If any of the flow key data that ODP actions can modify are different in
4387 * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
4388 * 'odp_actions' that change the flow tunneling information in key from
4389 * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
4390 * same way. In other words, operates the same as commit_odp_actions(), but
4391 * only on tunneling information. */
4393 commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
4394 struct ofpbuf *odp_actions)
4396 /* A valid IPV4_TUNNEL must have non-zero ip_dst. */
4397 if (flow->tunnel.ip_dst) {
4398 if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
4401 memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
4402 odp_put_tunnel_action(&base->tunnel, odp_actions);
4407 commit(enum ovs_key_attr attr, bool use_masked_set,
4408 const void *key, void *base, void *mask, size_t size,
4409 struct ofpbuf *odp_actions)
4411 if (memcmp(key, base, size)) {
4412 bool fully_masked = odp_mask_is_exact(attr, mask, size);
4414 if (use_masked_set && !fully_masked) {
4415 commit_masked_set_action(odp_actions, attr, key, mask, size);
4417 if (!fully_masked) {
4418 memset(mask, 0xff, size);
4420 commit_set_action(odp_actions, attr, key, size);
4422 memcpy(base, key, size);
4425 /* Mask bits are set when we have either read or set the corresponding
4426 * values. Masked bits will be exact-matched, no need to set them
4427 * if the value did not actually change. */
4433 get_ethernet_key(const struct flow *flow, struct ovs_key_ethernet *eth)
4435 eth->eth_src = flow->dl_src;
4436 eth->eth_dst = flow->dl_dst;
4440 put_ethernet_key(const struct ovs_key_ethernet *eth, struct flow *flow)
4442 flow->dl_src = eth->eth_src;
4443 flow->dl_dst = eth->eth_dst;
4447 commit_set_ether_addr_action(const struct flow *flow, struct flow *base_flow,
4448 struct ofpbuf *odp_actions,
4449 struct flow_wildcards *wc,
4452 struct ovs_key_ethernet key, base, mask;
4454 get_ethernet_key(flow, &key);
4455 get_ethernet_key(base_flow, &base);
4456 get_ethernet_key(&wc->masks, &mask);
4458 if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
4459 &key, &base, &mask, sizeof key, odp_actions)) {
4460 put_ethernet_key(&base, base_flow);
4461 put_ethernet_key(&mask, &wc->masks);
4466 pop_vlan(struct flow *base,
4467 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
4469 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
4471 if (base->vlan_tci & htons(VLAN_CFI)) {
4472 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
4478 commit_vlan_action(ovs_be16 vlan_tci, struct flow *base,
4479 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
4481 if (base->vlan_tci == vlan_tci) {
4485 pop_vlan(base, odp_actions, wc);
4486 if (vlan_tci & htons(VLAN_CFI)) {
4487 struct ovs_action_push_vlan vlan;
4489 vlan.vlan_tpid = htons(ETH_TYPE_VLAN);
4490 vlan.vlan_tci = vlan_tci;
4491 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
4492 &vlan, sizeof vlan);
4494 base->vlan_tci = vlan_tci;
4497 /* Wildcarding already done at action translation time. */
/* Appends MPLS set/POP_MPLS/PUSH_MPLS actions to 'odp_actions' that
 * transform the MPLS label stack of 'base' into that of 'flow', updating
 * 'base' in step.  "LSE" below means label stack entry. */
4499 commit_mpls_action(const struct flow *flow, struct flow *base,
4500 struct ofpbuf *odp_actions)
4502 int base_n = flow_count_mpls_labels(base, NULL);
4503 int flow_n = flow_count_mpls_labels(flow, NULL);
4504 int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
/* First, shrink 'base' until only LSEs it has in common with 'flow'
 * remain, rewriting the topmost one with a set action where possible. */
4507 while (base_n > common_n) {
4508 if (base_n - 1 == common_n && flow_n > common_n) {
4509 /* If there is only one more LSE in base than there are common
4510 * between base and flow; and flow has at least one more LSE than
4511 * is common then the topmost LSE of base may be updated using
4513 struct ovs_key_mpls mpls_key;
4515 mpls_key.mpls_lse = flow->mpls_lse[flow_n - base_n];
4516 commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
4517 &mpls_key, sizeof mpls_key);
4518 flow_set_mpls_lse(base, 0, mpls_key.mpls_lse);
4521 /* Otherwise, if there more LSEs in base than are common between
4522 * base and flow then pop the topmost one. */
4526 /* If all the LSEs are to be popped and this is not the outermost
4527 * LSE then use ETH_TYPE_MPLS as the ethertype parameter of the
4528 * POP_MPLS action instead of flow->dl_type.
4530 * This is because the POP_MPLS action requires its ethertype
4531 * argument to be an MPLS ethernet type but in this case
4532 * flow->dl_type will be a non-MPLS ethernet type.
4534 * When the final POP_MPLS action occurs it use flow->dl_type and
4535 * the and the resulting packet will have the desired dl_type. */
4536 if ((!eth_type_mpls(flow->dl_type)) && base_n > 1) {
4537 dl_type = htons(ETH_TYPE_MPLS);
4539 dl_type = flow->dl_type;
4541 nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
4542 popped = flow_pop_mpls(base, base_n, flow->dl_type, NULL);
4548 /* If, after the above popping and setting, there are more LSEs in flow
4549 * than base then some LSEs need to be pushed. */
4550 while (base_n < flow_n) {
4551 struct ovs_action_push_mpls *mpls;
/* Each iteration pushes the next-outermost missing LSE from 'flow'. */
4553 mpls = nl_msg_put_unspec_zero(odp_actions,
4554 OVS_ACTION_ATTR_PUSH_MPLS,
4556 mpls->mpls_ethertype = flow->dl_type;
4557 mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
4558 flow_push_mpls(base, base_n, mpls->mpls_ethertype, NULL);
4559 flow_set_mpls_lse(base, 0, mpls->mpls_lse);
4565 get_ipv4_key(const struct flow *flow, struct ovs_key_ipv4 *ipv4, bool is_mask)
4567 ipv4->ipv4_src = flow->nw_src;
4568 ipv4->ipv4_dst = flow->nw_dst;
4569 ipv4->ipv4_proto = flow->nw_proto;
4570 ipv4->ipv4_tos = flow->nw_tos;
4571 ipv4->ipv4_ttl = flow->nw_ttl;
4572 ipv4->ipv4_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
4576 put_ipv4_key(const struct ovs_key_ipv4 *ipv4, struct flow *flow, bool is_mask)
4578 flow->nw_src = ipv4->ipv4_src;
4579 flow->nw_dst = ipv4->ipv4_dst;
4580 flow->nw_proto = ipv4->ipv4_proto;
4581 flow->nw_tos = ipv4->ipv4_tos;
4582 flow->nw_ttl = ipv4->ipv4_ttl;
4583 flow->nw_frag = odp_to_ovs_frag(ipv4->ipv4_frag, is_mask);
4587 commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow,
4588 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4591 struct ovs_key_ipv4 key, mask, base;
4593 /* Check that nw_proto and nw_frag remain unchanged. */
4594 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
4595 flow->nw_frag == base_flow->nw_frag);
4597 get_ipv4_key(flow, &key, false);
4598 get_ipv4_key(base_flow, &base, false);
4599 get_ipv4_key(&wc->masks, &mask, true);
4600 mask.ipv4_proto = 0; /* Not writeable. */
4601 mask.ipv4_frag = 0; /* Not writable. */
4603 if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
4605 put_ipv4_key(&base, base_flow, false);
4606 if (mask.ipv4_proto != 0) { /* Mask was changed by commit(). */
4607 put_ipv4_key(&mask, &wc->masks, true);
4613 get_ipv6_key(const struct flow *flow, struct ovs_key_ipv6 *ipv6, bool is_mask)
4615 memcpy(ipv6->ipv6_src, &flow->ipv6_src, sizeof ipv6->ipv6_src);
4616 memcpy(ipv6->ipv6_dst, &flow->ipv6_dst, sizeof ipv6->ipv6_dst);
4617 ipv6->ipv6_label = flow->ipv6_label;
4618 ipv6->ipv6_proto = flow->nw_proto;
4619 ipv6->ipv6_tclass = flow->nw_tos;
4620 ipv6->ipv6_hlimit = flow->nw_ttl;
4621 ipv6->ipv6_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
4625 put_ipv6_key(const struct ovs_key_ipv6 *ipv6, struct flow *flow, bool is_mask)
4627 memcpy(&flow->ipv6_src, ipv6->ipv6_src, sizeof flow->ipv6_src);
4628 memcpy(&flow->ipv6_dst, ipv6->ipv6_dst, sizeof flow->ipv6_dst);
4629 flow->ipv6_label = ipv6->ipv6_label;
4630 flow->nw_proto = ipv6->ipv6_proto;
4631 flow->nw_tos = ipv6->ipv6_tclass;
4632 flow->nw_ttl = ipv6->ipv6_hlimit;
4633 flow->nw_frag = odp_to_ovs_frag(ipv6->ipv6_frag, is_mask);
4637 commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow,
4638 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4641 struct ovs_key_ipv6 key, mask, base;
4643 /* Check that nw_proto and nw_frag remain unchanged. */
4644 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
4645 flow->nw_frag == base_flow->nw_frag);
4647 get_ipv6_key(flow, &key, false);
4648 get_ipv6_key(base_flow, &base, false);
4649 get_ipv6_key(&wc->masks, &mask, true);
4650 mask.ipv6_proto = 0; /* Not writeable. */
4651 mask.ipv6_frag = 0; /* Not writable. */
4653 if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
4655 put_ipv6_key(&base, base_flow, false);
4656 if (mask.ipv6_proto != 0) { /* Mask was changed by commit(). */
4657 put_ipv6_key(&mask, &wc->masks, true);
4663 get_arp_key(const struct flow *flow, struct ovs_key_arp *arp)
4665 /* ARP key has padding, clear it. */
4666 memset(arp, 0, sizeof *arp);
4668 arp->arp_sip = flow->nw_src;
4669 arp->arp_tip = flow->nw_dst;
4670 arp->arp_op = htons(flow->nw_proto);
4671 arp->arp_sha = flow->arp_sha;
4672 arp->arp_tha = flow->arp_tha;
4676 put_arp_key(const struct ovs_key_arp *arp, struct flow *flow)
4678 flow->nw_src = arp->arp_sip;
4679 flow->nw_dst = arp->arp_tip;
4680 flow->nw_proto = ntohs(arp->arp_op);
4681 flow->arp_sha = arp->arp_sha;
4682 flow->arp_tha = arp->arp_tha;
4685 static enum slow_path_reason
4686 commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
4687 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
4689 struct ovs_key_arp key, mask, base;
4691 get_arp_key(flow, &key);
4692 get_arp_key(base_flow, &base);
4693 get_arp_key(&wc->masks, &mask);
4695 if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
4697 put_arp_key(&base, base_flow);
4698 put_arp_key(&mask, &wc->masks);
4705 get_nd_key(const struct flow *flow, struct ovs_key_nd *nd)
4707 memcpy(nd->nd_target, &flow->nd_target, sizeof flow->nd_target);
4708 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
4709 nd->nd_sll = flow->arp_sha;
4710 nd->nd_tll = flow->arp_tha;
4714 put_nd_key(const struct ovs_key_nd *nd, struct flow *flow)
4716 memcpy(&flow->nd_target, nd->nd_target, sizeof flow->nd_target);
4717 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
4718 flow->arp_sha = nd->nd_sll;
4719 flow->arp_tha = nd->nd_tll;
4722 static enum slow_path_reason
4723 commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
4724 struct ofpbuf *odp_actions,
4725 struct flow_wildcards *wc, bool use_masked)
4727 struct ovs_key_nd key, mask, base;
4729 get_nd_key(flow, &key);
4730 get_nd_key(base_flow, &base);
4731 get_nd_key(&wc->masks, &mask);
4733 if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
4735 put_nd_key(&base, base_flow);
4736 put_nd_key(&mask, &wc->masks);
4743 static enum slow_path_reason
4744 commit_set_nw_action(const struct flow *flow, struct flow *base,
4745 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4748 /* Check if 'flow' really has an L3 header. */
4749 if (!flow->nw_proto) {
4753 switch (ntohs(base->dl_type)) {
4755 commit_set_ipv4_action(flow, base, odp_actions, wc, use_masked);
4759 commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
4760 return commit_set_nd_action(flow, base, odp_actions, wc, use_masked);
4763 return commit_set_arp_action(flow, base, odp_actions, wc);
4769 /* TCP, UDP, and SCTP keys have the same layout. */
4770 BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
4771 sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
4774 get_tp_key(const struct flow *flow, union ovs_key_tp *tp)
4776 tp->tcp.tcp_src = flow->tp_src;
4777 tp->tcp.tcp_dst = flow->tp_dst;
4781 put_tp_key(const union ovs_key_tp *tp, struct flow *flow)
4783 flow->tp_src = tp->tcp.tcp_src;
4784 flow->tp_dst = tp->tcp.tcp_dst;
4788 commit_set_port_action(const struct flow *flow, struct flow *base_flow,
4789 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4792 enum ovs_key_attr key_type;
4793 union ovs_key_tp key, mask, base;
4795 /* Check if 'flow' really has an L3 header. */
4796 if (!flow->nw_proto) {
4800 if (!is_ip_any(base_flow)) {
4804 if (flow->nw_proto == IPPROTO_TCP) {
4805 key_type = OVS_KEY_ATTR_TCP;
4806 } else if (flow->nw_proto == IPPROTO_UDP) {
4807 key_type = OVS_KEY_ATTR_UDP;
4808 } else if (flow->nw_proto == IPPROTO_SCTP) {
4809 key_type = OVS_KEY_ATTR_SCTP;
4814 get_tp_key(flow, &key);
4815 get_tp_key(base_flow, &base);
4816 get_tp_key(&wc->masks, &mask);
4818 if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
4820 put_tp_key(&base, base_flow);
4821 put_tp_key(&mask, &wc->masks);
4826 commit_set_priority_action(const struct flow *flow, struct flow *base_flow,
4827 struct ofpbuf *odp_actions,
4828 struct flow_wildcards *wc,
4831 uint32_t key, mask, base;
4833 key = flow->skb_priority;
4834 base = base_flow->skb_priority;
4835 mask = wc->masks.skb_priority;
4837 if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
4838 sizeof key, odp_actions)) {
4839 base_flow->skb_priority = base;
4840 wc->masks.skb_priority = mask;
4845 commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow,
4846 struct ofpbuf *odp_actions,
4847 struct flow_wildcards *wc,
4850 uint32_t key, mask, base;
4852 key = flow->pkt_mark;
4853 base = base_flow->pkt_mark;
4854 mask = wc->masks.pkt_mark;
4856 if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
4857 sizeof key, odp_actions)) {
4858 base_flow->pkt_mark = base;
4859 wc->masks.pkt_mark = mask;
4863 /* If any of the flow key data that ODP actions can modify are different in
4864 * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
4865 * key from 'base' into 'flow', and then changes 'base' the same way. Does not
4866 * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
4867 * in addition to this function if needed. Sets fields in 'wc' that are
4868 * used as part of the action.
4870 * Returns a reason to force processing the flow's packets into the userspace
4871 * slow path, if there is one, otherwise 0. */
4872 enum slow_path_reason
4873 commit_odp_actions(const struct flow *flow, struct flow *base,
4874 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4877 enum slow_path_reason slow;
4879 commit_set_ether_addr_action(flow, base, odp_actions, wc, use_masked);
4880 slow = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
4881 commit_set_port_action(flow, base, odp_actions, wc, use_masked);
4882 commit_mpls_action(flow, base, odp_actions);
4883 commit_vlan_action(flow->vlan_tci, base, odp_actions, wc);
4884 commit_set_priority_action(flow, base, odp_actions, wc, use_masked);
4885 commit_set_pkt_mark_action(flow, base, odp_actions, wc, use_masked);