2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include <arpa/inet.h>
23 #include <netinet/in.h>
24 #include <netinet/icmp6.h>
28 #include "byte-order.h"
31 #include "dynamic-string.h"
38 #include "unaligned.h"
40 #include "openvswitch/vlog.h"
42 VLOG_DEFINE_THIS_MODULE(odp_util);
44 /* The interface between userspace and kernel uses an "OVS_*" prefix.
45 * Since this is fairly non-specific for the OVS userspace components,
46 * "ODP_*" (Open vSwitch Datapath) is used as the prefix for
47 * interactions with the datapath.
50 /* The set of characters that may separate one action or one key attribute
52 static const char *delimiters = ", \t\r\n";
54 static int parse_odp_key_mask_attr(const char *, const struct simap *port_names,
55 struct ofpbuf *, struct ofpbuf *);
56 static void format_odp_key_attr(const struct nlattr *a,
57 const struct nlattr *ma,
58 const struct hmap *portno_names, struct ds *ds,
61 /* Returns one the following for the action with the given OVS_ACTION_ATTR_*
64 * - For an action whose argument has a fixed length, returned that
65 * nonnegative length in bytes.
67 * - For an action with a variable-length argument, returns -2.
69 * - For an invalid 'type', returns -1. */
71 odp_action_len(uint16_t type)
/* Reject attribute types beyond the kernel-defined maximum.
 * NOTE(review): the body of this guard (presumably "return -1;") is not
 * visible in this extract — confirm against upstream odp-util.c. */
73 if (type > OVS_ACTION_ATTR_MAX) {
/* Table of payload sizes, one case per datapath action type. */
77 switch ((enum ovs_action_attr) type) {
78 case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
79 case OVS_ACTION_ATTR_TUNNEL_PUSH: return -2;
80 case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
81 case OVS_ACTION_ATTR_USERSPACE: return -2;
82 case OVS_ACTION_ATTR_PUSH_VLAN: return sizeof(struct ovs_action_push_vlan);
83 case OVS_ACTION_ATTR_POP_VLAN: return 0;
84 case OVS_ACTION_ATTR_PUSH_MPLS: return sizeof(struct ovs_action_push_mpls);
85 case OVS_ACTION_ATTR_POP_MPLS: return sizeof(ovs_be16);
86 case OVS_ACTION_ATTR_RECIRC: return sizeof(uint32_t);
87 case OVS_ACTION_ATTR_HASH: return sizeof(struct ovs_action_hash);
88 case OVS_ACTION_ATTR_SET: return -2;
89 case OVS_ACTION_ATTR_SET_MASKED: return -2;
90 case OVS_ACTION_ATTR_SAMPLE: return -2;
/* Sentinel values, not real actions; they share the invalid-type result. */
92 case OVS_ACTION_ATTR_UNSPEC:
93 case __OVS_ACTION_ATTR_MAX:
100 /* Returns a string form of 'attr'. The return value is either a statically
101 * allocated constant string or the 'bufsize'-byte buffer 'namebuf'. 'bufsize'
102 * should be at least OVS_KEY_ATTR_BUFSIZE. */
/* Worst case is "key" + longest decimal unsigned int + NUL. */
103 enum { OVS_KEY_ATTR_BUFSIZE = 3 + INT_STRLEN(unsigned int) + 1 };
105 ovs_key_attr_to_string(enum ovs_key_attr attr, char *namebuf, size_t bufsize)
/* One human-readable name per OVS_KEY_ATTR_* flow-key attribute. */
108 case OVS_KEY_ATTR_UNSPEC: return "unspec";
109 case OVS_KEY_ATTR_ENCAP: return "encap";
110 case OVS_KEY_ATTR_PRIORITY: return "skb_priority";
111 case OVS_KEY_ATTR_SKB_MARK: return "skb_mark";
112 case OVS_KEY_ATTR_TUNNEL: return "tunnel";
113 case OVS_KEY_ATTR_IN_PORT: return "in_port";
114 case OVS_KEY_ATTR_ETHERNET: return "eth";
115 case OVS_KEY_ATTR_VLAN: return "vlan";
116 case OVS_KEY_ATTR_ETHERTYPE: return "eth_type";
117 case OVS_KEY_ATTR_IPV4: return "ipv4";
118 case OVS_KEY_ATTR_IPV6: return "ipv6";
119 case OVS_KEY_ATTR_TCP: return "tcp";
120 case OVS_KEY_ATTR_TCP_FLAGS: return "tcp_flags";
121 case OVS_KEY_ATTR_UDP: return "udp";
122 case OVS_KEY_ATTR_SCTP: return "sctp";
123 case OVS_KEY_ATTR_ICMP: return "icmp";
124 case OVS_KEY_ATTR_ICMPV6: return "icmpv6";
125 case OVS_KEY_ATTR_ARP: return "arp";
126 case OVS_KEY_ATTR_ND: return "nd";
127 case OVS_KEY_ATTR_MPLS: return "mpls";
128 case OVS_KEY_ATTR_DP_HASH: return "dp_hash";
129 case OVS_KEY_ATTR_RECIRC_ID: return "recirc_id";
131 case __OVS_KEY_ATTR_MAX:
/* Unknown attribute: fall back to a generic "key<N>" name in 'namebuf'. */
133 snprintf(namebuf, bufsize, "key%u", (unsigned int) attr);
/* Appends a generic rendering of action 'a' to 'ds' as
 * "action<type>(<hex bytes>)", used when the action type is unknown or its
 * payload is malformed. */
139 format_generic_odp_action(struct ds *ds, const struct nlattr *a)
141 size_t len = nl_attr_get_size(a);
143 ds_put_format(ds, "action%"PRId16, nl_attr_type(a));
145 const uint8_t *unspec;
148 unspec = nl_attr_get(a);
/* Dump the raw payload as space-separated hex, wrapped in parentheses. */
149 for (i = 0; i < len; i++) {
150 ds_put_char(ds, i ? ' ': '(');
151 ds_put_format(ds, "%02x", unspec[i]);
153 ds_put_char(ds, ')');
/* Appends a textual form of sample action 'attr' to 'ds':
 * "sample(sample=P%,actions(...))".  On a Netlink parse failure it emits
 * "sample(error)" instead. */
158 format_odp_sample_action(struct ds *ds, const struct nlattr *attr)
160 static const struct nl_policy ovs_sample_policy[] = {
161 [OVS_SAMPLE_ATTR_PROBABILITY] = { .type = NL_A_U32 },
162 [OVS_SAMPLE_ATTR_ACTIONS] = { .type = NL_A_NESTED }
164 struct nlattr *a[ARRAY_SIZE(ovs_sample_policy)];
166 const struct nlattr *nla_acts;
169 ds_put_cstr(ds, "sample");
171 if (!nl_parse_nested(attr, ovs_sample_policy, a, ARRAY_SIZE(a))) {
172 ds_put_cstr(ds, "(error)");
/* Convert the u32 probability to a percentage (denominator line is not
 * visible in this extract; presumably UINT32_MAX — confirm upstream). */
176 percentage = (100.0 * nl_attr_get_u32(a[OVS_SAMPLE_ATTR_PROBABILITY])) /
179 ds_put_format(ds, "(sample=%.1f%%,", percentage);
/* Recursively format the nested action list. */
181 ds_put_cstr(ds, "actions(");
182 nla_acts = nl_attr_get(a[OVS_SAMPLE_ATTR_ACTIONS]);
183 len = nl_attr_get_size(a[OVS_SAMPLE_ATTR_ACTIONS]);
184 format_odp_actions(ds, nla_acts, len);
185 ds_put_format(ds, "))");
/* Returns the short string name for slow-path 'reason', generated from the
 * SPR table macro. */
189 slow_path_reason_to_string(uint32_t reason)
191 switch ((enum slow_path_reason) reason) {
192 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
/* Returns the long human-readable explanation for slow-path 'reason',
 * generated from the same SPR table as slow_path_reason_to_string(). */
201 slow_path_reason_to_explanation(enum slow_path_reason reason)
204 #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
/* Parses a flag list from 's' using 'bit_to_string' to map bits to names.
 * Accepts numeric "FLAGS/MASK" (only when 'res_mask' is nonnull), masked
 * "+name-name" syntax, or a plain comma-separated name list.  Only bits in
 * 'allowed' may be set.  Stores flags in '*res_flags' (and the mask in
 * '*res_mask' when requested); returns -EINVAL on an unknown or disallowed
 * flag.  NOTE(review): several branches (error returns, bookkeeping) are
 * missing from this extract — confirm details against upstream odp-util.c. */
213 parse_flags(const char *s, const char *(*bit_to_string)(uint32_t),
214 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
219 /* Parse masked flags in numeric format? */
220 if (res_mask && ovs_scan(s, "%"SCNi32"/%"SCNi32"%n",
221 res_flags, res_mask, &n) && n > 0) {
/* Reject any bit outside 'allowed' in either value or mask. */
222 if (*res_flags & ~allowed || *res_mask & ~allowed) {
230 if (res_mask && (*s == '+' || *s == '-')) {
231 uint32_t flags = 0, mask = 0;
233 /* Parse masked flags. */
234 while (s[n] != ')') {
241 } else if (s[n] == '-') {
/* Flag name extends to the next '+', '-', or closing ')'. */
248 name_len = strcspn(s + n, "+-)");
/* Probe every single-bit value for a matching name. */
250 for (bit = 1; bit; bit <<= 1) {
251 const char *fname = bit_to_string(bit);
259 if (len != name_len) {
262 if (!strncmp(s + n, fname, len)) {
264 /* bit already set. */
267 if (!(bit & allowed)) {
279 return -EINVAL; /* Unknown flag name */
289 /* Parse unmasked flags. If a flag is present, it is set, otherwise
291 while (s[n] != ')') {
292 unsigned long long int flags;
/* A numeric token sets several flags at once. */
296 if (ovs_scan(&s[n], "%lli%n", &flags, &n0)) {
297 if (flags & ~allowed) {
300 n += n0 + (s[n + n0] == ',');
305 for (bit = 1; bit; bit <<= 1) {
306 const char *name = bit_to_string(bit);
/* Name must be followed by a separator to count as a match. */
314 if (!strncmp(s + n, name, len) &&
315 (s[n + len] == ',' || s[n + len] == ')')) {
316 if (!(bit & allowed)) {
320 n += len + (s[n + len] == ',');
/* Unmasked syntax implies an exact match on all bits. */
332 *res_mask = UINT32_MAX;
/* Appends a textual form of userspace action 'attr' to 'ds':
 * "userspace(pid=N,...)", decoding a recognized user_action_cookie
 * (sFlow, slow_path, flow_sample, ipfix) when the userdata matches one,
 * otherwise dumping the raw userdata as hex. */
338 format_odp_userspace_action(struct ds *ds, const struct nlattr *attr)
340 static const struct nl_policy ovs_userspace_policy[] = {
341 [OVS_USERSPACE_ATTR_PID] = { .type = NL_A_U32 },
342 [OVS_USERSPACE_ATTR_USERDATA] = { .type = NL_A_UNSPEC,
344 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = { .type = NL_A_U32,
347 struct nlattr *a[ARRAY_SIZE(ovs_userspace_policy)];
348 const struct nlattr *userdata_attr;
349 const struct nlattr *tunnel_out_port_attr;
351 if (!nl_parse_nested(attr, ovs_userspace_policy, a, ARRAY_SIZE(a))) {
352 ds_put_cstr(ds, "userspace(error)");
356 ds_put_format(ds, "userspace(pid=%"PRIu32,
357 nl_attr_get_u32(a[OVS_USERSPACE_ATTR_PID]));
359 userdata_attr = a[OVS_USERSPACE_ATTR_USERDATA];
362 const uint8_t *userdata = nl_attr_get(userdata_attr);
363 size_t userdata_len = nl_attr_get_size(userdata_attr);
364 bool userdata_unspec = true;
365 union user_action_cookie cookie;
/* Only try to decode userdata that could plausibly be a cookie: at least
 * large enough to hold the type tag, no larger than the union. */
367 if (userdata_len >= sizeof cookie.type
368 && userdata_len <= sizeof cookie) {
370 memset(&cookie, 0, sizeof cookie);
371 memcpy(&cookie, userdata, userdata_len);
373 userdata_unspec = false;
/* Dispatch on exact cookie size plus type tag. */
375 if (userdata_len == sizeof cookie.sflow
376 && cookie.type == USER_ACTION_COOKIE_SFLOW) {
377 ds_put_format(ds, ",sFlow("
378 "vid=%"PRIu16",pcp=%"PRIu8",output=%"PRIu32")",
379 vlan_tci_to_vid(cookie.sflow.vlan_tci),
380 vlan_tci_to_pcp(cookie.sflow.vlan_tci),
381 cookie.sflow.output);
382 } else if (userdata_len == sizeof cookie.slow_path
383 && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
384 ds_put_cstr(ds, ",slow_path(");
385 format_flags(ds, slow_path_reason_to_string,
386 cookie.slow_path.reason, ',');
387 ds_put_format(ds, ")");
388 } else if (userdata_len == sizeof cookie.flow_sample
389 && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
390 ds_put_format(ds, ",flow_sample(probability=%"PRIu16
391 ",collector_set_id=%"PRIu32
392 ",obs_domain_id=%"PRIu32
393 ",obs_point_id=%"PRIu32")",
394 cookie.flow_sample.probability,
395 cookie.flow_sample.collector_set_id,
396 cookie.flow_sample.obs_domain_id,
397 cookie.flow_sample.obs_point_id);
398 } else if (userdata_len >= sizeof cookie.ipfix
399 && cookie.type == USER_ACTION_COOKIE_IPFIX) {
400 ds_put_format(ds, ",ipfix(output_port=%"PRIu32")",
401 cookie.ipfix.output_odp_port);
/* Unrecognized cookie: fall back to the raw hex dump below. */
403 userdata_unspec = true;
407 if (userdata_unspec) {
409 ds_put_format(ds, ",userdata(");
410 for (i = 0; i < userdata_len; i++) {
411 ds_put_format(ds, "%02x", userdata[i]);
413 ds_put_char(ds, ')');
417 tunnel_out_port_attr = a[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT];
418 if (tunnel_out_port_attr) {
419 ds_put_format(ds, ",tunnel_out_port=%"PRIu32,
420 nl_attr_get_u32(tunnel_out_port_attr));
423 ds_put_char(ds, ')');
/* Appends "vid=...,pcp=...[,cfi=0]," fields for VLAN TCI 'tci' under 'mask'
 * to 'ds'.  With 'verbose' false, fields whose value and mask are both zero
 * are omitted; partially-masked fields get a "/0x..." mask suffix. */
427 format_vlan_tci(struct ds *ds, ovs_be16 tci, ovs_be16 mask, bool verbose)
429 if (verbose || vlan_tci_to_vid(tci) || vlan_tci_to_vid(mask)) {
430 ds_put_format(ds, "vid=%"PRIu16, vlan_tci_to_vid(tci));
431 if (vlan_tci_to_vid(mask) != VLAN_VID_MASK) { /* Partially masked. */
432 ds_put_format(ds, "/0x%"PRIx16, vlan_tci_to_vid(mask));
434 ds_put_char(ds, ',');
436 if (verbose || vlan_tci_to_pcp(tci) || vlan_tci_to_pcp(mask)) {
437 ds_put_format(ds, "pcp=%d", vlan_tci_to_pcp(tci));
438 if (vlan_tci_to_pcp(mask) != (VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) {
439 ds_put_format(ds, "/0x%x", vlan_tci_to_pcp(mask));
441 ds_put_char(ds, ',');
/* CFI is implicit when set; only a cleared CFI bit is printed. */
443 if (!(tci & htons(VLAN_CFI))) {
444 ds_put_cstr(ds, "cfi=0");
445 ds_put_char(ds, ',');
/* Appends the decoded fields of a single MPLS label-stack entry to 'ds' as
 * "label=L,tc=T,ttl=N,bos=B". */
451 format_mpls_lse(struct ds *ds, ovs_be32 mpls_lse)
453 ds_put_format(ds, "label=%"PRIu32",tc=%d,ttl=%d,bos=%d",
454 mpls_lse_to_label(mpls_lse),
455 mpls_lse_to_tc(mpls_lse),
456 mpls_lse_to_ttl(mpls_lse),
457 mpls_lse_to_bos(mpls_lse));
/* Appends 'n' MPLS key/mask entries to 'ds'.  A single entry is printed
 * field-by-field (with per-field masks when 'mpls_mask' is nonnull); the
 * multi-entry path below prints each LSE as a raw "lseN=0x..." value.
 * NOTE(review): the branch structure between the single- and multi-entry
 * paths is partly missing from this extract. */
461 format_mpls(struct ds *ds, const struct ovs_key_mpls *mpls_key,
462 const struct ovs_key_mpls *mpls_mask, int n)
465 ovs_be32 key = mpls_key->mpls_lse;
467 if (mpls_mask == NULL) {
468 format_mpls_lse(ds, key);
470 ovs_be32 mask = mpls_mask->mpls_lse;
472 ds_put_format(ds, "label=%"PRIu32"/0x%x,tc=%d/%x,ttl=%d/0x%x,bos=%d/%x",
473 mpls_lse_to_label(key), mpls_lse_to_label(mask),
474 mpls_lse_to_tc(key), mpls_lse_to_tc(mask),
475 mpls_lse_to_ttl(key), mpls_lse_to_ttl(mask),
476 mpls_lse_to_bos(key), mpls_lse_to_bos(mask));
/* Multi-entry stack: dump raw 32-bit LSE values, masked where present. */
481 for (i = 0; i < n; i++) {
482 ds_put_format(ds, "lse%d=%#"PRIx32,
483 i, ntohl(mpls_key[i].mpls_lse));
485 ds_put_format(ds, "/%#"PRIx32, ntohl(mpls_mask[i].mpls_lse));
487 ds_put_char(ds, ',');
/* Appends "recirc(ID)" for a recirculation action to 'ds'. */
494 format_odp_recirc_action(struct ds *ds, uint32_t recirc_id)
496 ds_put_format(ds, "recirc(%"PRIu32")", recirc_id);
/* Appends "hash(hash_l4(BASIS))" for hash action 'hash_act' to 'ds', or an
 * explicit unknown-algorithm note for unrecognized algorithms. */
500 format_odp_hash_action(struct ds *ds, const struct ovs_action_hash *hash_act)
502 ds_put_format(ds, "hash(");
504 if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
505 ds_put_format(ds, "hash_l4(%"PRIu32")", hash_act->hash_basis);
507 ds_put_format(ds, "Unknown hash algorithm(%"PRIu32")",
510 ds_put_format(ds, ")");
/* Appends the UDP header that follows IPv4 header 'ip' to 'ds' as
 * "udp(src=...,dst=...,csum=0x...),".  Callers use its return value as a
 * pointer to the tunnel header past the UDP header — the return statement
 * itself is not visible in this extract; confirm upstream. */
514 format_udp_tnl_push_header(struct ds *ds, const struct ip_header *ip)
516 const struct udp_header *udp;
/* UDP header sits immediately after the (option-less) IPv4 header. */
518 udp = (const struct udp_header *) (ip + 1);
519 ds_put_format(ds, "udp(src=%"PRIu16",dst=%"PRIu16",csum=0x%"PRIx16"),",
520 ntohs(udp->udp_src), ntohs(udp->udp_dst),
521 ntohs(udp->udp_csum));
/* Appends a textual form of the prebuilt tunnel header in 'data' to 'ds':
 * Ethernet, IPv4, then the tunnel-type-specific part (VXLAN, Geneve, or
 * GRE with optional csum/key/seq fields). */
527 format_odp_tnl_push_header(struct ds *ds, struct ovs_action_push_tnl *data)
529 const struct eth_header *eth;
530 const struct ip_header *ip;
533 eth = (const struct eth_header *)data->header;
536 ip = (const struct ip_header *)l3;
539 ds_put_format(ds, "header(size=%"PRIu8",type=%"PRIu8",eth(dst=",
540 data->header_len, data->tnl_type);
541 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_dst));
542 ds_put_format(ds, ",src=");
543 ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_src));
544 ds_put_format(ds, ",dl_type=0x%04"PRIx16"),", ntohs(eth->eth_type));
/* IPv4 addresses are only 16-bit aligned inside the packed header. */
547 ds_put_format(ds, "ipv4(src="IP_FMT",dst="IP_FMT",proto=%"PRIu8
548 ",tos=%#"PRIx8",ttl=%"PRIu8",frag=0x%"PRIx16"),",
549 IP_ARGS(get_16aligned_be32(&ip->ip_src)),
550 IP_ARGS(get_16aligned_be32(&ip->ip_dst)),
551 ip->ip_proto, ip->ip_tos,
/* Tunnel-specific trailer, selected by vport type. */
555 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
556 const struct vxlanhdr *vxh;
558 vxh = format_udp_tnl_push_header(ds, ip);
/* VNI occupies the top 24 bits of the vni field. */
560 ds_put_format(ds, "vxlan(flags=0x%"PRIx32",vni=0x%"PRIx32")",
561 ntohl(get_16aligned_be32(&vxh->vx_flags)),
562 ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
563 } else if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
564 const struct genevehdr *gnh;
566 gnh = format_udp_tnl_push_header(ds, ip);
568 ds_put_format(ds, "geneve(%svni=0x%"PRIx32")",
569 gnh->oam ? "oam," : "",
570 ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
571 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE) {
572 const struct gre_base_hdr *greh;
573 ovs_16aligned_be32 *options;
/* GRE rides directly on IPv4: no UDP header. */
576 l4 = ((uint8_t *)l3 + sizeof(struct ip_header));
577 greh = (const struct gre_base_hdr *) l4;
579 ds_put_format(ds, "gre((flags=0x%"PRIx16",proto=0x%"PRIx16")",
580 ntohs(greh->flags), ntohs(greh->protocol));
/* Optional GRE fields follow the base header in csum, key, seq order. */
581 options = (ovs_16aligned_be32 *)(greh + 1);
582 if (greh->flags & htons(GRE_CSUM)) {
583 ds_put_format(ds, ",csum=0x%"PRIx16, ntohs(*((ovs_be16 *)options)));
586 if (greh->flags & htons(GRE_KEY)) {
587 ds_put_format(ds, ",key=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
590 if (greh->flags & htons(GRE_SEQ)) {
591 ds_put_format(ds, ",seq=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
594 ds_put_format(ds, ")");
596 ds_put_format(ds, ")");
/* Appends a textual form of TUNNEL_PUSH action 'attr' to 'ds':
 * "tnl_push(tnl_port(P),<header>,out_port(O))". */
600 format_odp_tnl_push_action(struct ds *ds, const struct nlattr *attr)
602 struct ovs_action_push_tnl *data;
604 data = (struct ovs_action_push_tnl *) nl_attr_get(attr);
606 ds_put_format(ds, "tnl_push(tnl_port(%"PRIu32"),", data->tnl_port);
607 format_odp_tnl_push_header(ds, data);
608 ds_put_format(ds, ",out_port(%"PRIu32"))", data->out_port);
/* Appends a textual form of the single datapath action 'a' to 'ds',
 * dispatching on the OVS_ACTION_ATTR_* type.  Actions with a bad payload
 * length, and unknown actions, fall back to format_generic_odp_action(). */
612 format_odp_action(struct ds *ds, const struct nlattr *a)
615 enum ovs_action_attr type = nl_attr_type(a);
616 const struct ovs_action_push_vlan *vlan;
/* -2 means variable length, so only fixed lengths are validated here. */
619 expected_len = odp_action_len(nl_attr_type(a));
620 if (expected_len != -2 && nl_attr_get_size(a) != expected_len) {
621 ds_put_format(ds, "bad length %"PRIuSIZE", expected %d for: ",
622 nl_attr_get_size(a), expected_len);
623 format_generic_odp_action(ds, a);
628 case OVS_ACTION_ATTR_OUTPUT:
629 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
631 case OVS_ACTION_ATTR_TUNNEL_POP:
632 ds_put_format(ds, "tnl_pop(%"PRIu32")", nl_attr_get_u32(a));
634 case OVS_ACTION_ATTR_TUNNEL_PUSH:
635 format_odp_tnl_push_action(ds, a);
637 case OVS_ACTION_ATTR_USERSPACE:
638 format_odp_userspace_action(ds, a);
640 case OVS_ACTION_ATTR_RECIRC:
641 format_odp_recirc_action(ds, nl_attr_get_u32(a));
643 case OVS_ACTION_ATTR_HASH:
644 format_odp_hash_action(ds, nl_attr_get(a));
646 case OVS_ACTION_ATTR_SET_MASKED:
/* Payload is key followed by mask, each 'size' bytes. */
648 size = nl_attr_get_size(a) / 2;
649 ds_put_cstr(ds, "set(");
651 /* Masked set action not supported for tunnel key, which is bigger. */
652 if (size <= sizeof(struct ovs_key_ipv6)) {
/* Build standalone key and mask attributes on the stack so the shared
 * key formatter can be reused. */
653 struct nlattr attr[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
654 sizeof(struct nlattr))];
655 struct nlattr mask[1 + DIV_ROUND_UP(sizeof(struct ovs_key_ipv6),
656 sizeof(struct nlattr))];
658 mask->nla_type = attr->nla_type = nl_attr_type(a);
659 mask->nla_len = attr->nla_len = NLA_HDRLEN + size;
660 memcpy(attr + 1, (char *)(a + 1), size);
661 memcpy(mask + 1, (char *)(a + 1) + size, size);
662 format_odp_key_attr(attr, mask, NULL, ds, false);
664 format_odp_key_attr(a, NULL, NULL, ds, false);
666 ds_put_cstr(ds, ")");
668 case OVS_ACTION_ATTR_SET:
669 ds_put_cstr(ds, "set(");
670 format_odp_key_attr(nl_attr_get(a), NULL, NULL, ds, true);
671 ds_put_cstr(ds, ")");
673 case OVS_ACTION_ATTR_PUSH_VLAN:
674 vlan = nl_attr_get(a);
675 ds_put_cstr(ds, "push_vlan(");
/* TPID is only printed when it differs from the 802.1Q default. */
676 if (vlan->vlan_tpid != htons(ETH_TYPE_VLAN)) {
677 ds_put_format(ds, "tpid=0x%04"PRIx16",", ntohs(vlan->vlan_tpid));
679 format_vlan_tci(ds, vlan->vlan_tci, OVS_BE16_MAX, false);
680 ds_put_char(ds, ')');
682 case OVS_ACTION_ATTR_POP_VLAN:
683 ds_put_cstr(ds, "pop_vlan");
685 case OVS_ACTION_ATTR_PUSH_MPLS: {
686 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
687 ds_put_cstr(ds, "push_mpls(");
688 format_mpls_lse(ds, mpls->mpls_lse);
689 ds_put_format(ds, ",eth_type=0x%"PRIx16")", ntohs(mpls->mpls_ethertype));
692 case OVS_ACTION_ATTR_POP_MPLS: {
693 ovs_be16 ethertype = nl_attr_get_be16(a);
694 ds_put_format(ds, "pop_mpls(eth_type=0x%"PRIx16")", ntohs(ethertype));
697 case OVS_ACTION_ATTR_SAMPLE:
698 format_odp_sample_action(ds, a);
700 case OVS_ACTION_ATTR_UNSPEC:
701 case __OVS_ACTION_ATTR_MAX:
703 format_generic_odp_action(ds, a);
/* Appends a comma-separated textual form of the 'actions_len' bytes of
 * Netlink-encoded actions in 'actions' to 'ds'.  Trailing bytes that do not
 * form a whole attribute are dumped as "***N leftover bytes***"; an empty
 * action list is rendered as "drop". */
709 format_odp_actions(struct ds *ds, const struct nlattr *actions,
713 const struct nlattr *a;
716 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
718 ds_put_char(ds, ',');
720 format_odp_action(ds, a);
/* 'left' == actions_len means the iterator consumed nothing at all. */
725 if (left == actions_len) {
726 ds_put_cstr(ds, "<empty>");
728 ds_put_format(ds, ",***%u leftover bytes*** (", left);
729 for (i = 0; i < left; i++) {
730 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
732 ds_put_char(ds, ')');
735 ds_put_cstr(ds, "drop");
739 /* Separate out parse_odp_userspace_action() function. */
/* Parses the textual "userspace(pid=...,<cookie>,...)" form produced by
 * format_odp_userspace_action() and appends the equivalent Netlink action
 * to 'actions'.  Recognizes sFlow, slow_path, flow_sample, ipfix and raw
 * userdata() cookies, plus an optional trailing tunnel_out_port.
 * NOTE(review): error-return paths are missing from this extract. */
741 parse_odp_userspace_action(const char *s, struct ofpbuf *actions)
744 union user_action_cookie cookie;
746 odp_port_t tunnel_out_port;
748 void *user_data = NULL;
749 size_t user_data_size = 0;
751 if (!ovs_scan(s, "userspace(pid=%"SCNi32"%n", &pid, &n)) {
757 uint32_t probability;
758 uint32_t collector_set_id;
759 uint32_t obs_domain_id;
760 uint32_t obs_point_id;
/* Try each cookie syntax in turn; each branch fills 'cookie' and sets
 * user_data/user_data_size to the matching cookie slice. */
763 if (ovs_scan(&s[n], ",sFlow(vid=%i,"
764 "pcp=%i,output=%"SCNi32")%n",
765 &vid, &pcp, &output, &n1)) {
769 tci = vid | (pcp << VLAN_PCP_SHIFT);
774 cookie.type = USER_ACTION_COOKIE_SFLOW;
775 cookie.sflow.vlan_tci = htons(tci);
776 cookie.sflow.output = output;
778 user_data_size = sizeof cookie.sflow;
779 } else if (ovs_scan(&s[n], ",slow_path(%n",
784 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
785 cookie.slow_path.unused = 0;
786 cookie.slow_path.reason = 0;
/* slow_path reasons are a flag list; reuse the generic flag parser. */
788 res = parse_flags(&s[n], slow_path_reason_to_string,
789 &cookie.slow_path.reason,
790 SLOW_PATH_REASON_MASK, NULL);
791 if (res < 0 || s[n + res] != ')') {
797 user_data_size = sizeof cookie.slow_path;
798 } else if (ovs_scan(&s[n], ",flow_sample(probability=%"SCNi32","
799 "collector_set_id=%"SCNi32","
800 "obs_domain_id=%"SCNi32","
801 "obs_point_id=%"SCNi32")%n",
802 &probability, &collector_set_id,
803 &obs_domain_id, &obs_point_id, &n1)) {
806 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
807 cookie.flow_sample.probability = probability;
808 cookie.flow_sample.collector_set_id = collector_set_id;
809 cookie.flow_sample.obs_domain_id = obs_domain_id;
810 cookie.flow_sample.obs_point_id = obs_point_id;
812 user_data_size = sizeof cookie.flow_sample;
813 } else if (ovs_scan(&s[n], ",ipfix(output_port=%"SCNi32")%n",
816 cookie.type = USER_ACTION_COOKIE_IPFIX;
817 cookie.ipfix.output_odp_port = u32_to_odp(output);
819 user_data_size = sizeof cookie.ipfix;
820 } else if (ovs_scan(&s[n], ",userdata(%n",
/* Raw userdata: hex string decoded into a temporary ofpbuf. */
825 ofpbuf_init(&buf, 16);
826 end = ofpbuf_put_hex(&buf, &s[n], NULL);
830 user_data = buf.data;
831 user_data_size = buf.size;
/* Optional tunnel_out_port suffix, otherwise the plain ")" ending. */
838 if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n",
839 &tunnel_out_port, &n1)) {
840 odp_put_userspace_action(pid, user_data, user_data_size, tunnel_out_port, actions);
842 } else if (s[n] == ')') {
843 odp_put_userspace_action(pid, user_data, user_data_size, ODPP_NONE, actions);
/* Parses the textual "tnl_push(tnl_port(P),header(...),out_port(O))" form
 * produced by format_odp_tnl_push_action() into 'data', building the raw
 * Ethernet/IPv4/UDP|GRE tunnel header bytes in data->header.  The parsed
 * header size and tunnel type must match the values given in the text.
 * NOTE(review): error-return paths and the final return value are missing
 * from this extract — confirm against upstream odp-util.c. */
852 ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
854 struct eth_header *eth;
855 struct ip_header *ip;
856 struct udp_header *udp;
857 struct gre_base_hdr *greh;
858 uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, csum;
860 uint32_t tnl_type = 0, header_len = 0;
864 if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
/* Lay out header pointers: Ethernet, then IPv4 (l3), then l4. */
867 eth = (struct eth_header *) data->header;
868 l3 = (data->header + sizeof *eth);
869 l4 = ((uint8_t *) l3 + sizeof (struct ip_header));
870 ip = (struct ip_header *) l3;
871 if (!ovs_scan_len(s, &n, "header(size=%"SCNi32",type=%"SCNi32","
872 "eth(dst="ETH_ADDR_SCAN_FMT",",
875 ETH_ADDR_SCAN_ARGS(eth->eth_dst))) {
879 if (!ovs_scan_len(s, &n, "src="ETH_ADDR_SCAN_FMT",",
880 ETH_ADDR_SCAN_ARGS(eth->eth_src))) {
883 if (!ovs_scan_len(s, &n, "dl_type=0x%"SCNx16"),", &dl_type)) {
886 eth->eth_type = htons(dl_type);
889 if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
890 ",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
893 &ip->ip_proto, &ip->ip_tos,
894 &ip->ip_ttl, &ip->ip_frag_off)) {
/* Addresses are written via the 16-bit-aligned helpers. */
897 put_16aligned_be32(&ip->ip_src, sip);
898 put_16aligned_be32(&ip->ip_dst, dip);
/* l4 is either UDP (VXLAN/Geneve) or GRE; try UDP syntax first. */
901 udp = (struct udp_header *) l4;
902 greh = (struct gre_base_hdr *) l4;
903 if (ovs_scan_len(s, &n, "udp(src=%"SCNi16",dst=%"SCNi16",csum=0x%"SCNx16"),",
904 &udp_src, &udp_dst, &csum)) {
905 uint32_t vx_flags, vni;
907 udp->udp_src = htons(udp_src);
908 udp->udp_dst = htons(udp_dst);
910 udp->udp_csum = htons(csum);
912 if (ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
914 struct vxlanhdr *vxh = (struct vxlanhdr *) (udp + 1);
/* VNI lives in the top 24 bits of the wire field. */
916 put_16aligned_be32(&vxh->vx_flags, htonl(vx_flags));
917 put_16aligned_be32(&vxh->vx_vni, htonl(vni << 8));
918 tnl_type = OVS_VPORT_TYPE_VXLAN;
919 header_len = sizeof *eth + sizeof *ip +
920 sizeof *udp + sizeof *vxh;
921 } else if (ovs_scan_len(s, &n, "geneve(")) {
922 struct genevehdr *gnh = (struct genevehdr *) (udp + 1);
924 memset(gnh, 0, sizeof *gnh);
925 if (ovs_scan_len(s, &n, "oam,")) {
928 if (!ovs_scan_len(s, &n, "vni=0x%"SCNx32"))", &vni)) {
931 gnh->proto_type = htons(ETH_TYPE_TEB);
932 put_16aligned_be32(&gnh->vni, htonl(vni << 8));
933 tnl_type = OVS_VPORT_TYPE_GENEVE;
934 header_len = sizeof *eth + sizeof *ip +
935 sizeof *udp + sizeof *gnh;
939 } else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
940 &gre_flags, &gre_proto)){
942 tnl_type = OVS_VPORT_TYPE_GRE;
943 greh->flags = htons(gre_flags);
944 greh->protocol = htons(gre_proto);
/* Optional GRE fields are appended in csum, key, seq order. */
945 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
947 if (greh->flags & htons(GRE_CSUM)) {
948 if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx16, &csum)) {
952 memset(options, 0, sizeof *options);
953 *((ovs_be16 *)options) = htons(csum);
956 if (greh->flags & htons(GRE_KEY)) {
959 if (!ovs_scan_len(s, &n, ",key=0x%"SCNx32, &key)) {
963 put_16aligned_be32(options, htonl(key));
966 if (greh->flags & htons(GRE_SEQ)) {
969 if (!ovs_scan_len(s, &n, ",seq=0x%"SCNx32, &seq)) {
972 put_16aligned_be32(options, htonl(seq));
976 if (!ovs_scan_len(s, &n, "))")) {
/* GRE header length depends on which optional fields were present. */
980 header_len = sizeof *eth + sizeof *ip +
981 ((uint8_t *) options - (uint8_t *) greh);
986 /* check tunnel meta data. */
987 if (data->tnl_type != tnl_type) {
990 if (data->header_len != header_len) {
995 if (!ovs_scan_len(s, &n, ",out_port(%"SCNi32"))", &data->out_port)) {
/* Parses one textual datapath action from 's' and appends its Netlink
 * encoding to 'actions'.  'port_names' maps symbolic port names to numbers
 * for output actions.  Recognizes output, recirc, userspace, set/set_masked,
 * push_vlan, pop_vlan, sample, tnl_pop and tnl_push.
 * NOTE(review): return statements and some error paths are missing from
 * this extract; by convention the return value is bytes consumed or a
 * negative errno — confirm against upstream odp-util.c. */
1003 parse_odp_action(const char *s, const struct simap *port_names,
1004 struct ofpbuf *actions)
/* Bare number: an output action. */
1010 if (ovs_scan(s, "%"SCNi32"%n", &port, &n)) {
1011 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, port);
/* Symbolic port name, resolved through 'port_names'. */
1017 int len = strcspn(s, delimiters);
1018 struct simap_node *node;
1020 node = simap_find_len(port_names, s, len);
1022 nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, node->data);
1031 if (ovs_scan(s, "recirc(%"PRIu32")%n", &recirc_id, &n)) {
1032 nl_msg_put_u32(actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
1037 if (!strncmp(s, "userspace(", 10)) {
1038 return parse_odp_userspace_action(s, actions);
1041 if (!strncmp(s, "set(", 4)) {
1044 struct nlattr mask[128 / sizeof(struct nlattr)];
1045 struct ofpbuf maskbuf;
1046 struct nlattr *nested, *key;
1049 /* 'mask' is big enough to hold any key. */
1050 ofpbuf_use_stack(&maskbuf, mask, sizeof mask);
1052 start_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SET);
1053 retval = parse_odp_key_mask_attr(s + 4, port_names, actions, &maskbuf);
1057 if (s[retval + 4] != ')') {
1061 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
1064 size = nl_attr_get_size(mask);
1065 if (size == nl_attr_get_size(key)) {
1066 /* Change to masked set action if not fully masked. */
1067 if (!is_all_ones(mask + 1, size)) {
/* Append the mask payload after the key and retag the action. */
1068 key->nla_len += size;
1069 ofpbuf_put(actions, mask + 1, size);
1070 /* 'actions' may have been reallocated by ofpbuf_put(). */
1071 nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
1072 nested->nla_type = OVS_ACTION_ATTR_SET_MASKED;
1076 nl_msg_end_nested(actions, start_ofs);
1081 struct ovs_action_push_vlan push;
1082 int tpid = ETH_TYPE_VLAN;
/* Accept all four push_vlan spellings (with/without tpid and cfi). */
1087 if (ovs_scan(s, "push_vlan(vid=%i,pcp=%i)%n", &vid, &pcp, &n)
1088 || ovs_scan(s, "push_vlan(vid=%i,pcp=%i,cfi=%i)%n",
1089 &vid, &pcp, &cfi, &n)
1090 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i)%n",
1091 &tpid, &vid, &pcp, &n)
1092 || ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i,cfi=%i)%n",
1093 &tpid, &vid, &pcp, &cfi, &n)) {
1094 push.vlan_tpid = htons(tpid);
1095 push.vlan_tci = htons((vid << VLAN_VID_SHIFT)
1096 | (pcp << VLAN_PCP_SHIFT)
1097 | (cfi ? VLAN_CFI : 0));
1098 nl_msg_put_unspec(actions, OVS_ACTION_ATTR_PUSH_VLAN,
1099 &push, sizeof push);
1105 if (!strncmp(s, "pop_vlan", 8)) {
1106 nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_VLAN);
1114 if (ovs_scan(s, "sample(sample=%lf%%,actions(%n", &percentage, &n)
1115 && percentage >= 0. && percentage <= 100.0) {
1116 size_t sample_ofs, actions_ofs;
/* Round the percentage onto the full u32 probability range, clamped. */
1119 probability = floor(UINT32_MAX * (percentage / 100.0) + .5);
1120 sample_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SAMPLE);
1121 nl_msg_put_u32(actions, OVS_SAMPLE_ATTR_PROBABILITY,
1122 (probability <= 0 ? 0
1123 : probability >= UINT32_MAX ? UINT32_MAX
1126 actions_ofs = nl_msg_start_nested(actions,
1127 OVS_SAMPLE_ATTR_ACTIONS);
/* Recursively parse the nested action list. */
1131 n += strspn(s + n, delimiters);
1136 retval = parse_odp_action(s + n, port_names, actions);
1142 nl_msg_end_nested(actions, actions_ofs);
1143 nl_msg_end_nested(actions, sample_ofs);
1145 return s[n + 1] == ')' ? n + 2 : -EINVAL;
1153 if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
1154 nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
1160 struct ovs_action_push_tnl data;
1163 n = ovs_parse_tnl_push(s, &data);
1165 odp_put_tnl_push_action(actions, &data);
1174 /* Parses the string representation of datapath actions, in the format output
1175 * by format_odp_action(). Returns 0 if successful, otherwise a positive errno
1176 * value. On success, the ODP actions are appended to 'actions' as a series of
1177 * Netlink attributes. On failure, no data is appended to 'actions'. Either
1178 * way, 'actions''s data might be reallocated. */
1180 odp_actions_from_string(const char *s, const struct simap *port_names,
1181 struct ofpbuf *actions)
/* "drop" means an empty action list; nothing is appended. */
1185 if (!strcasecmp(s, "drop")) {
/* Remember the starting size so failure can roll 'actions' back. */
1189 old_size = actions->size;
1193 s += strspn(s, delimiters);
/* Parse actions one at a time; each must end at a delimiter. */
1198 retval = parse_odp_action(s, port_names, actions);
1199 if (retval < 0 || !strchr(delimiters, s[retval])) {
1200 actions->size = old_size;
1209 /* Returns the correct length of the payload for a flow key attribute of the
1210 * specified 'type', -1 if 'type' is unknown, or -2 if the attribute's payload
1211 * is variable length. */
1213 odp_flow_key_attr_len(uint16_t type)
/* Out-of-range attribute types are unknown.
 * NOTE(review): the body of this guard (presumably "return -1;") is not
 * visible in this extract. */
1215 if (type > OVS_KEY_ATTR_MAX) {
/* Payload-size table, one case per OVS_KEY_ATTR_* flow-key attribute. */
1219 switch ((enum ovs_key_attr) type) {
1220 case OVS_KEY_ATTR_ENCAP: return -2;
1221 case OVS_KEY_ATTR_PRIORITY: return 4;
1222 case OVS_KEY_ATTR_SKB_MARK: return 4;
1223 case OVS_KEY_ATTR_DP_HASH: return 4;
1224 case OVS_KEY_ATTR_RECIRC_ID: return 4;
1225 case OVS_KEY_ATTR_TUNNEL: return -2;
1226 case OVS_KEY_ATTR_IN_PORT: return 4;
1227 case OVS_KEY_ATTR_ETHERNET: return sizeof(struct ovs_key_ethernet);
1228 case OVS_KEY_ATTR_VLAN: return sizeof(ovs_be16);
1229 case OVS_KEY_ATTR_ETHERTYPE: return 2;
1230 case OVS_KEY_ATTR_MPLS: return -2;
1231 case OVS_KEY_ATTR_IPV4: return sizeof(struct ovs_key_ipv4);
1232 case OVS_KEY_ATTR_IPV6: return sizeof(struct ovs_key_ipv6);
1233 case OVS_KEY_ATTR_TCP: return sizeof(struct ovs_key_tcp);
1234 case OVS_KEY_ATTR_TCP_FLAGS: return 2;
1235 case OVS_KEY_ATTR_UDP: return sizeof(struct ovs_key_udp);
1236 case OVS_KEY_ATTR_SCTP: return sizeof(struct ovs_key_sctp);
1237 case OVS_KEY_ATTR_ICMP: return sizeof(struct ovs_key_icmp);
1238 case OVS_KEY_ATTR_ICMPV6: return sizeof(struct ovs_key_icmpv6);
1239 case OVS_KEY_ATTR_ARP: return sizeof(struct ovs_key_arp);
1240 case OVS_KEY_ATTR_ND: return sizeof(struct ovs_key_nd);
/* Sentinel values, not real attributes. */
1242 case OVS_KEY_ATTR_UNSPEC:
1243 case __OVS_KEY_ATTR_MAX:
/* Appends the raw payload of flow-key attribute 'a' to 'ds' as
 * space-separated hex bytes (used for unknown or malformed keys). */
1251 format_generic_odp_key(const struct nlattr *a, struct ds *ds)
1253 size_t len = nl_attr_get_size(a);
1255 const uint8_t *unspec;
1258 unspec = nl_attr_get(a);
1259 for (i = 0; i < len; i++) {
1261 ds_put_char(ds, ' ');
1263 ds_put_format(ds, "%02x", unspec[i]);
/* Returns a string name for IP fragment type 'type'.
 * NOTE(review): the returned string literals for each case are not visible
 * in this extract. */
1269 ovs_frag_type_to_string(enum ovs_frag_type type)
1272 case OVS_FRAG_TYPE_NONE:
1274 case OVS_FRAG_TYPE_FIRST:
1276 case OVS_FRAG_TYPE_LATER:
1278 case __OVS_FRAG_TYPE_MAX:
/* Returns the payload length for tunnel-key attribute 'type', or -2 for the
 * variable-length option attributes. */
1285 tunnel_key_attr_len(int type)
1288 case OVS_TUNNEL_KEY_ATTR_ID: return 8;
1289 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: return 4;
1290 case OVS_TUNNEL_KEY_ATTR_IPV4_DST: return 4;
1291 case OVS_TUNNEL_KEY_ATTR_TOS: return 1;
1292 case OVS_TUNNEL_KEY_ATTR_TTL: return 1;
1293 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: return 0;
1294 case OVS_TUNNEL_KEY_ATTR_CSUM: return 0;
1295 case OVS_TUNNEL_KEY_ATTR_TP_SRC: return 2;
1296 case OVS_TUNNEL_KEY_ATTR_TP_DST: return 2;
1297 case OVS_TUNNEL_KEY_ATTR_OAM: return 0;
1298 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: return -2;
1299 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: return -2;
1301 case __OVS_TUNNEL_KEY_ATTR_MAX:
/* Combines a Geneve option class and type into one dispatch key. */
1306 #define GENEVE_OPT(class, type) ((OVS_FORCE uint32_t)(class) << 8 | (type))
/* Walks the Geneve options in 'attr', validating each option's length and
 * rejecting unknown critical options.  NOTE(review): return statements are
 * missing from this extract; presumably 0 on success, nonzero on error. */
1308 parse_geneve_opts(const struct nlattr *attr)
1310 int opts_len = nl_attr_get_size(attr);
1311 const struct geneve_opt *opt = nl_attr_get(attr);
1313 while (opts_len > 0) {
/* Remaining bytes must at least cover an option header. */
1316 if (opts_len < sizeof(*opt)) {
/* opt->length counts the option body in 4-byte units. */
1320 len = sizeof(*opt) + opt->length * 4;
1321 if (len > opts_len) {
1325 switch (GENEVE_OPT(opt->opt_class, opt->type)) {
/* An unrecognized option is only fatal when marked critical. */
1327 if (opt->type & GENEVE_CRIT_OPT_TYPE) {
1332 opt = opt + len / sizeof(*opt);
/* Decodes the nested OVS_KEY_ATTR_TUNNEL attribute 'attr' into 'tun'.
 * Returns ODP_FIT_PERFECT when every sub-attribute was understood,
 * ODP_FIT_TOO_MUCH when unknown (but tolerable) attributes were present,
 * or ODP_FIT_ERROR on a malformed attribute. */
1339 enum odp_key_fitness
1340 odp_tun_key_from_attr(const struct nlattr *attr, struct flow_tnl *tun)
1343 const struct nlattr *a;
1345 bool unknown = false;
1347 NL_NESTED_FOR_EACH(a, left, attr) {
1348 uint16_t type = nl_attr_type(a);
1349 size_t len = nl_attr_get_size(a);
1350 int expected_len = tunnel_key_attr_len(type);
/* Fixed-length attributes must match exactly; negative means variable. */
1352 if (len != expected_len && expected_len >= 0) {
1353 return ODP_FIT_ERROR;
1357 case OVS_TUNNEL_KEY_ATTR_ID:
1358 tun->tun_id = nl_attr_get_be64(a);
1359 tun->flags |= FLOW_TNL_F_KEY;
1361 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
1362 tun->ip_src = nl_attr_get_be32(a);
1364 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
1365 tun->ip_dst = nl_attr_get_be32(a);
1367 case OVS_TUNNEL_KEY_ATTR_TOS:
1368 tun->ip_tos = nl_attr_get_u8(a);
1370 case OVS_TUNNEL_KEY_ATTR_TTL:
1371 tun->ip_ttl = nl_attr_get_u8(a);
1374 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
1375 tun->flags |= FLOW_TNL_F_DONT_FRAGMENT;
1377 case OVS_TUNNEL_KEY_ATTR_CSUM:
1378 tun->flags |= FLOW_TNL_F_CSUM;
1380 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
1381 tun->tp_src = nl_attr_get_be16(a);
1383 case OVS_TUNNEL_KEY_ATTR_TP_DST:
1384 tun->tp_dst = nl_attr_get_be16(a);
1386 case OVS_TUNNEL_KEY_ATTR_OAM:
1387 tun->flags |= FLOW_TNL_F_OAM;
1389 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: {
1390 static const struct nl_policy vxlan_opts_policy[] = {
1391 [OVS_VXLAN_EXT_GBP] = { .type = NL_A_U32 },
1393 struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
1395 if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
1396 return ODP_FIT_ERROR;
1399 if (ext[OVS_VXLAN_EXT_GBP]) {
/* GBP packs the policy id in the low 16 bits, flags above it. */
1400 uint32_t gbp = nl_attr_get_u32(ext[OVS_VXLAN_EXT_GBP]);
1402 tun->gbp_id = htons(gbp & 0xFFFF);
1403 tun->gbp_flags = (gbp >> 16) & 0xFF;
1408 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: {
1409 if (parse_geneve_opts(a)) {
1410 return ODP_FIT_ERROR;
1412 /* It is necessary to reproduce options exactly (including order)
1413 * so it's easiest to just echo them back. */
1418 /* Allow this to show up as unexpected, if there are unknown
1419 * tunnel attribute, eventually resulting in ODP_FIT_TOO_MUCH. */
1426 return ODP_FIT_ERROR;
1429 return ODP_FIT_TOO_MUCH;
1431 return ODP_FIT_PERFECT;
/* Serializes 'tun_key' into 'a' as a nested OVS_KEY_ATTR_TUNNEL attribute —
 * the inverse of odp_tun_key_from_attr().  Zero-valued fields are omitted
 * (except TTL, emitted unconditionally), so the output is minimal.
 * NOTE(review): closing braces and the trailing lines of the function are
 * elided in this view. */
1435 tun_key_to_attr(struct ofpbuf *a, const struct flow_tnl *tun_key)
1439 tun_key_ofs = nl_msg_start_nested(a, OVS_KEY_ATTR_TUNNEL);
1441 /* tun_id != 0 without FLOW_TNL_F_KEY is valid if tun_key is a mask. */
1442 if (tun_key->tun_id || tun_key->flags & FLOW_TNL_F_KEY) {
1443 nl_msg_put_be64(a, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id);
1445 if (tun_key->ip_src) {
1446 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ip_src);
1448 if (tun_key->ip_dst) {
1449 nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ip_dst);
1451 if (tun_key->ip_tos) {
1452 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ip_tos);
/* TTL is always emitted, even when zero. */
1454 nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ip_ttl);
1455 if (tun_key->flags & FLOW_TNL_F_DONT_FRAGMENT) {
1456 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT);
1458 if (tun_key->flags & FLOW_TNL_F_CSUM) {
1459 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_CSUM);
1461 if (tun_key->tp_src) {
1462 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src);
1464 if (tun_key->tp_dst) {
1465 nl_msg_put_be16(a, OVS_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst);
1467 if (tun_key->flags & FLOW_TNL_F_OAM) {
1468 nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
/* GBP state travels as a nested VXLAN extension attribute; packing
 * matches the unpacking in odp_tun_key_from_attr(). */
1470 if (tun_key->gbp_flags || tun_key->gbp_id) {
1471 size_t vxlan_opts_ofs;
1473 vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
1474 nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP,
1475 (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
1476 nl_msg_end_nested(a, vxlan_opts_ofs);
1479 nl_msg_end_nested(a, tun_key_ofs);
/* Returns true if mask attribute 'ma' is all-zeros, i.e. matches nothing
 * (a full wildcard). */
1483 odp_mask_attr_is_wildcard(const struct nlattr *ma)
1485 return is_all_zeros(nl_attr_get(ma), nl_attr_get_size(ma));
/* Returns true if 'mask' (of 'size' bytes, interpreted per 'attr') is an
 * exact match.  Most attributes are exact iff all bits are ones, but a few
 * contain bits that do not participate in matching and need special cases:
 * TCP flags (only the low 12 bits), the IPv6 flow label (20 bits), tunnel
 * metadata (per-field check), and ARP (trailing struct padding). */
1489 odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
1491 if (attr == OVS_KEY_ATTR_TCP_FLAGS) {
/* Only the bits covered by TCP_FLAGS() are matchable. */
1492 return TCP_FLAGS(*(ovs_be16 *)mask) == TCP_FLAGS(OVS_BE16_MAX);
1494 if (attr == OVS_KEY_ATTR_IPV6) {
1495 const struct ovs_key_ipv6 *ipv6_mask = mask;
/* The flow label is only 20 bits wide; compare within IPV6_LABEL_MASK. */
1498 ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
1499 == htonl(IPV6_LABEL_MASK))
1500 && ipv6_mask->ipv6_proto == UINT8_MAX
1501 && ipv6_mask->ipv6_tclass == UINT8_MAX
1502 && ipv6_mask->ipv6_hlimit == UINT8_MAX
1503 && ipv6_mask->ipv6_frag == UINT8_MAX
1504 && ipv6_mask_is_exact((const struct in6_addr *)ipv6_mask->ipv6_src)
1505 && ipv6_mask_is_exact((const struct in6_addr *)ipv6_mask->ipv6_dst);
1507 if (attr == OVS_KEY_ATTR_TUNNEL) {
1508 const struct flow_tnl *tun_mask = mask;
/* Field-by-field check: struct flow_tnl may contain padding, so a raw
 * is_all_ones() over the struct would be unreliable. */
1510 return tun_mask->flags == FLOW_TNL_F_MASK
1511 && tun_mask->tun_id == OVS_BE64_MAX
1512 && tun_mask->ip_src == OVS_BE32_MAX
1513 && tun_mask->ip_dst == OVS_BE32_MAX
1514 && tun_mask->ip_tos == UINT8_MAX
1515 && tun_mask->ip_ttl == UINT8_MAX
1516 && tun_mask->tp_src == OVS_BE16_MAX
1517 && tun_mask->tp_dst == OVS_BE16_MAX
1518 && tun_mask->gbp_id == OVS_BE16_MAX
1519 && tun_mask->gbp_flags == UINT8_MAX;
1522 if (attr == OVS_KEY_ATTR_ARP) {
1523 /* ARP key has padding, ignore it. */
1524 BUILD_ASSERT_DECL(sizeof(struct ovs_key_arp) == 24);
1525 BUILD_ASSERT_DECL(offsetof(struct ovs_key_arp, arp_tha) == 10 + 6);
/* Shrink 'size' to exclude the padding, then verify the padding bytes
 * really are zero before falling through to the generic check. */
1526 size = offsetof(struct ovs_key_arp, arp_tha) + ETH_ADDR_LEN;
1527 ovs_assert(((uint16_t *)mask)[size/2] == 0);
/* Generic case: exact match means every bit set. */
1530 return is_all_ones(mask, size);
/* Returns true if mask attribute 'ma' is an exact match.  For tunnel masks
 * the netlink encoding is first expanded into a struct flow_tnl so that
 * odp_mask_is_exact() can do its per-field check. */
1534 odp_mask_attr_is_exact(const struct nlattr *ma)
1536 struct flow_tnl tun_mask;
1537 enum ovs_key_attr attr = nl_attr_type(ma);
1541 if (attr == OVS_KEY_ATTR_TUNNEL) {
1542 memset(&tun_mask, 0, sizeof tun_mask);
/* NOTE(review): the fitness result is deliberately ignored here —
 * presumably malformed tunnel masks simply appear inexact. */
1543 odp_tun_key_from_attr(ma, &tun_mask);
1545 size = sizeof tun_mask;
1547 mask = nl_attr_get(ma);
1548 size = nl_attr_get_size(ma);
1551 return odp_mask_is_exact(attr, mask, size);
/* Inserts a mapping from datapath port number 'port_no' to a copy of
 * 'port_name' into 'portno_names'.  The caller owns the hmap; entries are
 * released by odp_portno_names_destroy(). */
1555 odp_portno_names_set(struct hmap *portno_names, odp_port_t port_no,
1558 struct odp_portno_names *odp_portno_names;
1560 odp_portno_names = xmalloc(sizeof *odp_portno_names);
1561 odp_portno_names->port_no = port_no;
/* Copy the name so the caller's string need not outlive the map. */
1562 odp_portno_names->name = xstrdup(port_name);
1563 hmap_insert(portno_names, &odp_portno_names->hmap_node,
1564 hash_odp_port(port_no));
/* Looks up the name registered for 'port_no', or (per the elided tail)
 * presumably NULL when absent — TODO confirm against the missing lines. */
1568 odp_portno_names_get(const struct hmap *portno_names, odp_port_t port_no)
1570 struct odp_portno_names *odp_portno_names;
/* Scan the hash bucket for an exact port-number match. */
1572 HMAP_FOR_EACH_IN_BUCKET (odp_portno_names, hmap_node,
1573 hash_odp_port(port_no), portno_names) {
1574 if (odp_portno_names->port_no == port_no) {
1575 return odp_portno_names->name;
/* Frees every entry in 'portno_names' (both the duplicated name and the
 * node itself).  Does not free the hmap structure. */
1582 odp_portno_names_destroy(struct hmap *portno_names)
1584 struct odp_portno_names *odp_portno_names, *odp_portno_names_next;
/* _SAFE variant: entries are removed while iterating. */
1585 HMAP_FOR_EACH_SAFE (odp_portno_names, odp_portno_names_next,
1586 hmap_node, portno_names) {
1587 hmap_remove(portno_names, &odp_portno_names->hmap_node);
1588 free(odp_portno_names->name);
1589 free(odp_portno_names);
1593 /* Format helpers. */
/* Family of "NAME=VALUE[/MASK]," formatters used by format_odp_key_attr().
 * Shared convention: a field is printed when 'verbose' is set or its mask is
 * nonzero; a "/MASK" suffix appears only for partial masks; a NULL 'mask'
 * means an exact match.  NOTE(review): some braces/else lines are elided in
 * this view. */
/* Ethernet address field. */
1596 format_eth(struct ds *ds, const char *name, const uint8_t key[ETH_ADDR_LEN],
1597 const uint8_t (*mask)[ETH_ADDR_LEN], bool verbose)
1599 bool mask_empty = mask && eth_addr_is_zero(*mask);
1601 if (verbose || !mask_empty) {
1602 bool mask_full = !mask || eth_mask_is_exact(*mask);
1605 ds_put_format(ds, "%s="ETH_ADDR_FMT",", name, ETH_ADDR_ARGS(key));
1607 ds_put_format(ds, "%s=", name);
/* eth_format_masked() renders the value/mask pair itself. */
1608 eth_format_masked(key, *mask, ds);
1609 ds_put_char(ds, ',');
/* 64-bit big-endian field (e.g. tun_id), printed in hex. */
1615 format_be64(struct ds *ds, const char *name, ovs_be64 key,
1616 const ovs_be64 *mask, bool verbose)
1618 bool mask_empty = mask && !*mask;
1620 if (verbose || !mask_empty) {
1621 bool mask_full = !mask || *mask == OVS_BE64_MAX;
1623 ds_put_format(ds, "%s=0x%"PRIx64, name, ntohll(key));
1624 if (!mask_full) { /* Partially masked. */
1625 ds_put_format(ds, "/%#"PRIx64, ntohll(*mask));
1627 ds_put_char(ds, ',');
/* IPv4 address field, dotted-quad with optional /MASK. */
1632 format_ipv4(struct ds *ds, const char *name, ovs_be32 key,
1633 const ovs_be32 *mask, bool verbose)
1635 bool mask_empty = mask && !*mask;
1637 if (verbose || !mask_empty) {
1638 bool mask_full = !mask || *mask == OVS_BE32_MAX;
1640 ds_put_format(ds, "%s="IP_FMT, name, IP_ARGS(key));
1641 if (!mask_full) { /* Partially masked. */
1642 ds_put_format(ds, "/"IP_FMT, IP_ARGS(*mask));
1644 ds_put_char(ds, ',');
/* IPv6 address field, via inet_ntop(). */
1649 format_ipv6(struct ds *ds, const char *name, const ovs_be32 key_[4],
1650 const ovs_be32 (*mask_)[4], bool verbose)
1652 char buf[INET6_ADDRSTRLEN];
1653 const struct in6_addr *key = (const struct in6_addr *)key_;
1654 const struct in6_addr *mask = mask_ ? (const struct in6_addr *)*mask_
1656 bool mask_empty = mask && ipv6_mask_is_any(mask);
1658 if (verbose || !mask_empty) {
1659 bool mask_full = !mask || ipv6_mask_is_exact(mask);
1661 inet_ntop(AF_INET6, key, buf, sizeof buf);
1662 ds_put_format(ds, "%s=%s", name, buf);
1663 if (!mask_full) { /* Partially masked. */
1664 inet_ntop(AF_INET6, mask, buf, sizeof buf);
1665 ds_put_format(ds, "/%s", buf);
1667 ds_put_char(ds, ',');
/* IPv6 flow label: only the low 20 bits (IPV6_LABEL_MASK) are matchable,
 * so "full" means all of those bits set, not all 32. */
1672 format_ipv6_label(struct ds *ds, const char *name, ovs_be32 key,
1673 const ovs_be32 *mask, bool verbose)
1675 bool mask_empty = mask && !*mask;
1677 if (verbose || !mask_empty) {
1678 bool mask_full = !mask
1679 || (*mask & htonl(IPV6_LABEL_MASK)) == htonl(IPV6_LABEL_MASK);
1681 ds_put_format(ds, "%s=%#"PRIx32, name, ntohl(key));
1682 if (!mask_full) { /* Partially masked. */
1683 ds_put_format(ds, "/%#"PRIx32, ntohl(*mask));
1685 ds_put_char(ds, ',');
/* 8-bit field printed in hex (e.g. TOS, GBP flags). */
1690 format_u8x(struct ds *ds, const char *name, uint8_t key,
1691 const uint8_t *mask, bool verbose)
1693 bool mask_empty = mask && !*mask;
1695 if (verbose || !mask_empty) {
1696 bool mask_full = !mask || *mask == UINT8_MAX;
1698 ds_put_format(ds, "%s=%#"PRIx8, name, key);
1699 if (!mask_full) { /* Partially masked. */
1700 ds_put_format(ds, "/%#"PRIx8, *mask);
1702 ds_put_char(ds, ',');
/* 8-bit field printed in decimal (e.g. TTL, protocol). */
1707 format_u8u(struct ds *ds, const char *name, uint8_t key,
1708 const uint8_t *mask, bool verbose)
1710 bool mask_empty = mask && !*mask;
1712 if (verbose || !mask_empty) {
1713 bool mask_full = !mask || *mask == UINT8_MAX;
1715 ds_put_format(ds, "%s=%"PRIu8, name, key);
1716 if (!mask_full) { /* Partially masked. */
1717 ds_put_format(ds, "/%#"PRIx8, *mask);
1719 ds_put_char(ds, ',');
/* 16-bit big-endian field, decimal value with hex mask. */
1724 format_be16(struct ds *ds, const char *name, ovs_be16 key,
1725 const ovs_be16 *mask, bool verbose)
1727 bool mask_empty = mask && !*mask;
1729 if (verbose || !mask_empty) {
1730 bool mask_full = !mask || *mask == OVS_BE16_MAX;
1732 ds_put_format(ds, "%s=%"PRIu16, name, ntohs(key));
1733 if (!mask_full) { /* Partially masked. */
1734 ds_put_format(ds, "/%#"PRIx16, ntohs(*mask));
1736 ds_put_char(ds, ',');
/* Tunnel flag set, e.g. "flags(df|csum)"; "full" is measured against
 * FLOW_TNL_F_MASK, the set of defined flag bits. */
1741 format_tun_flags(struct ds *ds, const char *name, uint16_t key,
1742 const uint16_t *mask, bool verbose)
1744 bool mask_empty = mask && !*mask;
1746 if (verbose || !mask_empty) {
1747 bool mask_full = !mask || (*mask & FLOW_TNL_F_MASK) == FLOW_TNL_F_MASK;
1749 ds_put_cstr(ds, name);
1750 ds_put_char(ds, '(');
1751 if (!mask_full) { /* Partially masked. */
1752 format_flags_masked(ds, NULL, flow_tun_flag_to_string, key, *mask);
1753 } else { /* Fully masked. */
1754 format_flags(ds, flow_tun_flag_to_string, key, ',');
1756 ds_put_cstr(ds, "),");
/* IP fragmentation type; an enum, so a partial mask is an error case. */
1761 format_frag(struct ds *ds, const char *name, uint8_t key,
1762 const uint8_t *mask, bool verbose)
1764 bool mask_empty = mask && !*mask;
1766 /* ODP frag is an enumeration field; partial masks are not meaningful. */
1767 if (verbose || !mask_empty) {
1768 bool mask_full = !mask || *mask == UINT8_MAX;
1770 if (!mask_full) { /* Partially masked. */
1771 ds_put_format(ds, "error: partial mask not supported for frag (%#"
1774 ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
/* Yields a pointer to FIELD inside *PTR, or NULL when PTR itself is NULL —
 * lets the format_*() helpers take an optional mask struct. */
1779 #define MASK(PTR, FIELD) PTR ? &PTR->FIELD : NULL
/* Appends to 'ds' a human-readable rendering of key attribute 'a' (and its
 * optional mask attribute 'ma') as "name(field=value,...)".  'portno_names',
 * when non-NULL, maps in_port numbers to names; 'verbose' forces printing of
 * fully-wildcarded fields.  NOTE(review): 'break' statements, some braces,
 * and a few interior lines are elided in this view. */
1782 format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
1783 const struct hmap *portno_names, struct ds *ds,
1786 enum ovs_key_attr attr = nl_attr_type(a);
1787 char namebuf[OVS_KEY_ATTR_BUFSIZE];
1791 is_exact = ma ? odp_mask_attr_is_exact(ma) : true;
1793 ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
/* Sanity-check lengths for fixed-size attributes (-2 = variable length);
 * on mismatch, dump the raw bytes instead of misinterpreting them. */
1796 expected_len = odp_flow_key_attr_len(nl_attr_type(a));
1797 if (expected_len != -2) {
1798 bool bad_key_len = nl_attr_get_size(a) != expected_len;
1799 bool bad_mask_len = ma && nl_attr_get_size(ma) != expected_len;
1801 if (bad_key_len || bad_mask_len) {
1803 ds_put_format(ds, "(bad key length %"PRIuSIZE", expected %d)(",
1804 nl_attr_get_size(a), expected_len);
1806 format_generic_odp_key(a, ds);
1808 ds_put_char(ds, '/');
1810 ds_put_format(ds, "(bad mask length %"PRIuSIZE", expected %d)(",
1811 nl_attr_get_size(ma), expected_len);
1813 format_generic_odp_key(ma, ds);
1815 ds_put_char(ds, ')');
1821 ds_put_char(ds, '(');
/* Encapsulated keys recurse into the generic flow formatter. */
1823 case OVS_KEY_ATTR_ENCAP:
1824 if (ma && nl_attr_get_size(ma) && nl_attr_get_size(a)) {
1825 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a),
1826 nl_attr_get(ma), nl_attr_get_size(ma), NULL, ds,
1828 } else if (nl_attr_get_size(a)) {
1829 odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, NULL,
/* Simple 32-bit scalar attributes share one hex formatter. */
1834 case OVS_KEY_ATTR_PRIORITY:
1835 case OVS_KEY_ATTR_SKB_MARK:
1836 case OVS_KEY_ATTR_DP_HASH:
1837 case OVS_KEY_ATTR_RECIRC_ID:
1838 ds_put_format(ds, "%#"PRIx32, nl_attr_get_u32(a));
1840 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
/* Tunnel metadata: expand the nested netlink encoding into struct
 * flow_tnl for both key and mask, then print field by field. */
1844 case OVS_KEY_ATTR_TUNNEL: {
1845 struct flow_tnl key, mask_;
1846 struct flow_tnl *mask = ma ? &mask_ : NULL;
1849 memset(mask, 0, sizeof *mask);
1850 odp_tun_key_from_attr(ma, mask);
1852 memset(&key, 0, sizeof key);
1853 if (odp_tun_key_from_attr(a, &key) == ODP_FIT_ERROR) {
1854 ds_put_format(ds, "error");
1857 format_be64(ds, "tun_id", key.tun_id, MASK(mask, tun_id), verbose);
1858 format_ipv4(ds, "src", key.ip_src, MASK(mask, ip_src), verbose);
1859 format_ipv4(ds, "dst", key.ip_dst, MASK(mask, ip_dst), verbose);
1860 format_u8x(ds, "tos", key.ip_tos, MASK(mask, ip_tos), verbose);
1861 format_u8u(ds, "ttl", key.ip_ttl, MASK(mask, ip_ttl), verbose);
1862 format_be16(ds, "tp_src", key.tp_src, MASK(mask, tp_src), verbose);
1863 format_be16(ds, "tp_dst", key.tp_dst, MASK(mask, tp_dst), verbose);
1864 format_be16(ds, "gbp_id", key.gbp_id, MASK(mask, gbp_id), verbose);
1865 format_u8x(ds, "gbp_flags", key.gbp_flags, MASK(mask, gbp_flags), verbose);
1866 format_tun_flags(ds, "flags", key.flags, MASK(mask, flags), verbose);
/* Input port: translate to a name only for exact matches. */
1870 case OVS_KEY_ATTR_IN_PORT:
1871 if (portno_names && verbose && is_exact) {
1872 char *name = odp_portno_names_get(portno_names,
1873 u32_to_odp(nl_attr_get_u32(a)));
1875 ds_put_format(ds, "%s", name);
1877 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
1880 ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
1882 ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
1887 case OVS_KEY_ATTR_ETHERNET: {
1888 const struct ovs_key_ethernet *mask = ma ? nl_attr_get(ma) : NULL;
1889 const struct ovs_key_ethernet *key = nl_attr_get(a);
1891 format_eth(ds, "src", key->eth_src, MASK(mask, eth_src), verbose);
1892 format_eth(ds, "dst", key->eth_dst, MASK(mask, eth_dst), verbose);
/* Absent VLAN mask is treated as exact (OVS_BE16_MAX). */
1896 case OVS_KEY_ATTR_VLAN:
1897 format_vlan_tci(ds, nl_attr_get_be16(a),
1898 ma ? nl_attr_get_be16(ma) : OVS_BE16_MAX, verbose);
/* MPLS carries a variable-length stack of label stack entries; key and
 * mask must agree in length. */
1901 case OVS_KEY_ATTR_MPLS: {
1902 const struct ovs_key_mpls *mpls_key = nl_attr_get(a);
1903 const struct ovs_key_mpls *mpls_mask = NULL;
1904 size_t size = nl_attr_get_size(a);
1906 if (!size || size % sizeof *mpls_key) {
1907 ds_put_format(ds, "(bad key length %"PRIuSIZE")", size);
1911 mpls_mask = nl_attr_get(ma);
1912 if (size != nl_attr_get_size(ma)) {
1913 ds_put_format(ds, "(key length %"PRIuSIZE" != "
1914 "mask length %"PRIuSIZE")",
1915 size, nl_attr_get_size(ma));
1919 format_mpls(ds, mpls_key, mpls_mask, size / sizeof *mpls_key);
1922 case OVS_KEY_ATTR_ETHERTYPE:
1923 ds_put_format(ds, "0x%04"PRIx16, ntohs(nl_attr_get_be16(a)));
1925 ds_put_format(ds, "/0x%04"PRIx16, ntohs(nl_attr_get_be16(ma)));
1929 case OVS_KEY_ATTR_IPV4: {
1930 const struct ovs_key_ipv4 *key = nl_attr_get(a);
1931 const struct ovs_key_ipv4 *mask = ma ? nl_attr_get(ma) : NULL;
1933 format_ipv4(ds, "src", key->ipv4_src, MASK(mask, ipv4_src), verbose);
1934 format_ipv4(ds, "dst", key->ipv4_dst, MASK(mask, ipv4_dst), verbose);
1935 format_u8u(ds, "proto", key->ipv4_proto, MASK(mask, ipv4_proto),
1937 format_u8x(ds, "tos", key->ipv4_tos, MASK(mask, ipv4_tos), verbose);
1938 format_u8u(ds, "ttl", key->ipv4_ttl, MASK(mask, ipv4_ttl), verbose);
1939 format_frag(ds, "frag", key->ipv4_frag, MASK(mask, ipv4_frag),
1944 case OVS_KEY_ATTR_IPV6: {
1945 const struct ovs_key_ipv6 *key = nl_attr_get(a);
1946 const struct ovs_key_ipv6 *mask = ma ? nl_attr_get(ma) : NULL;
1948 format_ipv6(ds, "src", key->ipv6_src, MASK(mask, ipv6_src), verbose);
1949 format_ipv6(ds, "dst", key->ipv6_dst, MASK(mask, ipv6_dst), verbose);
1950 format_ipv6_label(ds, "label", key->ipv6_label, MASK(mask, ipv6_label),
1952 format_u8u(ds, "proto", key->ipv6_proto, MASK(mask, ipv6_proto),
1954 format_u8x(ds, "tclass", key->ipv6_tclass, MASK(mask, ipv6_tclass),
1956 format_u8u(ds, "hlimit", key->ipv6_hlimit, MASK(mask, ipv6_hlimit),
1958 format_frag(ds, "frag", key->ipv6_frag, MASK(mask, ipv6_frag),
1963 /* These have the same structure and format. */
1964 case OVS_KEY_ATTR_TCP:
1965 case OVS_KEY_ATTR_UDP:
1966 case OVS_KEY_ATTR_SCTP: {
1967 const struct ovs_key_tcp *key = nl_attr_get(a);
1968 const struct ovs_key_tcp *mask = ma ? nl_attr_get(ma) : NULL;
1970 format_be16(ds, "src", key->tcp_src, MASK(mask, tcp_src), verbose);
1971 format_be16(ds, "dst", key->tcp_dst, MASK(mask, tcp_dst), verbose);
1975 case OVS_KEY_ATTR_TCP_FLAGS:
1977 format_flags_masked(ds, NULL, packet_tcp_flag_to_string,
1978 ntohs(nl_attr_get_be16(a)),
1979 ntohs(nl_attr_get_be16(ma)));
1981 format_flags(ds, packet_tcp_flag_to_string,
1982 ntohs(nl_attr_get_be16(a)), ',');
1986 case OVS_KEY_ATTR_ICMP: {
1987 const struct ovs_key_icmp *key = nl_attr_get(a);
1988 const struct ovs_key_icmp *mask = ma ? nl_attr_get(ma) : NULL;
1990 format_u8u(ds, "type", key->icmp_type, MASK(mask, icmp_type), verbose);
1991 format_u8u(ds, "code", key->icmp_code, MASK(mask, icmp_code), verbose);
1995 case OVS_KEY_ATTR_ICMPV6: {
1996 const struct ovs_key_icmpv6 *key = nl_attr_get(a);
1997 const struct ovs_key_icmpv6 *mask = ma ? nl_attr_get(ma) : NULL;
1999 format_u8u(ds, "type", key->icmpv6_type, MASK(mask, icmpv6_type),
2001 format_u8u(ds, "code", key->icmpv6_code, MASK(mask, icmpv6_code),
2006 case OVS_KEY_ATTR_ARP: {
2007 const struct ovs_key_arp *mask = ma ? nl_attr_get(ma) : NULL;
2008 const struct ovs_key_arp *key = nl_attr_get(a);
2010 format_ipv4(ds, "sip", key->arp_sip, MASK(mask, arp_sip), verbose);
2011 format_ipv4(ds, "tip", key->arp_tip, MASK(mask, arp_tip), verbose);
2012 format_be16(ds, "op", key->arp_op, MASK(mask, arp_op), verbose);
2013 format_eth(ds, "sha", key->arp_sha, MASK(mask, arp_sha), verbose);
2014 format_eth(ds, "tha", key->arp_tha, MASK(mask, arp_tha), verbose);
2018 case OVS_KEY_ATTR_ND: {
2019 const struct ovs_key_nd *mask = ma ? nl_attr_get(ma) : NULL;
2020 const struct ovs_key_nd *key = nl_attr_get(a);
2022 format_ipv6(ds, "target", key->nd_target, MASK(mask, nd_target),
2024 format_eth(ds, "sll", key->nd_sll, MASK(mask, nd_sll), verbose);
2025 format_eth(ds, "tll", key->nd_tll, MASK(mask, nd_tll), verbose);
/* Unknown attribute types fall back to a raw hex dump. */
2030 case OVS_KEY_ATTR_UNSPEC:
2031 case __OVS_KEY_ATTR_MAX:
2033 format_generic_odp_key(a, ds);
2035 ds_put_char(ds, '/');
2036 format_generic_odp_key(ma, ds);
2040 ds_put_char(ds, ')');
/* Builds, in 'ofp', an all-zeros ("match nothing") mask attribute shaped
 * like 'key': flat attributes become zero-filled, nested attributes recurse
 * so the mask mirrors the key's structure.  NOTE(review): the return
 * statement is elided in this view; presumably it returns the attribute
 * written into 'ofp' — TODO confirm. */
2043 static struct nlattr *
2044 generate_all_wildcard_mask(struct ofpbuf *ofp, const struct nlattr *key)
2046 const struct nlattr *a;
2048 int type = nl_attr_type(key);
2049 int size = nl_attr_get_size(key);
/* Fixed-length attribute (len >= 0): a zero-filled copy suffices. */
2051 if (odp_flow_key_attr_len(type) >=0) {
2052 nl_msg_put_unspec_zero(ofp, type, size);
/* Variable-length (nested) attribute: recurse per sub-attribute. */
2056 nested_mask = nl_msg_start_nested(ofp, type);
2057 NL_ATTR_FOR_EACH(a, left, key, nl_attr_get_size(key)) {
2058 generate_all_wildcard_mask(ofp, nl_attr_get(a));
2060 nl_msg_end_nested(ofp, nested_mask);
/* Parses a "ufid:<32 hex digits>" prefix from a flow string into 'ufid'.
 * NOTE(review): several lines (declarations, returns) are elided; the
 * visible logic scans an optional "0x", 32 hex digits split into two
 * 64-bit halves, then trailing delimiters. */
2067 odp_ufid_from_string(const char *s_, ovs_u128 *ufid)
2071 if (ovs_scan(s, "ufid:")) {
2075 if (ovs_scan(s, "0x")) {
2079 n = strspn(s, "0123456789abcdefABCDEF");
2084 if (!ovs_scan(s, "%16"SCNx64"%16"SCNx64, &ufid->u64.hi,
2089 s += strspn(s, delimiters);
/* Appends "ufid:<hex>" to 'ds' — inverse of odp_ufid_from_string(). */
2098 odp_format_ufid(const ovs_u128 *ufid, struct ds *ds)
2100 ds_put_format(ds, "ufid:%016"PRIx64"%016"PRIx64, ufid->u64.hi,
2104 /* Appends to 'ds' a string representation of the 'key_len' bytes of
2105 * OVS_KEY_ATTR_* attributes in 'key'. If non-null, additionally formats the
2106 * 'mask_len' bytes of 'mask' which apply to 'key'. If 'portno_names' is
2107 * non-null and 'verbose' is true, translates odp port number to its name. */
/* Appends to 'ds' a string representation of the flow key in 'key'
 * (key_len bytes of OVS_KEY_ATTR_* netlink attributes), merged with the
 * corresponding attributes of 'mask' when given.  Wildcarded fields are
 * hidden unless 'verbose'.  NOTE(review): several closing braces and a few
 * interior lines are elided in this view. */
2109 odp_flow_format(const struct nlattr *key, size_t key_len,
2110 const struct nlattr *mask, size_t mask_len,
2111 const struct hmap *portno_names, struct ds *ds, bool verbose)
2114 const struct nlattr *a;
2116 bool has_ethtype_key = false;
2117 const struct nlattr *ma = NULL;
2119 bool first_field = true;
/* Scratch buffer for synthesized all-wildcard masks; freed below. */
2121 ofpbuf_init(&ofp, 100);
2122 NL_ATTR_FOR_EACH (a, left, key, key_len) {
2123 bool is_nested_attr;
2124 bool is_wildcard = false;
2125 int attr_type = nl_attr_type(a);
/* Remembered so a mask-only eth_type can be appended at the end. */
2127 if (attr_type == OVS_KEY_ATTR_ETHERTYPE) {
2128 has_ethtype_key = true;
2131 is_nested_attr = (odp_flow_key_attr_len(attr_type) == -2);
/* Pair this key attribute with its mask attribute, if any; a missing
 * mask attribute means fully wildcarded. */
2133 if (mask && mask_len) {
2134 ma = nl_attr_find__(mask, mask_len, nl_attr_type(a));
2135 is_wildcard = ma ? odp_mask_attr_is_wildcard(ma) : true;
/* Nested attributes are always shown so their structure is visible. */
2138 if (verbose || !is_wildcard || is_nested_attr) {
2139 if (is_wildcard && !ma) {
2140 ma = generate_all_wildcard_mask(&ofp, a);
2143 ds_put_char(ds, ',');
2145 format_odp_key_attr(a, ma, portno_names, ds, verbose);
2146 first_field = false;
2150 ofpbuf_uninit(&ofp);
/* 'left' nonzero here means trailing bytes that were not a whole
 * attribute; dump them in hex for diagnosis. */
2155 if (left == key_len) {
2156 ds_put_cstr(ds, "<empty>");
2158 ds_put_format(ds, ",***%u leftover bytes*** (", left);
2159 for (i = 0; i < left; i++) {
2160 ds_put_format(ds, "%02x", ((const uint8_t *) a)[i]);
2162 ds_put_char(ds, ')');
/* A mask may constrain eth_type even when the key omits it; surface
 * that as "eth_type(0/MASK)". */
2164 if (!has_ethtype_key) {
2165 ma = nl_attr_find__(mask, mask_len, OVS_KEY_ATTR_ETHERTYPE);
2167 ds_put_format(ds, ",eth_type(0/0x%04"PRIx16")",
2168 ntohs(nl_attr_get_be16(ma)));
2172 ds_put_cstr(ds, "<empty>");
2176 /* Appends to 'ds' a string representation of the 'key_len' bytes of
2177 * OVS_KEY_ATTR_* attributes in 'key'. */
2179 odp_flow_key_format(const struct nlattr *key,
2180 size_t key_len, struct ds *ds)
/* Convenience wrapper: no mask, no port names, always verbose. */
2182 odp_flow_format(key, key_len, NULL, 0, NULL, ds, true);
/* Parsing counterparts of the format_*() helpers above.  Shared convention
 * (from the visible bodies): each scan_*() parses "VALUE[/MASK]" at 's',
 * presumably returning the number of characters consumed (0 on failure) —
 * TODO confirm against the elided return statements.  When 'mask' is
 * non-NULL and no "/MASK" is present, the mask is set to exact-match. */
/* Maps "no"/"first"/"later" to the OVS_FRAG_TYPE_* enum. */
2186 ovs_frag_type_from_string(const char *s, enum ovs_frag_type *type)
2188 if (!strcasecmp(s, "no")) {
2189 *type = OVS_FRAG_TYPE_NONE;
2190 } else if (!strcasecmp(s, "first")) {
2191 *type = OVS_FRAG_TYPE_FIRST;
2192 } else if (!strcasecmp(s, "later")) {
2193 *type = OVS_FRAG_TYPE_LATER;
/* Ethernet address with optional /mask. */
2203 scan_eth(const char *s, uint8_t (*key)[ETH_ADDR_LEN],
2204 uint8_t (*mask)[ETH_ADDR_LEN])
2208 if (ovs_scan(s, ETH_ADDR_SCAN_FMT"%n", ETH_ADDR_SCAN_ARGS(*key), &n)) {
2212 if (ovs_scan(s + len, "/"ETH_ADDR_SCAN_FMT"%n",
2213 ETH_ADDR_SCAN_ARGS(*mask), &n)) {
/* No explicit mask: exact match. */
2216 memset(mask, 0xff, sizeof *mask);
/* Dotted-quad IPv4 address with optional /mask. */
2225 scan_ipv4(const char *s, ovs_be32 *key, ovs_be32 *mask)
2229 if (ovs_scan(s, IP_SCAN_FMT"%n", IP_SCAN_ARGS(key), &n)) {
2233 if (ovs_scan(s + len, "/"IP_SCAN_FMT"%n",
2234 IP_SCAN_ARGS(mask), &n)) {
2237 *mask = OVS_BE32_MAX;
/* IPv6 address with optional /mask, via inet_pton(). */
2246 scan_ipv6(const char *s, ovs_be32 (*key)[4], ovs_be32 (*mask)[4])
2249 char ipv6_s[IPV6_SCAN_LEN + 1];
2251 if (ovs_scan(s, IPV6_SCAN_FMT"%n", ipv6_s, &n)
2252 && inet_pton(AF_INET6, ipv6_s, key) == 1) {
2256 if (ovs_scan(s + len, "/"IPV6_SCAN_FMT"%n", ipv6_s, &n)
2257 && inet_pton(AF_INET6, ipv6_s, mask) == 1) {
2260 memset(mask, 0xff, sizeof *mask);
/* 20-bit IPv6 flow label; rejects values with bits outside
 * IPV6_LABEL_MASK. */
2269 scan_ipv6_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
2274 if (ovs_scan(s, "%i%n", &key_, &n)
2275 && (key_ & ~IPV6_LABEL_MASK) == 0) {
2280 if (ovs_scan(s + len, "/%i%n", &mask_, &n)
2281 && (mask_ & ~IPV6_LABEL_MASK) == 0) {
2283 *mask = htonl(mask_);
2285 *mask = htonl(IPV6_LABEL_MASK);
/* 8-bit integer ("%i" accepts decimal, hex, octal). */
2294 scan_u8(const char *s, uint8_t *key, uint8_t *mask)
2298 if (ovs_scan(s, "%"SCNi8"%n", key, &n)) {
2302 if (ovs_scan(s + len, "/%"SCNi8"%n", mask, &n)) {
/* 32-bit integer. */
2314 scan_u32(const char *s, uint32_t *key, uint32_t *mask)
2318 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
2322 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
/* 16-bit integer converted to network byte order. */
2334 scan_be16(const char *s, ovs_be16 *key, ovs_be16 *mask)
2336 uint16_t key_, mask_;
2339 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
2344 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
2346 *mask = htons(mask_);
2348 *mask = OVS_BE16_MAX;
/* 64-bit integer converted to network byte order (tun_id). */
2357 scan_be64(const char *s, ovs_be64 *key, ovs_be64 *mask)
2359 uint64_t key_, mask_;
2362 if (ovs_scan(s, "%"SCNi64"%n", &key_, &n)) {
2365 *key = htonll(key_);
2367 if (ovs_scan(s + len, "/%"SCNi64"%n", &mask_, &n)) {
2369 *mask = htonll(mask_);
2371 *mask = OVS_BE64_MAX;
/* Tunnel flag list, e.g. "df|csum)"; delegates to parse_flags() and
 * requires the closing ')' of the flags( sub-group. */
2380 scan_tun_flags(const char *s, uint16_t *key, uint16_t *mask)
2382 uint32_t flags, fmask;
2385 n = parse_flags(s, flow_tun_flag_to_string, &flags,
2386 FLOW_TNL_F_MASK, mask ? &fmask : NULL);
2387 if (n >= 0 && s[n] == ')') {
/* TCP flag list, bounded by the matchable TCP_FLAGS bits. */
2398 scan_tcp_flags(const char *s, ovs_be16 *key, ovs_be16 *mask)
2400 uint32_t flags, fmask;
2403 n = parse_flags(s, packet_tcp_flag_to_string, &flags,
2404 TCP_FLAGS(OVS_BE16_MAX), mask ? &fmask : NULL);
2406 *key = htons(flags);
2408 *mask = htons(fmask);
/* Fragment type word ("no"/"first"/"later"); at most 7 letters. */
2416 scan_frag(const char *s, uint8_t *key, uint8_t *mask)
2420 enum ovs_frag_type frag_type;
2422 if (ovs_scan(s, "%7[a-z]%n", frag, &n)
2423 && ovs_frag_type_from_string(frag, &frag_type)) {
/* Port: numeric "N[/M]" or, failing that, a name looked up (up to the
 * closing ')') in 'port_names'. */
2436 scan_port(const char *s, uint32_t *key, uint32_t *mask,
2437 const struct simap *port_names)
2441 if (ovs_scan(s, "%"SCNi32"%n", key, &n)) {
2445 if (ovs_scan(s + len, "/%"SCNi32"%n", mask, &n)) {
2452 } else if (port_names) {
2453 const struct simap_node *node;
2456 len = strcspn(s, ")");
2457 node = simap_find_len(port_names, s, len);
2470 /* Helper for vlan parsing. */
/* Wraps the VLAN TCI in a struct so the SCAN_* machinery (which scans
 * into struct fields) can parse vid/pcp/cfi into one ovs_be16. */
2471 struct ovs_key_vlan__ {
/* Writes 'value' into the 'bits'-wide bitfield at 'offset' within the
 * big-endian 16-bit word *bf.  NOTE(review): return statements are elided;
 * the usage in scan_be16_bf() suggests nonzero on success, and failure when
 * 'value' does not fit in 'bits' — TODO confirm. */
2476 set_be16_bf(ovs_be16 *bf, uint8_t bits, uint8_t offset, uint16_t value)
2478 const uint16_t mask = ((1U << bits) - 1) << offset;
/* Value too wide for the field. */
2480 if (value >> bits) {
/* Clear the field, then merge the new value. */
2484 *bf = htons((ntohs(*bf) & ~mask) | (value << offset));
/* Parses "VALUE[/MASK]" into a bitfield of the TCI: key and (optionally)
 * mask each go through set_be16_bf(); without an explicit mask the whole
 * field is set to exact-match in *mask. */
2489 scan_be16_bf(const char *s, ovs_be16 *key, ovs_be16 *mask, uint8_t bits,
2492 uint16_t key_, mask_;
2495 if (ovs_scan(s, "%"SCNi16"%n", &key_, &n)) {
2498 if (set_be16_bf(key, bits, offset, key_)) {
2500 if (ovs_scan(s + len, "/%"SCNi16"%n", &mask_, &n)) {
2503 if (!set_be16_bf(mask, bits, offset, mask_)) {
/* No "/MASK": exact match on this bitfield. */
2507 *mask |= htons(((1U << bits) - 1) << offset);
/* VLAN TCI sub-fields: 12-bit VID, 3-bit PCP, 1-bit CFI. */
2517 scan_vid(const char *s, ovs_be16 *key, ovs_be16 *mask)
2519 return scan_be16_bf(s, key, mask, 12, VLAN_VID_SHIFT);
2523 scan_pcp(const char *s, ovs_be16 *key, ovs_be16 *mask)
2525 return scan_be16_bf(s, key, mask, 3, VLAN_PCP_SHIFT);
2529 scan_cfi(const char *s, ovs_be16 *key, ovs_be16 *mask)
2531 return scan_be16_bf(s, key, mask, 1, VLAN_CFI_SHIFT);
/* 32-bit analogue of set_be16_bf(), used for MPLS label stack entries. */
2536 set_be32_bf(ovs_be32 *bf, uint8_t bits, uint8_t offset, uint32_t value)
2538 const uint32_t mask = ((1U << bits) - 1) << offset;
2540 if (value >> bits) {
2544 *bf = htonl((ntohl(*bf) & ~mask) | (value << offset));
/* 32-bit analogue of scan_be16_bf(). */
2549 scan_be32_bf(const char *s, ovs_be32 *key, ovs_be32 *mask, uint8_t bits,
2552 uint32_t key_, mask_;
2555 if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
2558 if (set_be32_bf(key, bits, offset, key_)) {
2560 if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
2563 if (!set_be32_bf(mask, bits, offset, mask_)) {
2567 *mask |= htonl(((1U << bits) - 1) << offset);
/* MPLS LSE sub-fields: 20-bit label, 3-bit TC, 8-bit TTL, 1-bit BoS. */
2577 scan_mpls_label(const char *s, ovs_be32 *key, ovs_be32 *mask)
2579 return scan_be32_bf(s, key, mask, 20, MPLS_LABEL_SHIFT);
2583 scan_mpls_tc(const char *s, ovs_be32 *key, ovs_be32 *mask)
2585 return scan_be32_bf(s, key, mask, 3, MPLS_TC_SHIFT);
2589 scan_mpls_ttl(const char *s, ovs_be32 *key, ovs_be32 *mask)
2591 return scan_be32_bf(s, key, mask, 8, MPLS_TTL_SHIFT);
2595 scan_mpls_bos(const char *s, ovs_be32 *key, ovs_be32 *mask)
2597 return scan_be32_bf(s, key, mask, 1, MPLS_BOS_SHIFT);
/* Macro toolkit that parse_odp_key_mask_attr() uses to build a small
 * recursive-descent parser for "name(field=value,...)" flow syntax.
 * The macros rely on enclosing-scope locals ('s', 'key', 'mask', 'len',
 * 'start') and on pairing discipline: SCAN_BEGIN*{ ... }SCAN_END* expand to
 * matching braces.  NOTE(review): several expansion lines are elided. */
2600 /* ATTR is compile-time constant, so only the case with correct data type
2601 * will be used. However, the compiler complains about the data type for
2602 * the other cases, so we must cast to make the compiler silent. */
2603 #define SCAN_PUT_ATTR(BUF, ATTR, DATA) \
2604 if ((ATTR) == OVS_KEY_ATTR_TUNNEL) { \
/* Tunnel keys need the special nested encoding from tun_key_to_attr(). */
2605 tun_key_to_attr(BUF, (const struct flow_tnl *)(void *)&(DATA)); \
2607 nl_msg_put_unspec(BUF, ATTR, &(DATA), sizeof (DATA)); \
/* Guard: enter the body only when 's' starts with the literal NAME. */
2610 #define SCAN_IF(NAME) \
2611 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
2612 const char *start = s; \
2617 /* Usually no special initialization is needed. */
2618 #define SCAN_BEGIN(NAME, TYPE) \
/* Zero both scratch key and mask before field-by-field parsing. */
2621 memset(&skey, 0, sizeof skey); \
2622 memset(&smask, 0, sizeof smask); \
2626 /* Init as fully-masked as mask will not be scanned. */
2627 #define SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) \
2630 memset(&skey, 0, sizeof skey); \
2631 memset(&smask, 0xff, sizeof smask); \
2635 /* VLAN needs special initialization. */
2636 #define SCAN_BEGIN_INIT(NAME, TYPE, KEY_INIT, MASK_INIT) \
2638 TYPE skey = KEY_INIT; \
2639 TYPE smask = MASK_INIT; \
2643 /* Scan unnamed entry as 'TYPE' */
2644 #define SCAN_TYPE(TYPE, KEY, MASK) \
/* Dispatches to the scan_TYPE() helper; 'len' is its consumed count. */
2645 len = scan_##TYPE(s, KEY, MASK); \
2651 /* Scan named ('NAME') entry 'FIELD' as 'TYPE'. */
2652 #define SCAN_FIELD(NAME, TYPE, FIELD) \
2653 if (strncmp(s, NAME, strlen(NAME)) == 0) { \
2654 s += strlen(NAME); \
2655 SCAN_TYPE(TYPE, &skey.FIELD, mask ? &smask.FIELD : NULL); \
/* Multi-field closer: loop over comma-separated fields, then require the
 * group's closing ')'. */
2659 #define SCAN_FINISH() \
2660 } while (*s++ == ',' && len != 0); \
2661 if (s[-1] != ')') { \
/* Single-field closer: just the closing ')'. */
2665 #define SCAN_FINISH_SINGLE() \
2667 if (*s++ != ')') { \
/* Emit the parsed key (and mask, when parsing masks) as attributes;
 * all-zero masks are skipped so wildcards stay implicit. */
2671 #define SCAN_PUT(ATTR) \
2672 if (!mask || !is_all_zeros(&smask, sizeof smask)) { \
2673 SCAN_PUT_ATTR(key, ATTR, skey); \
2675 SCAN_PUT_ATTR(mask, ATTR, smask); \
2679 #define SCAN_END(ATTR) \
2685 #define SCAN_END_SINGLE(ATTR) \
2686 SCAN_FINISH_SINGLE(); \
/* Whole-attribute parsers built from the pieces above. */
2691 #define SCAN_SINGLE(NAME, TYPE, SCAN_AS, ATTR) \
2692 SCAN_BEGIN(NAME, TYPE) { \
2693 SCAN_TYPE(SCAN_AS, &skey, &smask); \
2694 } SCAN_END_SINGLE(ATTR)
2696 #define SCAN_SINGLE_FULLY_MASKED(NAME, TYPE, SCAN_AS, ATTR) \
2697 SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) { \
2698 SCAN_TYPE(SCAN_AS, &skey, NULL); \
2699 } SCAN_END_SINGLE(ATTR)
2701 /* scan_port needs one extra argument. */
2702 #define SCAN_SINGLE_PORT(NAME, TYPE, ATTR) \
2703 SCAN_BEGIN(NAME, TYPE) { \
2704 len = scan_port(s, &skey, &smask, port_names); \
2709 } SCAN_END_SINGLE(ATTR)
/* Parses one "name(...)" key (and optional mask) attribute from 's',
 * appending netlink attributes to 'key' and, when non-NULL, 'mask'.  The
 * body is a cascade of SCAN_* macro parsers, one per OVS_KEY_ATTR_* type;
 * each expands to "if the input starts with NAME, parse it and return".
 * 'port_names', when given, lets in_port() accept port names.
 * NOTE(review): fall-through/return lines are elided in this view. */
2712 parse_odp_key_mask_attr(const char *s, const struct simap *port_names,
2713 struct ofpbuf *key, struct ofpbuf *mask)
/* Simple 32-bit scalars. */
2715 SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
2716 SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
/* recirc_id has no partial-mask form: always fully masked. */
2717 SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
2718 OVS_KEY_ATTR_RECIRC_ID);
2719 SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH);
/* Tunnel metadata; serialized via tun_key_to_attr() by SCAN_PUT_ATTR. */
2721 SCAN_BEGIN("tunnel(", struct flow_tnl) {
2722 SCAN_FIELD("tun_id=", be64, tun_id);
2723 SCAN_FIELD("src=", ipv4, ip_src);
2724 SCAN_FIELD("dst=", ipv4, ip_dst);
2725 SCAN_FIELD("tos=", u8, ip_tos);
2726 SCAN_FIELD("ttl=", u8, ip_ttl);
2727 SCAN_FIELD("tp_src=", be16, tp_src);
2728 SCAN_FIELD("tp_dst=", be16, tp_dst);
2729 SCAN_FIELD("gbp_id=", be16, gbp_id);
2730 SCAN_FIELD("gbp_flags=", u8, gbp_flags);
2731 SCAN_FIELD("flags(", tun_flags, flags);
2732 } SCAN_END(OVS_KEY_ATTR_TUNNEL);
2734 SCAN_SINGLE_PORT("in_port(", uint32_t, OVS_KEY_ATTR_IN_PORT);
2736 SCAN_BEGIN("eth(", struct ovs_key_ethernet) {
2737 SCAN_FIELD("src=", eth, eth_src);
2738 SCAN_FIELD("dst=", eth, eth_dst);
2739 } SCAN_END(OVS_KEY_ATTR_ETHERNET);
/* VLAN TCI: key and mask both start with the CFI bit set. */
2741 SCAN_BEGIN_INIT("vlan(", struct ovs_key_vlan__,
2742 { htons(VLAN_CFI) }, { htons(VLAN_CFI) }) {
2743 SCAN_FIELD("vid=", vid, tci);
2744 SCAN_FIELD("pcp=", pcp, tci);
2745 SCAN_FIELD("cfi=", cfi, tci);
2746 } SCAN_END(OVS_KEY_ATTR_VLAN);
2748 SCAN_SINGLE("eth_type(", ovs_be16, be16, OVS_KEY_ATTR_ETHERTYPE);
/* MPLS label stack entry, parsed bitfield by bitfield. */
2750 SCAN_BEGIN("mpls(", struct ovs_key_mpls) {
2751 SCAN_FIELD("label=", mpls_label, mpls_lse);
2752 SCAN_FIELD("tc=", mpls_tc, mpls_lse);
2753 SCAN_FIELD("ttl=", mpls_ttl, mpls_lse);
2754 SCAN_FIELD("bos=", mpls_bos, mpls_lse);
2755 } SCAN_END(OVS_KEY_ATTR_MPLS);
2757 SCAN_BEGIN("ipv4(", struct ovs_key_ipv4) {
2758 SCAN_FIELD("src=", ipv4, ipv4_src);
2759 SCAN_FIELD("dst=", ipv4, ipv4_dst);
2760 SCAN_FIELD("proto=", u8, ipv4_proto);
2761 SCAN_FIELD("tos=", u8, ipv4_tos);
2762 SCAN_FIELD("ttl=", u8, ipv4_ttl);
2763 SCAN_FIELD("frag=", frag, ipv4_frag);
2764 } SCAN_END(OVS_KEY_ATTR_IPV4);
2766 SCAN_BEGIN("ipv6(", struct ovs_key_ipv6) {
2767 SCAN_FIELD("src=", ipv6, ipv6_src);
2768 SCAN_FIELD("dst=", ipv6, ipv6_dst);
2769 SCAN_FIELD("label=", ipv6_label, ipv6_label);
2770 SCAN_FIELD("proto=", u8, ipv6_proto);
2771 SCAN_FIELD("tclass=", u8, ipv6_tclass);
2772 SCAN_FIELD("hlimit=", u8, ipv6_hlimit);
2773 SCAN_FIELD("frag=", frag, ipv6_frag);
2774 } SCAN_END(OVS_KEY_ATTR_IPV6);
2776 SCAN_BEGIN("tcp(", struct ovs_key_tcp) {
2777 SCAN_FIELD("src=", be16, tcp_src);
2778 SCAN_FIELD("dst=", be16, tcp_dst);
2779 } SCAN_END(OVS_KEY_ATTR_TCP);
2781 SCAN_SINGLE("tcp_flags(", ovs_be16, tcp_flags, OVS_KEY_ATTR_TCP_FLAGS);
2783 SCAN_BEGIN("udp(", struct ovs_key_udp) {
2784 SCAN_FIELD("src=", be16, udp_src);
2785 SCAN_FIELD("dst=", be16, udp_dst);
2786 } SCAN_END(OVS_KEY_ATTR_UDP);
2788 SCAN_BEGIN("sctp(", struct ovs_key_sctp) {
2789 SCAN_FIELD("src=", be16, sctp_src);
2790 SCAN_FIELD("dst=", be16, sctp_dst);
2791 } SCAN_END(OVS_KEY_ATTR_SCTP);
2793 SCAN_BEGIN("icmp(", struct ovs_key_icmp) {
2794 SCAN_FIELD("type=", u8, icmp_type);
2795 SCAN_FIELD("code=", u8, icmp_code);
2796 } SCAN_END(OVS_KEY_ATTR_ICMP);
2798 SCAN_BEGIN("icmpv6(", struct ovs_key_icmpv6) {
2799 SCAN_FIELD("type=", u8, icmpv6_type);
2800 SCAN_FIELD("code=", u8, icmpv6_code);
2801 } SCAN_END(OVS_KEY_ATTR_ICMPV6);
2803 SCAN_BEGIN("arp(", struct ovs_key_arp) {
2804 SCAN_FIELD("sip=", ipv4, arp_sip);
2805 SCAN_FIELD("tip=", ipv4, arp_tip);
2806 SCAN_FIELD("op=", be16, arp_op);
2807 SCAN_FIELD("sha=", eth, arp_sha);
2808 SCAN_FIELD("tha=", eth, arp_tha);
2809 } SCAN_END(OVS_KEY_ATTR_ARP);
2811 SCAN_BEGIN("nd(", struct ovs_key_nd) {
2812 SCAN_FIELD("target=", ipv6, nd_target);
2813 SCAN_FIELD("sll=", eth, nd_sll);
2814 SCAN_FIELD("tll=", eth, nd_tll);
2815 } SCAN_END(OVS_KEY_ATTR_ND);
2817 /* Encap open-coded. */
/* encap(...) cannot use the macros because it nests arbitrary key/mask
 * attributes, parsed by recursing into this same function. */
2818 if (!strncmp(s, "encap(", 6)) {
2819 const char *start = s;
2820 size_t encap, encap_mask = 0;
2822 encap = nl_msg_start_nested(key, OVS_KEY_ATTR_ENCAP);
2824 encap_mask = nl_msg_start_nested(mask, OVS_KEY_ATTR_ENCAP);
/* Loop over comma-separated inner attributes until ')'. */
2831 s += strspn(s, delimiters);
2834 } else if (*s == ')') {
/* Recursive descent into the nested attribute list. */
2838 retval = parse_odp_key_mask_attr(s, port_names, key, mask);
2846 nl_msg_end_nested(key, encap);
2848 nl_msg_end_nested(mask, encap_mask);
2857 /* Parses the string representation of a datapath flow key, in the
2858 * format output by odp_flow_key_format(). Returns 0 if successful,
2859 * otherwise a positive errno value. On success, the flow key is
2860 * appended to 'key' as a series of Netlink attributes. On failure, no
2861 * data is appended to 'key'. Either way, 'key''s data might be
2864 * If 'port_names' is nonnull, it points to an simap that maps from a port name
2865 * to a port number. (Port names may be used instead of port numbers in
2868 * On success, the attributes appended to 'key' are individually syntactically
2869 * valid, but they may not be valid as a sequence. 'key' might, for example,
2870 * have duplicated keys. odp_flow_key_to_flow() will detect those errors. */
2872 odp_flow_from_string(const char *s, const struct simap *port_names,
2873 struct ofpbuf *key, struct ofpbuf *mask)
/* Remember the original size so 'key' can be rolled back on a parse error,
 * keeping the "no data appended on failure" contract above. */
2875 const size_t old_size = key->size;
/* Skip inter-attribute separators (see file-scope 'delimiters'). */
2879 s += strspn(s, delimiters);
/* Parse one key[/mask] attribute; appends Netlink attrs to 'key'/'mask'. */
2884 retval = parse_odp_key_mask_attr(s, port_names, key, mask);
/* Error path: truncate 'key' back to its original contents. */
2886 key->size = old_size;
/* Converts the 'nw_frag' bits of a struct flow (or of a flow mask when
 * 'is_mask' is true) to the datapath's 8-bit OVS_FRAG_TYPE_* encoding.
 * NOTE(review): some original lines (e.g. the 'is_mask' branch guard) are
 * not visible in this view — confirm against the full file. */
2896 ovs_to_odp_frag(uint8_t nw_frag, bool is_mask)
2899 /* Netlink interface 'enum ovs_frag_type' is an 8-bit enumeration type,
2900 * not a set of flags or bitfields. Hence, if the struct flow nw_frag
2901 * mask, which is a set of bits, has the FLOW_NW_FRAG_ANY as zero, we
2902 * must use a zero mask for the netlink frag field, and all ones mask
/* Mask case: all-ones when any frag bit is significant, else all-zeros. */
2904 return (nw_frag & FLOW_NW_FRAG_ANY) ? UINT8_MAX : 0;
/* Key case: map flag bits to the enumeration (none / later / first). */
2906 return !(nw_frag & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_NONE
2907 : nw_frag & FLOW_NW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
2908 : OVS_FRAG_TYPE_FIRST;
/* Forward declarations for helpers that copy individual protocol fields
 * between a struct flow and the corresponding OVS_KEY_ATTR_* structs.
 * get_* extracts from a flow; put_* stores into a flow. The IPv4/IPv6
 * variants take an extra parameter (declaration split across lines). */
2911 static void get_ethernet_key(const struct flow *, struct ovs_key_ethernet *);
2912 static void put_ethernet_key(const struct ovs_key_ethernet *, struct flow *);
2913 static void get_ipv4_key(const struct flow *, struct ovs_key_ipv4 *,
2915 static void put_ipv4_key(const struct ovs_key_ipv4 *, struct flow *,
2917 static void get_ipv6_key(const struct flow *, struct ovs_key_ipv6 *,
2919 static void put_ipv6_key(const struct ovs_key_ipv6 *, struct flow *,
2921 static void get_arp_key(const struct flow *, struct ovs_key_arp *);
2922 static void put_arp_key(const struct ovs_key_arp *, struct flow *);
2923 static void get_nd_key(const struct flow *, struct ovs_key_nd *);
2924 static void put_nd_key(const struct ovs_key_nd *, struct flow *);
2926 /* These share the same layout. */
2928 struct ovs_key_tcp tcp;
2929 struct ovs_key_udp udp;
2930 struct ovs_key_sctp sctp;
/* Accessors for the shared src/dst transport-port layout above. */
2933 static void get_tp_key(const struct flow *, union ovs_key_tp *);
2934 static void put_tp_key(const union ovs_key_tp *, struct flow *);
/* Serializes either 'flow' or 'mask' (selected by 'export_mask') as a
 * series of OVS_KEY_ATTR_* Netlink attributes appended to 'buf'.
 * 'flow' always drives the branching (which attributes to emit); 'data'
 * supplies the values. MPLS output is capped at 'max_mpls_depth' labels;
 * 'recirc' presumably gates the recirc/dp_hash attributes — the guard
 * lines are not visible in this view, confirm in the full file. */
2937 odp_flow_key_from_flow__(struct ofpbuf *buf, const struct flow *flow,
2938 const struct flow *mask, odp_port_t odp_in_port,
2939 size_t max_mpls_depth, bool recirc, bool export_mask)
2941 struct ovs_key_ethernet *eth_key;
/* When exporting a mask, attribute payloads come from 'mask' instead. */
2943 const struct flow *data = export_mask ? mask : flow;
2945 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
2947 if (flow->tunnel.ip_dst || export_mask) {
2948 tun_key_to_attr(buf, &data->tunnel);
2951 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, data->pkt_mark);
2954 nl_msg_put_u32(buf, OVS_KEY_ATTR_RECIRC_ID, data->recirc_id);
2955 nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, data->dp_hash);
2958 /* Add an ingress port attribute if this is a mask or 'odp_in_port'
2959 * is not the magical value "ODPP_NONE". */
2960 if (export_mask || odp_in_port != ODPP_NONE) {
2961 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, odp_in_port);
2964 eth_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ETHERNET,
2966 get_ethernet_key(data, eth_key);
/* VLAN-tagged (or 802.1Q ethertype) flows get VLAN + nested ENCAP attrs. */
2968 if (flow->vlan_tci != htons(0) || flow->dl_type == htons(ETH_TYPE_VLAN)) {
2970 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
2972 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_TYPE_VLAN));
2974 nl_msg_put_be16(buf, OVS_KEY_ATTR_VLAN, data->vlan_tci);
2975 encap = nl_msg_start_nested(buf, OVS_KEY_ATTR_ENCAP);
2976 if (flow->vlan_tci == htons(0)) {
2983 if (ntohs(flow->dl_type) < ETH_TYPE_MIN) {
2984 /* For backwards compatibility with kernels that don't support
2985 * wildcarding, the following convention is used to encode the
2986 * OVS_KEY_ATTR_ETHERTYPE for key and mask:
2989 * -------- -------- -------
2990 * >0x5ff 0xffff Specified Ethernet II Ethertype.
2991 * >0x5ff 0 Any Ethernet II or non-Ethernet II frame.
2992 * <none> 0xffff Any non-Ethernet II frame (except valid
2993 * 802.3 SNAP packet with valid eth_type).
2996 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
3001 nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, data->dl_type);
/* L3: emit the attribute matching the flow's Ethertype. */
3003 if (flow->dl_type == htons(ETH_TYPE_IP)) {
3004 struct ovs_key_ipv4 *ipv4_key;
3006 ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
3008 get_ipv4_key(data, ipv4_key, export_mask);
3009 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
3010 struct ovs_key_ipv6 *ipv6_key;
3012 ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
3014 get_ipv6_key(data, ipv6_key, export_mask);
3015 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
3016 flow->dl_type == htons(ETH_TYPE_RARP)) {
3017 struct ovs_key_arp *arp_key;
3019 arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
3021 get_arp_key(data, arp_key);
3022 } else if (eth_type_mpls(flow->dl_type)) {
3023 struct ovs_key_mpls *mpls_key;
/* Emit up to 'max_mpls_depth' MPLS label-stack entries. */
3026 n = flow_count_mpls_labels(flow, NULL);
3027 n = MIN(n, max_mpls_depth);
3028 mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
3029 n * sizeof *mpls_key);
3030 for (i = 0; i < n; i++) {
3031 mpls_key[i].mpls_lse = data->mpls_lse[i];
/* L4: only for IP flows that are not "later" fragments (no L4 header). */
3035 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3036 if (flow->nw_proto == IPPROTO_TCP) {
3037 union ovs_key_tp *tcp_key;
3039 tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
3041 get_tp_key(data, tcp_key);
3042 if (data->tcp_flags) {
3043 nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
3045 } else if (flow->nw_proto == IPPROTO_UDP) {
3046 union ovs_key_tp *udp_key;
3048 udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
3050 get_tp_key(data, udp_key);
3051 } else if (flow->nw_proto == IPPROTO_SCTP) {
3052 union ovs_key_tp *sctp_key;
3054 sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
3056 get_tp_key(data, sctp_key);
3057 } else if (flow->dl_type == htons(ETH_TYPE_IP)
3058 && flow->nw_proto == IPPROTO_ICMP) {
3059 struct ovs_key_icmp *icmp_key;
/* ICMP type/code live in tp_src/tp_dst (network byte order). */
3061 icmp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMP,
3063 icmp_key->icmp_type = ntohs(data->tp_src);
3064 icmp_key->icmp_code = ntohs(data->tp_dst);
3065 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)
3066 && flow->nw_proto == IPPROTO_ICMPV6) {
3067 struct ovs_key_icmpv6 *icmpv6_key;
3069 icmpv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMPV6,
3070 sizeof *icmpv6_key);
3071 icmpv6_key->icmpv6_type = ntohs(data->tp_src);
3072 icmpv6_key->icmpv6_code = ntohs(data->tp_dst);
/* IPv6 Neighbor Discovery: emit OVS_KEY_ATTR_ND only for NS/NA
 * messages; for a mask, only when type/code are exact-matched. */
3074 if (flow->tp_dst == htons(0)
3075 && (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)
3076 || flow->tp_src == htons(ND_NEIGHBOR_ADVERT))
3077 && (!export_mask || (data->tp_src == htons(0xffff)
3078 && data->tp_dst == htons(0xffff)))) {
3080 struct ovs_key_nd *nd_key;
3082 nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
3084 memcpy(nd_key->nd_target, &data->nd_target,
3085 sizeof nd_key->nd_target);
/* ND link-layer addresses are stored in the ARP SHA/THA fields. */
3086 memcpy(nd_key->nd_sll, data->arp_sha, ETH_ADDR_LEN);
3087 memcpy(nd_key->nd_tll, data->arp_tha, ETH_ADDR_LEN);
3094 nl_msg_end_nested(buf, encap);
3098 /* Appends a representation of 'flow' as OVS_KEY_ATTR_* attributes to 'buf'.
3099 * 'flow->in_port' is ignored (since it is likely to be an OpenFlow port
3100 * number rather than a datapath port number). Instead, if 'odp_in_port'
3101 * is anything other than ODPP_NONE, it is included in 'buf' as the input
3104 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
3105 * capable of being expanded to allow for that much space.
3107 * 'recirc' indicates support for recirculation fields. If this is true, then
3108 * these fields will always be serialised. */
3110 odp_flow_key_from_flow(struct ofpbuf *buf, const struct flow *flow,
3111 const struct flow *mask, odp_port_t odp_in_port,
/* Thin wrapper: serialize the key (not the mask) with unlimited MPLS
 * depth; 'export_mask' is false. */
3114 odp_flow_key_from_flow__(buf, flow, mask, odp_in_port, SIZE_MAX, recirc,
3118 /* Appends a representation of 'mask' as OVS_KEY_ATTR_* attributes to
3119 * 'buf'. 'flow' is used as a template to determine how to interpret
3120 * 'mask'. For example, the 'dl_type' of 'mask' describes the mask, but
3121 * it doesn't indicate whether the other fields should be interpreted as
3122 * ARP, IPv4, IPv6, etc.
3124 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
3125 * capable of being expanded to allow for that much space.
3127 * 'recirc' indicates support for recirculation fields. If this is true, then
3128 * these fields will always be serialised. */
3130 odp_flow_key_from_mask(struct ofpbuf *buf, const struct flow *mask,
3131 const struct flow *flow, uint32_t odp_in_port_mask,
3132 size_t max_mpls_depth, bool recirc)
/* Thin wrapper: 'export_mask' is true, so attribute payloads come from
 * 'mask' while 'flow' drives the branching. */
3134 odp_flow_key_from_flow__(buf, flow, mask, u32_to_odp(odp_in_port_mask),
3135 max_mpls_depth, recirc, true);
3138 /* Generate ODP flow key from the given packet metadata */
3140 odp_key_from_pkt_metadata(struct ofpbuf *buf, const struct pkt_metadata *md)
/* Emits only metadata attributes (priority, optional tunnel, skb mark,
 * optional in_port) — no packet-header attributes. */
3142 nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
/* Tunnel attribute is emitted only when tunnel metadata is present. */
3144 if (md->tunnel.ip_dst) {
3145 tun_key_to_attr(buf, &md->tunnel);
3148 nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, md->pkt_mark);
3150 /* Add an ingress port attribute if 'odp_in_port' is not the magical
3151 * value "ODPP_NONE". */
3152 if (md->in_port.odp_port != ODPP_NONE) {
3153 nl_msg_put_odp_port(buf, OVS_KEY_ATTR_IN_PORT, md->in_port.odp_port);
3157 /* Generate packet metadata from the given ODP flow key. */
3159 odp_key_to_pkt_metadata(const struct nlattr *key, size_t key_len,
3160 struct pkt_metadata *md)
3162 const struct nlattr *nla;
/* Bitmap of metadata attributes still to be found; used for an early
 * exit once every interesting attribute has been consumed. */
3164 uint32_t wanted_attrs = 1u << OVS_KEY_ATTR_PRIORITY |
3165 1u << OVS_KEY_ATTR_SKB_MARK | 1u << OVS_KEY_ATTR_TUNNEL |
3166 1u << OVS_KEY_ATTR_IN_PORT;
/* Start from defaults (in_port = ODPP_NONE) and overwrite from attrs. */
3168 *md = PKT_METADATA_INITIALIZER(ODPP_NONE);
3170 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
3171 uint16_t type = nl_attr_type(nla);
3172 size_t len = nl_attr_get_size(nla);
3173 int expected_len = odp_flow_key_attr_len(type);
/* Attributes with a known fixed length must match it exactly. */
3175 if (len != expected_len && expected_len >= 0) {
3180 case OVS_KEY_ATTR_RECIRC_ID:
3181 md->recirc_id = nl_attr_get_u32(nla);
3182 wanted_attrs &= ~(1u << OVS_KEY_ATTR_RECIRC_ID);
3184 case OVS_KEY_ATTR_DP_HASH:
3185 md->dp_hash = nl_attr_get_u32(nla);
3186 wanted_attrs &= ~(1u << OVS_KEY_ATTR_DP_HASH);
3188 case OVS_KEY_ATTR_PRIORITY:
3189 md->skb_priority = nl_attr_get_u32(nla);
3190 wanted_attrs &= ~(1u << OVS_KEY_ATTR_PRIORITY);
3192 case OVS_KEY_ATTR_SKB_MARK:
3193 md->pkt_mark = nl_attr_get_u32(nla);
3194 wanted_attrs &= ~(1u << OVS_KEY_ATTR_SKB_MARK);
3196 case OVS_KEY_ATTR_TUNNEL: {
3197 enum odp_key_fitness res;
3199 res = odp_tun_key_from_attr(nla, &md->tunnel);
/* A malformed tunnel attribute clears the tunnel metadata rather
 * than failing the whole key. */
3200 if (res == ODP_FIT_ERROR) {
3201 memset(&md->tunnel, 0, sizeof md->tunnel);
3202 } else if (res == ODP_FIT_PERFECT) {
3203 wanted_attrs &= ~(1u << OVS_KEY_ATTR_TUNNEL);
3207 case OVS_KEY_ATTR_IN_PORT:
3208 md->in_port.odp_port = nl_attr_get_odp_port(nla);
3209 wanted_attrs &= ~(1u << OVS_KEY_ATTR_IN_PORT);
/* Stop scanning as soon as all wanted attributes were seen. */
3215 if (!wanted_attrs) {
3216 return; /* Have everything. */
/* Hashes 'key_len' bytes of Netlink attributes in 'key' as 32-bit words.
 * The build assertion guarantees Netlink alignment keeps 'key' suitably
 * aligned for the uint32_t cast. */
3222 odp_flow_key_hash(const struct nlattr *key, size_t key_len)
3224 BUILD_ASSERT_DECL(!(NLA_ALIGNTO % sizeof(uint32_t)));
3225 return hash_words(ALIGNED_CAST(const uint32_t *, key),
3226 key_len / sizeof(uint32_t), 0);
/* Rate-limited debug logging helper: logs 'title', the names of the
 * attributes set in bitmap 'attrs' (plus 'out_of_range_attr' if nonzero),
 * and the formatted flow key. */
3230 log_odp_key_attributes(struct vlog_rate_limit *rl, const char *title,
3231 uint64_t attrs, int out_of_range_attr,
3232 const struct nlattr *key, size_t key_len)
/* Bail out early if this message would be rate-limited anyway. */
3237 if (VLOG_DROP_DBG(rl)) {
/* Name each attribute bit that is set in 'attrs'. */
3242 for (i = 0; i < 64; i++) {
3243 if (attrs & (UINT64_C(1) << i)) {
3244 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3246 ds_put_format(&s, " %s",
3247 ovs_key_attr_to_string(i, namebuf, sizeof namebuf));
3250 if (out_of_range_attr) {
3251 ds_put_format(&s, " %d (and possibly others)", out_of_range_attr);
3254 ds_put_cstr(&s, ": ");
3255 odp_flow_key_format(key, key_len, &s);
3257 VLOG_DBG("%s:%s", title, ds_cstr(&s));
/* Converse of ovs_to_odp_frag(): converts a datapath OVS_FRAG_TYPE_*
 * value (or mask, when 'is_mask') to struct flow 'nw_frag' bits.
 * Returns 0xff on an out-of-range value.
 * NOTE(review): the 'is_mask' branch guard lines are not visible in this
 * view — confirm against the full file. */
3262 odp_to_ovs_frag(uint8_t odp_frag, bool is_mask)
3264 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Mask case: any nonzero datapath mask means "match all frag bits". */
3267 return odp_frag ? FLOW_NW_FRAG_MASK : 0;
3270 if (odp_frag > OVS_FRAG_TYPE_LATER) {
3271 VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
3272 return 0xff; /* Error. */
3275 return (odp_frag == OVS_FRAG_TYPE_NONE) ? 0
3276 : (odp_frag == OVS_FRAG_TYPE_FIRST) ? FLOW_NW_FRAG_ANY
3277 : FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
/* Scans the 'key_len' bytes of Netlink attributes in 'key', validating
 * lengths and rejecting duplicates. On success, fills 'attrs[]' (indexed
 * by attribute type; filling line not visible in this view), sets
 * '*present_attrsp' to the bitmap of in-range types seen and
 * '*out_of_range_attrp' to the last type > OVS_KEY_ATTR_MAX (0 if none). */
3281 parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
3282 const struct nlattr *attrs[], uint64_t *present_attrsp,
3283 int *out_of_range_attrp)
3285 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3286 const struct nlattr *nla;
3287 uint64_t present_attrs;
/* The bitmap must be wide enough for every in-range attribute type. */
3290 BUILD_ASSERT(OVS_KEY_ATTR_MAX < CHAR_BIT * sizeof present_attrs);
3292 *out_of_range_attrp = 0;
3293 NL_ATTR_FOR_EACH (nla, left, key, key_len) {
3294 uint16_t type = nl_attr_type(nla);
3295 size_t len = nl_attr_get_size(nla);
3296 int expected_len = odp_flow_key_attr_len(type);
/* Fixed-length attributes must have exactly the expected length. */
3298 if (len != expected_len && expected_len >= 0) {
3299 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3301 VLOG_ERR_RL(&rl, "attribute %s has length %"PRIuSIZE" but should have "
3302 "length %d", ovs_key_attr_to_string(type, namebuf,
/* Out-of-range types are recorded, not fatal; in-range duplicates are. */
3308 if (type > OVS_KEY_ATTR_MAX) {
3309 *out_of_range_attrp = type;
3311 if (present_attrs & (UINT64_C(1) << type)) {
3312 char namebuf[OVS_KEY_ATTR_BUFSIZE];
3314 VLOG_ERR_RL(&rl, "duplicate %s attribute in flow key",
3315 ovs_key_attr_to_string(type,
3316 namebuf, sizeof namebuf));
3320 present_attrs |= UINT64_C(1) << type;
/* Leftover bytes that do not form a whole attribute are an error. */
3325 VLOG_ERR_RL(&rl, "trailing garbage in flow key");
3329 *present_attrsp = present_attrs;
/* Compares the attributes actually present against the set that was
 * expected for this flow, returning an ODP_FIT_* verdict:
 * missing expected attributes -> TOO_LITTLE; extra or out-of-range
 * attributes -> TOO_MUCH; exact match -> PERFECT. */
3333 static enum odp_key_fitness
3334 check_expectations(uint64_t present_attrs, int out_of_range_attr,
3335 uint64_t expected_attrs,
3336 const struct nlattr *key, size_t key_len)
3338 uint64_t missing_attrs;
3339 uint64_t extra_attrs;
3341 missing_attrs = expected_attrs & ~present_attrs;
3342 if (missing_attrs) {
3343 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3344 log_odp_key_attributes(&rl, "expected but not present",
3345 missing_attrs, 0, key, key_len);
3346 return ODP_FIT_TOO_LITTLE;
3349 extra_attrs = present_attrs & ~expected_attrs;
3350 if (extra_attrs || out_of_range_attr) {
3351 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3352 log_odp_key_attributes(&rl, "present but not expected",
3353 extra_attrs, out_of_range_attr, key, key_len);
3354 return ODP_FIT_TOO_MUCH;
3357 return ODP_FIT_PERFECT;
/* Reads OVS_KEY_ATTR_ETHERTYPE (if present) into 'flow->dl_type' and
 * updates '*expected_attrs'. When 'flow' != 'src_flow' this parses a
 * mask; see the backward-compatibility convention documented in
 * odp_flow_key_from_flow__() for the < ETH_TYPE_MIN cases.
 * NOTE(review): the return statements are in lines not visible here. */
3361 parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
3362 uint64_t present_attrs, uint64_t *expected_attrs,
3363 struct flow *flow, const struct flow *src_flow)
3365 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Mask parsing is signalled by 'flow' and 'src_flow' differing. */
3366 bool is_mask = flow != src_flow;
3368 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
3369 flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
/* A key (not mask) must carry a valid Ethernet II Ethertype. */
3370 if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
3371 VLOG_ERR_RL(&rl, "invalid Ethertype %"PRIu16" in flow key",
3372 ntohs(flow->dl_type));
/* Non-Ethernet II frames only accept an exact-match (0xffff) mask. */
3375 if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
3376 flow->dl_type != htons(0xffff)) {
3379 *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
/* No ETHERTYPE attribute: key gets FLOW_DL_TYPE_NONE ... */
3382 flow->dl_type = htons(FLOW_DL_TYPE_NONE);
/* ... but a mask for a non-Ethernet II frame must be present. */
3383 } else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
3384 /* See comments in odp_flow_key_from_flow__(). */
3385 VLOG_ERR_RL(&rl, "mask expected for non-Ethernet II frame");
/* Parses the L2.5 (MPLS) and higher-layer attributes of a flow key into
 * 'flow', branching on 'src_flow->dl_type' / 'nw_proto' (so a mask is
 * interpreted according to the already-parsed key). 'check_start'/
 * 'check_len' cover the L3 struct for the mask-validity check below;
 * 'expected_bit' records which L3/L4 attribute was consumed. Ends with
 * check_expectations() against the accumulated 'expected_attrs'. */
3392 static enum odp_key_fitness
3393 parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
3394 uint64_t present_attrs, int out_of_range_attr,
3395 uint64_t expected_attrs, struct flow *flow,
3396 const struct nlattr *key, size_t key_len,
3397 const struct flow *src_flow)
3399 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3400 bool is_mask = src_flow != flow;
3401 const void *check_start = NULL;
3402 size_t check_len = 0;
3403 enum ovs_key_attr expected_bit = 0xff;
/* --- L2.5 / L3 --- */
3405 if (eth_type_mpls(src_flow->dl_type)) {
3406 if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
3407 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
3409 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
3410 size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
3411 const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
3412 int n = size / sizeof(ovs_be32);
/* MPLS attribute must hold a whole, nonzero number of 32-bit LSEs. */
3415 if (!size || size % sizeof(ovs_be32)) {
3416 return ODP_FIT_ERROR;
3418 if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
3419 return ODP_FIT_ERROR;
/* Copy up to FLOW_MAX_MPLS_LABELS entries of the label stack. */
3422 for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
3423 flow->mpls_lse[i] = mpls_lse[i];
3425 if (n > FLOW_MAX_MPLS_LABELS) {
3426 return ODP_FIT_TOO_MUCH;
3430 /* BOS may be set only in the innermost label. */
3431 for (i = 0; i < n - 1; i++) {
3432 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
3433 return ODP_FIT_ERROR;
3437 /* BOS must be set in the innermost label. */
3438 if (n < FLOW_MAX_MPLS_LABELS
3439 && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
3440 return ODP_FIT_TOO_LITTLE;
3446 } else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
3448 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
3450 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
3451 const struct ovs_key_ipv4 *ipv4_key;
3453 ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
3454 put_ipv4_key(ipv4_key, flow, is_mask);
/* put_*_key() stores an odp_to_ovs_frag()-style result; values above
 * FLOW_NW_FRAG_MASK indicate an invalid frag field. */
3455 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
3456 return ODP_FIT_ERROR;
3459 check_start = ipv4_key;
3460 check_len = sizeof *ipv4_key;
3461 expected_bit = OVS_KEY_ATTR_IPV4;
3464 } else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
3466 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
3468 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
3469 const struct ovs_key_ipv6 *ipv6_key;
3471 ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
3472 put_ipv6_key(ipv6_key, flow, is_mask);
3473 if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
3474 return ODP_FIT_ERROR;
3477 check_start = ipv6_key;
3478 check_len = sizeof *ipv6_key;
3479 expected_bit = OVS_KEY_ATTR_IPV6;
3482 } else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
3483 src_flow->dl_type == htons(ETH_TYPE_RARP)) {
3485 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
3487 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
3488 const struct ovs_key_arp *arp_key;
3490 arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
/* struct flow stores the ARP opcode in 8 bits; reject wider values. */
3491 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
3492 VLOG_ERR_RL(&rl, "unsupported ARP opcode %"PRIu16" in flow "
3493 "key", ntohs(arp_key->arp_op));
3494 return ODP_FIT_ERROR;
3496 put_arp_key(arp_key, flow);
3498 check_start = arp_key;
3499 check_len = sizeof *arp_key;
3500 expected_bit = OVS_KEY_ATTR_ARP;
/* A nonzero L3 mask requires an exact-match dl_type. */
3506 if (check_len > 0) { /* Happens only when 'is_mask'. */
3507 if (!is_all_zeros(check_start, check_len) &&
3508 flow->dl_type != htons(0xffff)) {
3509 return ODP_FIT_ERROR;
3511 expected_attrs |= UINT64_C(1) << expected_bit;
/* --- L4 --- */
3515 expected_bit = OVS_KEY_ATTR_UNSPEC;
3516 if (src_flow->nw_proto == IPPROTO_TCP
3517 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
3518 src_flow->dl_type == htons(ETH_TYPE_IPV6))
3519 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3521 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
3523 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
3524 const union ovs_key_tp *tcp_key;
3526 tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
3527 put_tp_key(tcp_key, flow);
3528 expected_bit = OVS_KEY_ATTR_TCP;
3530 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
3531 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
3532 flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
3534 } else if (src_flow->nw_proto == IPPROTO_UDP
3535 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
3536 src_flow->dl_type == htons(ETH_TYPE_IPV6))
3537 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3539 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
3541 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
3542 const union ovs_key_tp *udp_key;
3544 udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
3545 put_tp_key(udp_key, flow);
3546 expected_bit = OVS_KEY_ATTR_UDP;
3548 } else if (src_flow->nw_proto == IPPROTO_SCTP
3549 && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
3550 src_flow->dl_type == htons(ETH_TYPE_IPV6))
3551 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3553 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
3555 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
3556 const union ovs_key_tp *sctp_key;
3558 sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
3559 put_tp_key(sctp_key, flow);
3560 expected_bit = OVS_KEY_ATTR_SCTP;
3562 } else if (src_flow->nw_proto == IPPROTO_ICMP
3563 && src_flow->dl_type == htons(ETH_TYPE_IP)
3564 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3566 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
3568 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
3569 const struct ovs_key_icmp *icmp_key;
/* ICMP type/code are carried in tp_src/tp_dst, network byte order. */
3571 icmp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMP]);
3572 flow->tp_src = htons(icmp_key->icmp_type);
3573 flow->tp_dst = htons(icmp_key->icmp_code);
3574 expected_bit = OVS_KEY_ATTR_ICMP;
3576 } else if (src_flow->nw_proto == IPPROTO_ICMPV6
3577 && src_flow->dl_type == htons(ETH_TYPE_IPV6)
3578 && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
3580 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
3582 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
3583 const struct ovs_key_icmpv6 *icmpv6_key;
3585 icmpv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMPV6]);
3586 flow->tp_src = htons(icmpv6_key->icmpv6_type);
3587 flow->tp_dst = htons(icmpv6_key->icmpv6_code);
3588 expected_bit = OVS_KEY_ATTR_ICMPV6;
/* Neighbor Discovery (NS/NA only) may add an OVS_KEY_ATTR_ND. */
3589 if (src_flow->tp_dst == htons(0) &&
3590 (src_flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
3591 src_flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
3593 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
3595 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
3596 const struct ovs_key_nd *nd_key;
3598 nd_key = nl_attr_get(attrs[OVS_KEY_ATTR_ND]);
3599 memcpy(&flow->nd_target, nd_key->nd_target,
3600 sizeof flow->nd_target);
/* ND link-layer addresses map onto the ARP SHA/THA fields. */
3601 memcpy(flow->arp_sha, nd_key->nd_sll, ETH_ADDR_LEN);
3602 memcpy(flow->arp_tha, nd_key->nd_tll, ETH_ADDR_LEN);
/* A nonzero ND mask requires exact-match ICMPv6 type and code. */
3604 if (!is_all_zeros(nd_key, sizeof *nd_key) &&
3605 (flow->tp_src != htons(0xffff) ||
3606 flow->tp_dst != htons(0xffff))) {
3607 return ODP_FIT_ERROR;
3609 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
/* A nonzero L4 port mask requires an exact-match nw_proto. */
3616 if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
3617 if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
3618 return ODP_FIT_ERROR;
3620 expected_attrs |= UINT64_C(1) << expected_bit;
3625 return check_expectations(present_attrs, out_of_range_attr, expected_attrs,
3629 /* Parse 802.1Q header then encapsulated L3 attributes. */
3630 static enum odp_key_fitness
3631 parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
3632 uint64_t present_attrs, int out_of_range_attr,
3633 uint64_t expected_attrs, struct flow *flow,
3634 const struct nlattr *key, size_t key_len,
3635 const struct flow *src_flow)
3637 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3638 bool is_mask = src_flow != flow;
/* OVS_KEY_ATTR_ENCAP nests the attributes of the encapsulated frame. */
3640 const struct nlattr *encap
3641 = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
3642 ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
3643 enum odp_key_fitness encap_fitness;
3644 enum odp_key_fitness fitness;
3646 /* Calculate fitness of outer attributes. */
3648 expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
3649 (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
3651 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
3652 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
3654 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
3655 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
3658 fitness = check_expectations(present_attrs, out_of_range_attr,
3659 expected_attrs, key, key_len);
3662 * Remove the TPID from dl_type since it's not the real Ethertype. */
3663 flow->dl_type = htons(0);
3664 flow->vlan_tci = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)
3665 ? nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN])
3668 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN))) {
3669 return ODP_FIT_TOO_LITTLE;
3670 } else if (flow->vlan_tci == htons(0)) {
3671 /* Corner case for a truncated 802.1Q header. */
3672 if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
3673 return ODP_FIT_TOO_MUCH;
/* A nonzero TCI must carry the CFI bit (set by the serializer). */
3676 } else if (!(flow->vlan_tci & htons(VLAN_CFI))) {
3677 VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
3678 "but CFI bit is not set", ntohs(flow->vlan_tci));
3679 return ODP_FIT_ERROR;
3682 if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
3687 /* Now parse the encapsulated attributes. */
3688 if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
3689 attrs, &present_attrs, &out_of_range_attr)) {
3690 return ODP_FIT_ERROR;
3694 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow, src_flow)) {
3695 return ODP_FIT_ERROR;
3697 encap_fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
3698 expected_attrs, flow, key, key_len,
3701 /* The overall fitness is the worse of the outer and inner attributes. */
3702 return MAX(fitness, encap_fitness);
/* Common worker for odp_flow_key_to_flow() and odp_flow_key_to_mask():
 * zeroes '*flow', parses metadata attributes, then the Ethernet header
 * and either the 802.1Q or plain L2.5+ path. Mask parsing is signalled
 * by 'flow' != 'src_flow', in which case 'src_flow' is the already-parsed
 * key used to interpret the mask. */
3705 static enum odp_key_fitness
3706 odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
3707 struct flow *flow, const struct flow *src_flow)
3709 const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
3710 uint64_t expected_attrs;
3711 uint64_t present_attrs;
3712 int out_of_range_attr;
3713 bool is_mask = src_flow != flow;
3715 memset(flow, 0, sizeof *flow);
3717 /* Parse attributes. */
3718 if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
3719 &out_of_range_attr)) {
3720 return ODP_FIT_ERROR;
/* Metadata attributes. */
3725 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) {
3726 flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]);
3727 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID;
3728 } else if (is_mask) {
3729 /* Always exact match recirc_id if it is not specified. */
3730 flow->recirc_id = UINT32_MAX;
3733 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_DP_HASH)) {
3734 flow->dp_hash = nl_attr_get_u32(attrs[OVS_KEY_ATTR_DP_HASH]);
3735 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_DP_HASH;
3737 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
3738 flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
3739 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
3742 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
3743 flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
3744 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
3747 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
3748 enum odp_key_fitness res;
3750 res = odp_tun_key_from_attr(attrs[OVS_KEY_ATTR_TUNNEL], &flow->tunnel);
3751 if (res == ODP_FIT_ERROR) {
3752 return ODP_FIT_ERROR;
3753 } else if (res == ODP_FIT_PERFECT) {
3754 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
3758 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
3759 flow->in_port.odp_port
3760 = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
3761 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
3762 } else if (!is_mask) {
3763 flow->in_port.odp_port = ODPP_NONE;
3766 /* Ethernet header. */
3767 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
3768 const struct ovs_key_ethernet *eth_key;
3770 eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
3771 put_ethernet_key(eth_key, flow);
3773 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
3777 expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
3780 /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
3781 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
3783 return ODP_FIT_ERROR;
/* VLAN-tagged frames (CFI bit in the key, or 802.1Q TPID) take the
 * 802.1Q path; everything else continues with L2.5+ directly. */
3787 ? (src_flow->vlan_tci & htons(VLAN_CFI)) != 0
3788 : src_flow->dl_type == htons(ETH_TYPE_VLAN)) {
3789 return parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
3790 expected_attrs, flow, key, key_len, src_flow);
3793 flow->vlan_tci = htons(0xffff);
3794 if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
3795 flow->vlan_tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
3796 expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
3799 return parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
3800 expected_attrs, flow, key, key_len, src_flow);
3803 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
3804 * structure in 'flow'. Returns an ODP_FIT_* value that indicates how well
3805 * 'key' fits our expectations for what a flow key should contain.
3807 * The 'in_port' will be the datapath's understanding of the port. The
3808 * caller will need to translate with odp_port_to_ofp_port() if the
3809 * OpenFlow port is needed.
3811 * This function doesn't take the packet itself as an argument because none of
3812 * the currently understood OVS_KEY_ATTR_* attributes require it. Currently,
3813 * it is always possible to infer which additional attribute(s) should appear
3814 * by looking at the attributes for lower-level protocols, e.g. if the network
3815 * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
3816 * know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
3817 * must be absent. */
3818 enum odp_key_fitness
3819 odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
/* Passing 'flow' as both destination and source selects key (not mask)
 * parsing in the worker. */
3822 return odp_flow_key_to_flow__(key, key_len, flow, flow);
3825 /* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a mask
3826 * structure in 'mask'. 'flow' must be a previously translated flow
3827 * corresponding to 'mask'. Returns an ODP_FIT_* value that indicates how well
3828 * 'key' fits our expectations for what a flow key should contain. */
3829 enum odp_key_fitness
3830 odp_flow_key_to_mask(const struct nlattr *key, size_t key_len,
3831 struct flow *mask, const struct flow *flow)
/* Distinct 'mask' and 'flow' pointers select mask parsing in the worker. */
3833 return odp_flow_key_to_flow__(key, key_len, mask, flow);
3836 /* Returns 'fitness' as a string, for use in debug messages. */
3838 odp_key_fitness_to_string(enum odp_key_fitness fitness)
/* Maps each ODP_FIT_* value to a short fixed string; the string
 * literals for the first two cases are in lines not visible here. */
3841 case ODP_FIT_PERFECT:
3843 case ODP_FIT_TOO_MUCH:
3845 case ODP_FIT_TOO_LITTLE:
3846 return "too_little";
3854 /* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
3855 * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
3856 * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
3857 * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
3858 * null, then the return value is not meaningful.) */
3860 odp_put_userspace_action(uint32_t pid,
3861 const void *userdata, size_t userdata_size,
3862 odp_port_t tunnel_out_port,
3863 struct ofpbuf *odp_actions)
3865 size_t userdata_ofs;
3868 offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
3869 nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
/* Record where the userdata payload will start (past the attr header). */
3871 userdata_ofs = odp_actions->size + NLA_HDRLEN;
3873 /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
3874 * module before Linux 3.10 required the userdata to be exactly 8 bytes
3877 * - The kernel rejected shorter userdata with -ERANGE.
3879 * - The kernel silently dropped userdata beyond the first 8 bytes.
3881 * Thus, for maximum compatibility, always put at least 8 bytes. (We
3882 * separately disable features that required more than 8 bytes.) */
3883 memcpy(nl_msg_put_unspec_zero(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
3884 MAX(8, userdata_size)),
3885 userdata, userdata_size);
/* Optional egress tunnel port for sampled/encapsulated upcalls. */
3889 if (tunnel_out_port != ODPP_NONE) {
3890 nl_msg_put_odp_port(odp_actions, OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,
3893 nl_msg_end_nested(odp_actions, offset);
3895 return userdata_ofs;
3899 odp_put_tunnel_action(const struct flow_tnl *tunnel,
3900 struct ofpbuf *odp_actions)
3902 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
3903 tun_key_to_attr(odp_actions, tunnel);
3904 nl_msg_end_nested(odp_actions, offset);
3908 odp_put_tnl_push_action(struct ofpbuf *odp_actions,
3909 struct ovs_action_push_tnl *data)
3911 int size = offsetof(struct ovs_action_push_tnl, header);
3913 size += data->header_len;
3914 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_TUNNEL_PUSH, data, size);
3918 /* The commit_odp_actions() function and its helpers. */
3921 commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
3922 const void *key, size_t key_size)
3924 size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
3925 nl_msg_put_unspec(odp_actions, key_type, key, key_size);
3926 nl_msg_end_nested(odp_actions, offset);
3929 /* Masked set actions have a mask following the data within the netlink
3930 * attribute. The unmasked bits in the data will be cleared as the data
3931 * is copied to the action. */
3933 commit_masked_set_action(struct ofpbuf *odp_actions,
3934 enum ovs_key_attr key_type,
3935 const void *key_, const void *mask_, size_t key_size)
3937 size_t offset = nl_msg_start_nested(odp_actions,
3938 OVS_ACTION_ATTR_SET_MASKED);
3939 char *data = nl_msg_put_unspec_uninit(odp_actions, key_type, key_size * 2);
3940 const char *key = key_, *mask = mask_;
3942 memcpy(data + key_size, mask, key_size);
3943 /* Clear unmasked bits while copying. */
3944 while (key_size--) {
3945 *data++ = *key++ & *mask++;
3947 nl_msg_end_nested(odp_actions, offset);
3950 /* If any of the flow key data that ODP actions can modify are different in
3951 * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
3952 * 'odp_actions' that changes the flow tunneling information in key from
3953 * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
3954 * same way. In other words, operates the same as commit_odp_actions(), but
3955 * only on tunneling information. */
3957 commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
3958 struct ofpbuf *odp_actions)
3960 /* A valid IPV4_TUNNEL must have non-zero ip_dst. */
3961 if (flow->tunnel.ip_dst) {
3962 if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
3965 memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
3966 odp_put_tunnel_action(&base->tunnel, odp_actions);
3971 commit(enum ovs_key_attr attr, bool use_masked_set,
3972 const void *key, void *base, void *mask, size_t size,
3973 struct ofpbuf *odp_actions)
3975 if (memcmp(key, base, size)) {
3976 bool fully_masked = odp_mask_is_exact(attr, mask, size);
3978 if (use_masked_set && !fully_masked) {
3979 commit_masked_set_action(odp_actions, attr, key, mask, size);
3981 if (!fully_masked) {
3982 memset(mask, 0xff, size);
3984 commit_set_action(odp_actions, attr, key, size);
3986 memcpy(base, key, size);
3989 /* Mask bits are set when we have either read or set the corresponding
3990 * values. Masked bits will be exact-matched, no need to set them
3991 * if the value did not actually change. */
3997 get_ethernet_key(const struct flow *flow, struct ovs_key_ethernet *eth)
3999 memcpy(eth->eth_src, flow->dl_src, ETH_ADDR_LEN);
4000 memcpy(eth->eth_dst, flow->dl_dst, ETH_ADDR_LEN);
4004 put_ethernet_key(const struct ovs_key_ethernet *eth, struct flow *flow)
4006 memcpy(flow->dl_src, eth->eth_src, ETH_ADDR_LEN);
4007 memcpy(flow->dl_dst, eth->eth_dst, ETH_ADDR_LEN);
4011 commit_set_ether_addr_action(const struct flow *flow, struct flow *base_flow,
4012 struct ofpbuf *odp_actions,
4013 struct flow_wildcards *wc,
4016 struct ovs_key_ethernet key, base, mask;
4018 get_ethernet_key(flow, &key);
4019 get_ethernet_key(base_flow, &base);
4020 get_ethernet_key(&wc->masks, &mask);
4022 if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
4023 &key, &base, &mask, sizeof key, odp_actions)) {
4024 put_ethernet_key(&base, base_flow);
4025 put_ethernet_key(&mask, &wc->masks);
4030 pop_vlan(struct flow *base,
4031 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
4033 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
4035 if (base->vlan_tci & htons(VLAN_CFI)) {
4036 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
4042 commit_vlan_action(ovs_be16 vlan_tci, struct flow *base,
4043 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
4045 if (base->vlan_tci == vlan_tci) {
4049 pop_vlan(base, odp_actions, wc);
4050 if (vlan_tci & htons(VLAN_CFI)) {
4051 struct ovs_action_push_vlan vlan;
4053 vlan.vlan_tpid = htons(ETH_TYPE_VLAN);
4054 vlan.vlan_tci = vlan_tci;
4055 nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
4056 &vlan, sizeof vlan);
4058 base->vlan_tci = vlan_tci;
4061 /* Wildcarding already done at action translation time. */
4063 commit_mpls_action(const struct flow *flow, struct flow *base,
4064 struct ofpbuf *odp_actions)
4066 int base_n = flow_count_mpls_labels(base, NULL);
4067 int flow_n = flow_count_mpls_labels(flow, NULL);
4068 int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
4071 while (base_n > common_n) {
4072 if (base_n - 1 == common_n && flow_n > common_n) {
4073 /* If there is only one more LSE in base than there are common
4074 * between base and flow; and flow has at least one more LSE than
4075 * is common then the topmost LSE of base may be updated using
4077 struct ovs_key_mpls mpls_key;
4079 mpls_key.mpls_lse = flow->mpls_lse[flow_n - base_n];
4080 commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
4081 &mpls_key, sizeof mpls_key);
4082 flow_set_mpls_lse(base, 0, mpls_key.mpls_lse);
4085 /* Otherwise, if there more LSEs in base than are common between
4086 * base and flow then pop the topmost one. */
4090 /* If all the LSEs are to be popped and this is not the outermost
4091 * LSE then use ETH_TYPE_MPLS as the ethertype parameter of the
4092 * POP_MPLS action instead of flow->dl_type.
4094 * This is because the POP_MPLS action requires its ethertype
4095 * argument to be an MPLS ethernet type but in this case
4096 * flow->dl_type will be a non-MPLS ethernet type.
4098 * When the final POP_MPLS action occurs it use flow->dl_type and
4099 * the and the resulting packet will have the desired dl_type. */
4100 if ((!eth_type_mpls(flow->dl_type)) && base_n > 1) {
4101 dl_type = htons(ETH_TYPE_MPLS);
4103 dl_type = flow->dl_type;
4105 nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
4106 popped = flow_pop_mpls(base, base_n, flow->dl_type, NULL);
4112 /* If, after the above popping and setting, there are more LSEs in flow
4113 * than base then some LSEs need to be pushed. */
4114 while (base_n < flow_n) {
4115 struct ovs_action_push_mpls *mpls;
4117 mpls = nl_msg_put_unspec_zero(odp_actions,
4118 OVS_ACTION_ATTR_PUSH_MPLS,
4120 mpls->mpls_ethertype = flow->dl_type;
4121 mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
4122 flow_push_mpls(base, base_n, mpls->mpls_ethertype, NULL);
4123 flow_set_mpls_lse(base, 0, mpls->mpls_lse);
4129 get_ipv4_key(const struct flow *flow, struct ovs_key_ipv4 *ipv4, bool is_mask)
4131 ipv4->ipv4_src = flow->nw_src;
4132 ipv4->ipv4_dst = flow->nw_dst;
4133 ipv4->ipv4_proto = flow->nw_proto;
4134 ipv4->ipv4_tos = flow->nw_tos;
4135 ipv4->ipv4_ttl = flow->nw_ttl;
4136 ipv4->ipv4_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
4140 put_ipv4_key(const struct ovs_key_ipv4 *ipv4, struct flow *flow, bool is_mask)
4142 flow->nw_src = ipv4->ipv4_src;
4143 flow->nw_dst = ipv4->ipv4_dst;
4144 flow->nw_proto = ipv4->ipv4_proto;
4145 flow->nw_tos = ipv4->ipv4_tos;
4146 flow->nw_ttl = ipv4->ipv4_ttl;
4147 flow->nw_frag = odp_to_ovs_frag(ipv4->ipv4_frag, is_mask);
4151 commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow,
4152 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4155 struct ovs_key_ipv4 key, mask, base;
4157 /* Check that nw_proto and nw_frag remain unchanged. */
4158 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
4159 flow->nw_frag == base_flow->nw_frag);
4161 get_ipv4_key(flow, &key, false);
4162 get_ipv4_key(base_flow, &base, false);
4163 get_ipv4_key(&wc->masks, &mask, true);
4164 mask.ipv4_proto = 0; /* Not writeable. */
4165 mask.ipv4_frag = 0; /* Not writable. */
4167 if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
4169 put_ipv4_key(&base, base_flow, false);
4170 if (mask.ipv4_proto != 0) { /* Mask was changed by commit(). */
4171 put_ipv4_key(&mask, &wc->masks, true);
4177 get_ipv6_key(const struct flow *flow, struct ovs_key_ipv6 *ipv6, bool is_mask)
4179 memcpy(ipv6->ipv6_src, &flow->ipv6_src, sizeof ipv6->ipv6_src);
4180 memcpy(ipv6->ipv6_dst, &flow->ipv6_dst, sizeof ipv6->ipv6_dst);
4181 ipv6->ipv6_label = flow->ipv6_label;
4182 ipv6->ipv6_proto = flow->nw_proto;
4183 ipv6->ipv6_tclass = flow->nw_tos;
4184 ipv6->ipv6_hlimit = flow->nw_ttl;
4185 ipv6->ipv6_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
4189 put_ipv6_key(const struct ovs_key_ipv6 *ipv6, struct flow *flow, bool is_mask)
4191 memcpy(&flow->ipv6_src, ipv6->ipv6_src, sizeof flow->ipv6_src);
4192 memcpy(&flow->ipv6_dst, ipv6->ipv6_dst, sizeof flow->ipv6_dst);
4193 flow->ipv6_label = ipv6->ipv6_label;
4194 flow->nw_proto = ipv6->ipv6_proto;
4195 flow->nw_tos = ipv6->ipv6_tclass;
4196 flow->nw_ttl = ipv6->ipv6_hlimit;
4197 flow->nw_frag = odp_to_ovs_frag(ipv6->ipv6_frag, is_mask);
4201 commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow,
4202 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4205 struct ovs_key_ipv6 key, mask, base;
4207 /* Check that nw_proto and nw_frag remain unchanged. */
4208 ovs_assert(flow->nw_proto == base_flow->nw_proto &&
4209 flow->nw_frag == base_flow->nw_frag);
4211 get_ipv6_key(flow, &key, false);
4212 get_ipv6_key(base_flow, &base, false);
4213 get_ipv6_key(&wc->masks, &mask, true);
4214 mask.ipv6_proto = 0; /* Not writeable. */
4215 mask.ipv6_frag = 0; /* Not writable. */
4217 if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
4219 put_ipv6_key(&base, base_flow, false);
4220 if (mask.ipv6_proto != 0) { /* Mask was changed by commit(). */
4221 put_ipv6_key(&mask, &wc->masks, true);
4227 get_arp_key(const struct flow *flow, struct ovs_key_arp *arp)
4229 /* ARP key has padding, clear it. */
4230 memset(arp, 0, sizeof *arp);
4232 arp->arp_sip = flow->nw_src;
4233 arp->arp_tip = flow->nw_dst;
4234 arp->arp_op = htons(flow->nw_proto);
4235 memcpy(arp->arp_sha, flow->arp_sha, ETH_ADDR_LEN);
4236 memcpy(arp->arp_tha, flow->arp_tha, ETH_ADDR_LEN);
4240 put_arp_key(const struct ovs_key_arp *arp, struct flow *flow)
4242 flow->nw_src = arp->arp_sip;
4243 flow->nw_dst = arp->arp_tip;
4244 flow->nw_proto = ntohs(arp->arp_op);
4245 memcpy(flow->arp_sha, arp->arp_sha, ETH_ADDR_LEN);
4246 memcpy(flow->arp_tha, arp->arp_tha, ETH_ADDR_LEN);
4249 static enum slow_path_reason
4250 commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
4251 struct ofpbuf *odp_actions, struct flow_wildcards *wc)
4253 struct ovs_key_arp key, mask, base;
4255 get_arp_key(flow, &key);
4256 get_arp_key(base_flow, &base);
4257 get_arp_key(&wc->masks, &mask);
4259 if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
4261 put_arp_key(&base, base_flow);
4262 put_arp_key(&mask, &wc->masks);
4269 get_nd_key(const struct flow *flow, struct ovs_key_nd *nd)
4271 memcpy(nd->nd_target, &flow->nd_target, sizeof flow->nd_target);
4272 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
4273 memcpy(nd->nd_sll, flow->arp_sha, ETH_ADDR_LEN);
4274 memcpy(nd->nd_tll, flow->arp_tha, ETH_ADDR_LEN);
4278 put_nd_key(const struct ovs_key_nd *nd, struct flow *flow)
4280 memcpy(&flow->nd_target, &flow->nd_target, sizeof flow->nd_target);
4281 /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
4282 memcpy(flow->arp_sha, nd->nd_sll, ETH_ADDR_LEN);
4283 memcpy(flow->arp_tha, nd->nd_tll, ETH_ADDR_LEN);
4286 static enum slow_path_reason
4287 commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
4288 struct ofpbuf *odp_actions,
4289 struct flow_wildcards *wc, bool use_masked)
4291 struct ovs_key_nd key, mask, base;
4293 get_nd_key(flow, &key);
4294 get_nd_key(base_flow, &base);
4295 get_nd_key(&wc->masks, &mask);
4297 if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
4299 put_nd_key(&base, base_flow);
4300 put_nd_key(&mask, &wc->masks);
4307 static enum slow_path_reason
4308 commit_set_nw_action(const struct flow *flow, struct flow *base,
4309 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4312 /* Check if 'flow' really has an L3 header. */
4313 if (!flow->nw_proto) {
4317 switch (ntohs(base->dl_type)) {
4319 commit_set_ipv4_action(flow, base, odp_actions, wc, use_masked);
4323 commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
4324 return commit_set_nd_action(flow, base, odp_actions, wc, use_masked);
4327 return commit_set_arp_action(flow, base, odp_actions, wc);
/* TCP, UDP, and SCTP keys have the same layout (a 16-bit source port followed
 * by a 16-bit destination port), which lets the code below handle all three
 * through 'union ovs_key_tp'.  This compile-time check guards that
 * assumption. */
BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
                  sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
4338 get_tp_key(const struct flow *flow, union ovs_key_tp *tp)
4340 tp->tcp.tcp_src = flow->tp_src;
4341 tp->tcp.tcp_dst = flow->tp_dst;
4345 put_tp_key(const union ovs_key_tp *tp, struct flow *flow)
4347 flow->tp_src = tp->tcp.tcp_src;
4348 flow->tp_dst = tp->tcp.tcp_dst;
4352 commit_set_port_action(const struct flow *flow, struct flow *base_flow,
4353 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4356 enum ovs_key_attr key_type;
4357 union ovs_key_tp key, mask, base;
4359 /* Check if 'flow' really has an L3 header. */
4360 if (!flow->nw_proto) {
4364 if (!is_ip_any(base_flow)) {
4368 if (flow->nw_proto == IPPROTO_TCP) {
4369 key_type = OVS_KEY_ATTR_TCP;
4370 } else if (flow->nw_proto == IPPROTO_UDP) {
4371 key_type = OVS_KEY_ATTR_UDP;
4372 } else if (flow->nw_proto == IPPROTO_SCTP) {
4373 key_type = OVS_KEY_ATTR_SCTP;
4378 get_tp_key(flow, &key);
4379 get_tp_key(base_flow, &base);
4380 get_tp_key(&wc->masks, &mask);
4382 if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
4384 put_tp_key(&base, base_flow);
4385 put_tp_key(&mask, &wc->masks);
4390 commit_set_priority_action(const struct flow *flow, struct flow *base_flow,
4391 struct ofpbuf *odp_actions,
4392 struct flow_wildcards *wc,
4395 uint32_t key, mask, base;
4397 key = flow->skb_priority;
4398 base = base_flow->skb_priority;
4399 mask = wc->masks.skb_priority;
4401 if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
4402 sizeof key, odp_actions)) {
4403 base_flow->skb_priority = base;
4404 wc->masks.skb_priority = mask;
4409 commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow,
4410 struct ofpbuf *odp_actions,
4411 struct flow_wildcards *wc,
4414 uint32_t key, mask, base;
4416 key = flow->pkt_mark;
4417 base = base_flow->pkt_mark;
4418 mask = wc->masks.pkt_mark;
4420 if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
4421 sizeof key, odp_actions)) {
4422 base_flow->pkt_mark = base;
4423 wc->masks.pkt_mark = mask;
4427 /* If any of the flow key data that ODP actions can modify are different in
4428 * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
4429 * key from 'base' into 'flow', and then changes 'base' the same way. Does not
4430 * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
4431 * in addition to this function if needed. Sets fields in 'wc' that are
4432 * used as part of the action.
4434 * Returns a reason to force processing the flow's packets into the userspace
4435 * slow path, if there is one, otherwise 0. */
4436 enum slow_path_reason
4437 commit_odp_actions(const struct flow *flow, struct flow *base,
4438 struct ofpbuf *odp_actions, struct flow_wildcards *wc,
4441 enum slow_path_reason slow;
4443 commit_set_ether_addr_action(flow, base, odp_actions, wc, use_masked);
4444 slow = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
4445 commit_set_port_action(flow, base, odp_actions, wc, use_masked);
4446 commit_mpls_action(flow, base, odp_actions);
4447 commit_vlan_action(flow->vlan_tci, base, odp_actions, wc);
4448 commit_set_priority_action(flow, base, odp_actions, wc, use_masked);
4449 commit_set_pkt_mark_action(flow, base, odp_actions, wc, use_masked);