2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "dpif-netlink.h"
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
31 #include <sys/epoll.h>
36 #include "dpif-provider.h"
37 #include "dynamic-string.h"
39 #include "fat-rwlock.h"
41 #include "netdev-linux.h"
42 #include "netdev-vport.h"
43 #include "netlink-notifier.h"
44 #include "netlink-socket.h"
49 #include "poll-loop.h"
54 #include "unaligned.h"
58 VLOG_DEFINE_THIS_MODULE(dpif_netlink);
/* Upper bound on datapath port numbers; used to size per-port channel arrays. */
64 enum { MAX_PORTS = USHRT_MAX };
66 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
67 * missing if we have old headers. */
68 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
/* In-memory representation of an OVS_DP_* Generic Netlink message (datapath
 * get/set/new/del).  Pointer members alias attribute payloads inside a
 * received ofpbuf, so they are only valid while that buffer lives.
 * NOTE(review): interior lines (e.g. cmd/dp_ifindex fields and the closing
 * brace) appear elided in this extract — confirm against upstream. */
70 struct dpif_netlink_dp {
71 /* Generic Netlink header. */
74 /* struct ovs_header. */
78 const char *name; /* OVS_DP_ATTR_NAME. */
79 const uint32_t *upcall_pid; /* OVS_DP_ATTR_UPCALL_PID. */
80 uint32_t user_features; /* OVS_DP_ATTR_USER_FEATURES */
81 const struct ovs_dp_stats *stats; /* OVS_DP_ATTR_STATS. */
82 const struct ovs_dp_megaflow_stats *megaflow_stats;
83 /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
/* Helpers for encoding/decoding and transacting dpif_netlink_dp messages.
 * The *_transact() reply, if requested, points into '*bufp', which the
 * caller must eventually free. */
86 static void dpif_netlink_dp_init(struct dpif_netlink_dp *);
87 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *,
88 const struct ofpbuf *);
89 static void dpif_netlink_dp_dump_start(struct nl_dump *);
90 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
91 struct dpif_netlink_dp *reply,
92 struct ofpbuf **bufp);
93 static int dpif_netlink_dp_get(const struct dpif *,
94 struct dpif_netlink_dp *reply,
95 struct ofpbuf **bufp);
/* In-memory representation of an OVS_FLOW_* Generic Netlink message.
 * Pointer members alias attribute payloads in a received ofpbuf and remain
 * valid only while that buffer does.
 * NOTE(review): several field lines are elided in this extract. */
97 struct dpif_netlink_flow {
98 /* Generic Netlink header. */
101 /* struct ovs_header. */
102 unsigned int nlmsg_flags;
107 * The 'stats' member points to 64-bit data that might only be aligned on
108 * 32-bit boundaries, so get_unaligned_u64() should be used to access its
111 * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
112 * the Netlink version of the command, even if actions_len is zero. */
113 const struct nlattr *key; /* OVS_FLOW_ATTR_KEY. */
115 const struct nlattr *mask; /* OVS_FLOW_ATTR_MASK. */
117 const struct nlattr *actions; /* OVS_FLOW_ATTR_ACTIONS. */
119 ovs_u128 ufid; /* OVS_FLOW_ATTR_FLOW_ID. */
120 bool ufid_present; /* Is there a UFID? */
121 bool ufid_terse; /* Skip serializing key/mask/acts? */
122 const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
123 const uint8_t *tcp_flags; /* OVS_FLOW_ATTR_TCP_FLAGS. */
124 const ovs_32aligned_u64 *used; /* OVS_FLOW_ATTR_USED. */
125 bool clear; /* OVS_FLOW_ATTR_CLEAR. */
126 bool probe; /* OVS_FLOW_ATTR_PROBE. */
/* Encode/decode/transact helpers for flow messages, plus UFID feature
 * probes.  A *_transact() reply points into '*bufp'; caller frees it. */
129 static void dpif_netlink_flow_init(struct dpif_netlink_flow *);
130 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *,
131 const struct ofpbuf *);
132 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *,
134 static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
135 struct dpif_netlink_flow *reply,
136 struct ofpbuf **bufp);
137 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
138 struct dpif_flow_stats *);
139 static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *,
140 const struct dpif_netlink_flow *);
141 static bool dpif_netlink_check_ufid__(struct dpif *dpif);
142 static bool dpif_netlink_check_ufid(struct dpif *dpif);
144 /* One of the dpif channels between the kernel and userspace. */
145 struct dpif_channel {
146 struct nl_sock *sock; /* Netlink socket. */
147 long long int last_poll; /* Last time this channel was polled. */
151 #define VPORT_SOCK_POOL_SIZE 1
152 /* On Windows, there is no native support for epoll. There are equivalent
153 * interfaces though, that are not used currently. For simplicity, a pool of
154 * netlink sockets is used. Each socket is represented by 'struct
155 * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be
156 * sharing the same socket. In the future, we can add a reference count and
158 struct dpif_windows_vport_sock {
159 struct nl_sock *nl_sock; /* netlink socket. */
/* Per-upcall-handler-thread state: one channel per datapath port plus the
 * epoll machinery used to wait on all of them at once (Linux), or the
 * shared socket pool (Windows). */
163 struct dpif_handler {
164 struct dpif_channel *channels;/* Array of channels for each handler. */
165 struct epoll_event *epoll_events;
166 int epoll_fd; /* epoll fd that includes channel socks. */
167 int n_events; /* Num events returned by epoll_wait(). */
168 int event_offset; /* Offset into 'epoll_events'. */
171 /* Pool of sockets. */
172 struct dpif_windows_vport_sock *vport_sock_pool;
173 size_t last_used_pool_idx; /* Index to aid in allocating a
174 socket in the pool to a port. */
178 /* Datapath interface for the openvswitch Linux kernel module. */
179 struct dpif_netlink {
183 /* Upcall messages. */
184 struct fat_rwlock upcall_lock;
185 struct dpif_handler *handlers;
186 uint32_t n_handlers; /* Num of upcall handlers. */
187 int uc_array_size; /* Size of 'handler->channels' and */
188 /* 'handler->epoll_events'. */
190 /* Change notification. */
191 struct nl_sock *port_notifier; /* vport multicast group subscriber. */
192 bool refresh_channels;
194 /* If the datapath supports indexing flows using unique identifiers, then
195 * we can reduce the size of netlink messages by omitting fields like the
196 * flow key during flow operations. */
/* Forward declarations and file-scope state shared by the functions below. */
200 static void report_loss(struct dpif_netlink *, struct dpif_channel *,
201 uint32_t ch_idx, uint32_t handler_id);
/* Shared rate limiter for error-path logging throughout this module. */
203 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
205 /* Generic Netlink family numbers for OVS.
207 * Initialized by dpif_netlink_init(). */
208 static int ovs_datapath_family;
209 static int ovs_vport_family;
210 static int ovs_flow_family;
211 static int ovs_packet_family;
213 /* Generic Netlink multicast groups for OVS.
215 * Initialized by dpif_netlink_init(). */
216 static unsigned int ovs_vport_mcgroup;
218 static int dpif_netlink_init(void);
219 static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
220 static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
221 odp_port_t port_no, uint32_t hash);
222 static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
223 static int dpif_netlink_refresh_channels(struct dpif_netlink *,
224 uint32_t n_handlers);
225 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
227 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *,
228 const struct ofpbuf *);
/* Downcasts generic 'dpif' to its dpif_netlink container, asserting first
 * that it really belongs to the netlink dpif class. */
230 static struct dpif_netlink *
231 dpif_netlink_cast(const struct dpif *dpif)
233 dpif_assert_class(dpif, &dpif_netlink_class);
234 return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
/* Adds the name of every kernel datapath to 'all_dps' by running an
 * OVS_DP_CMD_GET dump.  Returns the nl_dump_done() status (0 on success). */
238 dpif_netlink_enumerate(struct sset *all_dps,
239 const struct dpif_class *dpif_class OVS_UNUSED)
242 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
243 struct ofpbuf msg, buf;
/* Lazy one-time lookup of the OVS genetlink families; bail on failure
 * (early-return lines elided in this extract). */
246 error = dpif_netlink_init();
251 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
252 dpif_netlink_dp_dump_start(&dump);
253 while (nl_dump_next(&dump, &msg, &buf)) {
254 struct dpif_netlink_dp dp;
/* Only well-formed datapath records contribute a name. */
256 if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
257 sset_add(all_dps, dp.name);
261 return nl_dump_done(&dump);
/* Opens (or, if 'create', creates) the kernel datapath named 'name' and on
 * success stores the new dpif in '*dpifp'.  When not creating, the same
 * request with OVS_DP_CMD_SET is used so user features are (re)reported. */
265 dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
266 bool create, struct dpif **dpifp)
268 struct dpif_netlink_dp dp_request, dp;
273 error = dpif_netlink_init();
278 /* Create or look up datapath. */
279 dpif_netlink_dp_init(&dp_request);
281 dp_request.cmd = OVS_DP_CMD_NEW;
283 dp_request.upcall_pid = &upcall_pid;
285 /* Use OVS_DP_CMD_SET to report user features */
286 dp_request.cmd = OVS_DP_CMD_SET;
288 dp_request.name = name;
/* Ask the kernel for unaligned Netlink attributes and per-vport upcall
 * PID arrays; both reduce message size / enable multi-handler dispatch. */
289 dp_request.user_features |= OVS_DP_F_UNALIGNED;
290 dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
291 error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
296 error = open_dpif(&dp, dpifp);
/* Wraps a freshly transacted datapath reply 'dp' in a new dpif_netlink
 * object and hands it back through '*dpifp'.  Probes once, at open time,
 * whether the kernel supports flow UFIDs. */
302 open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp)
304 struct dpif_netlink *dpif;
306 dpif = xzalloc(sizeof *dpif);
307 dpif->port_notifier = NULL;
308 fat_rwlock_init(&dpif->upcall_lock);
310 dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name,
311 dp->dp_ifindex, dp->dp_ifindex);
313 dpif->dp_ifindex = dp->dp_ifindex;
314 dpif->ufid_supported = dpif_netlink_check_ufid__(&dpif->dpif);
315 *dpifp = &dpif->dpif;
320 /* Destroys the netlink sockets pointed by the elements in 'socksp'
321 * and frees the 'socksp'. */
323 vport_del_socksp__(struct nl_sock **socksp, uint32_t n_socks)
327 for (i = 0; i < n_socks; i++) {
328 nl_sock_destroy(socksp[i]);
334 /* Creates an array of netlink sockets. Returns an array of the
335 * corresponding pointers. Records the error in 'error'. */
336 static struct nl_sock **
337 vport_create_socksp__(uint32_t n_socks, int *error)
339 struct nl_sock **socksp = xzalloc(n_socks * sizeof *socksp);
342 for (i = 0; i < n_socks; i++) {
343 *error = nl_sock_create(NETLINK_GENERIC, &socksp[i]);
/* On any creation failure, tear down everything created so far
 * (error-branch lines elided in this extract). */
352 vport_del_socksp__(socksp, n_socks);
359 vport_delete_sock_pool(struct dpif_handler *handler)
360 OVS_REQ_WRLOCK(dpif->upcall_lock)
362 if (handler->vport_sock_pool) {
364 struct dpif_windows_vport_sock *sock_pool =
365 handler->vport_sock_pool;
367 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
368 if (sock_pool[i].nl_sock) {
369 nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
370 nl_sock_destroy(sock_pool[i].nl_sock);
371 sock_pool[i].nl_sock = NULL;
375 free(handler->vport_sock_pool);
376 handler->vport_sock_pool = NULL;
381 vport_create_sock_pool(struct dpif_handler *handler)
382 OVS_REQ_WRLOCK(dpif->upcall_lock)
384 struct dpif_windows_vport_sock *sock_pool;
388 sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
389 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
390 error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
395 /* Enable the netlink socket to receive packets. This is equivalent to
396 * calling nl_sock_join_mcgroup() to receive events. */
397 error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
403 handler->vport_sock_pool = sock_pool;
404 handler->last_used_pool_idx = 0;
408 vport_delete_sock_pool(handler);
412 /* Returns an array of pointers to netlink sockets. The sockets are picked
413  * from a pool. Records the error in 'error'. */
414 static struct nl_sock **
415 vport_create_socksp_windows(struct dpif_netlink *dpif, int *error)
416     OVS_REQ_WRLOCK(dpif->upcall_lock)
418     uint32_t n_socks = dpif->n_handlers;
419     struct nl_sock **socksp;
/* Windows currently supports at most one upcall handler thread. */
422     ovs_assert(n_socks <= 1);
423     socksp = xzalloc(n_socks * sizeof *socksp);
425     /* Pick netlink sockets to use in a round-robin fashion from each
426      * handler's pool of sockets. */
427     for (i = 0; i < n_socks; i++) {
428         struct dpif_handler *handler = &dpif->handlers[i];
429         struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
430         size_t index = handler->last_used_pool_idx;
432         /* A pool of sockets is allocated when the handler is initialized. */
433         if (sock_pool == NULL) {
439         ovs_assert(index < VPORT_SOCK_POOL_SIZE);
/* Fixed: this assignment was duplicated on consecutive lines (harmless
 * but redundant); a single assignment is sufficient. */
440         socksp[i] = sock_pool[index].nl_sock;
442         ovs_assert(socksp[i]);
/* Advance the round-robin cursor, wrapping at the pool size. */
443         index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
444         handler->last_used_pool_idx = index;
/* Windows only: releases the array of pool-borrowed socket pointers.
 * (Body elided in this extract; the pooled sockets themselves are owned by
 * the handler's pool, presumably only the array is freed — verify.) */
451 vport_del_socksp_windows(struct dpif_netlink *dpif, struct nl_sock **socksp)
/* Platform dispatcher: picks the Windows pool-based or the generic
 * create-per-handler implementation. */
457 static struct nl_sock **
458 vport_create_socksp(struct dpif_netlink *dpif, int *error)
461 return vport_create_socksp_windows(dpif, error);
463 return vport_create_socksp__(dpif->n_handlers, error);
/* Platform dispatcher for the matching delete operation. */
468 vport_del_socksp(struct dpif_netlink *dpif, struct nl_sock **socksp)
471 vport_del_socksp_windows(dpif, socksp);
473 vport_del_socksp__(socksp, dpif->n_handlers);
477 /* Given the array of pointers to netlink sockets 'socksp', returns
478 * the array of corresponding pids. If the 'socksp' is NULL, returns
479 * a single-element array of value 0. */
481 vport_socksp_to_pids(struct nl_sock **socksp, uint32_t n_socks)
/* NULL socksp: the kernel interprets pid 0 as "no upcalls". */
486 pids = xzalloc(sizeof *pids);
490 pids = xzalloc(n_socks * sizeof *pids);
491 for (i = 0; i < n_socks; i++) {
492 pids[i] = nl_sock_pid(socksp[i]);
499 /* Given the port number 'port_idx', extracts the pids of netlink sockets
500 * associated to the port and assigns it to 'upcall_pids'. */
502 vport_get_pids(struct dpif_netlink *dpif, uint32_t port_idx,
503 uint32_t **upcall_pids)
508 /* Since the nl_sock can only be assigned in either all
509 * or none "dpif->handlers" channels, the following check
511 if (!dpif->handlers[0].channels[port_idx].sock) {
514 ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
/* One pid per handler; caller takes ownership of the array. */
516 pids = xzalloc(dpif->n_handlers * sizeof *pids);
518 for (i = 0; i < dpif->n_handlers; i++) {
519 pids[i] = nl_sock_pid(dpif->handlers[i].channels[port_idx].sock);
/* Installs one upcall channel per handler for port 'port_no', registering
 * each socket in 'socksp' with the corresponding handler's epoll set.
 * Grows the per-handler channel/event arrays if 'port_no' exceeds the
 * current 'uc_array_size'.  On epoll failure, rolls back every channel
 * already added for this port. */
528 vport_add_channels(struct dpif_netlink *dpif, odp_port_t port_no,
529 struct nl_sock **socksp)
531 struct epoll_event event;
532 uint32_t port_idx = odp_to_u32(port_no);
/* No handlers configured yet: nothing to attach to. */
536 if (dpif->handlers == NULL) {
540 /* We assume that the datapath densely chooses port numbers, which can
541 * therefore be used as an index into 'channels' and 'epoll_events' of
542 * 'dpif->handler'. */
543 if (port_idx >= dpif->uc_array_size) {
544 uint32_t new_size = port_idx + 1;
546 if (new_size > MAX_PORTS) {
547 VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
548 dpif_name(&dpif->dpif), port_no);
552 for (i = 0; i < dpif->n_handlers; i++) {
553 struct dpif_handler *handler = &dpif->handlers[i];
555 handler->channels = xrealloc(handler->channels,
556 new_size * sizeof *handler->channels);
/* Newly exposed slots must read as "no socket". */
558 for (j = dpif->uc_array_size; j < new_size; j++) {
559 handler->channels[j].sock = NULL;
562 handler->epoll_events = xrealloc(handler->epoll_events,
563 new_size * sizeof *handler->epoll_events);
566 dpif->uc_array_size = new_size;
/* epoll payload carries the port index so the handler can find the
 * channel when the event fires. */
569 memset(&event, 0, sizeof event);
570 event.events = EPOLLIN;
571 event.data.u32 = port_idx;
573 for (i = 0; i < dpif->n_handlers; i++) {
574 struct dpif_handler *handler = &dpif->handlers[i];
577 if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(socksp[i]),
583 dpif->handlers[i].channels[port_idx].sock = socksp[i];
584 dpif->handlers[i].channels[port_idx].last_poll = LLONG_MIN;
/* Error path: undo the epoll registrations and channel slots already
 * set up for handlers [0, i). */
590 for (j = 0; j < i; j++) {
592 epoll_ctl(dpif->handlers[j].epoll_fd, EPOLL_CTL_DEL,
593 nl_sock_fd(socksp[j]), NULL);
595 dpif->handlers[j].channels[port_idx].sock = NULL;
/* Removes and destroys the upcall channels for 'port_no' in every handler,
 * unregistering each socket from its handler's epoll set first.  A no-op if
 * the port never had channels. */
602 vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
604 uint32_t port_idx = odp_to_u32(port_no);
607 if (!dpif->handlers || port_idx >= dpif->uc_array_size) {
611 /* Since the sock can only be assigned in either all or none
612 * of "dpif->handlers" channels, the following check would
614 if (!dpif->handlers[0].channels[port_idx].sock) {
618 for (i = 0; i < dpif->n_handlers; i++) {
619 struct dpif_handler *handler = &dpif->handlers[i];
621 epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
622 nl_sock_fd(handler->channels[port_idx].sock), NULL);
623 nl_sock_destroy(handler->channels[port_idx].sock);
625 handler->channels[port_idx].sock = NULL;
/* Discard any buffered epoll results; they may reference the channel
 * we just destroyed. */
626 handler->event_offset = handler->n_events = 0;
631 destroy_all_channels(struct dpif_netlink *dpif)
632 OVS_REQ_WRLOCK(dpif->upcall_lock)
636 if (!dpif->handlers) {
640 for (i = 0; i < dpif->uc_array_size; i++ ) {
641 struct dpif_netlink_vport vport_request;
642 uint32_t upcall_pids = 0;
644 /* Since the sock can only be assigned in either all or none
645 * of "dpif->handlers" channels, the following check would
647 if (!dpif->handlers[0].channels[i].sock) {
651 /* Turn off upcalls. */
652 dpif_netlink_vport_init(&vport_request);
653 vport_request.cmd = OVS_VPORT_CMD_SET;
654 vport_request.dp_ifindex = dpif->dp_ifindex;
655 vport_request.port_no = u32_to_odp(i);
656 vport_request.upcall_pids = &upcall_pids;
657 dpif_netlink_vport_transact(&vport_request, NULL, NULL);
659 vport_del_channels(dpif, u32_to_odp(i));
662 for (i = 0; i < dpif->n_handlers; i++) {
663 struct dpif_handler *handler = &dpif->handlers[i];
665 dpif_netlink_handler_uninit(handler);
666 free(handler->epoll_events);
667 free(handler->channels);
670 free(dpif->handlers);
671 dpif->handlers = NULL;
672 dpif->n_handlers = 0;
673 dpif->uc_array_size = 0;
/* dpif 'close' callback: releases the port notifier socket, all upcall
 * channels/handlers, and the lock; the kernel datapath itself survives. */
677 dpif_netlink_close(struct dpif *dpif_)
679 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
681 nl_sock_destroy(dpif->port_notifier);
683 fat_rwlock_wrlock(&dpif->upcall_lock);
684 destroy_all_channels(dpif);
685 fat_rwlock_unlock(&dpif->upcall_lock);
687 fat_rwlock_destroy(&dpif->upcall_lock);
/* dpif 'destroy' callback: asks the kernel to delete the datapath itself
 * (OVS_DP_CMD_DEL).  Returns the transaction's error code. */
692 dpif_netlink_destroy(struct dpif *dpif_)
694 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
695 struct dpif_netlink_dp dp;
697 dpif_netlink_dp_init(&dp);
698 dp.cmd = OVS_DP_CMD_DEL;
699 dp.dp_ifindex = dpif->dp_ifindex;
700 return dpif_netlink_dp_transact(&dp, NULL, NULL);
/* dpif 'run' callback: if a vport deletion was noticed by port_poll(), the
 * 'refresh_channels' flag is set and we rebuild all upcall channels here,
 * under the write lock.  The flag is cleared before refreshing. */
704 dpif_netlink_run(struct dpif *dpif_)
706 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
708 if (dpif->refresh_channels) {
709 dpif->refresh_channels = false;
710 fat_rwlock_wrlock(&dpif->upcall_lock);
711 dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
712 fat_rwlock_unlock(&dpif->upcall_lock);
/* dpif 'get_stats' callback: fetches datapath statistics from the kernel.
 * The stats attributes live inside the reply buffer, hence the 32-bit
 * aligned accessors.  Megaflow stats are optional (older kernels); absent
 * values are reported as UINT32_MAX/UINT64_MAX sentinels. */
718 dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
720 struct dpif_netlink_dp dp;
724 error = dpif_netlink_dp_get(dpif_, &dp, &buf);
726 memset(stats, 0, sizeof *stats);
729 stats->n_hit = get_32aligned_u64(&dp.stats->n_hit);
730 stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
731 stats->n_lost = get_32aligned_u64(&dp.stats->n_lost);
732 stats->n_flows = get_32aligned_u64(&dp.stats->n_flows);
735 if (dp.megaflow_stats) {
736 stats->n_masks = dp.megaflow_stats->n_masks;
737 stats->n_mask_hit = get_32aligned_u64(
738 &dp.megaflow_stats->n_mask_hit);
740 stats->n_masks = UINT32_MAX;
741 stats->n_mask_hit = UINT64_MAX;
/* Maps a kernel vport's OVS_VPORT_TYPE_* to the netdev type string used in
 * userspace.  For NETDEV ports the name lookup decides between a specific
 * type and the generic "system".  Unknown types log (rate-limited) and fall
 * through (return lines elided in this extract). */
749 get_vport_type(const struct dpif_netlink_vport *vport)
751 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
753 switch (vport->type) {
754 case OVS_VPORT_TYPE_NETDEV: {
755 const char *type = netdev_get_type_from_name(vport->name);
757 return type ? type : "system";
760 case OVS_VPORT_TYPE_INTERNAL:
763 case OVS_VPORT_TYPE_GENEVE:
766 case OVS_VPORT_TYPE_GRE:
769 case OVS_VPORT_TYPE_GRE64:
772 case OVS_VPORT_TYPE_VXLAN:
775 case OVS_VPORT_TYPE_LISP:
778 case OVS_VPORT_TYPE_UNSPEC:
779 case __OVS_VPORT_TYPE_MAX:
783 VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
784 vport->dp_ifindex, vport->name, (unsigned int) vport->type);
/* Inverse of get_vport_type(): maps a userspace netdev type string to the
 * kernel OVS_VPORT_TYPE_* enum, or OVS_VPORT_TYPE_UNSPEC if unknown.
 * Note "gre64" is matched before "gre" on purpose — strstr("...gre64...",
 * "gre") would otherwise shadow it. */
788 static enum ovs_vport_type
789 netdev_to_ovs_vport_type(const struct netdev *netdev)
791 const char *type = netdev_get_type(netdev);
793 if (!strcmp(type, "tap") || !strcmp(type, "system")) {
794 return OVS_VPORT_TYPE_NETDEV;
795 } else if (!strcmp(type, "internal")) {
796 return OVS_VPORT_TYPE_INTERNAL;
797 } else if (!strcmp(type, "geneve")) {
798 return OVS_VPORT_TYPE_GENEVE;
799 } else if (strstr(type, "gre64")) {
800 return OVS_VPORT_TYPE_GRE64;
801 } else if (strstr(type, "gre")) {
802 return OVS_VPORT_TYPE_GRE;
803 } else if (!strcmp(type, "vxlan")) {
804 return OVS_VPORT_TYPE_VXLAN;
805 } else if (!strcmp(type, "lisp")) {
806 return OVS_VPORT_TYPE_LISP;
808 return OVS_VPORT_TYPE_UNSPEC;
/* Adds 'netdev' to the datapath as a new vport (OVS_VPORT_CMD_NEW), then
 * wires up per-handler upcall channels for it.  '*port_nop' may request a
 * specific port number; on success it receives the assigned one.  Requires
 * the upcall write lock.  If channel setup fails, the just-created port is
 * deleted again so the kernel and userspace stay consistent. */
813 dpif_netlink_port_add__(struct dpif_netlink *dpif, struct netdev *netdev,
814 odp_port_t *port_nop)
815 OVS_REQ_WRLOCK(dpif->upcall_lock)
817 const struct netdev_tunnel_config *tnl_cfg;
818 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
819 const char *name = netdev_vport_get_dpif_port(netdev,
820 namebuf, sizeof namebuf);
821 const char *type = netdev_get_type(netdev);
822 struct dpif_netlink_vport request, reply;
824 uint64_t options_stub[64 / 8];
825 struct ofpbuf options;
826 struct nl_sock **socksp = NULL;
827 uint32_t *upcall_pids;
/* Sockets are only needed when upcall handlers are configured. */
830 if (dpif->handlers) {
831 socksp = vport_create_socksp(dpif, &error);
837 dpif_netlink_vport_init(&request);
838 request.cmd = OVS_VPORT_CMD_NEW;
839 request.dp_ifindex = dpif->dp_ifindex;
840 request.type = netdev_to_ovs_vport_type(netdev);
841 if (request.type == OVS_VPORT_TYPE_UNSPEC) {
842 VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
843 "unsupported type `%s'",
844 dpif_name(&dpif->dpif), name, type);
845 vport_del_socksp(dpif, socksp);
850 if (request.type == OVS_VPORT_TYPE_NETDEV) {
852 /* XXX : Map appropriate Windows handle */
/* LRO coalesces TCP segments, which breaks forwarding; disable it. */
854 netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
/* Tunnel vports with a configured destination port pass it to the
 * kernel as OVS_TUNNEL_ATTR_DST_PORT. */
858 tnl_cfg = netdev_get_tunnel_config(netdev);
859 if (tnl_cfg && tnl_cfg->dst_port != 0) {
860 ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
861 nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
862 ntohs(tnl_cfg->dst_port));
863 request.options = ofpbuf_data(&options);
864 request.options_len = ofpbuf_size(&options);
867 request.port_no = *port_nop;
868 upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
869 request.n_upcall_pids = socksp ? dpif->n_handlers : 1;
870 request.upcall_pids = upcall_pids;
872 error = dpif_netlink_vport_transact(&request, &reply, &buf);
874 *port_nop = reply.port_no;
876 if (error == EBUSY && *port_nop != ODPP_NONE) {
877 VLOG_INFO("%s: requested port %"PRIu32" is in use",
878 dpif_name(&dpif->dpif), *port_nop);
881 vport_del_socksp(dpif, socksp);
886 error = vport_add_channels(dpif, *port_nop, socksp);
888 VLOG_INFO("%s: could not add channel for port %s",
889 dpif_name(&dpif->dpif), name);
891 /* Delete the port. */
892 dpif_netlink_vport_init(&request);
893 request.cmd = OVS_VPORT_CMD_DEL;
894 request.dp_ifindex = dpif->dp_ifindex;
895 request.port_no = *port_nop;
896 dpif_netlink_vport_transact(&request, NULL, NULL);
897 vport_del_socksp(dpif, socksp);
/* dpif 'port_add' callback: thin locking wrapper around
 * dpif_netlink_port_add__(). */
911 dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev,
912 odp_port_t *port_nop)
914 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
917 fat_rwlock_wrlock(&dpif->upcall_lock);
918 error = dpif_netlink_port_add__(dpif, netdev, port_nop);
919 fat_rwlock_unlock(&dpif->upcall_lock);
/* Deletes the vport from the kernel (OVS_VPORT_CMD_DEL) and tears down its
 * upcall channels.  Requires the upcall write lock. */
925 dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
926 OVS_REQ_WRLOCK(dpif->upcall_lock)
928 struct dpif_netlink_vport vport;
931 dpif_netlink_vport_init(&vport);
932 vport.cmd = OVS_VPORT_CMD_DEL;
933 vport.dp_ifindex = dpif->dp_ifindex;
934 vport.port_no = port_no;
935 error = dpif_netlink_vport_transact(&vport, NULL, NULL);
937 vport_del_channels(dpif, port_no);
/* dpif 'port_del' callback: locking wrapper around port_del__(). */
943 dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no)
945 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
948 fat_rwlock_wrlock(&dpif->upcall_lock);
949 error = dpif_netlink_port_del__(dpif, port_no);
950 fat_rwlock_unlock(&dpif->upcall_lock);
/* Queries a vport by number or by name (OVS_VPORT_CMD_GET).  On success
 * fills '*dpif_port' (if nonnull) with heap-allocated name/type strings the
 * caller owns.  A by-name hit in a different datapath is rejected. */
956 dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
957 const char *port_name, struct dpif_port *dpif_port)
959 struct dpif_netlink_vport request;
960 struct dpif_netlink_vport reply;
964 dpif_netlink_vport_init(&request);
965 request.cmd = OVS_VPORT_CMD_GET;
966 request.dp_ifindex = dpif->dp_ifindex;
967 request.port_no = port_no;
968 request.name = port_name;
970 error = dpif_netlink_vport_transact(&request, &reply, &buf);
972 if (reply.dp_ifindex != request.dp_ifindex) {
973 /* A query by name reported that 'port_name' is in some datapath
974 * other than 'dpif', but the caller wants to know about 'dpif'. */
976 } else if (dpif_port) {
977 dpif_port->name = xstrdup(reply.name);
978 dpif_port->type = xstrdup(get_vport_type(&reply));
979 dpif_port->port_no = reply.port_no;
/* dpif 'port_query_by_number' callback. */
987 dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no,
988 struct dpif_port *dpif_port)
990 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
992 return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port);
/* dpif 'port_query_by_name' callback; port number 0 means "by name only". */
996 dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
997 struct dpif_port *dpif_port)
999 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1001 return dpif_netlink_port_query__(dpif, 0, devname, dpif_port);
/* Returns the Netlink pid that the kernel should use for upcalls on
 * 'port_no', dispatched to the handler selected by 'hash'.  Requires at
 * least the read lock (the socket may disappear concurrently otherwise). */
1005 dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
1006 odp_port_t port_no, uint32_t hash)
1007 OVS_REQ_RDLOCK(dpif->upcall_lock)
1009 uint32_t port_idx = odp_to_u32(port_no);
1012 if (dpif->handlers && dpif->uc_array_size > 0) {
1013 /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
1014 * channel, since it is not heavily loaded. */
1015 uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;
1016 struct dpif_handler *h = &dpif->handlers[hash % dpif->n_handlers];
1018 /* Needs to check in case the socket pointer is changed in between
1019 * the holding of upcall_lock. A known case happens when the main
1020 * thread deletes the vport while the handler thread is handling
1021 * the upcall from that port. */
1022 if (h->channels[idx].sock) {
1023 pid = nl_sock_pid(h->channels[idx].sock);
/* dpif 'port_get_pid' callback: read-locking wrapper. */
1031 dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no,
1034 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1037 fat_rwlock_rdlock(&dpif->upcall_lock);
1038 ret = dpif_netlink_port_get_pid__(dpif, port_no, hash);
1039 fat_rwlock_unlock(&dpif->upcall_lock);
/* dpif 'flow_flush' callback: a keyless OVS_FLOW_CMD_DEL deletes every flow
 * in the datapath in one transaction. */
1045 dpif_netlink_flow_flush(struct dpif *dpif_)
1047 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1048 struct dpif_netlink_flow flow;
1050 dpif_netlink_flow_init(&flow);
1051 flow.cmd = OVS_FLOW_CMD_DEL;
1052 flow.dp_ifindex = dpif->dp_ifindex;
1053 return dpif_netlink_flow_transact(&flow, NULL, NULL);
/* Per-iteration state for a port dump: the Netlink dump plus a reusable
 * receive buffer. */
1056 struct dpif_netlink_port_state {
1057 struct nl_dump dump;
/* Kicks off an OVS_VPORT_CMD_GET dump of all vports in 'dpif'.  The request
 * buffer is handed off to nl_dump_start(), which owns it afterwards. */
1062 dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif,
1063 struct nl_dump *dump)
1065 struct dpif_netlink_vport request;
1068 dpif_netlink_vport_init(&request);
1069 request.cmd = OVS_VPORT_CMD_GET;
1070 request.dp_ifindex = dpif->dp_ifindex;
1072 buf = ofpbuf_new(1024);
1073 dpif_netlink_vport_to_ofpbuf(&request, buf);
1074 nl_dump_start(dump, NETLINK_GENERIC, buf);
/* dpif 'port_dump_start' callback: allocates the iteration state. */
1079 dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep)
1081 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1082 struct dpif_netlink_port_state *state;
1084 *statep = state = xmalloc(sizeof *state);
1085 dpif_netlink_port_dump_start__(dpif, &state->dump);
1087 ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE);
/* Fetches and parses the next vport record from 'dump' into '*vport'.
 * Parse failures are logged (rate-limited); end-of-dump handling is in the
 * elided branch after nl_dump_next(). */
1092 dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif,
1093 struct nl_dump *dump,
1094 struct dpif_netlink_vport *vport,
1095 struct ofpbuf *buffer)
1100 if (!nl_dump_next(dump, &buf, buffer)) {
1104 error = dpif_netlink_vport_from_ofpbuf(vport, &buf);
1106 VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)",
1107 dpif_name(&dpif->dpif), ovs_strerror(error));
/* dpif 'port_dump_next' callback: converts the raw vport into a dpif_port.
 * The CONST_CASTs alias strings inside 'state->buf'; they stay valid only
 * until the next call on this dump. */
1113 dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_,
1114 struct dpif_port *dpif_port)
1116 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1117 struct dpif_netlink_port_state *state = state_;
1118 struct dpif_netlink_vport vport;
1121 error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport,
1126 dpif_port->name = CONST_CAST(char *, vport.name);
1127 dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
1128 dpif_port->port_no = vport.port_no;
/* dpif 'port_dump_done' callback: finishes the dump and frees the state. */
1133 dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
1135 struct dpif_netlink_port_state *state = state_;
1136 int error = nl_dump_done(&state->dump);
1138 ofpbuf_uninit(&state->buf);
/* dpif 'port_poll' callback: reports the name of a port that changed since
 * the previous call, via '*devnamep' (caller frees).  Lazily subscribes a
 * Netlink socket to the OVS vport multicast group on first use; on that
 * first call everything is treated as potentially changed (elided branch). */
1144 dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
1146 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1148 /* Lazily create the Netlink socket to listen for notifications. */
1149 if (!dpif->port_notifier) {
1150 struct nl_sock *sock;
1153 error = nl_sock_create(NETLINK_GENERIC, &sock);
1158 error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
1160 nl_sock_destroy(sock);
1163 dpif->port_notifier = sock;
1165 /* We have no idea of the current state so report that everything
1171 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1172 uint64_t buf_stub[4096 / 8];
1176 ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
1177 error = nl_sock_recv(dpif->port_notifier, &buf, false);
1179 struct dpif_netlink_vport vport;
1181 error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
/* Only NEW/DEL/SET events for our own datapath are interesting. */
1183 if (vport.dp_ifindex == dpif->dp_ifindex
1184 && (vport.cmd == OVS_VPORT_CMD_NEW
1185 || vport.cmd == OVS_VPORT_CMD_DEL
1186 || vport.cmd == OVS_VPORT_CMD_SET)) {
1187 VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
1188 dpif->dpif.full_name, vport.name, vport.cmd);
/* A deleted vport leaves stale upcall channels behind; schedule a
 * rebuild for the next dpif_netlink_run(). */
1189 if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
1190 dpif->refresh_channels = true;
1192 *devnamep = xstrdup(vport.name);
1193 ofpbuf_uninit(&buf);
1197 } else if (error != EAGAIN) {
1198 VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
1199 ovs_strerror(error));
/* Discard whatever is queued; we cannot trust partial state. */
1200 nl_sock_drain(dpif->port_notifier);
1204 ofpbuf_uninit(&buf);
/* dpif 'port_poll_wait' callback: wakes the caller when a notification is
 * pending, or immediately if the notifier is not yet set up. */
1212 dpif_netlink_port_poll_wait(const struct dpif *dpif_)
1214 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1216 if (dpif->port_notifier) {
1217 nl_sock_wait(dpif->port_notifier, POLLIN);
1219 poll_immediate_wake();
/* Fills the UFID-related fields of 'request': copies '*ufid' if provided,
 * and records whether key/mask/actions may be omitted ('terse'). */
1224 dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request,
1225 const ovs_u128 *ufid, bool terse)
1228 request->ufid = *ufid;
1229 request->ufid_present = true;
1231 request->ufid_present = false;
1233 request->ufid_terse = terse;
/* Populates 'request' as an OVS_FLOW_CMD_GET for the flow identified by
 * 'key'/'key_len' and/or '*ufid'. */
1237 dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif,
1238 const struct nlattr *key, size_t key_len,
1239 const ovs_u128 *ufid, bool terse,
1240 struct dpif_netlink_flow *request)
1242 dpif_netlink_flow_init(request);
1243 request->cmd = OVS_FLOW_CMD_GET;
1244 request->dp_ifindex = dpif->dp_ifindex;
1246 request->key_len = key_len;
1247 dpif_netlink_flow_init_ufid(request, ufid, terse);
/* Convenience wrapper taking a dpif_flow_get operation. */
1251 dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
1252 const struct dpif_flow_get *get,
1253 struct dpif_netlink_flow *request)
1255 dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid,
/* Builds and transacts a flow GET; the reply aliases '*bufp', which the
 * caller must free. */
1260 dpif_netlink_flow_get__(const struct dpif_netlink *dpif,
1261 const struct nlattr *key, size_t key_len,
1262 const ovs_u128 *ufid, bool terse,
1263 struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1265 struct dpif_netlink_flow request;
1267 dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request);
1268 return dpif_netlink_flow_transact(&request, reply, bufp);
/* Re-fetches 'flow' (full, non-terse) using its key and, when present,
 * its UFID. */
1272 dpif_netlink_flow_get(const struct dpif_netlink *dpif,
1273 const struct dpif_netlink_flow *flow,
1274 struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1276 return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len,
1277 flow->ufid_present ? &flow->ufid : NULL,
1278 false, reply, bufp);
/* Translates a dpif_flow_put operation into an OVS_FLOW_CMD_NEW/SET
 * Netlink request.  A zero-length dummy action attribute guarantees
 * OVS_FLOW_ATTR_ACTIONS is serialized even when there are no actions,
 * which the kernel requires to distinguish "no actions" from "unchanged". */
1282 dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
1283 const struct dpif_flow_put *put,
1284 struct dpif_netlink_flow *request)
1286 static const struct nlattr dummy_action;
1288 dpif_netlink_flow_init(request);
1289 request->cmd = (put->flags & DPIF_FP_CREATE
1290 ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
1291 request->dp_ifindex = dpif->dp_ifindex;
1292 request->key = put->key;
1293 request->key_len = put->key_len;
1294 request->mask = put->mask;
1295 request->mask_len = put->mask_len;
1296 dpif_netlink_flow_init_ufid(request, put->ufid, false);
1298 /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
1299 request->actions = (put->actions
1301 : CONST_CAST(struct nlattr *, &dummy_action));
1302 request->actions_len = put->actions_len;
1303 if (put->flags & DPIF_FP_ZERO_STATS) {
1304 request->clear = true;
1306 if (put->flags & DPIF_FP_PROBE) {
1307 request->probe = true;
/* Pure modifications must not create; everything else sets NLM_F_CREATE. */
1309 request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
/* Populates 'request' as an OVS_FLOW_CMD_DEL for the flow identified by
 * 'key'/'key_len' and/or '*ufid'. */
1313 dpif_netlink_init_flow_del__(struct dpif_netlink *dpif,
1314 const struct nlattr *key, size_t key_len,
1315 const ovs_u128 *ufid, bool terse,
1316 struct dpif_netlink_flow *request)
1318 dpif_netlink_flow_init(request);
1319 request->cmd = OVS_FLOW_CMD_DEL;
1320 request->dp_ifindex = dpif->dp_ifindex;
1322 request->key_len = key_len;
1323 dpif_netlink_flow_init_ufid(request, ufid, terse);
/* Wrapper for a dpif_flow_del operation; terse mode is used only when the
 * kernel supports UFIDs. */
1327 dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
1328 const struct dpif_flow_del *del,
1329 struct dpif_netlink_flow *request)
1331 return dpif_netlink_init_flow_del__(dpif, del->key, del->key_len,
1332 del->ufid, dpif->ufid_supported,
/* Builds and transacts a flow DEL without requesting a reply. */
1337 dpif_netlink_flow_del(struct dpif_netlink *dpif,
1338 const struct nlattr *key, size_t key_len,
1339 const ovs_u128 *ufid, bool terse)
1341 struct dpif_netlink_flow request;
1343 dpif_netlink_init_flow_del__(dpif, key, key_len, ufid, terse, &request);
1346 return dpif_netlink_flow_transact(&request, NULL, NULL);
1349 struct dpif_netlink_flow_dump {
1350 struct dpif_flow_dump up;
1351 struct nl_dump nl_dump;
1355 static struct dpif_netlink_flow_dump *
1356 dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
1358 return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
1361 static struct dpif_flow_dump *
1362 dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse)
1364 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1365 struct dpif_netlink_flow_dump *dump;
1366 struct dpif_netlink_flow request;
1369 dump = xmalloc(sizeof *dump);
1370 dpif_flow_dump_init(&dump->up, dpif_);
1372 dpif_netlink_flow_init(&request);
1373 request.cmd = OVS_FLOW_CMD_GET;
1374 request.dp_ifindex = dpif->dp_ifindex;
1375 request.ufid_present = false;
1376 request.ufid_terse = terse;
1378 buf = ofpbuf_new(1024);
1379 dpif_netlink_flow_to_ofpbuf(&request, buf);
1380 nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
1382 atomic_init(&dump->status, 0);
1383 dump->up.terse = terse;
1389 dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
1391 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1392 unsigned int nl_status = nl_dump_done(&dump->nl_dump);
1395 /* No other thread has access to 'dump' at this point. */
1396 atomic_read_relaxed(&dump->status, &dump_status);
1398 return dump_status ? dump_status : nl_status;
1401 struct dpif_netlink_flow_dump_thread {
1402 struct dpif_flow_dump_thread up;
1403 struct dpif_netlink_flow_dump *dump;
1404 struct dpif_netlink_flow flow;
1405 struct dpif_flow_stats stats;
1406 struct ofpbuf nl_flows; /* Always used to store flows. */
1407 struct ofpbuf *nl_actions; /* Used if kernel does not supply actions. */
1410 static struct dpif_netlink_flow_dump_thread *
1411 dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
1413 return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up);
1416 static struct dpif_flow_dump_thread *
1417 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_)
1419 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1420 struct dpif_netlink_flow_dump_thread *thread;
1422 thread = xmalloc(sizeof *thread);
1423 dpif_flow_dump_thread_init(&thread->up, &dump->up);
1424 thread->dump = dump;
1425 ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE);
1426 thread->nl_actions = NULL;
1432 dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
1434 struct dpif_netlink_flow_dump_thread *thread
1435 = dpif_netlink_flow_dump_thread_cast(thread_);
1437 ofpbuf_uninit(&thread->nl_flows);
1438 ofpbuf_delete(thread->nl_actions);
1443 dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow,
1444 const struct dpif_netlink_flow *datapath_flow)
1446 dpif_flow->key = datapath_flow->key;
1447 dpif_flow->key_len = datapath_flow->key_len;
1448 dpif_flow->mask = datapath_flow->mask;
1449 dpif_flow->mask_len = datapath_flow->mask_len;
1450 dpif_flow->actions = datapath_flow->actions;
1451 dpif_flow->actions_len = datapath_flow->actions_len;
1452 dpif_flow->ufid_present = datapath_flow->ufid_present;
1453 if (datapath_flow->ufid_present) {
1454 dpif_flow->ufid = datapath_flow->ufid;
1456 ovs_assert(datapath_flow->key && datapath_flow->key_len);
1457 dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len,
1460 dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
1464 dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
1465 struct dpif_flow *flows, int max_flows)
1467 struct dpif_netlink_flow_dump_thread *thread
1468 = dpif_netlink_flow_dump_thread_cast(thread_);
1469 struct dpif_netlink_flow_dump *dump = thread->dump;
1470 struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
1473 ofpbuf_delete(thread->nl_actions);
1474 thread->nl_actions = NULL;
1478 || (n_flows < max_flows && ofpbuf_size(&thread->nl_flows))) {
1479 struct dpif_netlink_flow datapath_flow;
1480 struct ofpbuf nl_flow;
1483 /* Try to grab another flow. */
1484 if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
1488 /* Convert the flow to our output format. */
1489 error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
1491 atomic_store_relaxed(&dump->status, error);
1495 if (dump->up.terse || datapath_flow.actions) {
1496 /* Common case: we don't want actions, or the flow includes
1498 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
1501 /* Rare case: the flow does not include actions. Retrieve this
1502 * individual flow again to get the actions. */
1503 error = dpif_netlink_flow_get(dpif, &datapath_flow,
1504 &datapath_flow, &thread->nl_actions);
1505 if (error == ENOENT) {
1506 VLOG_DBG("dumped flow disappeared on get");
1509 VLOG_WARN("error fetching dumped flow: %s",
1510 ovs_strerror(error));
1511 atomic_store_relaxed(&dump->status, error);
1515 /* Save this flow. Then exit, because we only have one buffer to
1516 * handle this case. */
1517 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
1526 dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
1529 struct ovs_header *k_exec;
1532 ofpbuf_prealloc_tailroom(buf, (64
1533 + ofpbuf_size(d_exec->packet)
1534 + ODP_KEY_METADATA_SIZE
1535 + d_exec->actions_len));
1537 nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
1538 OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);
1540 k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
1541 k_exec->dp_ifindex = dp_ifindex;
1543 nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
1544 ofpbuf_data(d_exec->packet),
1545 ofpbuf_size(d_exec->packet));
1547 key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
1548 odp_key_from_pkt_metadata(buf, &d_exec->md);
1549 nl_msg_end_nested(buf, key_ofs);
1551 nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
1552 d_exec->actions, d_exec->actions_len);
1553 if (d_exec->probe) {
1554 nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
1561 dpif_netlink_operate__(struct dpif_netlink *dpif,
1562 struct dpif_op **ops, size_t n_ops)
1565 struct nl_transaction txn;
1567 struct ofpbuf request;
1568 uint64_t request_stub[1024 / 8];
1570 struct ofpbuf reply;
1571 uint64_t reply_stub[1024 / 8];
1574 struct nl_transaction *txnsp[MAX_OPS];
1577 ovs_assert(n_ops <= MAX_OPS);
1578 for (i = 0; i < n_ops; i++) {
1579 struct op_auxdata *aux = &auxes[i];
1580 struct dpif_op *op = ops[i];
1581 struct dpif_flow_put *put;
1582 struct dpif_flow_del *del;
1583 struct dpif_execute *execute;
1584 struct dpif_flow_get *get;
1585 struct dpif_netlink_flow flow;
1587 ofpbuf_use_stub(&aux->request,
1588 aux->request_stub, sizeof aux->request_stub);
1589 aux->txn.request = &aux->request;
1591 ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
1592 aux->txn.reply = NULL;
1595 case DPIF_OP_FLOW_PUT:
1596 put = &op->u.flow_put;
1597 dpif_netlink_init_flow_put(dpif, put, &flow);
1599 flow.nlmsg_flags |= NLM_F_ECHO;
1600 aux->txn.reply = &aux->reply;
1602 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1605 case DPIF_OP_FLOW_DEL:
1606 del = &op->u.flow_del;
1607 dpif_netlink_init_flow_del(dpif, del, &flow);
1609 flow.nlmsg_flags |= NLM_F_ECHO;
1610 aux->txn.reply = &aux->reply;
1612 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1615 case DPIF_OP_EXECUTE:
1616 execute = &op->u.execute;
1617 dpif_netlink_encode_execute(dpif->dp_ifindex, execute,
1621 case DPIF_OP_FLOW_GET:
1622 get = &op->u.flow_get;
1623 dpif_netlink_init_flow_get(dpif, get, &flow);
1624 aux->txn.reply = get->buffer;
1625 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1633 for (i = 0; i < n_ops; i++) {
1634 txnsp[i] = &auxes[i].txn;
1636 nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);
1638 for (i = 0; i < n_ops; i++) {
1639 struct op_auxdata *aux = &auxes[i];
1640 struct nl_transaction *txn = &auxes[i].txn;
1641 struct dpif_op *op = ops[i];
1642 struct dpif_flow_put *put;
1643 struct dpif_flow_del *del;
1644 struct dpif_flow_get *get;
1646 op->error = txn->error;
1649 case DPIF_OP_FLOW_PUT:
1650 put = &op->u.flow_put;
1653 struct dpif_netlink_flow reply;
1655 op->error = dpif_netlink_flow_from_ofpbuf(&reply,
1658 dpif_netlink_flow_get_stats(&reply, put->stats);
1664 case DPIF_OP_FLOW_DEL:
1665 del = &op->u.flow_del;
1668 struct dpif_netlink_flow reply;
1670 op->error = dpif_netlink_flow_from_ofpbuf(&reply,
1673 dpif_netlink_flow_get_stats(&reply, del->stats);
1679 case DPIF_OP_EXECUTE:
1682 case DPIF_OP_FLOW_GET:
1683 get = &op->u.flow_get;
1685 struct dpif_netlink_flow reply;
1687 op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
1689 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow,
1699 ofpbuf_uninit(&aux->request);
1700 ofpbuf_uninit(&aux->reply);
1705 dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops)
1707 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1710 size_t chunk = MIN(n_ops, MAX_OPS);
1711 dpif_netlink_operate__(dpif, ops, chunk);
/* Windows variant: tears down the per-handler vport socket pool.
 * NOTE(review): this definition pairs with the vport_create_sock_pool()
 * variant below under a _WIN32 conditional not visible in this chunk. */
static void
dpif_netlink_handler_uninit(struct dpif_handler *handler)
{
    vport_delete_sock_pool(handler);
}
/* Windows variant: sets up the per-handler vport socket pool.  Returns 0
 * on success, otherwise a positive errno value from the pool creator. */
static int
dpif_netlink_handler_init(struct dpif_handler *handler)
{
    return vport_create_sock_pool(handler);
}
1732 dpif_netlink_handler_init(struct dpif_handler *handler)
1734 handler->epoll_fd = epoll_create(10);
1735 return handler->epoll_fd < 0 ? errno : 0;
1739 dpif_netlink_handler_uninit(struct dpif_handler *handler)
1741 close(handler->epoll_fd);
1745 /* Checks support for unique flow identifiers. */
1747 dpif_netlink_check_ufid__(struct dpif *dpif_)
1749 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1751 struct odputil_keybuf keybuf;
1752 struct ofpbuf key, *replybuf;
1753 struct dpif_netlink_flow reply;
1756 bool enable_ufid = false;
1758 memset(&flow, 0, sizeof flow);
1759 flow.dl_type = htons(0x1234);
1761 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
1762 odp_flow_key_from_flow(&key, &flow, NULL, 0, true);
1763 dpif_flow_hash(dpif_, ofpbuf_data(&key), ofpbuf_size(&key), &ufid);
1764 error = dpif_flow_put(dpif_, DPIF_FP_CREATE | DPIF_FP_PROBE,
1765 ofpbuf_data(&key), ofpbuf_size(&key), NULL, 0, NULL,
1768 if (error && error != EEXIST) {
1769 VLOG_WARN("%s: UFID feature probe failed (%s).",
1770 dpif_name(dpif_), ovs_strerror(error));
1774 error = dpif_netlink_flow_get__(dpif, NULL, 0, &ufid, true, &reply,
1776 if (!error && reply.ufid_present && ovs_u128_equal(&ufid, &reply.ufid)) {
1779 ofpbuf_delete(replybuf);
1781 error = dpif_netlink_flow_del(dpif, ofpbuf_data(&key), ofpbuf_size(&key),
1784 VLOG_WARN("%s: failed to delete UFID feature probe flow",
1792 dpif_netlink_check_ufid(struct dpif *dpif_)
1794 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1796 if (dpif->ufid_supported) {
1797 VLOG_INFO("%s: Datapath supports userspace flow ids",
1800 VLOG_INFO("%s: Datapath does not support userspace flow ids",
1803 return dpif->ufid_supported;
1806 /* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
1807 * currently in 'dpif' in the kernel, by adding a new set of channels for
1808 * any kernel vport that lacks one and deleting any channels that have no
1809 * backing kernel vports. */
1811 dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
1812 OVS_REQ_WRLOCK(dpif->upcall_lock)
1814 unsigned long int *keep_channels;
1815 struct dpif_netlink_vport vport;
1816 size_t keep_channels_nbits;
1817 struct nl_dump dump;
1818 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
/* On Windows only a single upcall handler is supported; guard that here. */
1823 ovs_assert(!WINDOWS || n_handlers <= 1);
1824 ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
/* Handler count changed: throw away all channels and rebuild the handler
 * array from scratch, rolling back (uninit + free) on partial failure. */
1826 if (dpif->n_handlers != n_handlers) {
1827 destroy_all_channels(dpif);
1828 dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
1829 for (i = 0; i < n_handlers; i++) {
1831 struct dpif_handler *handler = &dpif->handlers[i];
1833 error = dpif_netlink_handler_init(handler);
/* Initialization of handler 'i' failed: unwind handlers [0, i). */
1836 struct dpif_handler *tmp = &dpif->handlers[i];
1839 for (j = 0; j < i; j++) {
1840 dpif_netlink_handler_uninit(tmp);
1842 free(dpif->handlers);
1843 dpif->handlers = NULL;
1848 dpif->n_handlers = n_handlers;
/* Reset per-handler event queues before re-scanning the vports. */
1851 for (i = 0; i < n_handlers; i++) {
1852 struct dpif_handler *handler = &dpif->handlers[i];
1854 handler->event_offset = handler->n_events = 0;
/* Track which existing channels are still backed by a kernel vport. */
1857 keep_channels_nbits = dpif->uc_array_size;
1858 keep_channels = bitmap_allocate(keep_channels_nbits);
1860 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
1861 dpif_netlink_port_dump_start__(dpif, &dump);
1862 while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
1863 uint32_t port_no = odp_to_u32(vport.port_no);
1864 uint32_t *upcall_pids = NULL;
/* Vport has no channels yet: create one Netlink socket per handler and
 * register them as this port's upcall channels. */
1867 if (port_no >= dpif->uc_array_size
1868 || !vport_get_pids(dpif, port_no, &upcall_pids)) {
1869 struct nl_sock **socksp = vport_create_socksp(dpif, &error);
1875 error = vport_add_channels(dpif, vport.port_no, socksp);
1877 VLOG_INFO("%s: could not add channels for port %s",
1878 dpif_name(&dpif->dpif), vport.name);
1879 vport_del_socksp(dpif, socksp);
1883 upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
1887 /* Configure the vport to deliver misses to 'sock'. */
1888 if (vport.upcall_pids[0] == 0
1889 || vport.n_upcall_pids != dpif->n_handlers
1890 || memcmp(upcall_pids, vport.upcall_pids, n_handlers * sizeof
1892 struct dpif_netlink_vport vport_request;
/* Kernel's recorded upcall PIDs are stale: issue OVS_VPORT_CMD_SET. */
1894 dpif_netlink_vport_init(&vport_request);
1895 vport_request.cmd = OVS_VPORT_CMD_SET;
1896 vport_request.dp_ifindex = dpif->dp_ifindex;
1897 vport_request.port_no = vport.port_no;
1898 vport_request.n_upcall_pids = dpif->n_handlers;
1899 vport_request.upcall_pids = upcall_pids;
1900 error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
1902 VLOG_WARN_RL(&error_rl,
1903 "%s: failed to set upcall pid on port: %s",
1904 dpif_name(&dpif->dpif), ovs_strerror(error));
/* ENODEV/ENOENT mean the port vanished mid-dump (a benign race);
 * any other error is reported to the caller. */
1906 if (error != ENODEV && error != ENOENT) {
1909 /* The vport isn't really there, even though the dump says
1910 * it is. Probably we just hit a race after a port
/* Port handled successfully: mark its channels as still in use. */
1917 if (port_no < keep_channels_nbits) {
1918 bitmap_set1(keep_channels, port_no);
/* Error path for this port: drop whatever channels it had. */
1925 vport_del_channels(dpif, vport.port_no);
1927 nl_dump_done(&dump);
1928 ofpbuf_uninit(&buf);
1930 /* Discard any saved channels that we didn't reuse. */
1931 for (i = 0; i < keep_channels_nbits; i++) {
1932 if (!bitmap_is_set(keep_channels, i)) {
1933 vport_del_channels(dpif, u32_to_odp(i));
1936 free(keep_channels);
1942 dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable)
1943 OVS_REQ_WRLOCK(dpif->upcall_lock)
1945 if ((dpif->handlers != NULL) == enable) {
1947 } else if (!enable) {
1948 destroy_all_channels(dpif);
1951 return dpif_netlink_refresh_channels(dpif, 1);
1956 dpif_netlink_recv_set(struct dpif *dpif_, bool enable)
1958 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1961 fat_rwlock_wrlock(&dpif->upcall_lock);
1962 error = dpif_netlink_recv_set__(dpif, enable);
1963 fat_rwlock_unlock(&dpif->upcall_lock);
1969 dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
1971 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1975 /* Multiple upcall handlers will be supported once kernel datapath supports
1977 if (n_handlers > 1) {
1982 fat_rwlock_wrlock(&dpif->upcall_lock);
1983 if (dpif->handlers) {
1984 error = dpif_netlink_refresh_channels(dpif, n_handlers);
1986 fat_rwlock_unlock(&dpif->upcall_lock);
1992 dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
1993 uint32_t queue_id, uint32_t *priority)
1995 if (queue_id < 0xf000) {
1996 *priority = TC_H_MAKE(1 << 16, queue_id + 1);
2004 parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf,
2005 struct dpif_upcall *upcall, int *dp_ifindex)
2007 static const struct nl_policy ovs_packet_policy[] = {
2008 /* Always present. */
2009 [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
2010 .min_len = ETH_HEADER_LEN },
2011 [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },
2013 /* OVS_PACKET_CMD_ACTION only. */
2014 [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
2015 [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
2018 struct ovs_header *ovs_header;
2019 struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
2020 struct nlmsghdr *nlmsg;
2021 struct genlmsghdr *genl;
2025 ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
2027 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2028 genl = ofpbuf_try_pull(&b, sizeof *genl);
2029 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2030 if (!nlmsg || !genl || !ovs_header
2031 || nlmsg->nlmsg_type != ovs_packet_family
2032 || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
2033 ARRAY_SIZE(ovs_packet_policy))) {
2037 type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
2038 : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
2044 /* (Re)set ALL fields of '*upcall' on successful return. */
2045 upcall->type = type;
2046 upcall->key = CONST_CAST(struct nlattr *,
2047 nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
2048 upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
2049 dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid);
2050 upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
2051 upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
2053 /* Allow overwriting the netlink attribute header without reallocating. */
2054 ofpbuf_use_stub(&upcall->packet,
2055 CONST_CAST(struct nlattr *,
2056 nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
2057 nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
2058 sizeof(struct nlattr));
2059 ofpbuf_set_data(&upcall->packet,
2060 (char *)ofpbuf_data(&upcall->packet) + sizeof(struct nlattr));
2061 ofpbuf_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));
2063 *dp_ifindex = ovs_header->dp_ifindex;
2069 #define PACKET_RECV_BATCH_SIZE 50
2071 dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
2072 struct dpif_upcall *upcall, struct ofpbuf *buf)
2073 OVS_REQ_RDLOCK(dpif->upcall_lock)
2075 struct dpif_handler *handler;
2077 struct dpif_windows_vport_sock *sock_pool;
2080 if (!dpif->handlers) {
2084 /* Only one handler is supported currently. */
2085 if (handler_id >= 1) {
2089 if (handler_id >= dpif->n_handlers) {
2093 handler = &dpif->handlers[handler_id];
2094 sock_pool = handler->vport_sock_pool;
2096 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2101 if (++read_tries > PACKET_RECV_BATCH_SIZE) {
2105 error = nl_sock_recv(sock_pool[i].nl_sock, buf, false);
2106 if (error == ENOBUFS) {
2107 /* ENOBUFS typically means that we've received so many
2108 * packets that the buffer overflowed. Try again
2109 * immediately because there's almost certainly a packet
2110 * waiting for us. */
2111 /* XXX: report_loss(dpif, ch, idx, handler_id); */
2115 /* XXX: ch->last_poll = time_msec(); */
2117 if (error == EAGAIN) {
2123 error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
2124 if (!error && dp_ifindex == dpif->dp_ifindex) {
2136 dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
2137 struct dpif_upcall *upcall, struct ofpbuf *buf)
2138 OVS_REQ_RDLOCK(dpif->upcall_lock)
2140 struct dpif_handler *handler;
2143 if (!dpif->handlers || handler_id >= dpif->n_handlers) {
2147 handler = &dpif->handlers[handler_id];
2148 if (handler->event_offset >= handler->n_events) {
2151 handler->event_offset = handler->n_events = 0;
2154 retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
2155 dpif->uc_array_size, 0);
2156 } while (retval < 0 && errno == EINTR);
2159 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
2160 VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
2161 } else if (retval > 0) {
2162 handler->n_events = retval;
2166 while (handler->event_offset < handler->n_events) {
2167 int idx = handler->epoll_events[handler->event_offset].data.u32;
2168 struct dpif_channel *ch = &dpif->handlers[handler_id].channels[idx];
2170 handler->event_offset++;
2176 if (++read_tries > 50) {
2180 error = nl_sock_recv(ch->sock, buf, false);
2181 if (error == ENOBUFS) {
2182 /* ENOBUFS typically means that we've received so many
2183 * packets that the buffer overflowed. Try again
2184 * immediately because there's almost certainly a packet
2185 * waiting for us. */
2186 report_loss(dpif, ch, idx, handler_id);
2190 ch->last_poll = time_msec();
2192 if (error == EAGAIN) {
2198 error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
2199 if (!error && dp_ifindex == dpif->dp_ifindex) {
2212 dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
2213 struct dpif_upcall *upcall, struct ofpbuf *buf)
2215 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2218 fat_rwlock_rdlock(&dpif->upcall_lock);
2220 error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
2222 error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
2224 fat_rwlock_unlock(&dpif->upcall_lock);
2230 dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id)
2231 OVS_REQ_RDLOCK(dpif->upcall_lock)
2235 struct dpif_windows_vport_sock *sock_pool =
2236 dpif->handlers[handler_id].vport_sock_pool;
2238 /* Only one handler is supported currently. */
2239 if (handler_id >= 1) {
2243 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2244 nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
2247 if (dpif->handlers && handler_id < dpif->n_handlers) {
2248 struct dpif_handler *handler = &dpif->handlers[handler_id];
2250 poll_fd_wait(handler->epoll_fd, POLLIN);
2256 dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id)
2258 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2260 fat_rwlock_rdlock(&dpif->upcall_lock);
2261 dpif_netlink_recv_wait__(dpif, handler_id);
2262 fat_rwlock_unlock(&dpif->upcall_lock);
2266 dpif_netlink_recv_purge__(struct dpif_netlink *dpif)
2267 OVS_REQ_WRLOCK(dpif->upcall_lock)
2269 if (dpif->handlers) {
2272 for (i = 0; i < dpif->uc_array_size; i++ ) {
2273 if (!dpif->handlers[0].channels[i].sock) {
2277 for (j = 0; j < dpif->n_handlers; j++) {
2278 nl_sock_drain(dpif->handlers[j].channels[i].sock);
2285 dpif_netlink_recv_purge(struct dpif *dpif_)
2287 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2289 fat_rwlock_wrlock(&dpif->upcall_lock);
2290 dpif_netlink_recv_purge__(dpif);
2291 fat_rwlock_unlock(&dpif->upcall_lock);
/* Returns a malloc'd string with the kernel module's version as read from
 * sysfs, or NULL if it cannot be determined.  Caller frees the result.
 *
 * Fix: the visible code opened LINUX_DATAPATH_VERSION_FILE but never
 * called fclose(), leaking a FILE handle on every invocation, and lacked
 * the function's return. */
static char *
dpif_netlink_get_datapath_version(void)
{
    char *version_str = NULL;

#ifdef __linux__

#define MAX_VERSION_STR_SIZE 80
#define LINUX_DATAPATH_VERSION_FILE  "/sys/module/openvswitch/version"
    FILE *f;

    f = fopen(LINUX_DATAPATH_VERSION_FILE, "r");
    if (f) {
        char version[MAX_VERSION_STR_SIZE];

        if (fgets(version, MAX_VERSION_STR_SIZE, f)) {
            /* Strip the trailing newline, if any. */
            char *newline = strchr(version, '\n');

            if (newline) {
                *newline = '\0';
            }
            version_str = xstrdup(version);
        }
        fclose(f);
    }
#endif

    return version_str;
}
/* Provider vtable for the Linux kernel ("system") datapath.  NOTE(review):
 * this is a positional initializer; several slots (type string, open,
 * close, run/wait, flush, etc.) are not visible in this extract, so the
 * mapping below is annotated only where the member is unambiguous. */
2324 const struct dpif_class dpif_netlink_class = {
2326 dpif_netlink_enumerate,
2330 dpif_netlink_destroy,
2333 dpif_netlink_get_stats,
/* Port management callbacks. */
2334 dpif_netlink_port_add,
2335 dpif_netlink_port_del,
2336 dpif_netlink_port_query_by_number,
2337 dpif_netlink_port_query_by_name,
2338 dpif_netlink_port_get_pid,
2339 dpif_netlink_port_dump_start,
2340 dpif_netlink_port_dump_next,
2341 dpif_netlink_port_dump_done,
2342 dpif_netlink_port_poll,
2343 dpif_netlink_port_poll_wait,
/* Flow table callbacks. */
2344 dpif_netlink_flow_flush,
2345 dpif_netlink_flow_dump_create,
2346 dpif_netlink_flow_dump_destroy,
2347 dpif_netlink_flow_dump_thread_create,
2348 dpif_netlink_flow_dump_thread_destroy,
2349 dpif_netlink_flow_dump_next,
2350 dpif_netlink_operate,
/* Upcall callbacks. */
2351 dpif_netlink_recv_set,
2352 dpif_netlink_handlers_set,
2353 NULL, /* poll_thread_set */
2354 dpif_netlink_queue_to_priority,
2356 dpif_netlink_recv_wait,
2357 dpif_netlink_recv_purge,
2358 NULL, /* register_upcall_cb */
2359 NULL, /* enable_upcall */
2360 NULL, /* disable_upcall */
2361 dpif_netlink_get_datapath_version, /* get_datapath_version */
2362 dpif_netlink_check_ufid,
2366 dpif_netlink_init(void)
2368 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2371 if (ovsthread_once_start(&once)) {
2372 error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
2373 &ovs_datapath_family);
2375 VLOG_ERR("Generic Netlink family '%s' does not exist. "
2376 "The Open vSwitch kernel module is probably not loaded.",
2377 OVS_DATAPATH_FAMILY);
2380 error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family);
2383 error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family);
2386 error = nl_lookup_genl_family(OVS_PACKET_FAMILY,
2387 &ovs_packet_family);
2390 error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
2391 &ovs_vport_mcgroup);
2394 ovsthread_once_done(&once);
2401 dpif_netlink_is_internal_device(const char *name)
2403 struct dpif_netlink_vport reply;
2407 error = dpif_netlink_vport_get(name, &reply, &buf);
2410 } else if (error != ENODEV && error != ENOENT) {
2411 VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
2412 name, ovs_strerror(error));
2415 return reply.type == OVS_VPORT_TYPE_INTERNAL;
2418 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2419 * by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a
2420 * positive errno value.
2422 * 'vport' will contain pointers into 'buf', so the caller should not free
2423 * 'buf' while 'vport' is still in use. */
2425 dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
2426 const struct ofpbuf *buf)
2428 static const struct nl_policy ovs_vport_policy[] = {
2429 [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
2430 [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
2431 [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
2432 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
2433 [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
2435 [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
2438 struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
2439 struct ovs_header *ovs_header;
2440 struct nlmsghdr *nlmsg;
2441 struct genlmsghdr *genl;
2444 dpif_netlink_vport_init(vport);
2446 ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
2447 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2448 genl = ofpbuf_try_pull(&b, sizeof *genl);
2449 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2450 if (!nlmsg || !genl || !ovs_header
2451 || nlmsg->nlmsg_type != ovs_vport_family
2452 || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
2453 ARRAY_SIZE(ovs_vport_policy))) {
2457 vport->cmd = genl->cmd;
2458 vport->dp_ifindex = ovs_header->dp_ifindex;
2459 vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
2460 vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2461 vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
2462 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2463 vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
2464 / (sizeof *vport->upcall_pids);
2465 vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);
2468 if (a[OVS_VPORT_ATTR_STATS]) {
2469 vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
2471 if (a[OVS_VPORT_ATTR_OPTIONS]) {
2472 vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
2473 vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
2478 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
2479 * followed by Netlink attributes corresponding to 'vport'. */
2481 dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
2484 struct ovs_header *ovs_header;
2486 nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
2487 vport->cmd, OVS_VPORT_VERSION);
2489 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2490 ovs_header->dp_ifindex = vport->dp_ifindex;
2492 if (vport->port_no != ODPP_NONE) {
2493 nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
2496 if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
2497 nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
2501 nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
2504 if (vport->upcall_pids) {
2505 nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
2507 vport->n_upcall_pids * sizeof *vport->upcall_pids);
2511 nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
2512 vport->stats, sizeof *vport->stats);
2515 if (vport->options) {
2516 nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
2517 vport->options, vport->options_len);
2521 /* Clears 'vport' to "empty" values. */
2523 dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
2525 memset(vport, 0, sizeof *vport);
2526 vport->port_no = ODPP_NONE;
2529 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2530 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2531 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2532 * result of the command is expected to be an ovs_vport also, which is decoded
2533 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
2534 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
2536 dpif_netlink_vport_transact(const struct dpif_netlink_vport *request,
2537 struct dpif_netlink_vport *reply,
2538 struct ofpbuf **bufp)
2540 struct ofpbuf *request_buf;
2543 ovs_assert((reply != NULL) == (bufp != NULL));
2545 error = dpif_netlink_init();
2549 dpif_netlink_vport_init(reply);
2554 request_buf = ofpbuf_new(1024);
2555 dpif_netlink_vport_to_ofpbuf(request, request_buf);
2556 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2557 ofpbuf_delete(request_buf);
2561 error = dpif_netlink_vport_from_ofpbuf(reply, *bufp);
2564 dpif_netlink_vport_init(reply);
2565 ofpbuf_delete(*bufp);
2572 /* Obtains information about the kernel vport named 'name' and stores it into
2573 * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
2574 * longer needed ('reply' will contain pointers into '*bufp'). */
2576 dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply,
2577 struct ofpbuf **bufp)
2579 struct dpif_netlink_vport request;
2581 dpif_netlink_vport_init(&request);
2582 request.cmd = OVS_VPORT_CMD_GET;
2583 request.name = name;
2585 return dpif_netlink_vport_transact(&request, reply, bufp);
2588 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2589 * by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a
2590 * positive errno value.
2592 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
2593 * while 'dp' is still in use. */
2595 dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf)
2597 static const struct nl_policy ovs_datapath_policy[] = {
2598 [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
2599 [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
2601 [OVS_DP_ATTR_MEGAFLOW_STATS] = {
2602 NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
2606 struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
2607 struct ovs_header *ovs_header;
2608 struct nlmsghdr *nlmsg;
2609 struct genlmsghdr *genl;
2612 dpif_netlink_dp_init(dp);
2614 ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
2615 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2616 genl = ofpbuf_try_pull(&b, sizeof *genl);
2617 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2618 if (!nlmsg || !genl || !ovs_header
2619 || nlmsg->nlmsg_type != ovs_datapath_family
2620 || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
2621 ARRAY_SIZE(ovs_datapath_policy))) {
2625 dp->cmd = genl->cmd;
2626 dp->dp_ifindex = ovs_header->dp_ifindex;
2627 dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
2628 if (a[OVS_DP_ATTR_STATS]) {
2629 dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
2632 if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
2633 dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
2639 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
2641 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
2643 struct ovs_header *ovs_header;
2645 nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
2646 NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
2647 OVS_DATAPATH_VERSION);
2649 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2650 ovs_header->dp_ifindex = dp->dp_ifindex;
2653 nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
2656 if (dp->upcall_pid) {
2657 nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
2660 if (dp->user_features) {
2661 nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
2664 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
2667 /* Clears 'dp' to "empty" values. */
2669 dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
2671 memset(dp, 0, sizeof *dp);
2675 dpif_netlink_dp_dump_start(struct nl_dump *dump)
2677 struct dpif_netlink_dp request;
2680 dpif_netlink_dp_init(&request);
2681 request.cmd = OVS_DP_CMD_GET;
2683 buf = ofpbuf_new(1024);
2684 dpif_netlink_dp_to_ofpbuf(&request, buf);
2685 nl_dump_start(dump, NETLINK_GENERIC, buf);
2689 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2690 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2691 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2692 * result of the command is expected to be of the same form, which is decoded
2693 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
2694 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
2696 dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
2697 struct dpif_netlink_dp *reply, struct ofpbuf **bufp)
2699 struct ofpbuf *request_buf;
2702 ovs_assert((reply != NULL) == (bufp != NULL));
2704 request_buf = ofpbuf_new(1024);
2705 dpif_netlink_dp_to_ofpbuf(request, request_buf);
2706 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2707 ofpbuf_delete(request_buf);
2710 dpif_netlink_dp_init(reply);
2712 error = dpif_netlink_dp_from_ofpbuf(reply, *bufp);
2715 ofpbuf_delete(*bufp);
2722 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
2723 * The caller must free '*bufp' when the reply is no longer needed ('reply'
2724 * will contain pointers into '*bufp'). */
2726 dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply,
2727 struct ofpbuf **bufp)
2729 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2730 struct dpif_netlink_dp request;
2732 dpif_netlink_dp_init(&request);
2733 request.cmd = OVS_DP_CMD_GET;
2734 request.dp_ifindex = dpif->dp_ifindex;
2736 return dpif_netlink_dp_transact(&request, reply, bufp);
2739 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2740 * by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a
2741 * positive errno value.
2743 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
2744 * while 'flow' is still in use. */
2746 dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
2747 const struct ofpbuf *buf)
2749 static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
2750 [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
2751 [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
2752 [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
2753 [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
2755 [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
2756 [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
2757 [OVS_FLOW_ATTR_UFID] = { .type = NL_A_UNSPEC, .optional = true,
2758 .min_len = sizeof(ovs_u128) },
2759 /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
2760 /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
2761 /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
2764 struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
2765 struct ovs_header *ovs_header;
2766 struct nlmsghdr *nlmsg;
2767 struct genlmsghdr *genl;
2770 dpif_netlink_flow_init(flow);
2772 ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
2773 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2774 genl = ofpbuf_try_pull(&b, sizeof *genl);
2775 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2776 if (!nlmsg || !genl || !ovs_header
2777 || nlmsg->nlmsg_type != ovs_flow_family
2778 || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
2779 ARRAY_SIZE(ovs_flow_policy))) {
2782 if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
2786 flow->nlmsg_flags = nlmsg->nlmsg_flags;
2787 flow->dp_ifindex = ovs_header->dp_ifindex;
2788 if (a[OVS_FLOW_ATTR_KEY]) {
2789 flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
2790 flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
2793 if (a[OVS_FLOW_ATTR_UFID]) {
2794 const ovs_u128 *ufid;
2796 ufid = nl_attr_get_unspec(a[OVS_FLOW_ATTR_UFID],
2797 nl_attr_get_size(a[OVS_FLOW_ATTR_UFID]));
2799 flow->ufid_present = true;
2801 if (a[OVS_FLOW_ATTR_MASK]) {
2802 flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
2803 flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
2805 if (a[OVS_FLOW_ATTR_ACTIONS]) {
2806 flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
2807 flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
2809 if (a[OVS_FLOW_ATTR_STATS]) {
2810 flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
2812 if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
2813 flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
2815 if (a[OVS_FLOW_ATTR_USED]) {
2816 flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
2821 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
2822 * followed by Netlink attributes corresponding to 'flow'. */
2824 dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow,
2827 struct ovs_header *ovs_header;
2829 nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family,
2830 NLM_F_REQUEST | flow->nlmsg_flags,
2831 flow->cmd, OVS_FLOW_VERSION);
2833 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2834 ovs_header->dp_ifindex = flow->dp_ifindex;
2836 if (flow->ufid_present) {
2837 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_UFID, &flow->ufid,
2840 if (flow->ufid_terse) {
2841 nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS,
2842 OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK
2843 | OVS_UFID_F_OMIT_ACTIONS);
2845 if (!flow->ufid_terse || !flow->ufid_present) {
2846 if (flow->key_len) {
2847 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_KEY,
2848 flow->key, flow->key_len);
2851 if (flow->mask_len) {
2852 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_MASK,
2853 flow->mask, flow->mask_len);
2855 if (flow->actions || flow->actions_len) {
2856 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
2857 flow->actions, flow->actions_len);
2861 /* We never need to send these to the kernel. */
2862 ovs_assert(!flow->stats);
2863 ovs_assert(!flow->tcp_flags);
2864 ovs_assert(!flow->used);
2867 nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR);
2870 nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
2874 /* Clears 'flow' to "empty" values. */
2876 dpif_netlink_flow_init(struct dpif_netlink_flow *flow)
2878 memset(flow, 0, sizeof *flow);
2881 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2882 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2883 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2884 * result of the command is expected to be a flow also, which is decoded and
2885 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
2886 * is no longer needed ('reply' will contain pointers into '*bufp'). */
2888 dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
2889 struct dpif_netlink_flow *reply,
2890 struct ofpbuf **bufp)
2892 struct ofpbuf *request_buf;
2895 ovs_assert((reply != NULL) == (bufp != NULL));
2898 request->nlmsg_flags |= NLM_F_ECHO;
2901 request_buf = ofpbuf_new(1024);
2902 dpif_netlink_flow_to_ofpbuf(request, request_buf);
2903 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2904 ofpbuf_delete(request_buf);
2908 error = dpif_netlink_flow_from_ofpbuf(reply, *bufp);
2911 dpif_netlink_flow_init(reply);
2912 ofpbuf_delete(*bufp);
2920 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow,
2921 struct dpif_flow_stats *stats)
2924 stats->n_packets = get_32aligned_u64(&flow->stats->n_packets);
2925 stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes);
2927 stats->n_packets = 0;
2930 stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
2931 stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
2934 /* Logs information about a packet that was recently lost in 'ch' (in
2937 report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx,
2938 uint32_t handler_id)
2940 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
2943 if (VLOG_DROP_WARN(&rl)) {
2948 if (ch->last_poll != LLONG_MIN) {
2949 ds_put_format(&s, " (last polled %lld ms ago)",
2950 time_msec() - ch->last_poll);
2953 VLOG_WARN("%s: lost packet on port channel %u of handler %u",
2954 dpif_name(&dpif->dpif), ch_idx, handler_id);