2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "dpif-netlink.h"
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
31 #include <sys/epoll.h>
36 #include "dpif-provider.h"
37 #include "dynamic-string.h"
39 #include "fat-rwlock.h"
41 #include "netdev-linux.h"
42 #include "netdev-vport.h"
43 #include "netlink-notifier.h"
44 #include "netlink-socket.h"
49 #include "poll-loop.h"
54 #include "unaligned.h"
58 VLOG_DEFINE_THIS_MODULE(dpif_netlink);
64 enum { MAX_PORTS = USHRT_MAX };
66 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
67 * missing if we have old headers. */
68 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
70 struct dpif_netlink_dp {
71 /* Generic Netlink header. */
74 /* struct ovs_header. */
78 const char *name; /* OVS_DP_ATTR_NAME. */
79 const uint32_t *upcall_pid; /* OVS_DP_ATTR_UPCALL_PID. */
80 uint32_t user_features; /* OVS_DP_ATTR_USER_FEATURES */
81 const struct ovs_dp_stats *stats; /* OVS_DP_ATTR_STATS. */
82 const struct ovs_dp_megaflow_stats *megaflow_stats;
83 /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
86 static void dpif_netlink_dp_init(struct dpif_netlink_dp *);
87 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *,
88 const struct ofpbuf *);
89 static void dpif_netlink_dp_dump_start(struct nl_dump *);
90 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
91 struct dpif_netlink_dp *reply,
92 struct ofpbuf **bufp);
93 static int dpif_netlink_dp_get(const struct dpif *,
94 struct dpif_netlink_dp *reply,
95 struct ofpbuf **bufp);
97 struct dpif_netlink_flow {
98 /* Generic Netlink header. */
101 /* struct ovs_header. */
102 unsigned int nlmsg_flags;
107 * The 'stats' member points to 64-bit data that might only be aligned on
108 * 32-bit boundaries, so get_unaligned_u64() should be used to access its
111 * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
112 * the Netlink version of the command, even if actions_len is zero. */
113 const struct nlattr *key; /* OVS_FLOW_ATTR_KEY. */
115 const struct nlattr *mask; /* OVS_FLOW_ATTR_MASK. */
117 const struct nlattr *actions; /* OVS_FLOW_ATTR_ACTIONS. */
119 const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
120 const uint8_t *tcp_flags; /* OVS_FLOW_ATTR_TCP_FLAGS. */
121 const ovs_32aligned_u64 *used; /* OVS_FLOW_ATTR_USED. */
122 bool clear; /* OVS_FLOW_ATTR_CLEAR. */
123 bool probe; /* OVS_FLOW_ATTR_PROBE. */
126 static void dpif_netlink_flow_init(struct dpif_netlink_flow *);
127 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *,
128 const struct ofpbuf *);
129 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *,
131 static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
132 struct dpif_netlink_flow *reply,
133 struct ofpbuf **bufp);
134 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
135 struct dpif_flow_stats *);
136 static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *,
137 const struct dpif_netlink_flow *);
139 /* One of the dpif channels between the kernel and userspace. */
140 struct dpif_channel {
141 struct nl_sock *sock; /* Netlink socket. */
142 long long int last_poll; /* Last time this channel was polled. */
146 #define VPORT_SOCK_POOL_SIZE 1
147 /* On Windows, there is no native support for epoll. There are equivalent
148 * interfaces though, that are not used currently. For simplicity, a pool of
149 * netlink sockets is used. Each socket is represented by 'struct
150 * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be
151 * sharing the same socket. In the future, we can add a reference count and
153 struct dpif_windows_vport_sock {
154 struct nl_sock *nl_sock; /* netlink socket. */
158 struct dpif_handler {
159 struct dpif_channel *channels;/* Array of channels for each handler. */
160 struct epoll_event *epoll_events;
161 int epoll_fd; /* epoll fd that includes channel socks. */
162 int n_events; /* Num events returned by epoll_wait(). */
163 int event_offset; /* Offset into 'epoll_events'. */
166 /* Pool of sockets. */
167 struct dpif_windows_vport_sock *vport_sock_pool;
168 size_t last_used_pool_idx; /* Index to aid in allocating a
169 socket in the pool to a port. */
173 /* Datapath interface for the openvswitch Linux kernel module. */
174 struct dpif_netlink {
178 /* Upcall messages. */
179 struct fat_rwlock upcall_lock;
180 struct dpif_handler *handlers;
181 uint32_t n_handlers; /* Num of upcall handlers. */
182 int uc_array_size; /* Size of 'handler->channels' and */
183 /* 'handler->epoll_events'. */
185 /* Change notification. */
186 struct nl_sock *port_notifier; /* vport multicast group subscriber. */
187 bool refresh_channels;
190 static void report_loss(struct dpif_netlink *, struct dpif_channel *,
191 uint32_t ch_idx, uint32_t handler_id);
193 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
195 /* Generic Netlink family numbers for OVS.
197 * Initialized by dpif_netlink_init(). */
198 static int ovs_datapath_family;
199 static int ovs_vport_family;
200 static int ovs_flow_family;
201 static int ovs_packet_family;
203 /* Generic Netlink multicast groups for OVS.
205 * Initialized by dpif_netlink_init(). */
206 static unsigned int ovs_vport_mcgroup;
208 static int dpif_netlink_init(void);
209 static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
210 static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
211 odp_port_t port_no, uint32_t hash);
212 static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
213 static int dpif_netlink_refresh_channels(struct dpif_netlink *,
214 uint32_t n_handlers);
215 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
217 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *,
218 const struct ofpbuf *);
/* Downcasts a generic 'dpif' to its dpif-netlink representation.
 * Asserts (via dpif_assert_class) that 'dpif' really belongs to
 * dpif_netlink_class before doing the container_of conversion. */
220 static struct dpif_netlink *
221 dpif_netlink_cast(const struct dpif *dpif)
223 dpif_assert_class(dpif, &dpif_netlink_class);
224 return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
/* Enumerates all kernel datapaths via a Netlink dump and adds each datapath
 * name to 'all_dps'.  Returns the status of nl_dump_done() (0 on success).
 * Records that fail to parse are silently skipped. */
228 dpif_netlink_enumerate(struct sset *all_dps,
229 const struct dpif_class *dpif_class OVS_UNUSED)
232 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
233 struct ofpbuf msg, buf;
236 error = dpif_netlink_init();
241 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
242 dpif_netlink_dp_dump_start(&dump);
243 while (nl_dump_next(&dump, &msg, &buf)) {
244 struct dpif_netlink_dp dp;
246 if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
247 sset_add(all_dps, dp.name);
251 return nl_dump_done(&dump);
/* Opens (or, with 'create', creates) the kernel datapath named 'name' and on
 * success stores the new dpif in '*dpifp'.  When not creating, the request
 * uses OVS_DP_CMD_SET so that the user-feature flags are still reported to
 * the kernel.  Requests the UNALIGNED and per-vport-PIDs user features. */
255 dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
256 bool create, struct dpif **dpifp)
258 struct dpif_netlink_dp dp_request, dp;
263 error = dpif_netlink_init();
268 /* Create or look up datapath. */
269 dpif_netlink_dp_init(&dp_request);
271 dp_request.cmd = OVS_DP_CMD_NEW;
273 dp_request.upcall_pid = &upcall_pid;
275 /* Use OVS_DP_CMD_SET to report user features */
276 dp_request.cmd = OVS_DP_CMD_SET;
278 dp_request.name = name;
279 dp_request.user_features |= OVS_DP_F_UNALIGNED;
280 dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
281 error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
286 error = open_dpif(&dp, dpifp);
/* Allocates and initializes a dpif_netlink wrapper around the kernel
 * datapath described by 'dp', storing the result in '*dpifp'.  The
 * datapath's ifindex doubles as the netflow engine type/id. */
292 open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp)
294 struct dpif_netlink *dpif;
296 dpif = xzalloc(sizeof *dpif);
297 dpif->port_notifier = NULL;
298 fat_rwlock_init(&dpif->upcall_lock);
300 dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name,
301 dp->dp_ifindex, dp->dp_ifindex);
303 dpif->dp_ifindex = dp->dp_ifindex;
304 *dpifp = &dpif->dpif;
309 /* Destroys the netlink sockets pointed by the elements in 'socksp'
310 * and frees the 'socksp'. */
312 vport_del_socksp__(struct nl_sock **socksp, uint32_t n_socks)
316 for (i = 0; i < n_socks; i++) {
317 nl_sock_destroy(socksp[i]);
323 /* Creates an array of netlink sockets. Returns an array of the
324 * corresponding pointers. Records the error in 'error'. */
325 static struct nl_sock **
326 vport_create_socksp__(uint32_t n_socks, int *error)
328 struct nl_sock **socksp = xzalloc(n_socks * sizeof *socksp);
331 for (i = 0; i < n_socks; i++) {
332 *error = nl_sock_create(NETLINK_GENERIC, &socksp[i]);
/* On any creation failure, tear down whatever was created so far. */
341 vport_del_socksp__(socksp, n_socks);
/* (Windows path.)  Unsubscribes and destroys every netlink socket in
 * 'handler''s vport socket pool, then frees the pool itself. */
348 vport_delete_sock_pool(struct dpif_handler *handler)
349 OVS_REQ_WRLOCK(dpif->upcall_lock)
351 if (handler->vport_sock_pool) {
353 struct dpif_windows_vport_sock *sock_pool =
354 handler->vport_sock_pool;
356 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
357 if (sock_pool[i].nl_sock) {
358 nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
359 nl_sock_destroy(sock_pool[i].nl_sock);
360 sock_pool[i].nl_sock = NULL;
364 free(handler->vport_sock_pool);
365 handler->vport_sock_pool = NULL;
/* (Windows path.)  Allocates 'handler''s pool of VPORT_SOCK_POOL_SIZE
 * netlink sockets and subscribes each one to packet reception.  On any
 * failure the partially built pool is torn down via
 * vport_delete_sock_pool(). */
370 vport_create_sock_pool(struct dpif_handler *handler)
371 OVS_REQ_WRLOCK(dpif->upcall_lock)
373 struct dpif_windows_vport_sock *sock_pool;
377 sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
378 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
379 error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
384 /* Enable the netlink socket to receive packets. This is equivalent to
385 * calling nl_sock_join_mcgroup() to receive events. */
386 error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
392 handler->vport_sock_pool = sock_pool;
393 handler->last_used_pool_idx = 0;
397 vport_delete_sock_pool(handler);
401 /* Returns an array of pointers to netlink sockets, one per handler, picked
402 * from each handler's pool in round-robin order. Records the error in
402 * 'error'. */
403 static struct nl_sock **
404 vport_create_socksp_windows(struct dpif_netlink *dpif, int *error)
405 OVS_REQ_WRLOCK(dpif->upcall_lock)
407 uint32_t n_socks = dpif->n_handlers;
408 struct nl_sock **socksp;
411 ovs_assert(n_socks <= 1);
412 socksp = xzalloc(n_socks * sizeof *socksp);
414 /* Pick netlink sockets to use in a round-robin fashion from each
415 * handler's pool of sockets. */
416 for (i = 0; i < n_socks; i++) {
417 struct dpif_handler *handler = &dpif->handlers[i];
418 struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
419 size_t index = handler->last_used_pool_idx;
421 /* A pool of sockets is allocated when the handler is initialized. */
422 if (sock_pool == NULL) {
428 ovs_assert(index < VPORT_SOCK_POOL_SIZE);
429 socksp[i] = sock_pool[index].nl_sock;
431 ovs_assert(socksp[i]);
/* Advance the round-robin cursor, wrapping at the end of the pool. */
432 index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
433 handler->last_used_pool_idx = index;
440 vport_del_socksp_windows(struct dpif_netlink *dpif, struct nl_sock **socksp)
/* Platform dispatcher: creates one upcall socket per handler, using the
 * Windows pool-based path or the generic per-socket path.  Records any
 * failure in 'error'. */
446 static struct nl_sock **
447 vport_create_socksp(struct dpif_netlink *dpif, int *error)
450 return vport_create_socksp_windows(dpif, error);
452 return vport_create_socksp__(dpif->n_handlers, error);
/* Platform dispatcher for releasing the sockets created by
 * vport_create_socksp(). */
457 vport_del_socksp(struct dpif_netlink *dpif, struct nl_sock **socksp)
460 vport_del_socksp_windows(dpif, socksp);
462 vport_del_socksp__(socksp, dpif->n_handlers);
466 /* Given the array of pointers to netlink sockets 'socksp', returns
467 * the array of corresponding pids. If the 'socksp' is NULL, returns
468 * a single-element array of value 0. */
470 vport_socksp_to_pids(struct nl_sock **socksp, uint32_t n_socks)
475 pids = xzalloc(sizeof *pids);
479 pids = xzalloc(n_socks * sizeof *pids);
480 for (i = 0; i < n_socks; i++) {
481 pids[i] = nl_sock_pid(socksp[i]);
488 /* Given the port number 'port_idx', extracts the pids of netlink sockets
489 * associated to the port and assigns it to 'upcall_pids'. */
491 vport_get_pids(struct dpif_netlink *dpif, uint32_t port_idx,
492 uint32_t **upcall_pids)
497 /* Since the nl_sock can only be assigned in either all
498 * or none "dpif->handlers" channels, the following check
500 if (!dpif->handlers[0].channels[port_idx].sock) {
/* On Windows at most one handler exists; see vport_create_socksp_windows. */
503 ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
505 pids = xzalloc(dpif->n_handlers * sizeof *pids);
507 for (i = 0; i < dpif->n_handlers; i++) {
508 pids[i] = nl_sock_pid(dpif->handlers[i].channels[port_idx].sock);
/* Registers the per-handler upcall sockets 'socksp' for 'port_no': grows the
 * 'channels'/'epoll_events' arrays if the port index is beyond their current
 * size, then adds each socket's fd to the matching handler's epoll set with
 * the port index as epoll user data.  On failure the already-registered
 * sockets are rolled back. */
517 vport_add_channels(struct dpif_netlink *dpif, odp_port_t port_no,
518 struct nl_sock **socksp)
520 struct epoll_event event;
521 uint32_t port_idx = odp_to_u32(port_no);
525 if (dpif->handlers == NULL) {
529 /* We assume that the datapath densely chooses port numbers, which can
530 * therefore be used as an index into 'channels' and 'epoll_events' of
531 * 'dpif->handler'. */
532 if (port_idx >= dpif->uc_array_size) {
533 uint32_t new_size = port_idx + 1;
535 if (new_size > MAX_PORTS) {
536 VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
537 dpif_name(&dpif->dpif), port_no);
541 for (i = 0; i < dpif->n_handlers; i++) {
542 struct dpif_handler *handler = &dpif->handlers[i];
544 handler->channels = xrealloc(handler->channels,
545 new_size * sizeof *handler->channels);
/* NULL-fill the newly exposed slots so "no socket" is detectable. */
547 for (j = dpif->uc_array_size; j < new_size; j++) {
548 handler->channels[j].sock = NULL;
551 handler->epoll_events = xrealloc(handler->epoll_events,
552 new_size * sizeof *handler->epoll_events);
555 dpif->uc_array_size = new_size;
/* epoll user data carries the port index so handlers can find the channel. */
558 memset(&event, 0, sizeof event);
559 event.events = EPOLLIN;
560 event.data.u32 = port_idx;
562 for (i = 0; i < dpif->n_handlers; i++) {
563 struct dpif_handler *handler = &dpif->handlers[i];
566 if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(socksp[i]),
572 dpif->handlers[i].channels[port_idx].sock = socksp[i];
573 dpif->handlers[i].channels[port_idx].last_poll = LLONG_MIN;
/* Error rollback: de-register the sockets added before the failure. */
579 for (j = 0; j < i; j++) {
581 epoll_ctl(dpif->handlers[j].epoll_fd, EPOLL_CTL_DEL,
582 nl_sock_fd(socksp[j]), NULL);
584 dpif->handlers[j].channels[port_idx].sock = NULL;
/* Removes and destroys the upcall channels for 'port_no' in every handler,
 * and resets each handler's pending-epoll-event state so stale events for
 * the destroyed sockets are not processed. */
591 vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
593 uint32_t port_idx = odp_to_u32(port_no);
596 if (!dpif->handlers || port_idx >= dpif->uc_array_size) {
600 /* Since the sock can only be assigned in either all or none
601 * of "dpif->handlers" channels, the following check would
603 if (!dpif->handlers[0].channels[port_idx].sock) {
607 for (i = 0; i < dpif->n_handlers; i++) {
608 struct dpif_handler *handler = &dpif->handlers[i];
610 epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
611 nl_sock_fd(handler->channels[port_idx].sock), NULL);
612 nl_sock_destroy(handler->channels[port_idx].sock);
614 handler->channels[port_idx].sock = NULL;
615 handler->event_offset = handler->n_events = 0;
/* Tears down all upcall machinery: disables upcalls on every port (by
 * setting a zero upcall PID on the vport), deletes every channel, then
 * uninitializes and frees each handler.  Leaves the dpif with no handlers. */
620 destroy_all_channels(struct dpif_netlink *dpif)
621 OVS_REQ_WRLOCK(dpif->upcall_lock)
625 if (!dpif->handlers) {
629 for (i = 0; i < dpif->uc_array_size; i++ ) {
630 struct dpif_netlink_vport vport_request;
631 uint32_t upcall_pids = 0;
633 /* Since the sock can only be assigned in either all or none
634 * of "dpif->handlers" channels, the following check would
636 if (!dpif->handlers[0].channels[i].sock) {
640 /* Turn off upcalls. */
641 dpif_netlink_vport_init(&vport_request);
642 vport_request.cmd = OVS_VPORT_CMD_SET;
643 vport_request.dp_ifindex = dpif->dp_ifindex;
644 vport_request.port_no = u32_to_odp(i);
645 vport_request.upcall_pids = &upcall_pids;
646 dpif_netlink_vport_transact(&vport_request, NULL, NULL);
648 vport_del_channels(dpif, u32_to_odp(i));
651 for (i = 0; i < dpif->n_handlers; i++) {
652 struct dpif_handler *handler = &dpif->handlers[i];
654 dpif_netlink_handler_uninit(handler);
655 free(handler->epoll_events);
656 free(handler->channels);
659 free(dpif->handlers);
660 dpif->handlers = NULL;
661 dpif->n_handlers = 0;
662 dpif->uc_array_size = 0;
/* dpif 'close' callback: destroys the port notifier socket, all upcall
 * channels, and the upcall lock itself. */
666 dpif_netlink_close(struct dpif *dpif_)
668 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
670 nl_sock_destroy(dpif->port_notifier);
672 fat_rwlock_wrlock(&dpif->upcall_lock);
673 destroy_all_channels(dpif);
674 fat_rwlock_unlock(&dpif->upcall_lock);
676 fat_rwlock_destroy(&dpif->upcall_lock);
/* dpif 'destroy' callback: asks the kernel to delete the datapath via
 * OVS_DP_CMD_DEL.  Returns the transaction's error code. */
681 dpif_netlink_destroy(struct dpif *dpif_)
683 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
684 struct dpif_netlink_dp dp;
686 dpif_netlink_dp_init(&dp);
687 dp.cmd = OVS_DP_CMD_DEL;
688 dp.dp_ifindex = dpif->dp_ifindex;
689 return dpif_netlink_dp_transact(&dp, NULL, NULL);
/* dpif 'run' callback: if a vport deletion was noticed by the port poller,
 * rebuilds the upcall channels (the flag is cleared before refreshing). */
693 dpif_netlink_run(struct dpif *dpif_)
695 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
697 if (dpif->refresh_channels) {
698 dpif->refresh_channels = false;
699 fat_rwlock_wrlock(&dpif->upcall_lock);
700 dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
701 fat_rwlock_unlock(&dpif->upcall_lock);
/* Fills '*stats' from an OVS_DP_ATTR_STATS query.  The kernel stats are only
 * 32-bit aligned, hence get_32aligned_u64().  Megaflow stats are optional:
 * when absent (older kernels), the mask counters are reported as UINT*_MAX
 * sentinels. */
707 dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
709 struct dpif_netlink_dp dp;
713 error = dpif_netlink_dp_get(dpif_, &dp, &buf);
715 memset(stats, 0, sizeof *stats);
718 stats->n_hit = get_32aligned_u64(&dp.stats->n_hit);
719 stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
720 stats->n_lost = get_32aligned_u64(&dp.stats->n_lost);
721 stats->n_flows = get_32aligned_u64(&dp.stats->n_flows);
724 if (dp.megaflow_stats) {
725 stats->n_masks = dp.megaflow_stats->n_masks;
726 stats->n_mask_hit = get_32aligned_u64(
727 &dp.megaflow_stats->n_mask_hit);
729 stats->n_masks = UINT32_MAX;
730 stats->n_mask_hit = UINT64_MAX;
/* Maps a kernel vport type to its netdev type string.  For NETDEV vports the
 * name is looked up among existing netdevs, falling back to "system"; other
 * enum values map to fixed strings (elided here), and unknown values are
 * logged (rate-limited). */
738 get_vport_type(const struct dpif_netlink_vport *vport)
740 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
742 switch (vport->type) {
743 case OVS_VPORT_TYPE_NETDEV: {
744 const char *type = netdev_get_type_from_name(vport->name);
746 return type ? type : "system";
749 case OVS_VPORT_TYPE_INTERNAL:
752 case OVS_VPORT_TYPE_GENEVE:
755 case OVS_VPORT_TYPE_GRE:
758 case OVS_VPORT_TYPE_GRE64:
761 case OVS_VPORT_TYPE_VXLAN:
764 case OVS_VPORT_TYPE_LISP:
767 case OVS_VPORT_TYPE_UNSPEC:
768 case __OVS_VPORT_TYPE_MAX:
772 VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
773 vport->dp_ifindex, vport->name, (unsigned int) vport->type);
777 static enum ovs_vport_type
/* Inverse of get_vport_type(): maps a netdev type string to the kernel
 * vport type.  strstr() is used for the GRE cases so variants whose type
 * merely contains "gre"/"gre64" also match; "gre64" must be tested first
 * since "gre" is a substring of it.  Unknown types yield UNSPEC. */
778 netdev_to_ovs_vport_type(const struct netdev *netdev)
780 const char *type = netdev_get_type(netdev);
782 if (!strcmp(type, "tap") || !strcmp(type, "system")) {
783 return OVS_VPORT_TYPE_NETDEV;
784 } else if (!strcmp(type, "internal")) {
785 return OVS_VPORT_TYPE_INTERNAL;
786 } else if (!strcmp(type, "geneve")) {
787 return OVS_VPORT_TYPE_GENEVE;
788 } else if (strstr(type, "gre64")) {
789 return OVS_VPORT_TYPE_GRE64;
790 } else if (strstr(type, "gre")) {
791 return OVS_VPORT_TYPE_GRE;
792 } else if (!strcmp(type, "vxlan")) {
793 return OVS_VPORT_TYPE_VXLAN;
794 } else if (!strcmp(type, "lisp")) {
795 return OVS_VPORT_TYPE_LISP;
797 return OVS_VPORT_TYPE_UNSPEC;
/* Adds 'netdev' as a vport: creates per-handler upcall sockets (if handlers
 * exist), issues OVS_VPORT_CMD_NEW with the sockets' PIDs, then registers
 * the upcall channels.  On any failure, sockets (and, after a partial
 * success, the kernel vport) are cleaned up again.  '*port_nop' is both a
 * requested port number in and the assigned port number out. */
802 dpif_netlink_port_add__(struct dpif_netlink *dpif, struct netdev *netdev,
803 odp_port_t *port_nop)
804 OVS_REQ_WRLOCK(dpif->upcall_lock)
806 const struct netdev_tunnel_config *tnl_cfg;
807 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
808 const char *name = netdev_vport_get_dpif_port(netdev,
809 namebuf, sizeof namebuf);
810 const char *type = netdev_get_type(netdev);
811 struct dpif_netlink_vport request, reply;
813 uint64_t options_stub[64 / 8];
814 struct ofpbuf options;
815 struct nl_sock **socksp = NULL;
816 uint32_t *upcall_pids;
819 if (dpif->handlers) {
820 socksp = vport_create_socksp(dpif, &error);
826 dpif_netlink_vport_init(&request);
827 request.cmd = OVS_VPORT_CMD_NEW;
828 request.dp_ifindex = dpif->dp_ifindex;
829 request.type = netdev_to_ovs_vport_type(netdev);
830 if (request.type == OVS_VPORT_TYPE_UNSPEC) {
831 VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
832 "unsupported type `%s'",
833 dpif_name(&dpif->dpif), name, type);
834 vport_del_socksp(dpif, socksp);
839 if (request.type == OVS_VPORT_TYPE_NETDEV) {
841 /* XXX : Map appropriate Windows handle */
/* LRO is incompatible with the kernel datapath, so turn it off. */
843 netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
/* Tunnel ports with a destination port carry it as a vport option. */
847 tnl_cfg = netdev_get_tunnel_config(netdev);
848 if (tnl_cfg && tnl_cfg->dst_port != 0) {
849 ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
850 nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
851 ntohs(tnl_cfg->dst_port));
852 request.options = ofpbuf_data(&options);
853 request.options_len = ofpbuf_size(&options);
856 request.port_no = *port_nop;
857 upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
858 request.n_upcall_pids = socksp ? dpif->n_handlers : 1;
859 request.upcall_pids = upcall_pids;
861 error = dpif_netlink_vport_transact(&request, &reply, &buf);
863 *port_nop = reply.port_no;
865 if (error == EBUSY && *port_nop != ODPP_NONE) {
866 VLOG_INFO("%s: requested port %"PRIu32" is in use",
867 dpif_name(&dpif->dpif), *port_nop);
870 vport_del_socksp(dpif, socksp);
875 error = vport_add_channels(dpif, *port_nop, socksp);
877 VLOG_INFO("%s: could not add channel for port %s",
878 dpif_name(&dpif->dpif), name);
880 /* Delete the port. */
881 dpif_netlink_vport_init(&request);
882 request.cmd = OVS_VPORT_CMD_DEL;
883 request.dp_ifindex = dpif->dp_ifindex;
884 request.port_no = *port_nop;
885 dpif_netlink_vport_transact(&request, NULL, NULL);
886 vport_del_socksp(dpif, socksp);
/* dpif 'port_add' callback: takes the upcall write-lock around
 * dpif_netlink_port_add__(). */
900 dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev,
901 odp_port_t *port_nop)
903 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
906 fat_rwlock_wrlock(&dpif->upcall_lock);
907 error = dpif_netlink_port_add__(dpif, netdev, port_nop);
908 fat_rwlock_unlock(&dpif->upcall_lock);
/* Deletes the kernel vport 'port_no' (OVS_VPORT_CMD_DEL) and tears down its
 * upcall channels.  Requires the upcall write-lock. */
914 dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
915 OVS_REQ_WRLOCK(dpif->upcall_lock)
917 struct dpif_netlink_vport vport;
920 dpif_netlink_vport_init(&vport);
921 vport.cmd = OVS_VPORT_CMD_DEL;
922 vport.dp_ifindex = dpif->dp_ifindex;
923 vport.port_no = port_no;
924 error = dpif_netlink_vport_transact(&vport, NULL, NULL);
926 vport_del_channels(dpif, port_no);
/* dpif 'port_del' callback: takes the upcall write-lock around
 * dpif_netlink_port_del__(). */
932 dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no)
934 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
937 fat_rwlock_wrlock(&dpif->upcall_lock);
938 error = dpif_netlink_port_del__(dpif, port_no);
939 fat_rwlock_unlock(&dpif->upcall_lock);
/* Queries a vport either by 'port_no' or by 'port_name' (OVS_VPORT_CMD_GET).
 * A by-name lookup may find a port in a different datapath; that case is
 * rejected so callers only learn about ports in 'dpif'.  On success, fills
 * 'dpif_port' (if nonnull) with heap-allocated name/type strings that the
 * caller owns. */
945 dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
946 const char *port_name, struct dpif_port *dpif_port)
948 struct dpif_netlink_vport request;
949 struct dpif_netlink_vport reply;
953 dpif_netlink_vport_init(&request);
954 request.cmd = OVS_VPORT_CMD_GET;
955 request.dp_ifindex = dpif->dp_ifindex;
956 request.port_no = port_no;
957 request.name = port_name;
959 error = dpif_netlink_vport_transact(&request, &reply, &buf);
961 if (reply.dp_ifindex != request.dp_ifindex) {
962 /* A query by name reported that 'port_name' is in some datapath
963 * other than 'dpif', but the caller wants to know about 'dpif'. */
965 } else if (dpif_port) {
966 dpif_port->name = xstrdup(reply.name);
967 dpif_port->type = xstrdup(get_vport_type(&reply));
968 dpif_port->port_no = reply.port_no;
/* dpif callback: query a port by number (name lookup disabled). */
976 dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no,
977 struct dpif_port *dpif_port)
979 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
981 return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port);
/* dpif callback: query a port by device name (port number 0 is ignored by
 * the kernel when a name is supplied). */
985 dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
986 struct dpif_port *dpif_port)
988 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
990 return dpif_netlink_port_query__(dpif, 0, devname, dpif_port);
/* Returns the Netlink PID that should receive upcalls for 'port_no', chosen
 * by 'hash' across handlers.  Out-of-range port numbers (including
 * ODPP_NONE) fall back to channel index 0.  Returns 0 (implicitly, via the
 * elided initialization) when no socket is assigned. */
994 dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
995 odp_port_t port_no, uint32_t hash)
996 OVS_REQ_RDLOCK(dpif->upcall_lock)
998 uint32_t port_idx = odp_to_u32(port_no);
1001 if (dpif->handlers && dpif->uc_array_size > 0) {
1002 /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
1003 * channel, since it is not heavily loaded. */
1004 uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;
1005 struct dpif_handler *h = &dpif->handlers[hash % dpif->n_handlers];
1007 /* Needs to check in case the socket pointer is changed in between
1008 * the holding of upcall_lock. A known case happens when the main
1009 * thread deletes the vport while the handler thread is handling
1010 * the upcall from that port. */
1011 if (h->channels[idx].sock) {
1012 pid = nl_sock_pid(h->channels[idx].sock);
/* dpif 'port_get_pid' callback: read-locked wrapper around
 * dpif_netlink_port_get_pid__(). */
1020 dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no,
1023 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1026 fat_rwlock_rdlock(&dpif->upcall_lock);
1027 ret = dpif_netlink_port_get_pid__(dpif, port_no, hash);
1028 fat_rwlock_unlock(&dpif->upcall_lock);
/* dpif 'flow_flush' callback: OVS_FLOW_CMD_DEL with no key deletes every
 * flow in the datapath. */
1034 dpif_netlink_flow_flush(struct dpif *dpif_)
1036 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1037 struct dpif_netlink_flow flow;
1039 dpif_netlink_flow_init(&flow);
1040 flow.cmd = OVS_FLOW_CMD_DEL;
1041 flow.dp_ifindex = dpif->dp_ifindex;
1042 return dpif_netlink_flow_transact(&flow, NULL, NULL);
1045 struct dpif_netlink_port_state {
1046 struct nl_dump dump;
/* Begins a Netlink dump of all vports in 'dpif' (OVS_VPORT_CMD_GET).
 * nl_dump_start() takes ownership of the request buffer. */
1051 dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif,
1052 struct nl_dump *dump)
1054 struct dpif_netlink_vport request;
1057 dpif_netlink_vport_init(&request);
1058 request.cmd = OVS_VPORT_CMD_GET;
1059 request.dp_ifindex = dpif->dp_ifindex;
1061 buf = ofpbuf_new(1024);
1062 dpif_netlink_vport_to_ofpbuf(&request, buf);
1063 nl_dump_start(dump, NETLINK_GENERIC, buf);
/* dpif 'port_dump_start' callback: allocates the iteration state and starts
 * the underlying Netlink dump. */
1068 dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep)
1070 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1071 struct dpif_netlink_port_state *state;
1073 *statep = state = xmalloc(sizeof *state);
1074 dpif_netlink_port_dump_start__(dpif, &state->dump);
1076 ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE);
/* Fetches the next vport record from 'dump' into 'vport', using 'buffer' as
 * backing storage.  Parse failures are logged (rate-limited). */
1081 dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif,
1082 struct nl_dump *dump,
1083 struct dpif_netlink_vport *vport,
1084 struct ofpbuf *buffer)
1089 if (!nl_dump_next(dump, &buf, buffer)) {
1093 error = dpif_netlink_vport_from_ofpbuf(vport, &buf);
1095 VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)",
1096 dpif_name(&dpif->dpif), ovs_strerror(error));
/* dpif 'port_dump_next' callback: converts the next vport record into a
 * 'dpif_port'.  The returned strings point into dump state, hence the
 * CONST_CASTs — the caller must not free them. */
1102 dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_,
1103 struct dpif_port *dpif_port)
1105 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1106 struct dpif_netlink_port_state *state = state_;
1107 struct dpif_netlink_vport vport;
1110 error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport,
1115 dpif_port->name = CONST_CAST(char *, vport.name);
1116 dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
1117 dpif_port->port_no = vport.port_no;
/* dpif 'port_dump_done' callback: finishes the Netlink dump and releases the
 * iteration state. */
1122 dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
1124 struct dpif_netlink_port_state *state = state_;
1125 int error = nl_dump_done(&state->dump);
1127 ofpbuf_uninit(&state->buf);
/* dpif 'port_poll' callback: reports the name of a port that has changed.
 * Lazily creates and subscribes the multicast notifier socket on first call;
 * then reads vport notifications, returning the changed device's name (the
 * caller frees it).  A deleted vport also schedules a channel refresh. */
1133 dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
1135 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1137 /* Lazily create the Netlink socket to listen for notifications. */
1138 if (!dpif->port_notifier) {
1139 struct nl_sock *sock;
1142 error = nl_sock_create(NETLINK_GENERIC, &sock);
1147 error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
1149 nl_sock_destroy(sock);
1152 dpif->port_notifier = sock;
1154 /* We have no idea of the current state so report that everything
1160 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1161 uint64_t buf_stub[4096 / 8];
1165 ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
1166 error = nl_sock_recv(dpif->port_notifier, &buf, false);
1168 struct dpif_netlink_vport vport;
1170 error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
/* Only NEW/DEL/SET notifications for our own datapath are relevant. */
1172 if (vport.dp_ifindex == dpif->dp_ifindex
1173 && (vport.cmd == OVS_VPORT_CMD_NEW
1174 || vport.cmd == OVS_VPORT_CMD_DEL
1175 || vport.cmd == OVS_VPORT_CMD_SET)) {
1176 VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
1177 dpif->dpif.full_name, vport.name, vport.cmd);
1178 if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
1179 dpif->refresh_channels = true;
1181 *devnamep = xstrdup(vport.name);
1182 ofpbuf_uninit(&buf);
1186 } else if (error != EAGAIN) {
1187 VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
1188 ovs_strerror(error));
/* On persistent errors, drop queued notifications to resynchronize. */
1189 nl_sock_drain(dpif->port_notifier);
1193 ofpbuf_uninit(&buf);
/* dpif 'port_poll_wait' callback: arms the poll loop on the notifier socket,
 * or wakes immediately if the notifier has not been created yet. */
1201 dpif_netlink_port_poll_wait(const struct dpif *dpif_)
1203 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1205 if (dpif->port_notifier) {
1206 nl_sock_wait(dpif->port_notifier, POLLIN);
1208 poll_immediate_wake();
/* Initializes 'request' as an OVS_FLOW_CMD_GET for the flow identified by
 * 'key'/'key_len' in 'dpif'. */
1213 dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
1214 const struct nlattr *key, size_t key_len,
1215 struct dpif_netlink_flow *request)
1217 dpif_netlink_flow_init(request);
1218 request->cmd = OVS_FLOW_CMD_GET;
1219 request->dp_ifindex = dpif->dp_ifindex;
1221 request->key_len = key_len;
/* Looks up the flow with the given 'key' and returns the kernel's reply in
 * '*reply' (backed by '*bufp', which the caller must free). */
1225 dpif_netlink_flow_get(const struct dpif_netlink *dpif,
1226 const struct nlattr *key, size_t key_len,
1227 struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1229 struct dpif_netlink_flow request;
1231 dpif_netlink_init_flow_get(dpif, key, key_len, &request);
1232 return dpif_netlink_flow_transact(&request, reply, bufp);
/* Translates a dpif 'put' operation into a Netlink flow request.  A
 * zero-length dummy action is substituted when 'put' has no actions so
 * OVS_FLOW_ATTR_ACTIONS is always present; NLM_F_CREATE is set unless the
 * caller asked for modify-only semantics. */
1236 dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
1237 const struct dpif_flow_put *put,
1238 struct dpif_netlink_flow *request)
1240 static const struct nlattr dummy_action;
1242 dpif_netlink_flow_init(request);
1243 request->cmd = (put->flags & DPIF_FP_CREATE
1244 ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
1245 request->dp_ifindex = dpif->dp_ifindex;
1246 request->key = put->key;
1247 request->key_len = put->key_len;
1248 request->mask = put->mask;
1249 request->mask_len = put->mask_len;
1250 /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
1251 request->actions = (put->actions
1253 : CONST_CAST(struct nlattr *, &dummy_action));
1254 request->actions_len = put->actions_len;
1255 if (put->flags & DPIF_FP_ZERO_STATS) {
1256 request->clear = true;
1258 if (put->flags & DPIF_FP_PROBE) {
1259 request->probe = true;
1261 request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
/* Translates a dpif 'del' operation into an OVS_FLOW_CMD_DEL request keyed
 * by 'del''s flow key. */
1265 dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
1266 const struct dpif_flow_del *del,
1267 struct dpif_netlink_flow *request)
1269 dpif_netlink_flow_init(request);
1270 request->cmd = OVS_FLOW_CMD_DEL;
1271 request->dp_ifindex = dpif->dp_ifindex;
1272 request->key = del->key;
1273 request->key_len = del->key_len;
1276 struct dpif_netlink_flow_dump {
1277 struct dpif_flow_dump up;
1278 struct nl_dump nl_dump;
/* Downcasts a generic flow dump to its dpif-netlink representation. */
1282 static struct dpif_netlink_flow_dump *
1283 dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
1285 return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
1288 static struct dpif_flow_dump *
/* dpif 'flow_dump_create' callback: allocates the shared dump object and
 * starts a Netlink dump of all flows (OVS_FLOW_CMD_GET).  'status' holds the
 * first error seen by any dump thread. */
1289 dpif_netlink_flow_dump_create(const struct dpif *dpif_)
1291 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1292 struct dpif_netlink_flow_dump *dump;
1293 struct dpif_netlink_flow request;
1296 dump = xmalloc(sizeof *dump);
1297 dpif_flow_dump_init(&dump->up, dpif_);
1299 dpif_netlink_flow_init(&request);
1300 request.cmd = OVS_FLOW_CMD_GET;
1301 request.dp_ifindex = dpif->dp_ifindex;
1303 buf = ofpbuf_new(1024);
1304 dpif_netlink_flow_to_ofpbuf(&request, buf);
1305 nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
1307 atomic_init(&dump->status, 0);
/* dpif 'flow_dump_destroy' callback: finishes the Netlink dump and returns
 * the first recorded per-thread error, or the dump's own status. */
1313 dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
1315 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1316 unsigned int nl_status = nl_dump_done(&dump->nl_dump);
1319 /* No other thread has access to 'dump' at this point. */
1320 atomic_read_relaxed(&dump->status, &dump_status);
1322 return dump_status ? dump_status : nl_status;
1325 struct dpif_netlink_flow_dump_thread {
1326 struct dpif_flow_dump_thread up;
1327 struct dpif_netlink_flow_dump *dump;
1328 struct dpif_netlink_flow flow;
1329 struct dpif_flow_stats stats;
1330 struct ofpbuf nl_flows; /* Always used to store flows. */
1331 struct ofpbuf *nl_actions; /* Used if kernel does not supply actions. */
/* Downcasts generic per-thread dump state to the dpif-netlink type. */
1334 static struct dpif_netlink_flow_dump_thread *
1335 dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
1337     return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up);
/* Creates per-thread state for iterating shared dump 'dump_'.  The
 * 'nl_flows' buffer is sized to hold one kernel dump batch; 'nl_actions'
 * is allocated lazily only when a flow arrives without actions. */
1340 static struct dpif_flow_dump_thread *
1341 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_)
1343 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1344 struct dpif_netlink_flow_dump_thread *thread;
1346 thread = xmalloc(sizeof *thread);
1347 dpif_flow_dump_thread_init(&thread->up, &dump->up);
1348 thread->dump = dump;
1349 ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE);
1350 thread->nl_actions = NULL;
/* Releases per-thread dump state 'thread_'.  (ofpbuf_delete() tolerates a
 * NULL 'nl_actions', which is its state unless a flow lacked actions.) */
1356 dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
1358 struct dpif_netlink_flow_dump_thread *thread
1359 = dpif_netlink_flow_dump_thread_cast(thread_);
1361 ofpbuf_uninit(&thread->nl_flows);
1362 ofpbuf_delete(thread->nl_actions);
/* Fills '*dpif_flow' from decoded kernel flow '*datapath_flow'.  The key,
 * mask, and actions are borrowed pointers into the buffer that backs
 * 'datapath_flow'; the caller must keep that buffer alive.  Also derives
 * the flow's hash/ufid from the key and converts the kernel stats. */
1367 dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow,
1368 const struct dpif_netlink_flow *datapath_flow)
1370 dpif_flow->key = datapath_flow->key;
1371 dpif_flow->key_len = datapath_flow->key_len;
1372 dpif_flow->mask = datapath_flow->mask;
1373 dpif_flow->mask_len = datapath_flow->mask_len;
1374 dpif_flow->actions = datapath_flow->actions;
1375 dpif_flow->actions_len = datapath_flow->actions_len;
1376 dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len,
1378 dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
/* Fetches up to 'max_flows' flows from the shared dump into 'flows[]',
 * returning how many were stored.  Pointers in 'flows[]' remain valid only
 * until the next call on this thread (they point into 'thread->nl_flows'
 * or 'thread->nl_actions').  Errors are folded into 'dump->status' so that
 * dpif_netlink_flow_dump_destroy() can report the first one. */
1382 dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
1383 struct dpif_flow *flows, int max_flows)
1385 struct dpif_netlink_flow_dump_thread *thread
1386 = dpif_netlink_flow_dump_thread_cast(thread_);
1387 struct dpif_netlink_flow_dump *dump = thread->dump;
1388 struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
/* Discard the actions buffer from the previous batch, if any. */
1391 ofpbuf_delete(thread->nl_actions);
1392 thread->nl_actions = NULL;
/* NOTE(review): the loop header and several statements between the
 * original lines are missing from this extract; the visible condition is
 * only the second half of the loop's controlling expression. */
1396 || (n_flows < max_flows && ofpbuf_size(&thread->nl_flows))) {
1397 struct dpif_netlink_flow datapath_flow;
1398 struct ofpbuf nl_flow;
1401 /* Try to grab another flow. */
1402 if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
1406 /* Convert the flow to our output format. */
1407 error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
1409 atomic_store_relaxed(&dump->status, error);
1413 if (datapath_flow.actions) {
1414 /* Common case: the flow includes actions. */
1415 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
1418 /* Rare case: the flow does not include actions. Retrieve this
1419 * individual flow again to get the actions. */
1420 error = dpif_netlink_flow_get(dpif, datapath_flow.key,
1421 datapath_flow.key_len,
1422 &datapath_flow, &thread->nl_actions);
1423 if (error == ENOENT) {
1424 VLOG_DBG("dumped flow disappeared on get");
1427 VLOG_WARN("error fetching dumped flow: %s",
1428 ovs_strerror(error));
1429 atomic_store_relaxed(&dump->status, error);
1433 /* Save this flow. Then exit, because we only have one buffer to
1434 * handle this case. */
1435 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
1444 dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
1447 struct ovs_header *k_exec;
1450 ofpbuf_prealloc_tailroom(buf, (64
1451 + ofpbuf_size(d_exec->packet)
1452 + ODP_KEY_METADATA_SIZE
1453 + d_exec->actions_len));
1455 nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
1456 OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);
1458 k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
1459 k_exec->dp_ifindex = dp_ifindex;
1461 nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
1462 ofpbuf_data(d_exec->packet),
1463 ofpbuf_size(d_exec->packet));
1465 key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
1466 odp_key_from_pkt_metadata(buf, &d_exec->md);
1467 nl_msg_end_nested(buf, key_ofs);
1469 nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
1470 d_exec->actions, d_exec->actions_len);
1471 if (d_exec->probe) {
1472 nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
/* Executes up to MAX_OPS operations from 'ops' against the kernel datapath
 * in a single batched Netlink transaction.  Works in three passes:
 * (1) encode each op into a per-op request buffer, (2) send them all with
 * nl_transact_multiple(), (3) decode each reply into the corresponding
 * op's result fields and set op->error. */
1479 dpif_netlink_operate__(struct dpif_netlink *dpif,
1480 struct dpif_op **ops, size_t n_ops)
/* Per-op scratch: a Netlink transaction plus stack-stub request/reply
 * buffers so the common case needs no heap allocation. */
1483 struct nl_transaction txn;
1485 struct ofpbuf request;
1486 uint64_t request_stub[1024 / 8];
1488 struct ofpbuf reply;
1489 uint64_t reply_stub[1024 / 8];
1492 struct nl_transaction *txnsp[MAX_OPS];
1495 ovs_assert(n_ops <= MAX_OPS);
/* Pass 1: encode each operation into its own request buffer. */
1496 for (i = 0; i < n_ops; i++) {
1497 struct op_auxdata *aux = &auxes[i];
1498 struct dpif_op *op = ops[i];
1499 struct dpif_flow_put *put;
1500 struct dpif_flow_del *del;
1501 struct dpif_execute *execute;
1502 struct dpif_flow_get *get;
1503 struct dpif_netlink_flow flow;
1505 ofpbuf_use_stub(&aux->request,
1506 aux->request_stub, sizeof aux->request_stub);
1507 aux->txn.request = &aux->request;
/* txn.reply stays NULL unless the op wants the kernel's echo. */
1509 ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
1510 aux->txn.reply = NULL;
1513 case DPIF_OP_FLOW_PUT:
1514 put = &op->u.flow_put;
1515 dpif_netlink_init_flow_put(dpif, put, &flow);
/* NLM_F_ECHO asks the kernel to return the flow so stats can be read. */
1517 flow.nlmsg_flags |= NLM_F_ECHO;
1518 aux->txn.reply = &aux->reply;
1520 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1523 case DPIF_OP_FLOW_DEL:
1524 del = &op->u.flow_del;
1525 dpif_netlink_init_flow_del(dpif, del, &flow);
1527 flow.nlmsg_flags |= NLM_F_ECHO;
1528 aux->txn.reply = &aux->reply;
1530 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1533 case DPIF_OP_EXECUTE:
1534 execute = &op->u.execute;
1535 dpif_netlink_encode_execute(dpif->dp_ifindex, execute,
1539 case DPIF_OP_FLOW_GET:
1540 get = &op->u.flow_get;
1541 dpif_netlink_init_flow_get(dpif, get->key, get->key_len, &flow);
/* GET replies go straight into the caller-supplied buffer. */
1542 aux->txn.reply = get->buffer;
1543 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
/* Pass 2: fire all transactions in one batch. */
1551 for (i = 0; i < n_ops; i++) {
1552 txnsp[i] = &auxes[i].txn;
1554 nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);
/* Pass 3: decode each reply back into its op. */
1556 for (i = 0; i < n_ops; i++) {
1557 struct op_auxdata *aux = &auxes[i];
1558 struct nl_transaction *txn = &auxes[i].txn;
1559 struct dpif_op *op = ops[i];
1560 struct dpif_flow_put *put;
1561 struct dpif_flow_del *del;
1562 struct dpif_flow_get *get;
1564 op->error = txn->error;
1567 case DPIF_OP_FLOW_PUT:
1568 put = &op->u.flow_put;
1571 struct dpif_netlink_flow reply;
1573 op->error = dpif_netlink_flow_from_ofpbuf(&reply,
1576 dpif_netlink_flow_get_stats(&reply, put->stats);
1582 case DPIF_OP_FLOW_DEL:
1583 del = &op->u.flow_del;
1586 struct dpif_netlink_flow reply;
1588 op->error = dpif_netlink_flow_from_ofpbuf(&reply,
1591 dpif_netlink_flow_get_stats(&reply, del->stats);
1597 case DPIF_OP_EXECUTE:
1600 case DPIF_OP_FLOW_GET:
1601 get = &op->u.flow_get;
1603 struct dpif_netlink_flow reply;
1605 op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
1607 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow,
/* Release per-op buffers (no-ops if still on their stack stubs). */
1617 ofpbuf_uninit(&aux->request);
1618 ofpbuf_uninit(&aux->reply);
/* dpif 'operate' entry point: splits 'ops' into chunks of at most MAX_OPS
 * and runs each chunk through the batched dpif_netlink_operate__(). */
1623 dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops)
1625 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1628 size_t chunk = MIN(n_ops, MAX_OPS);
1629 dpif_netlink_operate__(dpif, ops, chunk);
/* Windows variant: tears down the handler's vport socket pool.
 * (The #if that selects this variant lies outside this extract.) */
1637 dpif_netlink_handler_uninit(struct dpif_handler *handler)
1639 vport_delete_sock_pool(handler)
/* Windows variant: creates the handler's vport socket pool.  Returns 0 on
 * success, otherwise a positive errno value. */
1643 dpif_netlink_handler_init(struct dpif_handler *handler)
1645 return vport_create_sock_pool(handler);
/* Linux variant: creates the epoll instance used to poll this handler's
 * upcall channels.  Returns 0 on success, else the errno from
 * epoll_create().  (The size hint '10' is ignored by modern kernels.) */
1650 dpif_netlink_handler_init(struct dpif_handler *handler)
1652 handler->epoll_fd = epoll_create(10);
1653 return handler->epoll_fd < 0 ? errno : 0;
/* Linux variant: closes the handler's epoll instance. */
1657 dpif_netlink_handler_uninit(struct dpif_handler *handler)
1659 close(handler->epoll_fd);
1663 /* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
1664 * currently in 'dpif' in the kernel, by adding a new set of channels for
1665 * any kernel vport that lacks one and deleting any channels that have no
1666 * backing kernel vports. */
1668 dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
1669 OVS_REQ_WRLOCK(dpif->upcall_lock)
1671 unsigned long int *keep_channels;
1672 struct dpif_netlink_vport vport;
1673 size_t keep_channels_nbits;
1674 struct nl_dump dump;
1675 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
/* Windows supports at most one handler thread. */
1680 ovs_assert(!WINDOWS || n_handlers <= 1);
1681 ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
/* Handler count changed: rebuild the handler array from scratch. */
1683 if (dpif->n_handlers != n_handlers) {
1684 destroy_all_channels(dpif);
1685 dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
1686 for (i = 0; i < n_handlers; i++) {
1688 struct dpif_handler *handler = &dpif->handlers[i];
1690 error = dpif_netlink_handler_init(handler);
/* On failure, unwind the handlers initialized so far.
 * NOTE(review): the loop uninitializes 'tmp' (= &handlers[i]) 'i' times
 * rather than handlers[0..j-1]; intermediate source lines are missing
 * from this extract, so confirm against the full file before judging. */
1693 struct dpif_handler *tmp = &dpif->handlers[i];
1696 for (j = 0; j < i; j++) {
1697 dpif_netlink_handler_uninit(tmp);
1699 free(dpif->handlers);
1700 dpif->handlers = NULL;
1705 dpif->n_handlers = n_handlers;
/* Reset per-handler event queues before re-dumping vports. */
1708 for (i = 0; i < n_handlers; i++) {
1709 struct dpif_handler *handler = &dpif->handlers[i];
1711 handler->event_offset = handler->n_events = 0;
/* Track which existing channels are still backed by a kernel vport. */
1714 keep_channels_nbits = dpif->uc_array_size;
1715 keep_channels = bitmap_allocate(keep_channels_nbits);
1717 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
1718 dpif_netlink_port_dump_start__(dpif, &dump);
1719 while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
1720 uint32_t port_no = odp_to_u32(vport.port_no);
1721 uint32_t *upcall_pids = NULL;
/* Vport has no channels yet (or is beyond the current array): create a
 * socket per handler and register the channels. */
1724 if (port_no >= dpif->uc_array_size
1725 || !vport_get_pids(dpif, port_no, &upcall_pids)) {
1726 struct nl_sock **socksp = vport_create_socksp(dpif, &error);
1732 error = vport_add_channels(dpif, vport.port_no, socksp);
1734 VLOG_INFO("%s: could not add channels for port %s",
1735 dpif_name(&dpif->dpif), vport.name);
1736 vport_del_socksp(dpif, socksp);
1740 upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
1744 /* Configure the vport to deliver misses to 'sock'. */
1745 if (vport.upcall_pids[0] == 0
1746 || vport.n_upcall_pids != dpif->n_handlers
1747 || memcmp(upcall_pids, vport.upcall_pids, n_handlers * sizeof
1749 struct dpif_netlink_vport vport_request;
1751 dpif_netlink_vport_init(&vport_request);
1752 vport_request.cmd = OVS_VPORT_CMD_SET;
1753 vport_request.dp_ifindex = dpif->dp_ifindex;
1754 vport_request.port_no = vport.port_no;
1755 vport_request.n_upcall_pids = dpif->n_handlers;
1756 vport_request.upcall_pids = upcall_pids;
1757 error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
1759 VLOG_WARN_RL(&error_rl,
1760 "%s: failed to set upcall pid on port: %s",
1761 dpif_name(&dpif->dpif), ovs_strerror(error));
/* ENODEV/ENOENT mean the vport raced away; anything else is fatal for
 * this refresh pass. */
1763 if (error != ENODEV && error != ENOENT) {
1766 /* The vport isn't really there, even though the dump says
1767 * it is. Probably we just hit a race after a port
1774 if (port_no < keep_channels_nbits) {
1775 bitmap_set1(keep_channels, port_no);
/* Error path for this vport: drop whatever channels it had. */
1782 vport_del_channels(dpif, vport.port_no);
1784 nl_dump_done(&dump);
1785 ofpbuf_uninit(&buf);
1787 /* Discard any saved channels that we didn't reuse. */
1788 for (i = 0; i < keep_channels_nbits; i++) {
1789 if (!bitmap_is_set(keep_channels, i)) {
1790 vport_del_channels(dpif, u32_to_odp(i));
1793 free(keep_channels);
/* Enables or disables upcall reception.  No-op if already in the requested
 * state; disabling tears down all channels; enabling builds channels for a
 * single handler via dpif_netlink_refresh_channels(). */
1799 dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable)
1800 OVS_REQ_WRLOCK(dpif->upcall_lock)
1802 if ((dpif->handlers != NULL) == enable) {
1804 } else if (!enable) {
1805 destroy_all_channels(dpif);
1808 return dpif_netlink_refresh_channels(dpif, 1);
/* dpif 'recv_set' entry point: takes the upcall write-lock around
 * dpif_netlink_recv_set__(). */
1813 dpif_netlink_recv_set(struct dpif *dpif_, bool enable)
1815 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1818 fat_rwlock_wrlock(&dpif->upcall_lock);
1819 error = dpif_netlink_recv_set__(dpif, enable);
1820 fat_rwlock_unlock(&dpif->upcall_lock);
/* dpif 'handlers_set' entry point: reconfigures the number of upcall
 * handler threads.  Only takes effect if reception is already enabled
 * ('dpif->handlers' non-NULL); on Windows more than one handler is
 * rejected (see the comment at original line 1832). */
1826 dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
1828 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1832 /* Multiple upcall handlers will be supported once kernel datapath
1834 if (n_handlers > 1) {
1839 fat_rwlock_wrlock(&dpif->upcall_lock);
1840 if (dpif->handlers) {
1841 error = dpif_netlink_refresh_channels(dpif, n_handlers);
1843 fat_rwlock_unlock(&dpif->upcall_lock);
/* Maps QoS queue id 'queue_id' to a Linux tc priority handle:
 * major 1, minor queue_id+1 (minor 0 is reserved by tc).  Queue ids at or
 * above 0xf000 are rejected (handling outside this extract). */
1849 dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
1850 uint32_t queue_id, uint32_t *priority)
1852 if (queue_id < 0xf000) {
1853 *priority = TC_H_MAKE(1 << 16, queue_id + 1);
/* Parses an OVS_PACKET (upcall) Netlink message in 'buf' into '*upcall'
 * and stores the originating datapath's ifindex in '*dp_ifindex'.
 * '*upcall' borrows pointers into 'buf', so 'buf' must outlive it.
 * Returns 0 on success; a failed policy parse or unknown command is an
 * error (exact return paths fall outside this extract). */
1861 parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf,
1862 struct dpif_upcall *upcall, int *dp_ifindex)
1864 static const struct nl_policy ovs_packet_policy[] = {
1865 /* Always present. */
1866 [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
1867 .min_len = ETH_HEADER_LEN },
1868 [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },
1870 /* OVS_PACKET_CMD_ACTION only. */
1871 [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
1872 [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
1875 struct ovs_header *ovs_header;
1876 struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
1877 struct nlmsghdr *nlmsg;
1878 struct genlmsghdr *genl;
/* Walk the fixed headers, then policy-check the attributes. */
1882 ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
1884 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
1885 genl = ofpbuf_try_pull(&b, sizeof *genl);
1886 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
1887 if (!nlmsg || !genl || !ovs_header
1888 || nlmsg->nlmsg_type != ovs_packet_family
1889 || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
1890 ARRAY_SIZE(ovs_packet_policy))) {
/* MISS and ACTION are the only upcall kinds; others are rejected. */
1894 type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
1895 : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
1901 /* (Re)set ALL fields of '*upcall' on successful return. */
1902 upcall->type = type;
1903 upcall->key = CONST_CAST(struct nlattr *,
1904 nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
1905 upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
1906 dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid);
1907 upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
1908 upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
1910 /* Allow overwriting the netlink attribute header without reallocating. */
1911 ofpbuf_use_stub(&upcall->packet,
1912 CONST_CAST(struct nlattr *,
1913 nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
1914 nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
1915 sizeof(struct nlattr));
1916 ofpbuf_set_data(&upcall->packet,
1917 (char *)ofpbuf_data(&upcall->packet) + sizeof(struct nlattr));
1918 ofpbuf_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));
1920 *dp_ifindex = ovs_header->dp_ifindex;
/* Upper bound on consecutive receives per socket before yielding, so one
 * busy channel cannot starve the others. */
1926 #define PACKET_RECV_BATCH_SIZE 50
/* Windows receive path: polls each socket in the (single) handler's vport
 * socket pool for one upcall, filling '*upcall' from '*buf' on success. */
1928 dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
1929 struct dpif_upcall *upcall, struct ofpbuf *buf)
1930 OVS_REQ_RDLOCK(dpif->upcall_lock)
1932 struct dpif_handler *handler;
1934 struct dpif_windows_vport_sock *sock_pool;
1937 if (!dpif->handlers) {
1941 /* Only one handler is supported currently. */
1942 if (handler_id >= 1) {
1946 if (handler_id >= dpif->n_handlers) {
1950 handler = &dpif->handlers[handler_id];
1951 sock_pool = handler->vport_sock_pool;
1953 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
1958 if (++read_tries > PACKET_RECV_BATCH_SIZE) {
1962 error = nl_sock_recv(sock_pool[i].nl_sock, buf, false);
1963 if (error == ENOBUFS) {
1964 /* ENOBUFS typically means that we've received so many
1965 * packets that the buffer overflowed. Try again
1966 * immediately because there's almost certainly a packet
1967 * waiting for us. */
1968 /* XXX: report_loss(dpif, ch, idx, handler_id); */
1972 /* XXX: ch->last_poll = time_msec(); */
1974 if (error == EAGAIN) {
/* NOTE(review): this call omits the 'dpif' argument that the
 * parse_odp_packet() definition above takes (and that the Linux path
 * passes).  Possibly an extraction artifact — confirm against the full
 * source before treating it as a bug. */
1980 error = parse_odp_packet(buf, upcall, &dp_ifindex);
1981 if (!error && dp_ifindex == dpif->dp_ifindex) {
/* Linux receive path: drains events from handler 'handler_id''s epoll set,
 * reading one upcall into '*upcall'/'*buf'.  When the event queue is
 * exhausted it re-polls epoll (non-blocking, timeout 0). */
1993 dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
1994 struct dpif_upcall *upcall, struct ofpbuf *buf)
1995 OVS_REQ_RDLOCK(dpif->upcall_lock)
1997 struct dpif_handler *handler;
2000 if (!dpif->handlers || handler_id >= dpif->n_handlers) {
2004 handler = &dpif->handlers[handler_id];
2005 if (handler->event_offset >= handler->n_events) {
2008 handler->event_offset = handler->n_events = 0;
/* Retry epoll_wait() if interrupted by a signal. */
2011 retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
2012 dpif->uc_array_size, 0);
2013 } while (retval < 0 && errno == EINTR);
2016 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
2017 VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
2018 } else if (retval > 0) {
2019 handler->n_events = retval;
2023 while (handler->event_offset < handler->n_events) {
/* Each epoll event's u32 payload is the channel index for this port. */
2024 int idx = handler->epoll_events[handler->event_offset].data.u32;
2025 struct dpif_channel *ch = &dpif->handlers[handler_id].channels[idx];
2027 handler->event_offset++;
/* Bound consecutive reads on one channel to avoid starving others. */
2033 if (++read_tries > 50) {
2037 error = nl_sock_recv(ch->sock, buf, false);
2038 if (error == ENOBUFS) {
2039 /* ENOBUFS typically means that we've received so many
2040 * packets that the buffer overflowed. Try again
2041 * immediately because there's almost certainly a packet
2042 * waiting for us. */
2043 report_loss(dpif, ch, idx, handler_id);
2047 ch->last_poll = time_msec();
2049 if (error == EAGAIN) {
2055 error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
/* Only accept upcalls that came from this dpif's datapath. */
2056 if (!error && dp_ifindex == dpif->dp_ifindex) {
/* dpif 'recv' entry point: dispatches to the Windows or Linux receive path
 * under the upcall read-lock.  (The platform #if is outside this extract.) */
2069 dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
2070 struct dpif_upcall *upcall, struct ofpbuf *buf)
2072 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2075 fat_rwlock_rdlock(&dpif->upcall_lock);
2077 error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
2079 error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
2081 fat_rwlock_unlock(&dpif->upcall_lock);
/* Arranges for the current poll-loop iteration to wake when handler
 * 'handler_id' has an upcall ready: on Windows, waits on every socket in
 * the pool; on Linux, waits on the handler's epoll fd.  (The platform #if
 * separating the two halves is outside this extract.) */
2087 dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id)
2088 OVS_REQ_RDLOCK(dpif->upcall_lock)
2092 struct dpif_windows_vport_sock *sock_pool =
2093 dpif->handlers[handler_id].vport_sock_pool;
2095 /* Only one handler is supported currently. */
2096 if (handler_id >= 1) {
2100 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2101 nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
2104 if (dpif->handlers && handler_id < dpif->n_handlers) {
2105 struct dpif_handler *handler = &dpif->handlers[handler_id];
2107 poll_fd_wait(handler->epoll_fd, POLLIN);
/* dpif 'recv_wait' entry point: takes the upcall read-lock around
 * dpif_netlink_recv_wait__(). */
2113 dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id)
2115 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2117 fat_rwlock_rdlock(&dpif->upcall_lock);
2118 dpif_netlink_recv_wait__(dpif, handler_id);
2119 fat_rwlock_unlock(&dpif->upcall_lock);
/* Discards all queued upcalls: drains every channel socket of every
 * handler.  A NULL socket in handler 0 marks an unused channel slot and
 * is skipped for all handlers at that index. */
2123 dpif_netlink_recv_purge__(struct dpif_netlink *dpif)
2124 OVS_REQ_WRLOCK(dpif->upcall_lock)
2126 if (dpif->handlers) {
2129 for (i = 0; i < dpif->uc_array_size; i++ ) {
2130 if (!dpif->handlers[0].channels[i].sock) {
2134 for (j = 0; j < dpif->n_handlers; j++) {
2135 nl_sock_drain(dpif->handlers[j].channels[i].sock);
/* dpif 'recv_purge' entry point: takes the upcall write-lock around
 * dpif_netlink_recv_purge__(). */
2142 dpif_netlink_recv_purge(struct dpif *dpif_)
2144 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2146 fat_rwlock_wrlock(&dpif->upcall_lock);
2147 dpif_netlink_recv_purge__(dpif);
2148 fat_rwlock_unlock(&dpif->upcall_lock);
/* Reads the loaded openvswitch kernel module's version from sysfs.
 * Returns a malloc'd string (caller frees) with the trailing newline
 * stripped, or NULL if the file cannot be read. */
2152 dpif_netlink_get_datapath_version(void)
2154 char *version_str = NULL;
2158 #define MAX_VERSION_STR_SIZE 80
2159 #define LINUX_DATAPATH_VERSION_FILE  "/sys/module/openvswitch/version"
2162 f = fopen(LINUX_DATAPATH_VERSION_FILE, "r");
2165 char version[MAX_VERSION_STR_SIZE];
2167 if (fgets(version, MAX_VERSION_STR_SIZE, f)) {
/* Strip the newline fgets() retains (removal line outside extract). */
2168 newline = strchr(version, '\n');
2172 version_str = xstrdup(version);
/* The dpif provider vtable for the kernel ("system") datapath.  Slot order
 * is fixed by 'struct dpif_class' in dpif-provider.h; several slots fall
 * outside this extract (e.g. the type string, open, close, flow_get). */
2181 const struct dpif_class dpif_netlink_class = {
2183 dpif_netlink_enumerate,
2187 dpif_netlink_destroy,
2190 dpif_netlink_get_stats,
2191 dpif_netlink_port_add,
2192 dpif_netlink_port_del,
2193 dpif_netlink_port_query_by_number,
2194 dpif_netlink_port_query_by_name,
2195 dpif_netlink_port_get_pid,
2196 dpif_netlink_port_dump_start,
2197 dpif_netlink_port_dump_next,
2198 dpif_netlink_port_dump_done,
2199 dpif_netlink_port_poll,
2200 dpif_netlink_port_poll_wait,
2201 dpif_netlink_flow_flush,
2202 dpif_netlink_flow_dump_create,
2203 dpif_netlink_flow_dump_destroy,
2204 dpif_netlink_flow_dump_thread_create,
2205 dpif_netlink_flow_dump_thread_destroy,
2206 dpif_netlink_flow_dump_next,
2207 dpif_netlink_operate,
2208 dpif_netlink_recv_set,
2209 dpif_netlink_handlers_set,
2210 NULL, /* poll_thread_set */
2211 dpif_netlink_queue_to_priority,
2213 dpif_netlink_recv_wait,
2214 dpif_netlink_recv_purge,
2215 NULL, /* register_upcall_cb */
2216 NULL, /* enable_upcall */
2217 NULL, /* disable_upcall */
2218 dpif_netlink_get_datapath_version, /* get_datapath_version */
/* One-time lookup of the four OVS Generic Netlink family ids (datapath,
 * vport, flow, packet) and the vport multicast group.  Thread-safe via
 * ovsthread_once; a missing datapath family usually means the openvswitch
 * kernel module is not loaded.  Returns 0 on success or a positive errno. */
2222 dpif_netlink_init(void)
2224 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2227 if (ovsthread_once_start(&once)) {
2228 error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
2229 &ovs_datapath_family);
2231 VLOG_ERR("Generic Netlink family '%s' does not exist. "
2232 "The Open vSwitch kernel module is probably not loaded.",
2233 OVS_DATAPATH_FAMILY);
2236 error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family);
2239 error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family);
2242 error = nl_lookup_genl_family(OVS_PACKET_FAMILY,
2243 &ovs_packet_family);
2246 error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
2247 &ovs_vport_mcgroup);
2250 ovsthread_once_done(&once);
/* Returns true if 'name' is a kernel vport of type "internal".  Queries
 * the vport by name; ENODEV/ENOENT (no such vport) are expected and not
 * logged, any other error is rate-limit warned. */
2257 dpif_netlink_is_internal_device(const char *name)
2259 struct dpif_netlink_vport reply;
2263 error = dpif_netlink_vport_get(name, &reply, &buf);
2266 } else if (error != ENODEV && error != ENOENT) {
2267 VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
2268 name, ovs_strerror(error));
2271 return reply.type == OVS_VPORT_TYPE_INTERNAL;
2274 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2275 * by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a
2276 * positive errno value.
2278 * 'vport' will contain pointers into 'buf', so the caller should not free
2279 * 'buf' while 'vport' is still in use. */
2281 dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
2282 const struct ofpbuf *buf)
2284 static const struct nl_policy ovs_vport_policy[] = {
2285 [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
2286 [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
2287 [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
2288 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
2289 [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
2291 [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
2294 struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
2295 struct ovs_header *ovs_header;
2296 struct nlmsghdr *nlmsg;
2297 struct genlmsghdr *genl;
/* Start from an all-empty vport so optional fields stay zeroed. */
2300 dpif_netlink_vport_init(vport);
2302 ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
2303 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2304 genl = ofpbuf_try_pull(&b, sizeof *genl);
2305 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2306 if (!nlmsg || !genl || !ovs_header
2307 || nlmsg->nlmsg_type != ovs_vport_family
2308 || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
2309 ARRAY_SIZE(ovs_vport_policy))) {
2313 vport->cmd = genl->cmd;
2314 vport->dp_ifindex = ovs_header->dp_ifindex;
2315 vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
2316 vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2317 vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
/* The upcall-pid attribute is an array of u32 pids, one per handler. */
2318 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2319 vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
2320 / (sizeof *vport->upcall_pids);
2321 vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);
2324 if (a[OVS_VPORT_ATTR_STATS]) {
2325 vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
2327 if (a[OVS_VPORT_ATTR_OPTIONS]) {
2328 vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
2329 vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
2334 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
2335 * followed by Netlink attributes corresponding to 'vport'. */
2337 dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
2340 struct ovs_header *ovs_header;
2342 nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
2343 vport->cmd, OVS_VPORT_VERSION);
2345 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2346 ovs_header->dp_ifindex = vport->dp_ifindex;
/* Optional attributes are emitted only when set (ODPP_NONE, UNSPEC, and
 * NULL are the "unset" sentinels from dpif_netlink_vport_init()). */
2348 if (vport->port_no != ODPP_NONE) {
2349 nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
2352 if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
2353 nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
2357 nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
2360 if (vport->upcall_pids) {
2361 nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
2363 vport->n_upcall_pids * sizeof *vport->upcall_pids);
2367 nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
2368 vport->stats, sizeof *vport->stats);
2371 if (vport->options) {
2372 nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
2373 vport->options, vport->options_len);
2377 /* Clears 'vport' to "empty" values. */
2379 dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
2381 memset(vport, 0, sizeof *vport);
/* ODPP_NONE (not 0, which is a valid port) marks "no port number". */
2382 vport->port_no = ODPP_NONE;
2385 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2386 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2387 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2388 * result of the command is expected to be an ovs_vport also, which is decoded
2389 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
2390 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
2392 dpif_netlink_vport_transact(const struct dpif_netlink_vport *request,
2393 struct dpif_netlink_vport *reply,
2394 struct ofpbuf **bufp)
2396 struct ofpbuf *request_buf;
/* 'reply' and 'bufp' must be supplied together or not at all. */
2399 ovs_assert((reply != NULL) == (bufp != NULL));
/* Ensure the Generic Netlink family ids have been resolved. */
2401 error = dpif_netlink_init();
2405 dpif_netlink_vport_init(reply);
2410 request_buf = ofpbuf_new(1024);
2411 dpif_netlink_vport_to_ofpbuf(request, request_buf);
2412 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2413 ofpbuf_delete(request_buf);
2417 error = dpif_netlink_vport_from_ofpbuf(reply, *bufp);
/* Decode failure: hand back an empty reply and free the buffer. */
2420 dpif_netlink_vport_init(reply);
2421 ofpbuf_delete(*bufp);
2428 /* Obtains information about the kernel vport named 'name' and stores it into
2429 * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
2430 * longer needed ('reply' will contain pointers into '*bufp'). */
2432 dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply,
2433 struct ofpbuf **bufp)
2435 struct dpif_netlink_vport request;
/* Leaving dp_ifindex 0 queries by name across all datapaths. */
2437 dpif_netlink_vport_init(&request);
2438 request.cmd = OVS_VPORT_CMD_GET;
2439 request.name = name;
2441 return dpif_netlink_vport_transact(&request, reply, bufp);
2444 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2445 * by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a
2446 * positive errno value.
2448 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
2449 * while 'dp' is still in use. */
2451 dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf)
2453 static const struct nl_policy ovs_datapath_policy[] = {
2454 [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
2455 [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
2457 [OVS_DP_ATTR_MEGAFLOW_STATS] = {
2458 NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
2462 struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
2463 struct ovs_header *ovs_header;
2464 struct nlmsghdr *nlmsg;
2465 struct genlmsghdr *genl;
2468 dpif_netlink_dp_init(dp);
/* Walk the fixed headers, then policy-check the attributes. */
2470 ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
2471 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2472 genl = ofpbuf_try_pull(&b, sizeof *genl);
2473 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2474 if (!nlmsg || !genl || !ovs_header
2475 || nlmsg->nlmsg_type != ovs_datapath_family
2476 || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
2477 ARRAY_SIZE(ovs_datapath_policy))) {
2481 dp->cmd = genl->cmd;
2482 dp->dp_ifindex = ovs_header->dp_ifindex;
2483 dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
2484 if (a[OVS_DP_ATTR_STATS]) {
2485 dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
2488 if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
2489 dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
2495 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
2497 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
2499 struct ovs_header *ovs_header;
2501 nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
2502 NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
2503 OVS_DATAPATH_VERSION);
2505 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2506 ovs_header->dp_ifindex = dp->dp_ifindex;
/* Optional attributes: emitted only when set in 'dp'. */
2509 nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
2512 if (dp->upcall_pid) {
2513 nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
2516 if (dp->user_features) {
2517 nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
2520 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
2523 /* Clears 'dp' to "empty" values. */
2525 dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
2527 memset(dp, 0, sizeof *dp);
/* Starts a Netlink dump of every datapath in the kernel (OVS_DP_CMD_GET
 * with no dp_ifindex).  nl_dump_start() takes ownership of 'buf'. */
2531 dpif_netlink_dp_dump_start(struct nl_dump *dump)
2533 struct dpif_netlink_dp request;
2536 dpif_netlink_dp_init(&request);
2537 request.cmd = OVS_DP_CMD_GET;
2539 buf = ofpbuf_new(1024);
2540 dpif_netlink_dp_to_ofpbuf(&request, buf);
2541 nl_dump_start(dump, NETLINK_GENERIC, buf);
2545 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2546 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2547 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2548 * result of the command is expected to be of the same form, which is decoded
2549 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
2550 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
2552 dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
2553 struct dpif_netlink_dp *reply, struct ofpbuf **bufp)
2555 struct ofpbuf *request_buf;
/* 'reply' and 'bufp' must be supplied together or not at all. */
2558 ovs_assert((reply != NULL) == (bufp != NULL));
2560 request_buf = ofpbuf_new(1024);
2561 dpif_netlink_dp_to_ofpbuf(request, request_buf);
2562 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2563 ofpbuf_delete(request_buf);
2566 dpif_netlink_dp_init(reply);
2568 error = dpif_netlink_dp_from_ofpbuf(reply, *bufp);
/* Decode failure: free the reply buffer before returning the error. */
2571 ofpbuf_delete(*bufp);
2578 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
2579 * The caller must free '*bufp' when the reply is no longer needed ('reply'
2580 * will contain pointers into '*bufp'). */
2582 dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply,
2583 struct ofpbuf **bufp)
2585 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2586 struct dpif_netlink_dp request;
2588 dpif_netlink_dp_init(&request);
2589 request.cmd = OVS_DP_CMD_GET;
2590 request.dp_ifindex = dpif->dp_ifindex;
2592 return dpif_netlink_dp_transact(&request, reply, bufp);
2595 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2596 * by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a
2597 * positive errno value.
2599 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
2600 * while 'flow' is still in use. */
2602 dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
2603 const struct ofpbuf *buf)
2605 static const struct nl_policy ovs_flow_policy[] = {
2606 [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED },
2607 [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
2608 [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
2609 [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
2611 [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
2612 [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
2613 /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
2614 /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
2617 struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
2618 struct ovs_header *ovs_header;
2619 struct nlmsghdr *nlmsg;
2620 struct genlmsghdr *genl;
2623 dpif_netlink_flow_init(flow);
/* Walk the fixed headers, then policy-check the attributes. */
2625 ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
2626 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2627 genl = ofpbuf_try_pull(&b, sizeof *genl);
2628 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2629 if (!nlmsg || !genl || !ovs_header
2630 || nlmsg->nlmsg_type != ovs_flow_family
2631 || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
2632 ARRAY_SIZE(ovs_flow_policy))) {
2636 flow->nlmsg_flags = nlmsg->nlmsg_flags;
2637 flow->dp_ifindex = ovs_header->dp_ifindex;
2638 flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
2639 flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
2641 if (a[OVS_FLOW_ATTR_MASK]) {
2642 flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
2643 flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
2645 if (a[OVS_FLOW_ATTR_ACTIONS]) {
2646 flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
2647 flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
2649 if (a[OVS_FLOW_ATTR_STATS]) {
2650 flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
2652 if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
2653 flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
2655 if (a[OVS_FLOW_ATTR_USED]) {
2656 flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
2661 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
2662 * followed by Netlink attributes corresponding to 'flow'. */
2664 dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow,
2667 struct ovs_header *ovs_header;
2669 nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family,
2670 NLM_F_REQUEST | flow->nlmsg_flags,
2671 flow->cmd, OVS_FLOW_VERSION);
2673 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2674 ovs_header->dp_ifindex = flow->dp_ifindex;
2676 if (flow->key_len) {
2677 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_KEY, flow->key, flow->key_len);
2680 if (flow->mask_len) {
2681 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_MASK, flow->mask, flow->mask_len);
2684 if (flow->actions || flow->actions_len) {
2685 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
2686 flow->actions, flow->actions_len);
2689 /* We never need to send these to the kernel. */
2690 ovs_assert(!flow->stats);
2691 ovs_assert(!flow->tcp_flags);
2692 ovs_assert(!flow->used);
2695 nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR);
2698 nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
2702 /* Clears 'flow' to "empty" values. */
2704 dpif_netlink_flow_init(struct dpif_netlink_flow *flow)
2706 memset(flow, 0, sizeof *flow);
2709 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2710 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2711 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2712 * result of the command is expected to be a flow also, which is decoded and
2713 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
2714 * is no longer needed ('reply' will contain pointers into '*bufp'). */
2716 dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
2717 struct dpif_netlink_flow *reply,
2718 struct ofpbuf **bufp)
2720 struct ofpbuf *request_buf;
2723 ovs_assert((reply != NULL) == (bufp != NULL));
2726 request->nlmsg_flags |= NLM_F_ECHO;
2729 request_buf = ofpbuf_new(1024);
2730 dpif_netlink_flow_to_ofpbuf(request, request_buf);
2731 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2732 ofpbuf_delete(request_buf);
2736 error = dpif_netlink_flow_from_ofpbuf(reply, *bufp);
2739 dpif_netlink_flow_init(reply);
2740 ofpbuf_delete(*bufp);
2748 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow,
2749 struct dpif_flow_stats *stats)
2752 stats->n_packets = get_32aligned_u64(&flow->stats->n_packets);
2753 stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes);
2755 stats->n_packets = 0;
2758 stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
2759 stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
2762 /* Logs information about a packet that was recently lost in 'ch' (in
2765 report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx,
2766 uint32_t handler_id)
2768 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
2771 if (VLOG_DROP_WARN(&rl)) {
2776 if (ch->last_poll != LLONG_MIN) {
2777 ds_put_format(&s, " (last polled %lld ms ago)",
2778 time_msec() - ch->last_poll);
2781 VLOG_WARN("%s: lost packet on port channel %u of handler %u",
2782 dpif_name(&dpif->dpif), ch_idx, handler_id);