2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "dpif-netlink.h"
26 #include <linux/types.h>
27 #include <linux/pkt_sched.h>
31 #include <sys/epoll.h>
36 #include "dpif-provider.h"
37 #include "dynamic-string.h"
39 #include "fat-rwlock.h"
41 #include "netdev-linux.h"
42 #include "netdev-vport.h"
43 #include "netlink-notifier.h"
44 #include "netlink-socket.h"
49 #include "poll-loop.h"
54 #include "unaligned.h"
56 #include "openvswitch/vlog.h"
58 VLOG_DEFINE_THIS_MODULE(dpif_netlink);
64 enum { MAX_PORTS = USHRT_MAX };
66 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
67 * missing if we have old headers. */
68 #define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
/* NOTE(review): this chunk was extracted with lines dropped (gaps in the
 * embedded numbering) and a stray line number fused onto each line; members
 * such as cmd/dp_ifindex and the closing brace are missing here — recover
 * from upstream before compiling. */
/* In-memory mirror of an OVS_DP_* Generic Netlink message about one
 * datapath; each member below maps to the OVS_DP_ATTR_* noted beside it. */
70 struct dpif_netlink_dp {
71     /* Generic Netlink header. */
74     /* struct ovs_header. */
78     const char *name;                  /* OVS_DP_ATTR_NAME. */
79     const uint32_t *upcall_pid;        /* OVS_DP_ATTR_UPCALL_PID. */
80     uint32_t user_features;            /* OVS_DP_ATTR_USER_FEATURES */
81     const struct ovs_dp_stats *stats;  /* OVS_DP_ATTR_STATS. */
82     const struct ovs_dp_megaflow_stats *megaflow_stats;
83                                        /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
86 static void dpif_netlink_dp_init(struct dpif_netlink_dp *);
87 static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *,
88 const struct ofpbuf *);
89 static void dpif_netlink_dp_dump_start(struct nl_dump *);
90 static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
91 struct dpif_netlink_dp *reply,
92 struct ofpbuf **bufp);
93 static int dpif_netlink_dp_get(const struct dpif *,
94 struct dpif_netlink_dp *reply,
95 struct ofpbuf **bufp);
/* NOTE(review): extraction dropped lines here (gaps in the embedded
 * numbering); key_len/mask_len/actions_len members and the closing brace
 * are not visible — recover from upstream before compiling. */
/* In-memory mirror of an OVS_FLOW_* Generic Netlink message; members map to
 * the OVS_FLOW_ATTR_* attributes noted beside them. */
97 struct dpif_netlink_flow {
98     /* Generic Netlink header. */
101     /* struct ovs_header. */
102     unsigned int nlmsg_flags;
107      * The 'stats' member points to 64-bit data that might only be aligned on
108      * 32-bit boundaries, so get_unaligned_u64() should be used to access its
111      * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
112      * the Netlink version of the command, even if actions_len is zero. */
113     const struct nlattr *key;           /* OVS_FLOW_ATTR_KEY. */
115     const struct nlattr *mask;          /* OVS_FLOW_ATTR_MASK. */
117     const struct nlattr *actions;       /* OVS_FLOW_ATTR_ACTIONS. */
119     ovs_u128 ufid;                      /* OVS_FLOW_ATTR_FLOW_ID. */
120     bool ufid_present;                  /* Is there a UFID? */
121     bool ufid_terse;                    /* Skip serializing key/mask/acts? */
122     const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
123     const uint8_t *tcp_flags;           /* OVS_FLOW_ATTR_TCP_FLAGS. */
124     const ovs_32aligned_u64 *used;      /* OVS_FLOW_ATTR_USED. */
125     bool clear;                         /* OVS_FLOW_ATTR_CLEAR. */
126     bool probe;                         /* OVS_FLOW_ATTR_PROBE. */
129 static void dpif_netlink_flow_init(struct dpif_netlink_flow *);
130 static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *,
131 const struct ofpbuf *);
132 static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *,
134 static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
135 struct dpif_netlink_flow *reply,
136 struct ofpbuf **bufp);
137 static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
138 struct dpif_flow_stats *);
139 static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *,
140 const struct dpif_netlink_flow *);
/* NOTE(review): lines are missing throughout this region (gaps in the
 * embedded numbering) — struct closing braces and some members (e.g. the
 * dpif_netlink base 'struct dpif dpif;' and 'int dp_ifindex;') are not
 * visible.  Recover from upstream before compiling. */
142 /* One of the dpif channels between the kernel and userspace. */
143 struct dpif_channel {
144     struct nl_sock *sock;       /* Netlink socket. */
145     long long int last_poll;    /* Last time this channel was polled. */
149 #define VPORT_SOCK_POOL_SIZE 1
150 /* On Windows, there is no native support for epoll.  There are equivalent
151  * interfaces though, that are not used currently.  For simplicity, a pool of
152  * netlink sockets is used.  Each socket is represented by 'struct
153  * dpif_windows_vport_sock'.  Since it is a pool, multiple OVS ports may be
154  * sharing the same socket.  In the future, we can add a reference count and
156 struct dpif_windows_vport_sock {
157     struct nl_sock *nl_sock;    /* netlink socket. */
/* Per-thread upcall handler state: one epoll set over all port channels. */
161 struct dpif_handler {
162     struct dpif_channel *channels;/* Array of channels for each handler. */
163     struct epoll_event *epoll_events;
164     int epoll_fd;                 /* epoll fd that includes channel socks. */
165     int n_events;                 /* Num events returned by epoll_wait(). */
166     int event_offset;             /* Offset into 'epoll_events'. */
169     /* Pool of sockets. */
170     struct dpif_windows_vport_sock *vport_sock_pool;
171     size_t last_used_pool_idx; /* Index to aid in allocating a
172                                   socket in the pool to a port. */
176 /* Datapath interface for the openvswitch Linux kernel module. */
177 struct dpif_netlink {
181     /* Upcall messages. */
182     struct fat_rwlock upcall_lock;
183     struct dpif_handler *handlers;
184     uint32_t n_handlers;           /* Num of upcall handlers. */
185     int uc_array_size;             /* Size of 'handler->channels' and */
186                                    /* 'handler->epoll_events'. */
188     /* Change notification. */
189     struct nl_sock *port_notifier; /* vport multicast group subscriber. */
190     bool refresh_channels;
193 static void report_loss(struct dpif_netlink *, struct dpif_channel *,
194 uint32_t ch_idx, uint32_t handler_id);
196 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
198 /* Generic Netlink family numbers for OVS.
200 * Initialized by dpif_netlink_init(). */
201 static int ovs_datapath_family;
202 static int ovs_vport_family;
203 static int ovs_flow_family;
204 static int ovs_packet_family;
206 /* Generic Netlink multicast groups for OVS.
208 * Initialized by dpif_netlink_init(). */
209 static unsigned int ovs_vport_mcgroup;
211 static int dpif_netlink_init(void);
212 static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
213 static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
214 odp_port_t port_no, uint32_t hash);
215 static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
216 static int dpif_netlink_refresh_channels(struct dpif_netlink *,
217 uint32_t n_handlers);
218 static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
220 static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *,
221 const struct ofpbuf *);
223 static struct dpif_netlink *
224 dpif_netlink_cast(const struct dpif *dpif)
226 dpif_assert_class(dpif, &dpif_netlink_class);
227 return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
/* NOTE(review): lines dropped throughout (gaps in embedded numbering) —
 * braces, some declarations ('struct nl_dump dump;', 'int error;'), and the
 * early-return error paths are missing.  Recover from upstream. */
/* Enumerates the names of all kernel datapaths into 'all_dps' using a
 * Netlink dump of OVS_DP_CMD_GET. */
231 dpif_netlink_enumerate(struct sset *all_dps,
232                        const struct dpif_class *dpif_class OVS_UNUSED)
235     uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
236     struct ofpbuf msg, buf;
239     error = dpif_netlink_init();
244     ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
245     dpif_netlink_dp_dump_start(&dump);
246     while (nl_dump_next(&dump, &msg, &buf)) {
247         struct dpif_netlink_dp dp;
249         if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
250             sset_add(all_dps, dp.name);
254     return nl_dump_done(&dump);
/* Opens datapath 'name' (creating it when 'create' is set, otherwise looking
 * it up with OVS_DP_CMD_SET to report userspace features) and wraps the
 * kernel reply via open_dpif(). */
258 dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
259                   bool create, struct dpif **dpifp)
261     struct dpif_netlink_dp dp_request, dp;
266     error = dpif_netlink_init();
271     /* Create or look up datapath. */
272     dpif_netlink_dp_init(&dp_request);
274         dp_request.cmd = OVS_DP_CMD_NEW;
276         dp_request.upcall_pid = &upcall_pid;
278         /* Use OVS_DP_CMD_SET to report user features */
279         dp_request.cmd = OVS_DP_CMD_SET;
281     dp_request.name = name;
282     dp_request.user_features |= OVS_DP_F_UNALIGNED;
283     dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
284     error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
289     error = open_dpif(&dp, dpifp);
/* Allocates and initializes the dpif_netlink wrapper for datapath 'dp',
 * returning it to the caller in '*dpifp'. */
295 open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp)
297     struct dpif_netlink *dpif;
299     dpif = xzalloc(sizeof *dpif);
300     dpif->port_notifier = NULL;
301     fat_rwlock_init(&dpif->upcall_lock);
303     dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name,
304               dp->dp_ifindex, dp->dp_ifindex);
306     dpif->dp_ifindex = dp->dp_ifindex;
307     *dpifp = &dpif->dpif;
/* NOTE(review): lines dropped throughout (gaps in embedded numbering) —
 * braces, the NULL guard in vport_del_socksp__, free(socksp), and error-path
 * gotos are missing.  Recover from upstream. */
312 /* Destroys the netlink sockets pointed by the elements in 'socksp'
313  * and frees the 'socksp'. */
315 vport_del_socksp__(struct nl_sock **socksp, uint32_t n_socks)
319     for (i = 0; i < n_socks; i++) {
320         nl_sock_destroy(socksp[i]);
326 /* Creates an array of netlink sockets. Returns an array of the
327  * corresponding pointers. Records the error in 'error'. */
328 static struct nl_sock **
329 vport_create_socksp__(uint32_t n_socks, int *error)
331     struct nl_sock **socksp = xzalloc(n_socks * sizeof *socksp);
334     for (i = 0; i < n_socks; i++) {
335         *error = nl_sock_create(NETLINK_GENERIC, &socksp[i]);
/* On failure, tears down whatever sockets were created so far. */
344     vport_del_socksp__(socksp, n_socks);
/* (Windows-only) Destroys every socket in 'handler''s pool after
 * unsubscribing it from packet reception, then frees the pool. */
351 vport_delete_sock_pool(struct dpif_handler *handler)
352     OVS_REQ_WRLOCK(dpif->upcall_lock)
354     if (handler->vport_sock_pool) {
356         struct dpif_windows_vport_sock *sock_pool =
357             handler->vport_sock_pool;
359         for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
360             if (sock_pool[i].nl_sock) {
361                 nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
362                 nl_sock_destroy(sock_pool[i].nl_sock);
363                 sock_pool[i].nl_sock = NULL;
367         free(handler->vport_sock_pool);
368         handler->vport_sock_pool = NULL;
/* (Windows-only) Allocates VPORT_SOCK_POOL_SIZE netlink sockets for
 * 'handler', subscribing each to packet reception. */
373 vport_create_sock_pool(struct dpif_handler *handler)
374     OVS_REQ_WRLOCK(dpif->upcall_lock)
376     struct dpif_windows_vport_sock *sock_pool;
380     sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
381     for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
382         error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
387         /* Enable the netlink socket to receive packets.  This is equivalent to
388          * calling nl_sock_join_mcgroup() to receive events. */
389         error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
395     handler->vport_sock_pool = sock_pool;
396     handler->last_used_pool_idx = 0;
/* Error path: undo any partially-built pool. */
400     vport_delete_sock_pool(handler);
404 /* Returns an array pointers to netlink sockets. The sockets are picked from a
405 * pool. Records the error in 'error'. */
406 static struct nl_sock **
407 vport_create_socksp_windows(struct dpif_netlink *dpif, int *error)
408 OVS_REQ_WRLOCK(dpif->upcall_lock)
410 uint32_t n_socks = dpif->n_handlers;
411 struct nl_sock **socksp;
414 ovs_assert(n_socks <= 1);
415 socksp = xzalloc(n_socks * sizeof *socksp);
417 /* Pick netlink sockets to use in a round-robin fashion from each
418 * handler's pool of sockets. */
419 for (i = 0; i < n_socks; i++) {
420 struct dpif_handler *handler = &dpif->handlers[i];
421 struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
422 size_t index = handler->last_used_pool_idx;
424 /* A pool of sockets is allocated when the handler is initialized. */
425 if (sock_pool == NULL) {
431 ovs_assert(index < VPORT_SOCK_POOL_SIZE);
432 socksp[i] = sock_pool[index].nl_sock;
433 socksp[i] = sock_pool[index].nl_sock;
434 ovs_assert(socksp[i]);
435 index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
436 handler->last_used_pool_idx = index;
/* NOTE(review): lines dropped (gaps in embedded numbering) — function
 * bodies, #ifdef _WIN32 / #else / #endif selectors, and returns are partly
 * missing.  Recover from upstream. */
/* (Windows-only) Releases 'socksp' itself; the sockets stay in the pools. */
443 vport_del_socksp_windows(struct dpif_netlink *dpif, struct nl_sock **socksp)
/* Platform dispatcher: pool-backed sockets on Windows, freshly created
 * sockets (one per handler) elsewhere. */
449 static struct nl_sock **
450 vport_create_socksp(struct dpif_netlink *dpif, int *error)
453     return vport_create_socksp_windows(dpif, error);
455     return vport_create_socksp__(dpif->n_handlers, error);
/* Platform dispatcher matching vport_create_socksp(). */
460 vport_del_socksp(struct dpif_netlink *dpif, struct nl_sock **socksp)
463     vport_del_socksp_windows(dpif, socksp);
465     vport_del_socksp__(socksp, dpif->n_handlers);
469 /* Given the array of pointers to netlink sockets 'socksp', returns
470  * the array of corresponding pids. If the 'socksp' is NULL, returns
471  * a single-element array of value 0. */
473 vport_socksp_to_pids(struct nl_sock **socksp, uint32_t n_socks)
478         pids = xzalloc(sizeof *pids);
482         pids = xzalloc(n_socks * sizeof *pids);
483         for (i = 0; i < n_socks; i++) {
484             pids[i] = nl_sock_pid(socksp[i]);
/* NOTE(review): lines dropped throughout (gaps in embedded numbering) —
 * braces, early returns, error-handling labels, and some declarations are
 * missing.  The logic order below is intricate (epoll registration with
 * rollback); recover the exact upstream text before compiling. */
491 /* Given the port number 'port_idx', extracts the pids of netlink sockets
492  * associated to the port and assigns it to 'upcall_pids'. */
494 vport_get_pids(struct dpif_netlink *dpif, uint32_t port_idx,
495                uint32_t **upcall_pids)
500     /* Since the nl_sock can only be assigned in either all
501      * or none "dpif->handlers" channels, the following check
503     if (!dpif->handlers[0].channels[port_idx].sock) {
506     ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
508     pids = xzalloc(dpif->n_handlers * sizeof *pids);
510     for (i = 0; i < dpif->n_handlers; i++) {
511         pids[i] = nl_sock_pid(dpif->handlers[i].channels[port_idx].sock);
/* Registers one socket per handler for upcalls from 'port_no': grows the
 * per-handler channel/epoll arrays as needed, then adds each socket to the
 * matching handler's epoll set.  On epoll failure, rolls back the channels
 * registered so far. */
520 vport_add_channels(struct dpif_netlink *dpif, odp_port_t port_no,
521                    struct nl_sock **socksp)
523     struct epoll_event event;
524     uint32_t port_idx = odp_to_u32(port_no);
528     if (dpif->handlers == NULL) {
532     /* We assume that the datapath densely chooses port numbers, which can
533      * therefore be used as an index into 'channels' and 'epoll_events' of
534      * 'dpif->handler'. */
535     if (port_idx >= dpif->uc_array_size) {
536         uint32_t new_size = port_idx + 1;
538         if (new_size > MAX_PORTS) {
539             VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
540                          dpif_name(&dpif->dpif), port_no);
544         for (i = 0; i < dpif->n_handlers; i++) {
545             struct dpif_handler *handler = &dpif->handlers[i];
547             handler->channels = xrealloc(handler->channels,
548                                          new_size * sizeof *handler->channels);
550             for (j = dpif->uc_array_size; j < new_size; j++) {
551                 handler->channels[j].sock = NULL;
554             handler->epoll_events = xrealloc(handler->epoll_events,
555                                              new_size * sizeof *handler->epoll_events);
558         dpif->uc_array_size = new_size;
561     memset(&event, 0, sizeof event);
562     event.events = EPOLLIN;
563     event.data.u32 = port_idx;
565     for (i = 0; i < dpif->n_handlers; i++) {
566         struct dpif_handler *handler = &dpif->handlers[i];
569         if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(socksp[i]),
575         dpif->handlers[i].channels[port_idx].sock = socksp[i];
576         dpif->handlers[i].channels[port_idx].last_poll = LLONG_MIN;
/* Error rollback: undo epoll registration for handlers 0..i-1. */
582     for (j = 0; j < i; j++) {
584         epoll_ctl(dpif->handlers[j].epoll_fd, EPOLL_CTL_DEL,
585                   nl_sock_fd(socksp[j]), NULL);
587         dpif->handlers[j].channels[port_idx].sock = NULL;
/* Unregisters and destroys every handler's channel socket for 'port_no',
 * then resets the handlers' pending-event bookkeeping. */
594 vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
596     uint32_t port_idx = odp_to_u32(port_no);
599     if (!dpif->handlers || port_idx >= dpif->uc_array_size) {
603     /* Since the sock can only be assigned in either all or none
604      * of "dpif->handlers" channels, the following check would
606     if (!dpif->handlers[0].channels[port_idx].sock) {
610     for (i = 0; i < dpif->n_handlers; i++) {
611         struct dpif_handler *handler = &dpif->handlers[i];
613         epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
614                   nl_sock_fd(handler->channels[port_idx].sock), NULL);
615         nl_sock_destroy(handler->channels[port_idx].sock);
617         handler->channels[port_idx].sock = NULL;
618         handler->event_offset = handler->n_events = 0;
/* NOTE(review): lines dropped (gaps in embedded numbering) — braces, early
 * return, and the free(dpif) at the end of close are missing.  Recover from
 * upstream. */
/* Tears down all upcall channels: tells the kernel (pid 0) to stop sending
 * upcalls for each port, destroys every channel, then frees all per-handler
 * state.  Caller holds 'upcall_lock' for writing. */
623 destroy_all_channels(struct dpif_netlink *dpif)
624     OVS_REQ_WRLOCK(dpif->upcall_lock)
628     if (!dpif->handlers) {
632     for (i = 0; i < dpif->uc_array_size; i++ ) {
633         struct dpif_netlink_vport vport_request;
634         uint32_t upcall_pids = 0;
636         /* Since the sock can only be assigned in either all or none
637          * of "dpif->handlers" channels, the following check would
639         if (!dpif->handlers[0].channels[i].sock) {
643         /* Turn off upcalls. */
644         dpif_netlink_vport_init(&vport_request);
645         vport_request.cmd = OVS_VPORT_CMD_SET;
646         vport_request.dp_ifindex = dpif->dp_ifindex;
647         vport_request.port_no = u32_to_odp(i);
648         vport_request.n_upcall_pids = 1;
649         vport_request.upcall_pids = &upcall_pids;
650         dpif_netlink_vport_transact(&vport_request, NULL, NULL);
652         vport_del_channels(dpif, u32_to_odp(i));
655     for (i = 0; i < dpif->n_handlers; i++) {
656         struct dpif_handler *handler = &dpif->handlers[i];
658         dpif_netlink_handler_uninit(handler);
659         free(handler->epoll_events);
660         free(handler->channels);
663     free(dpif->handlers);
664     dpif->handlers = NULL;
665     dpif->n_handlers = 0;
666     dpif->uc_array_size = 0;
/* dpif_class 'close' callback: releases the notifier socket, all channels,
 * and the upcall lock. */
670 dpif_netlink_close(struct dpif *dpif_)
672     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
674     nl_sock_destroy(dpif->port_notifier);
676     fat_rwlock_wrlock(&dpif->upcall_lock);
677     destroy_all_channels(dpif);
678     fat_rwlock_unlock(&dpif->upcall_lock);
680     fat_rwlock_destroy(&dpif->upcall_lock);
685 dpif_netlink_destroy(struct dpif *dpif_)
687 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
688 struct dpif_netlink_dp dp;
690 dpif_netlink_dp_init(&dp);
691 dp.cmd = OVS_DP_CMD_DEL;
692 dp.dp_ifindex = dpif->dp_ifindex;
693 return dpif_netlink_dp_transact(&dp, NULL, NULL);
/* NOTE(review): lines dropped (gaps in embedded numbering) — braces, error
 * handling around dp_get, and the else branch / ofpbuf_delete at the end of
 * get_stats are missing.  Recover from upstream. */
/* Periodic callback: rebuilds the upcall channels when a port deletion was
 * observed by the port notifier (refresh_channels flag). */
697 dpif_netlink_run(struct dpif *dpif_)
699     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
701     if (dpif->refresh_channels) {
702         dpif->refresh_channels = false;
703         fat_rwlock_wrlock(&dpif->upcall_lock);
704         dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
705         fat_rwlock_unlock(&dpif->upcall_lock);
/* Fills '*stats' from an OVS_DP_CMD_GET reply; stats in the reply are only
 * 32-bit aligned, hence the get_32aligned_u64() accessors. */
711 dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
713     struct dpif_netlink_dp dp;
717     error = dpif_netlink_dp_get(dpif_, &dp, &buf);
719         memset(stats, 0, sizeof *stats);
722             stats->n_hit = get_32aligned_u64(&dp.stats->n_hit);
723             stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
724             stats->n_lost = get_32aligned_u64(&dp.stats->n_lost);
725             stats->n_flows = get_32aligned_u64(&dp.stats->n_flows);
728         if (dp.megaflow_stats) {
729             stats->n_masks = dp.megaflow_stats->n_masks;
730             stats->n_mask_hit = get_32aligned_u64(
731                 &dp.megaflow_stats->n_mask_hit);
/* Kernel did not report megaflow stats: report "unknown" sentinels. */
733             stats->n_masks = UINT32_MAX;
734             stats->n_mask_hit = UINT64_MAX;
/* NOTE(review): lines dropped (gaps in embedded numbering) — the return
 * statements for most switch cases, braces, and the trailing return are
 * missing.  Recover from upstream. */
/* Maps a kernel vport type to the netdev type name OVS uses ("system",
 * "internal", "geneve", ...). */
742 get_vport_type(const struct dpif_netlink_vport *vport)
744     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
746     switch (vport->type) {
747     case OVS_VPORT_TYPE_NETDEV: {
748         const char *type = netdev_get_type_from_name(vport->name);
750         return type ? type : "system";
753     case OVS_VPORT_TYPE_INTERNAL:
756     case OVS_VPORT_TYPE_GENEVE:
759     case OVS_VPORT_TYPE_GRE:
762     case OVS_VPORT_TYPE_GRE64:
765     case OVS_VPORT_TYPE_VXLAN:
768     case OVS_VPORT_TYPE_LISP:
771     case OVS_VPORT_TYPE_UNSPEC:
772     case __OVS_VPORT_TYPE_MAX:
776     VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
777                  vport->dp_ifindex, vport->name, (unsigned int) vport->type);
/* Inverse mapping: netdev type name -> kernel vport type.  Note "gre64" is
 * tested before "gre" because strstr("...gre64...", "gre") also matches. */
781 static enum ovs_vport_type
782 netdev_to_ovs_vport_type(const struct netdev *netdev)
784     const char *type = netdev_get_type(netdev);
786     if (!strcmp(type, "tap") || !strcmp(type, "system")) {
787         return OVS_VPORT_TYPE_NETDEV;
788     } else if (!strcmp(type, "internal")) {
789         return OVS_VPORT_TYPE_INTERNAL;
790     } else if (!strcmp(type, "geneve")) {
791         return OVS_VPORT_TYPE_GENEVE;
792     } else if (strstr(type, "gre64")) {
793         return OVS_VPORT_TYPE_GRE64;
794     } else if (strstr(type, "gre")) {
795         return OVS_VPORT_TYPE_GRE;
796     } else if (!strcmp(type, "vxlan")) {
797         return OVS_VPORT_TYPE_VXLAN;
798     } else if (!strcmp(type, "lisp")) {
799         return OVS_VPORT_TYPE_LISP;
801         return OVS_VPORT_TYPE_UNSPEC;
/* NOTE(review): lines dropped throughout (gaps in embedded numbering) —
 * braces, #ifdef _WIN32 blocks, error paths, several declarations, and
 * return statements are missing.  The transaction/rollback ordering here is
 * delicate; recover the exact upstream text before compiling. */
/* Creates a kernel vport for 'netdev': allocates per-handler upcall sockets,
 * issues OVS_VPORT_CMD_NEW (with tunnel options when applicable), and wires
 * the new port into the handlers' epoll sets.  On channel failure the port
 * is deleted again.  Caller holds 'upcall_lock' for writing. */
806 dpif_netlink_port_add__(struct dpif_netlink *dpif, struct netdev *netdev,
807                         odp_port_t *port_nop)
808     OVS_REQ_WRLOCK(dpif->upcall_lock)
810     const struct netdev_tunnel_config *tnl_cfg;
811     char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
812     const char *name = netdev_vport_get_dpif_port(netdev,
813                                                   namebuf, sizeof namebuf);
814     const char *type = netdev_get_type(netdev);
815     struct dpif_netlink_vport request, reply;
817     uint64_t options_stub[64 / 8];
818     struct ofpbuf options;
819     struct nl_sock **socksp = NULL;
820     uint32_t *upcall_pids;
823     if (dpif->handlers) {
824         socksp = vport_create_socksp(dpif, &error);
830     dpif_netlink_vport_init(&request);
831     request.cmd = OVS_VPORT_CMD_NEW;
832     request.dp_ifindex = dpif->dp_ifindex;
833     request.type = netdev_to_ovs_vport_type(netdev);
834     if (request.type == OVS_VPORT_TYPE_UNSPEC) {
835         VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
836                      "unsupported type `%s'",
837                      dpif_name(&dpif->dpif), name, type);
838         vport_del_socksp(dpif, socksp);
843     if (request.type == OVS_VPORT_TYPE_NETDEV) {
845         /* XXX : Map appropriate Windows handle */
847         netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
851     tnl_cfg = netdev_get_tunnel_config(netdev);
852     if (tnl_cfg && (tnl_cfg->dst_port != 0 || tnl_cfg->exts)) {
853         ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
854         if (tnl_cfg->dst_port) {
855             nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
856                            ntohs(tnl_cfg->dst_port));
/* Encode each enabled tunnel extension as a nested flag attribute. */
862             ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
863             for (i = 0; i < 32; i++) {
864                 if (tnl_cfg->exts & (1 << i)) {
865                     nl_msg_put_flag(&options, i);
868             nl_msg_end_nested(&options, ext_ofs);
870         request.options = options.data;
871         request.options_len = options.size;
874     request.port_no = *port_nop;
875     upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
876     request.n_upcall_pids = socksp ? dpif->n_handlers : 1;
877     request.upcall_pids = upcall_pids;
879     error = dpif_netlink_vport_transact(&request, &reply, &buf);
881         *port_nop = reply.port_no;
883         if (error == EBUSY && *port_nop != ODPP_NONE) {
884             VLOG_INFO("%s: requested port %"PRIu32" is in use",
885                       dpif_name(&dpif->dpif), *port_nop);
888         vport_del_socksp(dpif, socksp);
893     error = vport_add_channels(dpif, *port_nop, socksp);
895         VLOG_INFO("%s: could not add channel for port %s",
896                   dpif_name(&dpif->dpif), name);
898         /* Delete the port. */
899         dpif_netlink_vport_init(&request);
900         request.cmd = OVS_VPORT_CMD_DEL;
901         request.dp_ifindex = dpif->dp_ifindex;
902         request.port_no = *port_nop;
903         dpif_netlink_vport_transact(&request, NULL, NULL);
904         vport_del_socksp(dpif, socksp);
/* Public wrapper: takes 'upcall_lock' around the real work. */
918 dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev,
919                       odp_port_t *port_nop)
921     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
924     fat_rwlock_wrlock(&dpif->upcall_lock);
925     error = dpif_netlink_port_add__(dpif, netdev, port_nop);
926     fat_rwlock_unlock(&dpif->upcall_lock);
/* Deletes kernel vport 'port_no' and tears down its upcall channels.
 * Caller holds 'upcall_lock' for writing. */
932 dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
933     OVS_REQ_WRLOCK(dpif->upcall_lock)
935     struct dpif_netlink_vport vport;
938     dpif_netlink_vport_init(&vport);
939     vport.cmd = OVS_VPORT_CMD_DEL;
940     vport.dp_ifindex = dpif->dp_ifindex;
941     vport.port_no = port_no;
942     error = dpif_netlink_vport_transact(&vport, NULL, NULL);
944     vport_del_channels(dpif, port_no);
/* Public wrapper: takes 'upcall_lock' around the real work. */
950 dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no)
952     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
955     fat_rwlock_wrlock(&dpif->upcall_lock);
956     error = dpif_netlink_port_del__(dpif, port_no);
957     fat_rwlock_unlock(&dpif->upcall_lock);
/* NOTE(review): lines dropped (gaps in embedded numbering) — braces,
 * 'struct ofpbuf *buf;' declarations, ofpbuf_delete calls, and returns are
 * missing.  Recover from upstream. */
/* Queries one vport by number or by name (OVS_VPORT_CMD_GET) and, when
 * 'dpif_port' is nonnull, fills it with malloc'ed name/type strings. */
963 dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
964                           const char *port_name, struct dpif_port *dpif_port)
966     struct dpif_netlink_vport request;
967     struct dpif_netlink_vport reply;
971     dpif_netlink_vport_init(&request);
972     request.cmd = OVS_VPORT_CMD_GET;
973     request.dp_ifindex = dpif->dp_ifindex;
974     request.port_no = port_no;
975     request.name = port_name;
977     error = dpif_netlink_vport_transact(&request, &reply, &buf);
979         if (reply.dp_ifindex != request.dp_ifindex) {
980             /* A query by name reported that 'port_name' is in some datapath
981              * other than 'dpif', but the caller wants to know about 'dpif'. */
983         } else if (dpif_port) {
984             dpif_port->name = xstrdup(reply.name);
985             dpif_port->type = xstrdup(get_vport_type(&reply));
986             dpif_port->port_no = reply.port_no;
/* dpif_class callback: query by port number. */
994 dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no,
995                                   struct dpif_port *dpif_port)
997     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
999     return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port);
/* dpif_class callback: query by device name. */
1003 dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
1004                                 struct dpif_port *dpif_port)
1006     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1008     return dpif_netlink_port_query__(dpif, 0, devname, dpif_port);
/* Returns the Netlink pid that the kernel should use to send upcalls for
 * 'port_no', picking the handler by 'hash'.  Requires 'upcall_lock'. */
1012 dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
1013                             odp_port_t port_no, uint32_t hash)
1014     OVS_REQ_RDLOCK(dpif->upcall_lock)
1016     uint32_t port_idx = odp_to_u32(port_no);
1019     if (dpif->handlers && dpif->uc_array_size > 0) {
1020         /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
1021          * channel, since it is not heavily loaded. */
1022         uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;
1023         struct dpif_handler *h = &dpif->handlers[hash % dpif->n_handlers];
1025         /* Needs to check in case the socket pointer is changed in between
1026          * the holding of upcall_lock. A known case happens when the main
1027          * thread deletes the vport while the handler thread is handling
1028          * the upcall from that port. */
1029         if (h->channels[idx].sock) {
1030             pid = nl_sock_pid(h->channels[idx].sock);
/* Public wrapper: takes 'upcall_lock' (read) around the real work. */
1038 dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no,
1041     const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1044     fat_rwlock_rdlock(&dpif->upcall_lock);
1045     ret = dpif_netlink_port_get_pid__(dpif, port_no, hash);
1046     fat_rwlock_unlock(&dpif->upcall_lock);
/* Deletes every flow in the datapath with a key-less OVS_FLOW_CMD_DEL. */
1052 dpif_netlink_flow_flush(struct dpif *dpif_)
1054     const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1055     struct dpif_netlink_flow flow;
1057     dpif_netlink_flow_init(&flow);
1058     flow.cmd = OVS_FLOW_CMD_DEL;
1059     flow.dp_ifindex = dpif->dp_ifindex;
1060     return dpif_netlink_flow_transact(&flow, NULL, NULL);
/* NOTE(review): lines dropped (gaps in embedded numbering) — the state
 * struct's 'struct ofpbuf buf;' member, braces, EOF/error returns, and some
 * call arguments are missing.  Recover from upstream. */
/* Iterator state for the port dump: a Netlink dump plus a reply buffer. */
1063 struct dpif_netlink_port_state {
1064     struct nl_dump dump;
/* Starts a Netlink dump of all vports in 'dpif'. */
1069 dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif,
1070                                struct nl_dump *dump)
1072     struct dpif_netlink_vport request;
1075     dpif_netlink_vport_init(&request);
1076     request.cmd = OVS_VPORT_CMD_GET;
1077     request.dp_ifindex = dpif->dp_ifindex;
1079     buf = ofpbuf_new(1024);
1080     dpif_netlink_vport_to_ofpbuf(&request, buf);
1081     nl_dump_start(dump, NETLINK_GENERIC, buf);
/* dpif_class callback: allocates iterator state into '*statep'. */
1086 dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep)
1088     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1089     struct dpif_netlink_port_state *state;
1091     *statep = state = xmalloc(sizeof *state);
1092     dpif_netlink_port_dump_start__(dpif, &state->dump);
1094     ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE);
/* Fetches and parses the next vport record from 'dump' into 'vport'. */
1099 dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif,
1100                               struct nl_dump *dump,
1101                               struct dpif_netlink_vport *vport,
1102                               struct ofpbuf *buffer)
1107     if (!nl_dump_next(dump, &buf, buffer)) {
1111     error = dpif_netlink_vport_from_ofpbuf(vport, &buf);
1113         VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)",
1114                      dpif_name(&dpif->dpif), ovs_strerror(error));
/* dpif_class callback: exposes the next vport as a dpif_port.  The strings
 * point into 'state->buf' (CONST_CAST, not copies) and are only valid until
 * the next call. */
1120 dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_,
1121                             struct dpif_port *dpif_port)
1123     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1124     struct dpif_netlink_port_state *state = state_;
1125     struct dpif_netlink_vport vport;
1128     error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport,
1133         dpif_port->name = CONST_CAST(char *, vport.name);
1134         dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
1135         dpif_port->port_no = vport.port_no;
/* dpif_class callback: finishes the dump and frees the iterator state. */
1140 dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
1142     struct dpif_netlink_port_state *state = state_;
1143     int error = nl_dump_done(&state->dump);
1145     ofpbuf_uninit(&state->buf);
/* NOTE(review): lines dropped (gaps in embedded numbering) — braces, the
 * ENOBUFS "report everything changed" return, loop structure, and several
 * returns are missing.  Recover from upstream. */
/* dpif_class 'port_poll' callback: reports the name of a device that has
 * changed (vport new/del/set notifications from the kernel multicast
 * group), lazily creating the notifier socket on first use. */
1151 dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
1153     struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1155     /* Lazily create the Netlink socket to listen for notifications. */
1156     if (!dpif->port_notifier) {
1157         struct nl_sock *sock;
1160         error = nl_sock_create(NETLINK_GENERIC, &sock);
1165         error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
1167             nl_sock_destroy(sock);
1170         dpif->port_notifier = sock;
1172         /* We have no idea of the current state so report that everything
1178         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1179         uint64_t buf_stub[4096 / 8];
1183         ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
1184         error = nl_sock_recv(dpif->port_notifier, &buf, false);
1186             struct dpif_netlink_vport vport;
1188             error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
1190                 if (vport.dp_ifindex == dpif->dp_ifindex
1191                     && (vport.cmd == OVS_VPORT_CMD_NEW
1192                         || vport.cmd == OVS_VPORT_CMD_DEL
1193                         || vport.cmd == OVS_VPORT_CMD_SET)) {
1194                     VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
1195                              dpif->dpif.full_name, vport.name, vport.cmd);
1196                     if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
1197                         dpif->refresh_channels = true;
1199                     *devnamep = xstrdup(vport.name);
1200                     ofpbuf_uninit(&buf);
1204         } else if (error != EAGAIN) {
1205             VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
1206                          ovs_strerror(error));
1207             nl_sock_drain(dpif->port_notifier);
1211         ofpbuf_uninit(&buf);
/* dpif_class callback: arms poll-loop wakeup on the notifier socket, or
 * wakes immediately when the socket does not exist yet. */
1219 dpif_netlink_port_poll_wait(const struct dpif *dpif_)
1221     const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1223     if (dpif->port_notifier) {
1224         nl_sock_wait(dpif->port_notifier, POLLIN);
1226         poll_immediate_wake();
/* NOTE(review): lines dropped (gaps in embedded numbering) — braces,
 * if/else skeletons (e.g. the 'if (ufid)' around the ufid assignment), and
 * 'request->key = key;' assignments are missing.  Recover from upstream. */
/* Copies an optional UFID and the terse flag into 'request'. */
1231 dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request,
1232                             const ovs_u128 *ufid, bool terse)
1235         request->ufid = *ufid;
1236         request->ufid_present = true;
1238         request->ufid_present = false;
1240     request->ufid_terse = terse;
/* Builds an OVS_FLOW_CMD_GET request from a raw key and optional UFID. */
1244 dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif,
1245                              const struct nlattr *key, size_t key_len,
1246                              const ovs_u128 *ufid, bool terse,
1247                              struct dpif_netlink_flow *request)
1249     dpif_netlink_flow_init(request);
1250     request->cmd = OVS_FLOW_CMD_GET;
1251     request->dp_ifindex = dpif->dp_ifindex;
1253     request->key_len = key_len;
1254     dpif_netlink_flow_init_ufid(request, ufid, terse);
/* Convenience wrapper taking a struct dpif_flow_get. */
1258 dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
1259                            const struct dpif_flow_get *get,
1260                            struct dpif_netlink_flow *request)
1262     dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid,
/* Builds and executes a flow-get transaction; reply/buf as in
 * dpif_netlink_flow_transact(). */
1267 dpif_netlink_flow_get__(const struct dpif_netlink *dpif,
1268                         const struct nlattr *key, size_t key_len,
1269                         const ovs_u128 *ufid, bool terse,
1270                         struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1272     struct dpif_netlink_flow request;
1274     dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request);
1275     return dpif_netlink_flow_transact(&request, reply, bufp);
/* Re-fetches 'flow' (by UFID when present, else by key). */
1279 dpif_netlink_flow_get(const struct dpif_netlink *dpif,
1280                       const struct dpif_netlink_flow *flow,
1281                       struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
1283     return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len,
1284                                    flow->ufid_present ? &flow->ufid : NULL,
1285                                    false, reply, bufp);
/* Translates a dpif_flow_put into an OVS_FLOW_CMD_NEW/SET request.  A
 * zero-length 'dummy_action' guarantees OVS_FLOW_ATTR_ACTIONS is always
 * serialized even when the caller supplies no actions. */
1289 dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
1290                            const struct dpif_flow_put *put,
1291                            struct dpif_netlink_flow *request)
1293     static const struct nlattr dummy_action;
1295     dpif_netlink_flow_init(request);
1296     request->cmd = (put->flags & DPIF_FP_CREATE
1297                     ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
1298     request->dp_ifindex = dpif->dp_ifindex;
1299     request->key = put->key;
1300     request->key_len = put->key_len;
1301     request->mask = put->mask;
1302     request->mask_len = put->mask_len;
1303     dpif_netlink_flow_init_ufid(request, put->ufid, false);
1305     /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
1306     request->actions = (put->actions
1308                         : CONST_CAST(struct nlattr *, &dummy_action));
1309     request->actions_len = put->actions_len;
1310     if (put->flags & DPIF_FP_ZERO_STATS) {
1311         request->clear = true;
1313     if (put->flags & DPIF_FP_PROBE) {
1314         request->probe = true;
1316     request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
/* Builds an OVS_FLOW_CMD_DEL request from a raw key and optional UFID. */
1320 dpif_netlink_init_flow_del__(struct dpif_netlink *dpif,
1321                              const struct nlattr *key, size_t key_len,
1322                              const ovs_u128 *ufid, bool terse,
1323                              struct dpif_netlink_flow *request)
1325     dpif_netlink_flow_init(request);
1326     request->cmd = OVS_FLOW_CMD_DEL;
1327     request->dp_ifindex = dpif->dp_ifindex;
1329     request->key_len = key_len;
1330     dpif_netlink_flow_init_ufid(request, ufid, terse);
/* Convenience wrapper taking a struct dpif_flow_del. */
1334 dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
1335                            const struct dpif_flow_del *del,
1336                            struct dpif_netlink_flow *request)
1338     return dpif_netlink_init_flow_del__(dpif, del->key, del->key_len,
1339                                         del->ufid, del->terse, request);
1342 struct dpif_netlink_flow_dump {
1343 struct dpif_flow_dump up;
1344 struct nl_dump nl_dump;
1348 static struct dpif_netlink_flow_dump *
1349 dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
1351 return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
/* Starts a flow dump of datapath 'dpif_': allocates a dump state object,
 * encodes an OVS_FLOW_CMD_GET request (no UFID filter; 'terse' controls
 * whether replies omit actions/key), and kicks off the Netlink dump.
 * Ownership of the encoded 'buf' passes to nl_dump_start().
 * NOTE(review): declaration of 'buf' and the trailing "return &dump->up;"
 * appear elided in this extract; verify against the full file. */
1354 static struct dpif_flow_dump *
1355 dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse)
1357 const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1358 struct dpif_netlink_flow_dump *dump;
1359 struct dpif_netlink_flow request;
1362 dump = xmalloc(sizeof *dump);
1363 dpif_flow_dump_init(&dump->up, dpif_);
1365 dpif_netlink_flow_init(&request);
1366 request.cmd = OVS_FLOW_CMD_GET;
1367 request.dp_ifindex = dpif->dp_ifindex;
/* No UFID in the request: this dump matches all flows. */
1368 request.ufid_present = false;
1369 request.ufid_terse = terse;
1371 buf = ofpbuf_new(1024);
1372 dpif_netlink_flow_to_ofpbuf(&request, buf);
1373 nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
/* First error (if any) encountered by any dump thread is latched here. */
1375 atomic_init(&dump->status, 0);
1376 dump->up.terse = terse;
/* Finishes the flow dump and frees its state.  Returns the first error that
 * any dump thread recorded in 'dump->status', falling back to the status
 * that nl_dump_done() reports for the Netlink dump itself.
 * NOTE(review): declaration of 'dump_status' and the free() of 'dump' are
 * elided in this extract; verify against the full file. */
1382 dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
1384 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1385 unsigned int nl_status = nl_dump_done(&dump->nl_dump);
1388 /* No other thread has access to 'dump' at this point. */
1389 atomic_read_relaxed(&dump->status, &dump_status);
1391 return dump_status ? dump_status : nl_status;
1394 struct dpif_netlink_flow_dump_thread {
1395 struct dpif_flow_dump_thread up;
1396 struct dpif_netlink_flow_dump *dump;
1397 struct dpif_netlink_flow flow;
1398 struct dpif_flow_stats stats;
1399 struct ofpbuf nl_flows; /* Always used to store flows. */
1400 struct ofpbuf *nl_actions; /* Used if kernel does not supply actions. */
1403 static struct dpif_netlink_flow_dump_thread *
1404 dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
1406 return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up);
/* Allocates per-thread state for iterating the shared flow dump 'dump_':
 * a batch buffer 'nl_flows' for dumped flows, plus 'nl_actions' (initially
 * NULL), lazily used when an individual flow must be re-fetched to obtain
 * its actions.
 * NOTE(review): the trailing "return &thread->up;" is elided in this
 * extract; verify against the full file. */
1409 static struct dpif_flow_dump_thread *
1410 dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_)
1412 struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
1413 struct dpif_netlink_flow_dump_thread *thread;
1415 thread = xmalloc(sizeof *thread);
1416 dpif_flow_dump_thread_init(&thread->up, &dump->up);
1417 thread->dump = dump;
1418 ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE);
1419 thread->nl_actions = NULL;
/* Releases the per-thread dump buffers.  ofpbuf_delete() tolerates the
 * NULL 'nl_actions' case (never used for this thread).
 * NOTE(review): the free() of 'thread' itself is elided in this extract;
 * verify against the full file. */
1425 dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
1427 struct dpif_netlink_flow_dump_thread *thread
1428 = dpif_netlink_flow_dump_thread_cast(thread_);
1430 ofpbuf_uninit(&thread->nl_flows);
1431 ofpbuf_delete(thread->nl_actions);
/* Copies the fields of kernel-format flow 'datapath_flow' into the generic
 * dpif representation '*dpif_flow'.  Pointer fields alias 'datapath_flow';
 * the caller must keep its backing buffer alive while '*dpif_flow' is used.
 * When the kernel supplied no UFID, one is synthesized by hashing the flow
 * key (the key must then be present — see the assertion).
 * NOTE(review): the else-branch structure around the hash call is partly
 * elided in this extract; verify against the full file. */
1436 dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow,
1437 const struct dpif_netlink_flow *datapath_flow)
1439 dpif_flow->key = datapath_flow->key;
1440 dpif_flow->key_len = datapath_flow->key_len;
1441 dpif_flow->mask = datapath_flow->mask;
1442 dpif_flow->mask_len = datapath_flow->mask_len;
1443 dpif_flow->actions = datapath_flow->actions;
1444 dpif_flow->actions_len = datapath_flow->actions_len;
1445 dpif_flow->ufid_present = datapath_flow->ufid_present;
/* The kernel datapath has no PMD threads; mark the id as "none". */
1446 dpif_flow->pmd_id = PMD_ID_NULL;
1447 if (datapath_flow->ufid_present) {
1448 dpif_flow->ufid = datapath_flow->ufid;
1450 ovs_assert(datapath_flow->key && datapath_flow->key_len);
1451 dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len,
1454 dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
/* Fills 'flows' with up to 'max_flows' flows pulled from this thread's
 * share of the Netlink dump.  Flows that arrive without actions (and when
 * the dump is not terse) are re-fetched individually via
 * dpif_netlink_flow_get(); only one such flow can be returned per call
 * because there is a single 'nl_actions' buffer to back it.
 * NOTE(review): loop header, error/continue/break lines, and the trailing
 * return of 'n_flows' are partly elided in this extract; verify against the
 * full file. */
1458 dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
1459 struct dpif_flow *flows, int max_flows)
1461 struct dpif_netlink_flow_dump_thread *thread
1462 = dpif_netlink_flow_dump_thread_cast(thread_);
1463 struct dpif_netlink_flow_dump *dump = thread->dump;
1464 struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
/* Discard any actions buffer left over from the previous call; the flows it
 * backed have already been consumed by the caller. */
1467 ofpbuf_delete(thread->nl_actions);
1468 thread->nl_actions = NULL;
1472 || (n_flows < max_flows && thread->nl_flows.size)) {
1473 struct dpif_netlink_flow datapath_flow;
1474 struct ofpbuf nl_flow;
1477 /* Try to grab another flow. */
1478 if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
1482 /* Convert the flow to our output format. */
1483 error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
/* Record the first parse error on the shared dump status. */
1485 atomic_store_relaxed(&dump->status, error);
1489 if (dump->up.terse || datapath_flow.actions) {
1490 /* Common case: we don't want actions, or the flow includes
1492 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
1495 /* Rare case: the flow does not include actions. Retrieve this
1496 * individual flow again to get the actions. */
1497 error = dpif_netlink_flow_get(dpif, &datapath_flow,
1498 &datapath_flow, &thread->nl_actions)
1499 if (error == ENOENT) {
/* Flow was deleted between the dump and the get: not an error. */
1500 VLOG_DBG("dumped flow disappeared on get");
1503 VLOG_WARN("error fetching dumped flow: %s",
1504 ovs_strerror(error));
1505 atomic_store_relaxed(&dump->status, error);
1509 /* Save this flow. Then exit, because we only have one buffer to
1510 * handle this case. */
1511 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
/* Serializes 'd_exec' as an OVS_PACKET_CMD_EXECUTE Generic Netlink message
 * into 'buf' for datapath 'dp_ifindex': genl header, ovs_header, the raw
 * packet, a nested key derived from the packet metadata, the actions, and an
 * optional probe flag.  Tailroom is preallocated up front so the puts below
 * do not reallocate.
 * NOTE(review): the 'buf' parameter line, 'key_ofs' declaration, and braces
 * are elided in this extract; verify against the full file. */
1520 dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
1523 struct ovs_header *k_exec;
/* 64 bytes of slack covers the fixed headers and attribute framing. */
1526 ofpbuf_prealloc_tailroom(buf, (64
1527 + dp_packet_size(d_exec->packet)
1528 + ODP_KEY_METADATA_SIZE
1529 + d_exec->actions_len));
1531 nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
1532 OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);
1534 k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
1535 k_exec->dp_ifindex = dp_ifindex;
1537 nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
1538 dp_packet_data(d_exec->packet),
1539 dp_packet_size(d_exec->packet));
/* The kernel needs a flow key for the packet; build it from metadata. */
1541 key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
1542 odp_key_from_pkt_metadata(buf, &d_exec->packet->md);
1543 nl_msg_end_nested(buf, key_ofs);
1545 nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
1546 d_exec->actions, d_exec->actions_len);
1547 if (d_exec->probe) {
1548 nl_msg_put_flag(buf, OVS_PACKET_ATTR_PROBE);
1552 /* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
1553 * Returns the number actually executed (at least 1, if 'n_ops' is
/* Executes up to MAX_OPS of the 'n_ops' operations in 'ops' against 'dpif'
 * as one batched Netlink transaction.  Phase 1 encodes each op into its own
 * request buffer (requesting an NLM_F_ECHO reply when the caller wants
 * stats); phase 2 sends all requests via nl_transact_multiple(); phase 3
 * decodes each reply into the per-op result fields and releases the
 * buffers.  Each op's 'error' field is set individually.
 * NOTE(review): the struct op_auxdata definition, several braces, returns,
 * and reply-handling lines are elided in this extract; verify against the
 * full file. */
1556 dpif_netlink_operate__(struct dpif_netlink *dpif,
1557 struct dpif_op **ops, size_t n_ops)
1559 enum { MAX_OPS = 50 };
1562 struct nl_transaction txn;
1564 struct ofpbuf request;
1565 uint64_t request_stub[1024 / 8];
1567 struct ofpbuf reply;
1568 uint64_t reply_stub[1024 / 8];
1571 struct nl_transaction *txnsp[MAX_OPS];
/* Clamp the batch: only MAX_OPS transactions fit in 'txnsp'. */
1574 n_ops = MIN(n_ops, MAX_OPS);
1575 for (i = 0; i < n_ops; i++) {
1576 struct op_auxdata *aux = &auxes[i];
1577 struct dpif_op *op = ops[i];
1578 struct dpif_flow_put *put;
1579 struct dpif_flow_del *del;
1580 struct dpif_flow_get *get;
1581 struct dpif_netlink_flow flow;
1583 ofpbuf_use_stub(&aux->request,
1584 aux->request_stub, sizeof aux->request_stub);
1585 aux->txn.request = &aux->request;
1587 ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
1588 aux->txn.reply = NULL;
1591 case DPIF_OP_FLOW_PUT:
1592 put = &op->u.flow_put;
1593 dpif_netlink_init_flow_put(dpif, put, &flow);
/* Ask for an echoed reply only when the caller wants stats back. */
1595 flow.nlmsg_flags |= NLM_F_ECHO;
1596 aux->txn.reply = &aux->reply;
1598 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1601 case DPIF_OP_FLOW_DEL:
1602 del = &op->u.flow_del;
1603 dpif_netlink_init_flow_del(dpif, del, &flow);
1605 flow.nlmsg_flags |= NLM_F_ECHO;
1606 aux->txn.reply = &aux->reply;
1608 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1611 case DPIF_OP_EXECUTE:
1612 /* Can't execute a packet that won't fit in a Netlink attribute. */
1613 if (OVS_UNLIKELY(nl_attr_oversized(
1614 dp_packet_size(op->u.execute.packet)))) {
1615 /* Report an error immediately if this is the first operation.
1616 * Otherwise the easiest thing to do is to postpone to the next
1617 * call (when this will be the first operation). */
1619 VLOG_ERR_RL(&error_rl,
1620 "dropping oversized %"PRIu32"-byte packet",
1621 dp_packet_size(op->u.execute.packet));
1622 op->error = ENOBUFS;
1627 dpif_netlink_encode_execute(dpif->dp_ifindex, &op->u.execute,
1632 case DPIF_OP_FLOW_GET:
1633 get = &op->u.flow_get;
1634 dpif_netlink_init_flow_get(dpif, get, &flow);
/* The GET reply is decoded straight into the caller's buffer. */
1635 aux->txn.reply = get->buffer;
1636 dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
1644 for (i = 0; i < n_ops; i++) {
1645 txnsp[i] = &auxes[i].txn;
/* Single batched send/receive for all encoded operations. */
1647 nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);
1649 for (i = 0; i < n_ops; i++) {
1650 struct op_auxdata *aux = &auxes[i];
1651 struct nl_transaction *txn = &auxes[i].txn;
1652 struct dpif_op *op = ops[i];
1653 struct dpif_flow_put *put;
1654 struct dpif_flow_del *del;
1655 struct dpif_flow_get *get;
1657 op->error = txn->error;
1660 case DPIF_OP_FLOW_PUT:
1661 put = &op->u.flow_put;
1664 struct dpif_netlink_flow reply;
1666 op->error = dpif_netlink_flow_from_ofpbuf(&reply,
1669 dpif_netlink_flow_get_stats(&reply, put->stats);
1675 case DPIF_OP_FLOW_DEL:
1676 del = &op->u.flow_del;
1679 struct dpif_netlink_flow reply;
1681 op->error = dpif_netlink_flow_from_ofpbuf(&reply,
1684 dpif_netlink_flow_get_stats(&reply, del->stats);
1690 case DPIF_OP_EXECUTE:
1693 case DPIF_OP_FLOW_GET:
1694 get = &op->u.flow_get;
1696 struct dpif_netlink_flow reply;
1698 op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
1700 dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow,
1710 ofpbuf_uninit(&aux->request);
1711 ofpbuf_uninit(&aux->reply);
1718 dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops)
1720 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1723 size_t chunk = dpif_netlink_operate__(dpif, ops, n_ops);
1731 dpif_netlink_handler_uninit(struct dpif_handler *handler)
1733 vport_delete_sock_pool(handler);
1737 dpif_netlink_handler_init(struct dpif_handler *handler)
1739 return vport_create_sock_pool(handler);
1744 dpif_netlink_handler_init(struct dpif_handler *handler)
1746 handler->epoll_fd = epoll_create(10);
1747 return handler->epoll_fd < 0 ? errno : 0;
1751 dpif_netlink_handler_uninit(struct dpif_handler *handler)
1753 close(handler->epoll_fd);
1757 /* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
1758 * currently in 'dpif' in the kernel, by adding a new set of channels for
1759 * any kernel vport that lacks one and deleting any channels that have no
1760 * backing kernel vports. */
/* Synchronizes upcall channels with the kernel's current vport set (see the
 * comment above this function in the file).  If the handler count changed,
 * all channels are torn down and 'n_handlers' handlers are (re)initialized;
 * then each kernel vport is dumped, given channels/upcall PIDs if missing or
 * stale, and marked for keeping; finally, channels whose vports vanished are
 * discarded.  Requires 'dpif->upcall_lock' held for writing.
 * NOTE(review): numerous interior lines (error paths, 'retval'/loop
 * variables, the final return) are elided in this extract; verify against
 * the full file. */
1762 dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
1763 OVS_REQ_WRLOCK(dpif->upcall_lock)
1765 unsigned long int *keep_channels;
1766 struct dpif_netlink_vport vport;
1767 size_t keep_channels_nbits;
1768 struct nl_dump dump;
1769 uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
/* Windows currently supports at most one handler. */
1774 ovs_assert(!WINDOWS || n_handlers <= 1);
1775 ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
1777 if (dpif->n_handlers != n_handlers) {
/* Handler count changed: rebuild the handler array from scratch. */
1778 destroy_all_channels(dpif);
1779 dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
1780 for (i = 0; i < n_handlers; i++) {
1782 struct dpif_handler *handler = &dpif->handlers[i];
1784 error = dpif_netlink_handler_init(handler);
/* Init failed: unwind the handlers initialized so far. */
1787 struct dpif_handler *tmp = &dpif->handlers[i];
1790 for (j = 0; j < i; j++) {
1791 dpif_netlink_handler_uninit(tmp);
1793 free(dpif->handlers);
1794 dpif->handlers = NULL;
1799 dpif->n_handlers = n_handlers;
1802 for (i = 0; i < n_handlers; i++) {
1803 struct dpif_handler *handler = &dpif->handlers[i];
/* Reset event bookkeeping before re-polling. */
1805 handler->event_offset = handler->n_events = 0;
1808 keep_channels_nbits = dpif->uc_array_size;
1809 keep_channels = bitmap_allocate(keep_channels_nbits);
1811 ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
1812 dpif_netlink_port_dump_start__(dpif, &dump);
1813 while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
1814 uint32_t port_no = odp_to_u32(vport.port_no);
1815 uint32_t *upcall_pids = NULL;
/* Vport has no channels yet (new port or array too small): create them. */
1818 if (port_no >= dpif->uc_array_size
1819 || !vport_get_pids(dpif, port_no, &upcall_pids)) {
1820 struct nl_sock **socksp = vport_create_socksp(dpif, &error);
1826 error = vport_add_channels(dpif, vport.port_no, socksp);
1828 VLOG_INFO("%s: could not add channels for port %s",
1829 dpif_name(&dpif->dpif), vport.name);
1830 vport_del_socksp(dpif, socksp);
1834 upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
1838 /* Configure the vport to deliver misses to 'sock'. */
1839 if (vport.upcall_pids[0] == 0
1840 || vport.n_upcall_pids != dpif->n_handlers
1841 || memcmp(upcall_pids, vport.upcall_pids, n_handlers * sizeof
1843 struct dpif_netlink_vport vport_request;
1845 dpif_netlink_vport_init(&vport_request);
1846 vport_request.cmd = OVS_VPORT_CMD_SET;
1847 vport_request.dp_ifindex = dpif->dp_ifindex;
1848 vport_request.port_no = vport.port_no;
1849 vport_request.n_upcall_pids = dpif->n_handlers;
1850 vport_request.upcall_pids = upcall_pids;
1851 error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
1853 VLOG_WARN_RL(&error_rl,
1854 "%s: failed to set upcall pid on port: %s",
1855 dpif_name(&dpif->dpif), ovs_strerror(error));
1857 if (error != ENODEV && error != ENOENT) {
1860 /* The vport isn't really there, even though the dump says
1861 * it is. Probably we just hit a race after a port
1868 if (port_no < keep_channels_nbits) {
1869 bitmap_set1(keep_channels, port_no);
1876 vport_del_channels(dpif, vport.port_no);
1878 nl_dump_done(&dump);
1879 ofpbuf_uninit(&buf);
1881 /* Discard any saved channels that we didn't reuse. */
1882 for (i = 0; i < keep_channels_nbits; i++) {
1883 if (!bitmap_is_set(keep_channels, i)) {
1884 vport_del_channels(dpif, u32_to_odp(i));
1887 free(keep_channels);
1893 dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable)
1894 OVS_REQ_WRLOCK(dpif->upcall_lock)
1896 if ((dpif->handlers != NULL) == enable) {
1898 } else if (!enable) {
1899 destroy_all_channels(dpif);
1902 return dpif_netlink_refresh_channels(dpif, 1);
1907 dpif_netlink_recv_set(struct dpif *dpif_, bool enable)
1909 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1912 fat_rwlock_wrlock(&dpif->upcall_lock);
1913 error = dpif_netlink_recv_set__(dpif, enable);
1914 fat_rwlock_unlock(&dpif->upcall_lock);
1920 dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
1922 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
1926 /* Multiple upcall handlers will be supported once kernel datapath supports
1928 if (n_handlers > 1) {
1933 fat_rwlock_wrlock(&dpif->upcall_lock);
1934 if (dpif->handlers) {
1935 error = dpif_netlink_refresh_channels(dpif, n_handlers);
1937 fat_rwlock_unlock(&dpif->upcall_lock);
1943 dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
1944 uint32_t queue_id, uint32_t *priority)
1946 if (queue_id < 0xf000) {
1947 *priority = TC_H_MAKE(1 << 16, queue_id + 1);
/* Parses the Netlink message in 'buf' (an OVS_PACKET_CMD_MISS or
 * OVS_PACKET_CMD_ACTION notification) into '*upcall', storing the source
 * datapath's ifindex in '*dp_ifindex'.  '*upcall' points into 'buf', so the
 * caller must not free 'buf' while '*upcall' is in use.
 * NOTE(review): 'struct ofpbuf b' / 'int type' declarations, EINVAL returns,
 * and the final return are elided in this extract; verify against the full
 * file. */
1955 parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf,
1956 struct dpif_upcall *upcall, int *dp_ifindex)
1958 static const struct nl_policy ovs_packet_policy[] = {
1959 /* Always present. */
1960 [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
1961 .min_len = ETH_HEADER_LEN },
1962 [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },
1964 /* OVS_PACKET_CMD_ACTION only. */
1965 [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
1966 [OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
1969 struct ovs_header *ovs_header;
1970 struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
1971 struct nlmsghdr *nlmsg;
1972 struct genlmsghdr *genl;
/* Parse on a read-only view so 'buf' itself is left untouched. */
1976 ofpbuf_use_const(&b, buf->data, buf->size);
1978 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
1979 genl = ofpbuf_try_pull(&b, sizeof *genl);
1980 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
1981 if (!nlmsg || !genl || !ovs_header
1982 || nlmsg->nlmsg_type != ovs_packet_family
1983 || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
1984 ARRAY_SIZE(ovs_packet_policy))) {
/* Map the genl command onto the dpif upcall type. */
1988 type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
1989 : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
1995 /* (Re)set ALL fields of '*upcall' on successful return. */
1996 upcall->type = type;
1997 upcall->key = CONST_CAST(struct nlattr *,
1998 nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
1999 upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
/* Upcalls carry no kernel UFID; derive one from the key hash. */
2000 dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid);
2001 upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
2002 upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
2004 /* Allow overwriting the netlink attribute header without reallocating. */
2005 dp_packet_use_stub(&upcall->packet,
2006 CONST_CAST(struct nlattr *,
2007 nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
2008 nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
2009 sizeof(struct nlattr));
2010 dp_packet_set_data(&upcall->packet,
2011 (char *)dp_packet_data(&upcall->packet) + sizeof(struct nlattr));
2012 dp_packet_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));
2014 *dp_ifindex = ovs_header->dp_ifindex;
2020 #define PACKET_RECV_BATCH_SIZE 50
2022 dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
2023 struct dpif_upcall *upcall, struct ofpbuf *buf)
2024 OVS_REQ_RDLOCK(dpif->upcall_lock)
2026 struct dpif_handler *handler;
2028 struct dpif_windows_vport_sock *sock_pool;
2031 if (!dpif->handlers) {
2035 /* Only one handler is supported currently. */
2036 if (handler_id >= 1) {
2040 if (handler_id >= dpif->n_handlers) {
2044 handler = &dpif->handlers[handler_id];
2045 sock_pool = handler->vport_sock_pool;
2047 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2052 if (++read_tries > PACKET_RECV_BATCH_SIZE) {
2056 error = nl_sock_recv(sock_pool[i].nl_sock, buf, false);
2057 if (error == ENOBUFS) {
2058 /* ENOBUFS typically means that we've received so many
2059 * packets that the buffer overflowed. Try again
2060 * immediately because there's almost certainly a packet
2061 * waiting for us. */
2062 /* XXX: report_loss(dpif, ch, idx, handler_id); */
2066 /* XXX: ch->last_poll = time_msec(); */
2068 if (error == EAGAIN) {
2074 error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
2075 if (!error && dp_ifindex == dpif->dp_ifindex) {
2087 dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
2088 struct dpif_upcall *upcall, struct ofpbuf *buf)
2089 OVS_REQ_RDLOCK(dpif->upcall_lock)
2091 struct dpif_handler *handler;
2094 if (!dpif->handlers || handler_id >= dpif->n_handlers) {
2098 handler = &dpif->handlers[handler_id];
2099 if (handler->event_offset >= handler->n_events) {
2102 handler->event_offset = handler->n_events = 0;
2105 retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
2106 dpif->uc_array_size, 0);
2107 } while (retval < 0 && errno == EINTR);
2110 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
2111 VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
2112 } else if (retval > 0) {
2113 handler->n_events = retval;
2117 while (handler->event_offset < handler->n_events) {
2118 int idx = handler->epoll_events[handler->event_offset].data.u32;
2119 struct dpif_channel *ch = &dpif->handlers[handler_id].channels[idx];
2121 handler->event_offset++;
2127 if (++read_tries > 50) {
2131 error = nl_sock_recv(ch->sock, buf, false);
2132 if (error == ENOBUFS) {
2133 /* ENOBUFS typically means that we've received so many
2134 * packets that the buffer overflowed. Try again
2135 * immediately because there's almost certainly a packet
2136 * waiting for us. */
2137 report_loss(dpif, ch, idx, handler_id);
2141 ch->last_poll = time_msec();
2143 if (error == EAGAIN) {
2149 error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
2150 if (!error && dp_ifindex == dpif->dp_ifindex) {
2163 dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
2164 struct dpif_upcall *upcall, struct ofpbuf *buf)
2166 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2169 fat_rwlock_rdlock(&dpif->upcall_lock);
2171 error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
2173 error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
2175 fat_rwlock_unlock(&dpif->upcall_lock);
2181 dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id)
2182 OVS_REQ_RDLOCK(dpif->upcall_lock)
2186 struct dpif_windows_vport_sock *sock_pool =
2187 dpif->handlers[handler_id].vport_sock_pool;
2189 /* Only one handler is supported currently. */
2190 if (handler_id >= 1) {
2194 for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
2195 nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
2198 if (dpif->handlers && handler_id < dpif->n_handlers) {
2199 struct dpif_handler *handler = &dpif->handlers[handler_id];
2201 poll_fd_wait(handler->epoll_fd, POLLIN);
2207 dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id)
2209 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2211 fat_rwlock_rdlock(&dpif->upcall_lock);
2212 dpif_netlink_recv_wait__(dpif, handler_id);
2213 fat_rwlock_unlock(&dpif->upcall_lock);
2217 dpif_netlink_recv_purge__(struct dpif_netlink *dpif)
2218 OVS_REQ_WRLOCK(dpif->upcall_lock)
2220 if (dpif->handlers) {
2223 for (i = 0; i < dpif->uc_array_size; i++ ) {
2224 if (!dpif->handlers[0].channels[i].sock) {
2228 for (j = 0; j < dpif->n_handlers; j++) {
2229 nl_sock_drain(dpif->handlers[j].channels[i].sock);
2236 dpif_netlink_recv_purge(struct dpif *dpif_)
2238 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2240 fat_rwlock_wrlock(&dpif->upcall_lock);
2241 dpif_netlink_recv_purge__(dpif);
2242 fat_rwlock_unlock(&dpif->upcall_lock);
2246 dpif_netlink_get_datapath_version(void)
2248 char *version_str = NULL;
2252 #define MAX_VERSION_STR_SIZE 80
2253 #define LINUX_DATAPATH_VERSION_FILE "/sys/module/openvswitch/version"
2256 f = fopen(LINUX_DATAPATH_VERSION_FILE, "r");
2259 char version[MAX_VERSION_STR_SIZE];
2261 if (fgets(version, MAX_VERSION_STR_SIZE, f)) {
2262 newline = strchr(version, '\n');
2266 version_str = xstrdup(version);
2275 const struct dpif_class dpif_netlink_class = {
2277 dpif_netlink_enumerate,
2281 dpif_netlink_destroy,
2284 dpif_netlink_get_stats,
2285 dpif_netlink_port_add,
2286 dpif_netlink_port_del,
2287 dpif_netlink_port_query_by_number,
2288 dpif_netlink_port_query_by_name,
2289 dpif_netlink_port_get_pid,
2290 dpif_netlink_port_dump_start,
2291 dpif_netlink_port_dump_next,
2292 dpif_netlink_port_dump_done,
2293 dpif_netlink_port_poll,
2294 dpif_netlink_port_poll_wait,
2295 dpif_netlink_flow_flush,
2296 dpif_netlink_flow_dump_create,
2297 dpif_netlink_flow_dump_destroy,
2298 dpif_netlink_flow_dump_thread_create,
2299 dpif_netlink_flow_dump_thread_destroy,
2300 dpif_netlink_flow_dump_next,
2301 dpif_netlink_operate,
2302 dpif_netlink_recv_set,
2303 dpif_netlink_handlers_set,
2304 NULL, /* poll_thread_set */
2305 dpif_netlink_queue_to_priority,
2307 dpif_netlink_recv_wait,
2308 dpif_netlink_recv_purge,
2309 NULL, /* register_upcall_cb */
2310 NULL, /* enable_upcall */
2311 NULL, /* disable_upcall */
2312 dpif_netlink_get_datapath_version, /* get_datapath_version */
2316 dpif_netlink_init(void)
2318 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2321 if (ovsthread_once_start(&once)) {
2322 error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
2323 &ovs_datapath_family);
2325 VLOG_ERR("Generic Netlink family '%s' does not exist. "
2326 "The Open vSwitch kernel module is probably not loaded.",
2327 OVS_DATAPATH_FAMILY);
2330 error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family);
2333 error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family);
2336 error = nl_lookup_genl_family(OVS_PACKET_FAMILY,
2337 &ovs_packet_family);
2340 error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
2341 &ovs_vport_mcgroup);
2344 ovsthread_once_done(&once);
2351 dpif_netlink_is_internal_device(const char *name)
2353 struct dpif_netlink_vport reply;
2357 error = dpif_netlink_vport_get(name, &reply, &buf);
2360 } else if (error != ENODEV && error != ENOENT) {
2361 VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
2362 name, ovs_strerror(error));
2365 return reply.type == OVS_VPORT_TYPE_INTERNAL;
2368 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2369 * by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a
2370 * positive errno value.
2372 * 'vport' will contain pointers into 'buf', so the caller should not free
2373 * 'buf' while 'vport' is still in use. */
/* Decodes the ovs_vport Generic Netlink message in 'buf' into '*vport' (see
 * the contract comment above this function in the file).  Mandatory
 * attributes: port number, type, name; upcall PIDs, stats, and options are
 * optional.  '*vport' aliases 'buf'.
 * NOTE(review): 'struct ofpbuf b' declaration, EINVAL returns, and the
 * final "return 0;" are elided in this extract; verify against the full
 * file. */
2375 dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
2376 const struct ofpbuf *buf)
2378 static const struct nl_policy ovs_vport_policy[] = {
2379 [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
2380 [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
2381 [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
2382 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
2383 [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
2385 [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
2388 struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
2389 struct ovs_header *ovs_header;
2390 struct nlmsghdr *nlmsg;
2391 struct genlmsghdr *genl;
2394 dpif_netlink_vport_init(vport);
/* Parse on a read-only view; 'buf' is left untouched. */
2396 ofpbuf_use_const(&b, buf->data, buf->size);
2397 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2398 genl = ofpbuf_try_pull(&b, sizeof *genl);
2399 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2400 if (!nlmsg || !genl || !ovs_header
2401 || nlmsg->nlmsg_type != ovs_vport_family
2402 || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
2403 ARRAY_SIZE(ovs_vport_policy))) {
2407 vport->cmd = genl->cmd;
2408 vport->dp_ifindex = ovs_header->dp_ifindex;
2409 vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
2410 vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2411 vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
2412 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
/* PID count is inferred from the attribute's payload length. */
2413 vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
2414 / (sizeof *vport->upcall_pids);
2415 vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);
2418 if (a[OVS_VPORT_ATTR_STATS]) {
2419 vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
2421 if (a[OVS_VPORT_ATTR_OPTIONS]) {
2422 vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
2423 vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
2428 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
2429 * followed by Netlink attributes corresponding to 'vport'. */
/* Encodes '*vport' into 'buf' as an ovs_vport Generic Netlink request (see
 * the contract comment above this function in the file).  Optional fields
 * (port_no, type, name, upcall PIDs, stats, options) are serialized only
 * when set to non-default values.
 * NOTE(review): the 'buf' parameter line and some condition lines (name /
 * stats checks) are elided in this extract; verify against the full file. */
2431 dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
2434 struct ovs_header *ovs_header;
/* NLM_F_ECHO requests that the kernel echo the resulting vport back. */
2436 nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
2437 vport->cmd, OVS_VPORT_VERSION);
2439 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2440 ovs_header->dp_ifindex = vport->dp_ifindex;
2442 if (vport->port_no != ODPP_NONE) {
2443 nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
2446 if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
2447 nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
2451 nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
2454 if (vport->upcall_pids) {
2455 nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
2457 vport->n_upcall_pids * sizeof *vport->upcall_pids);
2461 nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
2462 vport->stats, sizeof *vport->stats);
2465 if (vport->options) {
2466 nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
2467 vport->options, vport->options_len);
2471 /* Clears 'vport' to "empty" values. */
2473 dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
2475 memset(vport, 0, sizeof *vport);
2476 vport->port_no = ODPP_NONE;
2479 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2480 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2481 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2482 * result of the command is expected to be an ovs_vport also, which is decoded
2483 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
2484 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
/* Executes 'request' against the kernel (see the contract comment above
 * this function in the file): encodes it, runs the Netlink transaction, and
 * optionally decodes the reply into '*reply'/'*bufp'.  On decode failure
 * the reply is reset and '*bufp' freed so the caller never sees a
 * half-parsed result.
 * NOTE(review): early-return on init failure, the reply!=NULL guards, and
 * the final return are elided in this extract; verify against the full
 * file. */
2486 dpif_netlink_vport_transact(const struct dpif_netlink_vport *request,
2487 struct dpif_netlink_vport *reply,
2488 struct ofpbuf **bufp)
2490 struct ofpbuf *request_buf;
/* 'reply' and 'bufp' must be supplied together or not at all. */
2493 ovs_assert((reply != NULL) == (bufp != NULL));
2495 error = dpif_netlink_init();
2499 dpif_netlink_vport_init(reply);
2504 request_buf = ofpbuf_new(1024);
2505 dpif_netlink_vport_to_ofpbuf(request, request_buf);
2506 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2507 ofpbuf_delete(request_buf);
2511 error = dpif_netlink_vport_from_ofpbuf(reply, *bufp);
/* Decode failed: hand back a clean 'reply' and no buffer. */
2514 dpif_netlink_vport_init(reply);
2515 ofpbuf_delete(*bufp);
2522 /* Obtains information about the kernel vport named 'name' and stores it into
2523 * '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
2524 * longer needed ('reply' will contain pointers into '*bufp'). */
/* Looks up the kernel vport named 'name' (see the contract comment above
 * this function in the file): issues OVS_VPORT_CMD_GET keyed by name and
 * returns the transact result in '*reply'/'*bufp'. */
2526 dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply,
2527 struct ofpbuf **bufp)
2529 struct dpif_netlink_vport request;
2531 dpif_netlink_vport_init(&request);
2532 request.cmd = OVS_VPORT_CMD_GET;
2533 request.name = name;
2535 return dpif_netlink_vport_transact(&request, reply, bufp);
2538 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2539 * by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a
2540 * positive errno value.
2542 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
2543 * while 'dp' is still in use. */
/* Decodes the ovs_datapath Generic Netlink message in 'buf' into '*dp' (see
 * the contract comment above this function in the file).  Name is
 * mandatory; stats and megaflow stats are optional.  '*dp' aliases 'buf'.
 * NOTE(review): 'struct ofpbuf b' declaration, EINVAL return, and the final
 * "return 0;" are elided in this extract; verify against the full file. */
2545 dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp, const struct ofpbuf *buf)
2547 static const struct nl_policy ovs_datapath_policy[] = {
2548 [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
2549 [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
2551 [OVS_DP_ATTR_MEGAFLOW_STATS] = {
2552 NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
2556 struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
2557 struct ovs_header *ovs_header;
2558 struct nlmsghdr *nlmsg;
2559 struct genlmsghdr *genl;
2562 dpif_netlink_dp_init(dp);
/* Parse on a read-only view; 'buf' is left untouched. */
2564 ofpbuf_use_const(&b, buf->data, buf->size);
2565 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2566 genl = ofpbuf_try_pull(&b, sizeof *genl);
2567 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2568 if (!nlmsg || !genl || !ovs_header
2569 || nlmsg->nlmsg_type != ovs_datapath_family
2570 || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
2571 ARRAY_SIZE(ovs_datapath_policy))) {
2575 dp->cmd = genl->cmd;
2576 dp->dp_ifindex = ovs_header->dp_ifindex;
2577 dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
2578 if (a[OVS_DP_ATTR_STATS]) {
2579 dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
2582 if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
2583 dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
2589 /* Appends to 'buf' the Generic Netlink message described by 'dp'. */
/* Encodes '*dp' into 'buf' as an ovs_datapath Generic Netlink request.
 * Name, upcall PID, and user features are serialized only when set;
 * OVS_DP_ATTR_STATS is deliberately never serialized (see comment below).
 * NOTE(review): the condition guarding the name attribute is elided in this
 * extract; verify against the full file. */
2591 dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
2593 struct ovs_header *ovs_header;
/* NLM_F_ECHO requests that the kernel echo the resulting datapath back. */
2595 nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
2596 NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
2597 OVS_DATAPATH_VERSION);
2599 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2600 ovs_header->dp_ifindex = dp->dp_ifindex;
2603 nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
2606 if (dp->upcall_pid) {
2607 nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
2610 if (dp->user_features) {
2611 nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
2614 /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
2617 /* Clears 'dp' to "empty" values. */
2619 dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
2621 memset(dp, 0, sizeof *dp);
2625 dpif_netlink_dp_dump_start(struct nl_dump *dump)
2627 struct dpif_netlink_dp request;
2630 dpif_netlink_dp_init(&request);
2631 request.cmd = OVS_DP_CMD_GET;
2633 buf = ofpbuf_new(1024);
2634 dpif_netlink_dp_to_ofpbuf(&request, buf);
2635 nl_dump_start(dump, NETLINK_GENERIC, buf);
2639 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2640 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2641 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2642 * result of the command is expected to be of the same form, which is decoded
2643 * and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
2644 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
2646 dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
2647 struct dpif_netlink_dp *reply, struct ofpbuf **bufp)
2649 struct ofpbuf *request_buf;
2652 ovs_assert((reply != NULL) == (bufp != NULL));
2654 request_buf = ofpbuf_new(1024);
2655 dpif_netlink_dp_to_ofpbuf(request, request_buf);
2656 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2657 ofpbuf_delete(request_buf);
2660 dpif_netlink_dp_init(reply);
2662 error = dpif_netlink_dp_from_ofpbuf(reply, *bufp);
2665 ofpbuf_delete(*bufp);
2672 /* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
2673 * The caller must free '*bufp' when the reply is no longer needed ('reply'
2674 * will contain pointers into '*bufp'). */
2676 dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply,
2677 struct ofpbuf **bufp)
2679 struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
2680 struct dpif_netlink_dp request;
2682 dpif_netlink_dp_init(&request);
2683 request.cmd = OVS_DP_CMD_GET;
2684 request.dp_ifindex = dpif->dp_ifindex;
2686 return dpif_netlink_dp_transact(&request, reply, bufp);
2689 /* Parses the contents of 'buf', which contains a "struct ovs_header" followed
2690 * by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a
2691 * positive errno value.
2693 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
2694 * while 'flow' is still in use. */
2696 dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
2697 const struct ofpbuf *buf)
2699 static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
2700 [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
2701 [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
2702 [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
2703 [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
2705 [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
2706 [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
2707 [OVS_FLOW_ATTR_UFID] = { .type = NL_A_UNSPEC, .optional = true,
2708 .min_len = sizeof(ovs_u128) },
2709 /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
2710 /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
2711 /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
2714 struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
2715 struct ovs_header *ovs_header;
2716 struct nlmsghdr *nlmsg;
2717 struct genlmsghdr *genl;
2720 dpif_netlink_flow_init(flow);
2722 ofpbuf_use_const(&b, buf->data, buf->size);
2723 nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
2724 genl = ofpbuf_try_pull(&b, sizeof *genl);
2725 ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
2726 if (!nlmsg || !genl || !ovs_header
2727 || nlmsg->nlmsg_type != ovs_flow_family
2728 || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
2729 ARRAY_SIZE(ovs_flow_policy))) {
2732 if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
2736 flow->nlmsg_flags = nlmsg->nlmsg_flags;
2737 flow->dp_ifindex = ovs_header->dp_ifindex;
2738 if (a[OVS_FLOW_ATTR_KEY]) {
2739 flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
2740 flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
2743 if (a[OVS_FLOW_ATTR_UFID]) {
2744 const ovs_u128 *ufid;
2746 ufid = nl_attr_get_unspec(a[OVS_FLOW_ATTR_UFID],
2747 nl_attr_get_size(a[OVS_FLOW_ATTR_UFID]));
2749 flow->ufid_present = true;
2751 if (a[OVS_FLOW_ATTR_MASK]) {
2752 flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
2753 flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
2755 if (a[OVS_FLOW_ATTR_ACTIONS]) {
2756 flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
2757 flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
2759 if (a[OVS_FLOW_ATTR_STATS]) {
2760 flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
2762 if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
2763 flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
2765 if (a[OVS_FLOW_ATTR_USED]) {
2766 flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
2771 /* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
2772 * followed by Netlink attributes corresponding to 'flow'. */
2774 dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow,
2777 struct ovs_header *ovs_header;
2779 nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family,
2780 NLM_F_REQUEST | flow->nlmsg_flags,
2781 flow->cmd, OVS_FLOW_VERSION);
2783 ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
2784 ovs_header->dp_ifindex = flow->dp_ifindex;
2786 if (flow->ufid_present) {
2787 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_UFID, &flow->ufid,
2790 if (flow->ufid_terse) {
2791 nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS,
2792 OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK
2793 | OVS_UFID_F_OMIT_ACTIONS);
2795 if (!flow->ufid_terse || !flow->ufid_present) {
2796 if (flow->key_len) {
2797 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_KEY,
2798 flow->key, flow->key_len);
2801 if (flow->mask_len) {
2802 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_MASK,
2803 flow->mask, flow->mask_len);
2805 if (flow->actions || flow->actions_len) {
2806 nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
2807 flow->actions, flow->actions_len);
2811 /* We never need to send these to the kernel. */
2812 ovs_assert(!flow->stats);
2813 ovs_assert(!flow->tcp_flags);
2814 ovs_assert(!flow->used);
2817 nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR);
2820 nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
2824 /* Clears 'flow' to "empty" values. */
2826 dpif_netlink_flow_init(struct dpif_netlink_flow *flow)
2828 memset(flow, 0, sizeof *flow);
2831 /* Executes 'request' in the kernel datapath. If the command fails, returns a
2832 * positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
2833 * without doing anything else. If 'reply' and 'bufp' are nonnull, then the
2834 * result of the command is expected to be a flow also, which is decoded and
2835 * stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
2836 * is no longer needed ('reply' will contain pointers into '*bufp'). */
2838 dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
2839 struct dpif_netlink_flow *reply,
2840 struct ofpbuf **bufp)
2842 struct ofpbuf *request_buf;
2845 ovs_assert((reply != NULL) == (bufp != NULL));
2848 request->nlmsg_flags |= NLM_F_ECHO;
2851 request_buf = ofpbuf_new(1024);
2852 dpif_netlink_flow_to_ofpbuf(request, request_buf);
2853 error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
2854 ofpbuf_delete(request_buf);
2858 error = dpif_netlink_flow_from_ofpbuf(reply, *bufp);
2861 dpif_netlink_flow_init(reply);
2862 ofpbuf_delete(*bufp);
2870 dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow,
2871 struct dpif_flow_stats *stats)
2874 stats->n_packets = get_32aligned_u64(&flow->stats->n_packets);
2875 stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes);
2877 stats->n_packets = 0;
2880 stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
2881 stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
2884 /* Logs information about a packet that was recently lost in 'ch' (in
2887 report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx,
2888 uint32_t handler_id)
2890 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
2893 if (VLOG_DROP_WARN(&rl)) {
2898 if (ch->last_poll != LLONG_MIN) {
2899 ds_put_format(&s, " (last polled %lld ms ago)",
2900 time_msec() - ch->last_poll);
2903 VLOG_WARN("%s: lost packet on port channel %u of handler %u",
2904 dpif_name(&dpif->dpif), ch_idx, handler_id);