/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_virtio_net.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * We need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN \
                                     - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)    \
                                     + sizeof(struct dp_packet)   \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
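/*
 * Worked example for the macros above (a sketch, assuming the usual
 * ETHER_HDR_LEN of 14, ETHER_CRC_LEN of 4 and VLAN_HEADER_LEN of 4 bytes):
 *   MTU_TO_FRAME_LEN(1500)     = 1500 + 14 + 4           = 1518
 *   MTU_TO_MAX_FRAME_LEN(1500) = 1500 + 14 + 4 + (2 * 4) = 1526
 *   FRAME_LEN_TO_MTU(1518)     = 1518 - 14 - 4           = 1500
 */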
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */

#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
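/*
 * Concretely, the halving sequence tried is:
 *   262144 -> 131072 -> 65536 -> 32768 -> 16384
 * stopping at MIN_NB_MBUF (4096 * 4 = 16384) if every larger allocation
 * fails.
 */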
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue,
                                  * Max (n + 32 <= 4096). */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue,
                                  * Max (n + 32 <= 4096). */

#define OVS_VHOST_MAX_QUEUE_NUM 1024 /* Maximum number of vHost TX queues. */

static char *cuse_dev_name = NULL;  /* Name of the vhost-cuse character
                                     * device. */
static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets. */

/*
 * Maximum amount of time in microseconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
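/*
 * For a feel of the scale (a sketch; the 2 GHz timer frequency is only an
 * assumption for the arithmetic): the vhost send path converts this to timer
 * cycles as VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6, so 100 us is
 * 200000 cycles on a 2 GHz TSC.
 */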
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header split disabled. */
        .hw_ip_checksum = 0, /* IP checksum offload disabled. */
        .hw_vlan_filter = 0, /* VLAN filtering disabled. */
        .jumbo_frame    = 0, /* Jumbo frame support disabled. */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non-pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non-pmd thread should hold this mutex while calling them. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time
                                    * pkts are queued. */
    int count;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    int map;                       /* Mapping of configured vhost-user queues
                                    * to the queues enabled by the guest. */
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

/* DPDK has no way to remove DPDK ring Ethernet devices,
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings. */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User-given port number, parsed from the
                                * port name. */
    int eth_port_id;           /* Ethernet device port id. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats. */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has.  We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission. */
    int real_n_txq;
    int real_n_rxq;
    bool txq_needs_locking;

    /* virtio-net structure for vhost device. */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less.  If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety.  Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames).  If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance.  To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                     NETDEV_DPDK_MBUF_ALIGN);
}
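/*
 * For example (a sketch, assuming the common RTE_PKTMBUF_HEADROOM of 128
 * bytes): for an MTU of 1500, MTU_TO_MAX_FRAME_LEN(1500) = 1526, so the
 * buffer size becomes ROUND_UP(1526 + 128, 1024) = 2048.
 */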
/* XXX: use dpdk malloc for entire OVS.  In fact huge pages should be used
 * for all other segments (data, bss and text) as well. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

/* XXX this function should be called only by pmd threads (or by non-pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;
    struct rte_pktmbuf_pool_private mbp_priv;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    } else {
        VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
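/*
 * Example of the resulting pool name: a pool created for MTU 1500 on socket
 * 0 with the full MAX_NB_MBUF allocation is named "ovs_mp_1500_0_262144".
 */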
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV):  rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues. */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        if (diag) {
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues. */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues. */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->real_n_txq = n_txq;

        return 0;
    }

    return diag;
}
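/*
 * Retry behaviour illustrated: if a NIC reports 4 tx queues but only 2 can
 * actually be set up, the txq loop above stops at i == 2 and the whole
 * configuration is retried with n_txq == 2 (and likewise for rx queues).
 */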
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
    return &netdev->up;
}

static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!netdev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core.  If the corresponding core
             * is not on the same numa node as 'netdev', set 'flush_tx'. */
            netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs.  Always flush. */
            netdev->tx_q[i].flush_tx = true;
        }

        /* Initialize map for vhost devices. */
        netdev->tx_q[i].map = -1;
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }
}

static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;
    uint32_t buf_size;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_spinlock_init(&netdev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_FRAME_LEN(netdev->mtu);

    buf_size = dpdk_buf_size(netdev->mtu);
    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id,
                                  FRAME_LEN_TO_MTU(buf_size));
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    netdev_->requested_n_rxq = NR_QUEUE;
    netdev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    } else {
        netdev_dpdk_alloc_txq(netdev, OVS_VHOST_MAX_QUEUE_NUM);
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number
 * (no leading + or - signs are allowed). */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
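/*
 * For example, dpdk_dev_parse_name("dpdk7", "dpdk", &port_no) returns 0 and
 * sets port_no to 7, while "dpdk-7", "dpdk+7" and "eth7" all yield ENODEV.
 */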
static int
vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
{
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
}

static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
    err = vhost_construct_helper(netdev_);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    const char *name = netdev_->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system.  '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
             vhost_sock_dir, name);

    err = rte_vhost_driver_register(netdev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 netdev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(netdev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  netdev->vhost_id, name);
        err = vhost_construct_helper(netdev_);
    }

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk". */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}

static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Can not remove port, vhost device still attached");
        return;
    }

    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
    } else {
        fatal_signal_remove_file_to_unlink(dev->vhost_id);
    }

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d",
                    netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
                                               netdev->requested_n_rxq), 1);
    netdev_change_seq_changed(netdev);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try restoring its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;
    int old_rxq, old_txq;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    old_txq = netdev->up.n_txq;
    old_rxq = netdev->up.n_rxq;
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    err = dpdk_eth_dev_init(netdev);
    netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
    if (err) {
        /* If there has been an error, it means that the requested queues
         * have not been created.  Restore the old numbers. */
        netdev->up.n_txq = old_txq;
        netdev->up.n_rxq = old_rxq;
    }

    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                                  unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->real_n_txq = 1;
    netdev->up.n_rxq = 1;
    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool). */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}

static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count)
{
    int i;
    struct dp_packet *packet;

    stats->rx_packets += count;
    for (i = 0; i < count; i++) {
        packet = packets[i];

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += dp_packet_size(packet);
    }
}

/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = rxq_->queue_id;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    if (rxq_->queue_id >= vhost_dev->real_n_rxq) {
        return EOPNOTSUPP;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&vhost_dev->stats, packets, nb_rx);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

    *c = (int) nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed. */
    if (rxq_->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}

static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt,
                         bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    uint64_t start = 0;

    qid = vhost_dev->tx_q[qid % vhost_dev->real_n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid == -1)) {
        rte_spinlock_lock(&vhost_dev->stats_lock);
        vhost_dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&vhost_dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent. */
            cnt -= tx_pkts;
            /* Prepare for possible next iteration. */
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS
                               * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;

            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start)
                                 > timeout)) {
                    expired = 1;
                    break;
                }
            }
            if (expired) {
                /* Break out of main loop. */
                break;
            }
        }
    } while (cnt);

    rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
                                         cnt);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
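/*
 * Flush policy in brief (the 2 GHz figure is only an assumption for the
 * arithmetic): a queued burst is handed to the NIC when the queue fills to
 * MAX_TX_QUEUE_LEN (384) packets, when 'flush_tx' is set, or when DRAIN_TSC
 * (200000) timer cycles have passed since the last flush, i.e. roughly
 * 100 us on a 2 GHz TSC.
 */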
/* Tx function used when the packets are not already DPDK mbufs: each packet
 * is copied into a freshly allocated mbuf before transmission. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non-pmd thread we have to use the mempool mutex, because
     * every non-pmd thread shares the same mempool cache. */
    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now. */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
                                 newcnt, true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}

static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **) &pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int) size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **) &pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}

static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err, dpdk_mtu;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;
    uint32_t buf_size;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    buf_size = dpdk_buf_size(mtu);
    dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);

    mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_dropped = UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets = dev->stats.rx_packets;
    stats->tx_packets = dev->stats.tx_packets;
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;
    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but we count them as dropped instead. */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
    stats->collisions = UINT64_MAX;

    stats->rx_length_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_missed_errors = rte_stats.imissed;

    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_TXQ, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues.  Must be called after each
 * enabling/disabling of queues and real_n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *netdev)
    OVS_REQUIRES(netdev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = netdev->real_n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (netdev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = -1;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (netdev->tx_q[i].map != i) {
            netdev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", netdev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, netdev->tx_q[i].map);
    }

    rte_free(enabled_queues);
}
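/*
 * Example of the resulting mapping: with 4 configured txqs of which only
 * queues 0 and 2 were enabled by the guest, the disabled queues are spread
 * round-robin over the enabled ones:
 *   0 --> 0    1 --> 0    2 --> 2    3 --> 2
 */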
static int
netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev,
                             struct virtio_net *dev)
    OVS_REQUIRES(netdev->mutex)
{
    uint32_t qp_num;

    qp_num = dev->virt_qp_nb;
    if (qp_num > netdev->up.n_rxq) {
        VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
                 "too many queues %d > %d", dev->ifname, dev->device_fh,
                 qp_num, netdev->up.n_rxq);
        return -1;
    }

    netdev->real_n_rxq = qp_num;
    netdev->real_n_txq = qp_num;
    netdev->txq_needs_locking = true;

    netdev_dpdk_remap_txqs(netdev);

    return 0;
}

/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            if (netdev_dpdk_vhost_set_queues(netdev, dev)) {
                ovs_mutex_unlock(&netdev->mutex);
                ovs_mutex_unlock(&dpdk_mutex);
                return -1;
            }
            ovsrcu_set(&netdev->virtio_dev, dev);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            ovs_mutex_unlock(&netdev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
                  "found", dev->ifname, dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", dev->ifname,
              dev->device_fh);
    return 0;
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            exists = true;
            ovs_mutex_unlock(&vhost_dev->mutex);
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed",
                  dev->ifname, dev->device_fh);
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
                  dev->device_fh);
    }
}
static int
vring_state_changed(struct virtio_net *dev, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *vhost_dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, vhost_dev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&vhost_dev->mutex);
            if (enable) {
                vhost_dev->tx_q[qid].map = qid;
            } else {
                vhost_dev->tx_q[qid].map = -1;
            }
            netdev_dpdk_remap_txqs(vhost_dev);
            exists = true;
            ovs_mutex_unlock(&vhost_dev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d (tx_qid %d) of vhost device '%s' %"
                  PRIu64" changed to '%s'", queue_id, qid, dev->ifname,
                  dev->device_fh, (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
                  dev->device_fh);
        return -1;
    }

    return 0;
}
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};

static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}

static int
dpdk_vhost_cuse_class_init(void)
{
    int err = -1;

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    dpdk_vhost_class_init();
    return 0;
}

static int
dpdk_vhost_user_class_init(void)
{
    dpdk_vhost_class_init();
    return 0;
}

static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[RTE_RING_NAMESIZE];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}

static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr". */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            /* Really all that is needed. */
            *eth_port_id = ivshmem->eth_port_id;
            return 0;
        }
    }
    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
static int
netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_rss_invalidate(pkts[i]);
    }

    netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
    return 0;
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    netdev_dpdk_set_config,                                   \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
static int
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
{
    int changed = 0;

    /* Depending on which version of vhost is in use, process the vhost-
     * specific flag if it is provided on the vswitchd command line,
     * otherwise resort to a default value.
     *
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location
     * of the vhost-user socket(s).
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of the
     * vhost-cuse character device. */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        changed = 1;
        *new_val = xstrdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from arg list. */
    argc--;
    argv++;

    /* Reject --user option. */
    int i;
    for (i = 0; i < argc; i++) {
        if (!strcmp(argv[i], "--user")) {
            VLOG_ERR("Can not mix --dpdk and --user options, aborting.");
        }
    }

#ifdef VHOST_CUSE
    if (process_vhost_flags("-cuse_dev_name", xstrdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
#else
    if (process_vhost_flags("-vhost_sock_dir", xstrdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;
        int err;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
            return err;
        }
#endif
        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function. */
        argc -= 2;
        argv += 2;  /* Increment by two to bypass the vhost flag arguments. */
        base = 2;
    }

    /* Keep the program name argument as this is needed for call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here. */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    return result + 1 + base;
}
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_cuse_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
        netdev_register_provider(&dpdk_vhost_cuse_class);
#else
        netdev_register_provider(&dpdk_vhost_user_class);
#endif
        ovsthread_once_done(&once);
    }
}

int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non-pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

static bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}