/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE_MTU(mtu)   (MTU_TO_MAX_LEN(mtu)        \
                              + sizeof(struct dp_packet) \
                              + RTE_PKTMBUF_HEADROOM)
#define MBUF_SIZE_DRIVER     (2048                       \
                              + sizeof (struct rte_mbuf) \
                              + RTE_PKTMBUF_HEADROOM)
#define MBUF_SIZE(mtu)       MAX(MBUF_SIZE_MTU(mtu), MBUF_SIZE_DRIVER)
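
/* A worked example for the standard Ethernet MTU of 1500 (the header and CRC
 * are fixed at 14 and 4 bytes; the remaining terms depend on the build):
 *   MTU_TO_MAX_LEN(1500) = 1500 + 14 + 4 = 1518 bytes of frame data,
 * so MBUF_SIZE(1500) is 1518 plus the dp_packet metadata and mbuf headroom,
 * unless the 2048-byte driver minimum (MBUF_SIZE_DRIVER) is larger. */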
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
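
/* With the defaults above MAX_NB_MBUF / MIN_NB_MBUF = 16, already a power of
 * two, so the halving sequence stays integral: 262144, 131072, 65536, 32768,
 * 16384.  The second assert then requires the final 16384 to be a multiple
 * of MP_CACHE_SZ (512 in the DPDK releases this code targets). */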
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets. */

/*
 * Maximum amount of time in microseconds to try to enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
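
/* Illustrative only: the retry budget derived from this constant later in
 * __netdev_dpdk_vhost_send() is VHOST_ENQ_RETRY_USECS * rte_get_timer_hz()
 * / 1E6, e.g. about 240,000 timer cycles on a host whose timer runs at
 * 2.4 GHz. */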
static const struct rte_eth_conf port_conf = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        .mq_mode = ETH_MQ_TX_NONE,
};

enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
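
/* Illustrative figures: 200,000 TSC cycles is on the order of 100 us on a
 * 2 GHz core, so a queued but unflushed tx burst waits at most roughly that
 * long before dpdk_queue_pkts() forces a flush. */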
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
    struct rte_mempool *mp;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time */
                                   /* pkts are queued. */
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
/* dpdk has no way to remove dpdk ring ethernet devices,
   so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id;   /* User-given port number, parsed from the port name. */
    int eth_port_id;    /* Ethernet device port id. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;

    struct netdev_stats stats;
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;

    /* The user might request more txqs than the NIC has.  We remap those
     * ('up.n_txq') onto these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission. */
    bool txq_needs_locking;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];

    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

is_dpdk_class(const struct netdev_class *class)
    return class->construct == netdev_dpdk_construct;

/* XXX: use DPDK malloc for the entire OVS.  In fact, huge pages should be
 * used for all other segments: data, bss and text. */
dpdk_rte_mzalloc(size_t sz)
    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);

/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
free_dpdk_buf(struct dp_packet *p)
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after the dp_packet structure (whose first
     * member is the mbuf itself) */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
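
    /* Resulting element layout (illustrative):
     *   m ............. start of the 'struct dp_packet' that embeds the
     *                   rte_mbuf as its first member
     *   m->buf_addr ... m + sizeof(struct dp_packet), start of the data
     *                   buffer
     *   packet data ... begins at m->buf_addr + m->data_off, leaving up to
     *                   RTE_PKTMBUF_HEADROOM bytes of headroom */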
    /* init some constant fields */

ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    mp_size = MAX_NB_MBUF;

        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
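
    /* For example, with the defaults the first attempt for a 1500-byte MTU
     * on NUMA socket 0 is a pool named "ovs_mp_1500_0_262144"; each ENOMEM
     * retry renames and retries with half the mbufs, down to
     * "ovs_mp_1500_0_16384" at most. */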
    if (dmp->mp == NULL) {

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    list_push_back(&dpdk_mp_list, &dmp->list_node);

dpdk_mp_put(struct dpdk_mp *dmp)
    ovs_assert(dmp->refcount >= 0);

    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_remove(&dmp->list_node);
        /* destroy mp-pool. */
check_link_status(struct netdev_dpdk *dev)
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;

        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
dpdk_watchdog(void *dummy OVS_UNUSED)
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        check_link_status(dev);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);
    xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues */
    while (n_rxq && n_txq) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));

        /* Retry with fewer tx queues */

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));

        /* Retry with fewer rx queues */

    dev->up.n_rxq = n_rxq;
    dev->real_n_txq = n_txq;
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));

    diag = rte_eth_dev_start(dev->port_id);
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);

static struct netdev *
netdev_dpdk_alloc(void)
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!netdev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core.  If the corresponding core
             * is not on the same numa node as 'netdev', flags the
             * 'flush_tx'. */
            netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs.  Always flush */
            netdev->tx_q[i].flush_tx = true;
        }
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_spinlock_init(&netdev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    netdev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);

    list_push_back(&dpdk_list, &netdev->list_node);

    rte_free(netdev->tx_q);
    ovs_mutex_unlock(&netdev->mutex);
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
    if (strncmp(dev_name, prefix, strlen(prefix))) {

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, NULL, 0); /* string must be null-terminated */
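
    /* Illustrative examples: with prefix "dpdk", "dpdk0" parses to port_no
     * 0; with prefix "dpdkr", "dpdkr5" parses to port_no 5.  Since strtol()
     * is called with base 0, hex ("dpdk0x2") and octal forms are accepted
     * too. */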
vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
    err = vhost_construct_helper(netdev_);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
             vhost_sock_dir, netdev_->name);
    err = rte_vhost_driver_register(netdev->vhost_id);
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
        fatal_signal_add_file_to_unlink(netdev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  netdev->vhost_id, netdev_->name);
        err = vhost_construct_helper(netdev_);

    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_construct(struct netdev *netdev)
    unsigned int port_no;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_destruct(struct netdev *netdev_)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_vhost_destruct(struct netdev *netdev_)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Cannot remove port, vhost device still attached");

    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
    } else {
        fatal_signal_remove_file_to_unlink(dev->vhost_id);
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_dealloc(struct netdev *netdev_)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev_->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;

/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, we do not try to restore the old
 * configuration; we just return the error. */
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int old_rxq, old_txq;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    old_txq = netdev->up.n_txq;
    old_rxq = netdev->up.n_rxq;
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    err = dpdk_eth_dev_init(netdev);
    netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
    if (err) {
        /* If there has been an error, it means that the requested queues
         * have not been created.  Restore the old numbers. */
        netdev->up.n_txq = old_txq;
        netdev->up.n_rxq = old_rxq;
    }

    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                                  unsigned int n_rxq)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->real_n_txq = 1;
    netdev->up.n_rxq = 1;
    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_free(netdev->tx_q);
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;
    netdev_dpdk_alloc_txq(netdev, netdev->up.n_txq);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);

netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)

netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    while (nb_tx != txq->count) {
        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* Free the buffers that we couldn't transmit, one at a time (each
         * packet could come from a different mempool). */
        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->tsc = rte_get_timer_cycles();

dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
is_vhost_running(struct virtio_net *dev)
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));

netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count)
    struct dp_packet *packet;

    stats->rx_packets += count;
    for (i = 0; i < count; i++) {
        packet = packets[i];

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_length_errors++;
            continue;
        }

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += dp_packet_size(packet);
/*
 * The receive path for the vhost port is the TX path out from the guest.
 */
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = rxq_->queue_id;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {

    if (rxq_->queue_id >= vhost_dev->real_n_rxq) {

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) packets,

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&vhost_dev->stats, packets, nb_rx);
    rte_spinlock_unlock(&vhost_dev->stats_lock);
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush the tx queue that is shared among CPUs,
     * since it is always flushed. */
    if (rxq_->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt,
                         bool may_steal)
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        rte_spinlock_lock(&vhost_dev->stats_lock);
        vhost_dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&vhost_dev->stats_lock);
    }

    if (vhost_dev->txq_needs_locking) {
        qid = qid % vhost_dev->real_n_txq;
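        /* Illustrative mapping: with up.n_txq == 4 PMD tx queues remapped
         * onto real_n_txq == 2 vhost queues, qids 0,1,2,3 land on vhost
         * queues 0,1,0,1, so two senders can share a queue and must take
         * the per-queue lock below. */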
        rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);
    }

        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent. */
            /* Prepare for possible next iteration. */
            cur_pkts = &cur_pkts[tx_pkts];

            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;

            start = rte_get_timer_cycles();

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {

            /* break out of main loop. */

    if (vhost_dev->txq_needs_locking) {
        rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
    rte_spinlock_unlock(&vhost_dev->stats_lock);

    for (i = 0; i < total_pkts; i++) {
        dp_packet_delete(pkts[i]);
    }
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);

        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
/* Tx function.  Transmit packets indefinitely */
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length arrays. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache. */

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }
    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
                                 newcnt, true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
                       int cnt, bool may_steal)
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
    }
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
    } else {
        int next_tx_idx = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **) &pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int) size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **) &pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);

netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {

    mp = dpdk_mp_get(dev->socket_id, mtu);

    rte_eth_dev_stop(dev->port_id);

    old_mp = dev->dpdk_mp;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_dropped += UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;
    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but we count them here as dropped
     * instead. */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
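
    /* Worked example with illustrative numbers: if the NIC reports
     * ierrors = 10 of which imissed = 4, and rx_nombuf = 2, then OVS
     * reports rx_errors = 10 - 4 = 6 and rx_dropped = 2 + 4 = 6. */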
    stats->collisions = UINT64_MAX;

    stats->rx_length_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_missed_errors = rte_stats.imissed;

    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;

    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }
netdev_dpdk_get_ifindex(const struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;

netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }

netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
        }

        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);

    unixctl_command_reply(conn, "OK");
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
set_irq_status(struct virtio_net *dev)
    for (i = 0; i < dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_TXQ, 0);
    }
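
    /* Indexing example: with VIRTIO_QNUM == 2, queue pair i uses ring
     * indices 2*i (VIRTIO_RXQ) and 2*i + 1 (VIRTIO_TXQ), so a guest with
     * virt_qp_nb == 2 has notifications disabled on rings 0, 1, 2 and 3. */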
netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev,
                             struct virtio_net *dev)
    qp_num = dev->virt_qp_nb;
    if (qp_num > netdev->up.n_rxq) {
        VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
                 "too many queues %d > %d", dev->ifname, dev->device_fh,
                 qp_num, netdev->up.n_rxq);
        return -1;
    }

    netdev->real_n_rxq = qp_num;
    netdev->real_n_txq = qp_num;
    if (netdev->up.n_txq > netdev->real_n_txq) {
        netdev->txq_needs_locking = true;
    } else {
        netdev->txq_needs_locking = false;
    }
/*
 * A new virtio-net device is added to a vhost port.
 */
new_device(struct virtio_net *dev)
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            if (netdev_dpdk_vhost_set_queues(netdev, dev)) {
                ovs_mutex_unlock(&netdev->mutex);
                ovs_mutex_unlock(&dpdk_mutex);
                return -1;
            }
            ovsrcu_set(&netdev->virtio_dev, dev);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            ovs_mutex_unlock(&netdev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
                  "found", dev->ifname, dev->device_fh);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", dev->ifname,
              dev->device_fh);
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
destroy_device(volatile struct virtio_net *dev)
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce so that they no longer
             * hold a reference to the virtio_dev that was just cleared.
             */
            ovsrcu_synchronize();
            /*
             * As the call to ovsrcu_synchronize() will end the quiescent
             * state, put the thread back into quiescent state before
             * returning.
             */
            ovsrcu_quiesce_start();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed", dev->ifname,
              dev->device_fh);
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};
start_vhost_loop(void *dummy OVS_UNUSED)
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();

dpdk_vhost_class_init(void)
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
dpdk_vhost_cuse_class_init(void)
    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net. */
    err = rte_vhost_driver_register(cuse_dev_name);
        VLOG_ERR("CUSE device setup failure.");

    dpdk_vhost_class_init();

dpdk_vhost_user_class_init(void)
    dpdk_vhost_class_init();
dpdk_common_init(void)
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
    struct dpdk_ring *ivshmem;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
    if (ivshmem->cring_tx == NULL) {

    err = snprintf(ring_name, 10, "%s_rx", dev_name);

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
    if (ivshmem->cring_rx == NULL) {

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
    struct dpdk_ring *ivshmem;
    unsigned int port_no;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }

    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_rss_invalidate(pkts[i]);
    }

    netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
netdev_dpdk_ring_construct(struct netdev *netdev)
    unsigned int port_no = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

    ovs_mutex_unlock(&dpdk_mutex);
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND,    \
                          GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, \
                          RXQ_RECV)                                         \
    NULL,                       /* netdev_dpdk_run */                       \
    NULL,                       /* netdev_dpdk_wait */                      \
    netdev_dpdk_alloc,                                                      \
    netdev_dpdk_dealloc,                                                    \
    netdev_dpdk_get_config,                                                 \
    NULL,                       /* netdev_dpdk_set_config */                \
    NULL,                       /* get_tunnel_config */                     \
    NULL,                       /* build header */                          \
    NULL,                       /* push header */                           \
    NULL,                       /* pop header */                            \
    netdev_dpdk_get_numa_id,    /* get_numa_id */                           \
    MULTIQ,                     /* set_multiq */                            \
    NULL,                       /* send_wait */                             \
    netdev_dpdk_set_etheraddr,                                              \
    netdev_dpdk_get_etheraddr,                                              \
    netdev_dpdk_get_mtu,                                                    \
    netdev_dpdk_set_mtu,                                                    \
    netdev_dpdk_get_ifindex,                                                \
    netdev_dpdk_get_carrier_resets,                                         \
    netdev_dpdk_set_miimon,                                                 \
    NULL,                       /* set_advertisements */                    \
    NULL,                       /* set_policing */                          \
    NULL,                       /* get_qos_types */                         \
    NULL,                       /* get_qos_capabilities */                  \
    NULL,                       /* get_qos */                               \
    NULL,                       /* set_qos */                               \
    NULL,                       /* get_queue */                             \
    NULL,                       /* set_queue */                             \
    NULL,                       /* delete_queue */                          \
    NULL,                       /* get_queue_stats */                       \
    NULL,                       /* queue_dump_start */                      \
    NULL,                       /* queue_dump_next */                       \
    NULL,                       /* queue_dump_done */                       \
    NULL,                       /* dump_queue_stats */                      \
    NULL,                       /* get_in4 */                               \
    NULL,                       /* set_in4 */                               \
    NULL,                       /* get_in6 */                               \
    NULL,                       /* add_router */                            \
    NULL,                       /* get_next_hop */                          \
    NULL,                       /* arp_lookup */                            \
    netdev_dpdk_update_flags,                                               \
    netdev_dpdk_rxq_alloc,                                                  \
    netdev_dpdk_rxq_construct,                                              \
    netdev_dpdk_rxq_destruct,                                               \
    netdev_dpdk_rxq_dealloc,                                                \
    NULL,                       /* rx_wait */                               \
    NULL,                       /* rxq_drain */                             \
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
    /* Depending on which version of vhost is in use, process the
     * vhost-specific flag if it is provided on the vswitchd command line,
     * otherwise resort to a default value.
     *
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location
     * of the vhost-user socket(s).
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of
     * the vhost-cuse character device. */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        *new_val = xstrdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }
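
    /* Illustrative invocation (the path is an example only): starting
     * "ovs-vswitchd --dpdk -vhost_sock_dir /tmp/ovs -c 0x1 ..." takes the
     * first branch and sets *new_val to "/tmp/ovs"; omitting the flag takes
     * the else branch and leaves the built-in default in place. */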
dpdk_init(int argc, char **argv)
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk"))
        return 0;

    /* Remove the --dpdk argument from arg list. */
    argc--;
    argv++;

    /* Reject --user option */
    for (i = 0; i < argc; i++) {
        if (!strcmp(argv[i], "--user")) {
            VLOG_ERR("Can not mix --dpdk and --user options, aborting.");
        }
    }

    if (process_vhost_flags("-cuse_dev_name", xstrdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
    if (process_vhost_flags("-vhost_sock_dir", xstrdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
        }

        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function. */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the vhost flag arguments */

    /* Keep the program name argument as this is needed for the call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
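
    /* The value handed back tells the caller how many of its arguments were
     * consumed: 'result' args eaten by rte_eal_init(), plus one for
     * "--dpdk", plus 'base' (nonzero when a vhost flag/value pair was also
     * consumed above). */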
    return result + 1 + base;
static const struct netdev_class dpdk_class =
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_cuse_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,                   /* get_features */
        NULL,                   /* get_status */
        netdev_dpdk_vhost_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,                   /* get_features */
        NULL,                   /* get_status */
        netdev_dpdk_vhost_rxq_recv);
netdev_dpdk_register(void)
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        netdev_register_provider(&dpdk_vhost_cuse_class);
        netdev_register_provider(&dpdk_vhost_user_class);
        ovsthread_once_done(&once);
    }
pmd_thread_setaffinity_cpu(unsigned cpu)
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
    }

    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

static bool
dpdk_thread_is_pmd(void)
    return rte_lcore_id() != NON_PMD_CORE_ID;