/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * We need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and a drop in
 * performance for standard Ethernet MTU.
 */
#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE_MTU(mtu)   (MTU_TO_MAX_LEN(mtu)        \
                              + sizeof(struct dp_packet) \
                              + RTE_PKTMBUF_HEADROOM)
#define MBUF_SIZE_DRIVER     (2048                       \
                              + sizeof(struct rte_mbuf)  \
                              + RTE_PKTMBUF_HEADROOM)
#define MBUF_SIZE(mtu)       MAX(MBUF_SIZE_MTU(mtu), MBUF_SIZE_DRIVER)
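/* For example, with the standard Ethernet MTU of 1500:
 * MTU_TO_MAX_LEN(1500) = 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4)
 * = 1518 bytes of frame data, so MBUF_SIZE(1500) is 1518
 * + sizeof(struct dp_packet) + RTE_PKTMBUF_HEADROOM, unless
 * MBUF_SIZE_DRIVER is larger and wins the MAX(). */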
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
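/* Concretely: MAX_NB_MBUF = 262144 and MIN_NB_MBUF = 16384, so the
 * allocation attempts are 262144, 131072, 65536, 32768 and 16384 mbufs,
 * each of which the assertions above guarantee to be a multiple of
 * MP_CACHE_SZ. */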
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n + 32 <= 4096). */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n + 32 <= 4096). */

static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets. */
/*
 * Maximum amount of time in microseconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
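/* At transmit time this budget is converted to TSC cycles as
 * VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6
 * (see __netdev_dpdk_vhost_send()). */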
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
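/* DRAIN_TSC is a raw cycle count: on a 2 GHz TSC, for example, 200000
 * cycles is roughly 100 us between forced drains of a lingering tx queue
 * (see dpdk_queue_pkts()). */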
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non-pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non-pmd thread should hold this mutex while calling them. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time
                                    * packets are queued. */
    int count;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id;    /* User given port no, parsed from port name. */
    int eth_port_id;     /* Ethernet device port id. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has.  We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission. */
    int real_n_txq;
    bool txq_needs_locking;
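    /* Example: if the user requests up.n_txq == 4 but the NIC only provides
     * real_n_txq == 2, netdev_dpdk_send__() folds queue ids as
     * qid % real_n_txq, so two pmd threads can land on the same NIC queue
     * and must serialize on 'tx_lock'. */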
    /* Spinlock for vhost transmission.  Other DPDK devices use spinlocks in
     * dpdk_tx_queue. */
    rte_spinlock_t vhost_tx_lock;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

static struct virtio_net *netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* XXX: use dpdk malloc for entire OVS.  In fact, huge pages should be used
 * for all other segments: data, bss and text. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non-pmd
 * threads holding the nonpmd_mempool_mutex). */
static void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
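/* Each mempool element thus holds a struct dp_packet header (which embeds
 * the rte_mbuf as its first member) followed by the packet buffer.  This
 * layout is what makes the direct casts between struct dp_packet * and
 * struct rte_mbuf * elsewhere in this file (e.g. free_dpdk_buf()) valid. */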
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(&dmp->list_node);
        /* destroy mp-pool. */
    }
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV):  rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues. */
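    /* For example, if we request 8 tx queues but the device only accepts 4,
     * the tx setup loop below fails at i == 4 and the configuration is
     * retried with n_txq == 4. */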
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        if (diag) {
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->real_n_txq = n_txq;

        return 0;
    }

    return diag;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!netdev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core.  If the corresponding core
             * is not on the same numa node as 'netdev', flags the
             * 'flush_tx'. */
            netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs.  Always flush. */
            netdev->tx_q[i].flush_tx = true;
        }
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_spinlock_init(&netdev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    netdev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, NULL, 0); /* string must be null terminated */
    return 0;
}
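/* Examples for dpdk_dev_parse_name(): "dpdk0" parsed with prefix "dpdk"
 * yields port_no 0, and "dpdkr7" parsed with prefix "dpdkr" yields 7. */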
static int
vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    rte_spinlock_init(&netdev->vhost_tx_lock);
    return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
}
static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
    err = vhost_construct_helper(netdev_);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);

    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
             vhost_sock_dir, netdev_->name);
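    /* E.g. a port named "vhost-user-0" with a vhost_sock_dir of
     * "/usr/local/var/run/openvswitch" (an illustrative path) produces the
     * socket /usr/local/var/run/openvswitch/vhost-user-0. */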
    err = rte_vhost_driver_register(netdev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 netdev->vhost_id);
    } else {
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  netdev->vhost_id, netdev_->name);
        err = vhost_construct_helper(netdev_);
    }
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Can not remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev_->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, the device is not reconfigured with its old
 * settings; only the requested queue counts are rolled back and the error
 * is returned. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;
    int old_rxq, old_txq;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    old_txq = netdev->up.n_txq;
    old_rxq = netdev->up.n_rxq;
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    err = dpdk_eth_dev_init(netdev);
    netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
    if (err) {
        /* If there has been an error, it means that the requested queues
         * have not been created.  Restore the old numbers. */
        netdev->up.n_txq = old_txq;
        netdev->up.n_rxq = old_rxq;
    }

    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->real_n_txq = 1;
    netdev->up.n_rxq = 1;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}
static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = 1;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    vhost_dev->stats.rx_packets += (uint64_t) nb_rx;
    rte_spinlock_unlock(&vhost_dev->stats_lock);

    *c = (int) nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed. */
    if (rxq_->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
                         int cnt, bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    uint64_t start = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        rte_spinlock_lock(&vhost_dev->stats_lock);
        vhost_dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&vhost_dev->stats_lock);
        goto out;
    }

    /* There is a single vHost TX queue, so we need to lock it for TX. */
    rte_spinlock_lock(&vhost_dev->vhost_tx_lock);

    do {
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent. */
            cnt -= tx_pkts;
            /* Prepare for possible next iteration. */
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
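            /* E.g. with a 2.4 GHz timer, the 100 us retry budget above
             * works out to a 'timeout' of 240000 cycles. */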
            unsigned int expired = 0;

            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, VIRTIO_RXQ)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
                    expired = 1;
                    break;
                }
            }

            if (expired) {
                /* break out of main loop. */
                break;
            }
        }
    } while (cnt);

    rte_spinlock_unlock(&vhost_dev->vhost_tx_lock);

    rte_spinlock_lock(&vhost_dev->stats_lock);
    vhost_dev->stats.tx_packets += (total_pkts - cnt);
    vhost_dev->stats.tx_dropped += cnt;
    rte_spinlock_unlock(&vhost_dev->stats_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
static inline void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof(struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
/* Tx function.  Transmit packets, copying them into DPDK mbufs first. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non-pmd thread we have to use the mempool mutex, because
     * every non-pmd thread shares the same mempool cache. */
    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now. */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs,
                                 newcnt, true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **) &pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int) size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **) &pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* The reconfiguration failed: restore the old MTU and mempool and
         * try to bring the device back up with its previous settings. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->rx_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->multicast = UINT64_MAX;
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_length_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_bytes = UINT64_MAX;
    stats->rx_dropped = UINT64_MAX;
    stats->tx_bytes = UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
                  dev->ifname, dev->device_fh);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' (%ld) has been added",
              dev->ifname, dev->device_fh);
    return 0;
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce after setting the
             * 'virtio_dev' to NULL.
             */
            ovsrcu_synchronize();
            /*
             * As call to ovsrcu_synchronize() will end the quiescent state,
             * put thread back into quiescent state before returning.
             */
            ovsrcu_quiesce_start();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' (%ld) has been removed",
              dev->ifname, dev->device_fh);
}
static struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};
static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}
static int
dpdk_vhost_cuse_class_init(void)
{
    int err;

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);
    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    dpdk_vhost_class_init();
    return 0;
}

static int
dpdk_vhost_user_class_init(void)
{
    dpdk_vhost_class_init();
    return 0;
}
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
static int
netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_set_rss_hash(pkts[i], 0);
    }

    netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    NULL,                       /* netdev_dpdk_set_config */  \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
static int
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
{
    int changed = 0;

    /* Depending on which version of vhost is in use, process the vhost-specific
     * flag if it is provided on the vswitchd command line, otherwise resort to
     * a default value.
     *
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of the
     * vhost-cuse character device.
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location of
     * the vhost-user socket(s).
     */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        changed = 1;
        *new_val = strdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from arg list. */
    argc--;
    argv++;

#ifdef VHOST_CUSE
    if (process_vhost_flags("-cuse_dev_name", strdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
#else
    if (process_vhost_flags("-vhost_sock_dir", strdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;
        int err;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
            return err;
        }
#endif
        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function. */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the vhost flag arguments. */
        base = 2;
    }

    /* Keep the program name argument as this is needed for the call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here. */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
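    /* The return value tells the caller how many command-line arguments were
     * consumed: the EAL's 'result', plus one for the "--dpdk" flag itself,
     * plus the vhost flag pair accounted for in 'base' when one was
     * supplied. */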
    return result + 1 + base;
}
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
        netdev_register_provider(&dpdk_vhost_cuse_class);
#else
        netdev_register_provider(&dpdk_vhost_user_class);
#endif
        ovsthread_once_done(&once);
    }
}
int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non-pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}