/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dp-packet.h"
#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
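/* For reference: with the standard Ethernet MTU of 1500,
 * MTU_TO_MAX_LEN(1500) = 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN)
 * = 1518 bytes, so each mbuf takes 1518 bytes of packet room plus 512
 * bytes of slack, sizeof(struct rte_mbuf) and RTE_PKTMBUF_HEADROOM. */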
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
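/* Worked example of the halving scheme above: MAX_NB_MBUF = 4096 * 64 =
 * 262144 and MIN_NB_MBUF = 4096 * 4 = 16384, so the pool sizes tried are
 * 262144, 131072, 65536, 32768 and finally 16384 mbufs. */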
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

/* Character device cuse_dev_name. */
static char *cuse_dev_name = NULL;

/*
 * Maximum amount of time in microseconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
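/* DRAIN_TSC is expressed in TSC cycles, so the real drain interval depends
 * on the CPU: on a hypothetical 2 GHz core, 200000 cycles come to roughly
 * 100 microseconds between forced flushes of a tx queue. */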
enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time
                                    * pkts are queued. */
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* DPDK has no way to remove dpdk ring ethernet devices, so we have to keep
 * them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id; /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    rte_spinlock_t txq_lock;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* XXX: use dpdk malloc for entire OVS. In fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));
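
    /* In the DPDK datapath a struct dp_packet embeds its rte_mbuf as the
     * first member, so the data buffer must be laid out after the whole
     * dp_packet rather than just after the mbuf.  That is why
     * sizeof(struct dp_packet) appears in the address arithmetic below. */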
    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
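
/* ovs_rte_pktmbuf_init() is the per-object constructor that dpdk_mp_get()
 * below passes to rte_mempool_create(): it first lays out the raw mbuf and
 * then initializes the dp_packet wrapper around it. */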
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    } else {
        VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
                                 &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return -diag;
    }

    for (i = 0; i < dev->up.n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, NULL);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      NULL, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    int i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    /* Each index is considered as a cpu core id, since there should
     * be one tx queue for each cpu core. */
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        /* If the corresponding core is not on the same numa node
         * as 'netdev', set 'flush_tx' so the queue is flushed every
         * time packets are queued. */
        netdev->tx_q[i].flush_tx = netdev->socket_id != numa_id;
    }
}
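
/* The assumption behind 'flush_tx' is that a queue whose index maps to a
 * core on the device's own NUMA node belongs to a busy pmd thread, where
 * batching pays off, while queues used from remote cores see too little
 * traffic to justify holding packets back. */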
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
    rte_spinlock_init(&netdev->txq_lock);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, NULL, 0); /* string must be null terminated */
    return 0;
}
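
/* For example, parsing "dpdkr7" with prefix "dpdkr" leaves 'cport' pointing
 * at "7", so 7 is stored in '*port_no'. */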
static int
netdev_dpdk_vhost_construct(struct netdev *netdev_)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Can not remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}

/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try restoring its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    netdev_dpdk_alloc_txq(netdev, n_txq);
    err = dpdk_eth_dev_init(netdev);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += txq->count - nb_tx;
        ovs_mutex_unlock(&dev->mutex);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}

static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = VIRTIO_TXQ;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
    *c = (int) nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues. */
    if (rxq_->queue_id == rte_lcore_id()) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
                         int cnt, bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    uint64_t start = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        ovs_mutex_lock(&vhost_dev->mutex);
        vhost_dev->stats.tx_dropped += cnt;
        ovs_mutex_unlock(&vhost_dev->mutex);
        goto out;
    }

    /* There is a single vHost TX queue, so we need to lock it for TX. */
    rte_spinlock_lock(&vhost_dev->txq_lock);

    do {
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent. */
            cnt -= tx_pkts;
            /* Prepare for possible next iteration. */
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;
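            /* 'timeout' is in TSC cycles: rte_get_timer_hz() returns cycles
             * per second, so dividing by 1E6 converts VHOST_ENQ_RETRY_USECS
             * (microseconds) into the units of rte_get_timer_cycles(). */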

            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, VIRTIO_RXQ)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
                    expired = 1;
                    break;
                }
            }
            if (expired) {
                /* break out of main loop. */
                break;
            }
        }
    } while (cnt);

    vhost_dev->stats.tx_packets += (total_pkts - cnt);
    vhost_dev->stats.tx_dropped += cnt;
    rte_spinlock_unlock(&vhost_dev->txq_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;
    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += dropped;
        ovs_mutex_unlock(&dev->mutex);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs, newcnt, true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);
        }
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->rx_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->multicast = UINT64_MAX;
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_length_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_bytes = UINT64_MAX;
    stats->rx_dropped = UINT64_MAX;
    stats->tx_bytes = UINT64_MAX;

    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    stats->tx_dropped = dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->up.name, IFNAMSIZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
                  dev->ifname, dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' (%ld) has been added",
              dev->ifname, dev->device_fh);

    return 0;
}
/*
 * Remove a virtio-net device from the specific vhost port.  Clearing
 * VIRTIO_DEV_RUNNING stops any more packets from being sent or received
 * to/from a VM, and the RCU synchronization below ensures all currently
 * queued packets have been sent/received before removing the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce before
             * setting the virtio_dev to NULL.
             */
            ovsrcu_synchronize();
            /*
             * As call to ovsrcu_synchronize() will end the quiescent state,
             * put thread back into quiescent state before returning.
             */
            ovsrcu_quiesce_start();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' (%ld) has been removed",
              dev->ifname, dev->device_fh);
}
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
};
static void *
start_cuse_session_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();

    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    int err;

    rte_vhost_driver_callback_register(&virtio_net_device_ops);

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    ovs_thread_create("cuse_thread", start_cuse_session_loop, NULL);
    return 0;
}
static int
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);
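
    /* The command registered above is invoked through ovs-appctl, e.g.:
     *
     *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 up
     *
     * Omitting the netdev argument applies the state to every DPDK port. */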

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

    return 0;
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue rings. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            /* Really all that is needed. */
            *eth_port_id = ivshmem->eth_port_id;
            return 0;
        }
    }

    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_set_rss_hash(pkts[i], 0);
    }

    /* DPDK rings have a single TX queue, therefore it needs locking. */
    rte_spinlock_lock(&dev->txq_lock);
    netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
    rte_spinlock_unlock(&dev->txq_lock);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    NULL,                       /* netdev_dpdk_set_config */  \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from arg list. */
    argc--;
    argv++;

    /* If the cuse_dev_name parameter has been provided, set 'cuse_dev_name' to
     * this string if it meets the correct criteria. Otherwise, set it to the
     * default (vhost-net).
     */
    if (!strcmp(argv[1], "--cuse_dev_name") &&
        (strlen(argv[2]) <= NAME_MAX)) {

        cuse_dev_name = strdup(argv[2]);

        /* Remove the cuse_dev_name configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function. */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the cuse_dev_name arguments */
        base = 2;

        VLOG_INFO("User-provided cuse_dev_name in use: /dev/%s", cuse_dev_name);
    } else {
        cuse_dev_name = "vhost-net";
        VLOG_INFO("No cuse_dev_name provided - defaulting to /dev/vhost-net");
    }

    /* Keep the program name argument as this is needed for call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    thread_set_nonpmd();

    return result + 1 + base;
}
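
/* A minimal sketch of the expected invocation (the EAL flags are standard
 * DPDK arguments and the exact values are deployment specific):
 *
 *     ovs-vswitchd --dpdk -c 0x1 -n 4 --socket-mem 1024 -- <ovs arguments>
 *
 * Everything between --dpdk and the bare "--" separator is handed to
 * rte_eal_init() above. */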
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        NULL,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhost",
        dpdk_vhost_class_init,
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        netdev_register_provider(&dpdk_vhost_class);
        ovsthread_once_done(&once);
    }
}
int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}
void
thread_set_nonpmd(void)
{
    /* We have to use NON_PMD_CORE_ID to allow non-pmd threads to perform
     * certain DPDK operations, like rte_eth_dev_configure(). */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}