 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"
#include "rte_config.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu) (MTU_TO_MAX_LEN(mtu) + (512) + \
                        sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

/* XXX: mempool size should be based on system resources. */
#define NB_MBUF     (4096 * 64)
#define MP_CACHE_SZ (256 * 2)

#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

/* XXX: Needs per NIC value for these constants. */
#define RX_PTHRESH 32 /* Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default values of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default values of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default values of TX write-back threshold reg. */

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6
                      | ETH_RSS_IPV4_UDP | ETH_RSS_IPV6_TCP | ETH_RSS_IPV6_UDP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};

enum { MAX_RX_QUEUE_LEN = 192 };
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;

    struct rte_mempool *mp;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;              /* Set to true to flush the queue every time
                                 * packets are queued. */
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];

/* dpdk has no way to remove dpdk ring ethernet devices
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;  /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    struct dpdk_tx_queue *tx_q;
    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
    struct dpdk_mp *dpdk_mp;
    struct netdev_stats stats;
    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;
    struct rte_eth_link link;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    rte_spinlock_t dpdkr_tx_lock;

struct netdev_rxq_dpdk {
    struct netdev_rxq up;

static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

is_dpdk_class(const struct netdev_class *class)
    return class->construct == netdev_dpdk_construct;

/* XXX: use dpdk malloc for entire OVS.  In fact, huge pages should be used
 * for all other segments: data, bss and text. */

dpdk_rte_mzalloc(size_t sz)
    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);

/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
free_dpdk_buf(struct dp_packet *p)
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);

__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   unsigned i OVS_UNUSED)
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* Start of buffer is just after the dp_packet structure (which embeds
     * the rte_mbuf). */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* Keep some headroom between start of buffer and data. */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* Init some constant fields. */

ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     unsigned i OVS_UNUSED)
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
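
/* Finds or creates the 'struct dpdk_mp' (reference-counted mempool wrapper)
 * for 'socket_id' and 'mtu', adding newly created ones to dpdk_mp_list. */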
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;

    if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d", dmp->mtu,
                 dmp->socket_id) < 0) {

    dmp->mp = rte_mempool_create(mp_name, NB_MBUF, MBUF_SIZE(mtu),
                                 sizeof(struct rte_pktmbuf_pool_private),
                                 rte_pktmbuf_pool_init, NULL,
                                 ovs_rte_pktmbuf_init, NULL,
    if (dmp->mp == NULL) {

    list_push_back(&dpdk_mp_list, &dmp->list_node);

dpdk_mp_put(struct dpdk_mp *dmp)
    ovs_assert(dmp->refcount >= 0);

    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
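
/* Re-reads the link state of 'dev' and, when it has changed, bumps the
 * netdev change sequence and the carrier reset counter. */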
check_link_status(struct netdev_dpdk *dev)
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);
        dev->link_reset_cnt++;

        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
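
/* Watchdog thread: every DPDK_PORT_WATCHDOG_INTERVAL seconds, re-checks the
 * link status of every DPDK port. */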
dpdk_watchdog(void *dummy OVS_UNUSED)
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
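
/* Configures the NIC given by dev->port_id, sets up its tx and rx queues,
 * starts it, enables promiscuous and allmulticast mode, and caches its MAC
 * address and link state. */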
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
        VLOG_ERR("eth dev config error %d", diag);

    for (i = 0; i < dev->up.n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, &tx_conf);
            VLOG_ERR("eth dev tx queue setup error %d", diag);

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      &rx_conf, dev->dpdk_mp->mp);
            VLOG_ERR("eth dev rx queue setup error %d", diag);

    diag = rte_eth_dev_start(dev->port_id);
        VLOG_ERR("eth dev start error %d", diag);

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);

static struct netdev *
netdev_dpdk_alloc(void)
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    /* Each index is considered as a cpu core id, since there should
     * be one tx queue for each cpu core. */
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        /* If the corresponding core is not on the same numa node
         * as 'netdev', flag 'flush_tx'. */
        netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
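
/* Common initialization for "dpdk" and "dpdkr" netdevs: picks the NUMA
 * socket, allocates the tx queues and mempool, and brings the device up. */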
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no)
    OVS_REQUIRES(dpdk_mutex)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    ovs_mutex_init(&netdev->mutex);

    ovs_mutex_lock(&netdev->mutex);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    sid = rte_eth_dev_socket_id(port_no);
    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
    netdev->port_id = port_no;

    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
    rte_spinlock_init(&netdev->dpdkr_tx_lock);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    err = dpdk_eth_dev_init(netdev);

    list_push_back(&dpdk_list, &netdev->list_node);

    rte_free(netdev->tx_q);
    ovs_mutex_unlock(&netdev->mutex);

dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
    if (strncmp(dev_name, prefix, strlen(prefix))) {

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, 0, 0); /* string must be null terminated */

netdev_dpdk_construct(struct netdev *netdev)
    unsigned int port_no;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;

    /* Names always start with "dpdk". */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_destruct(struct netdev *netdev_)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

    ovs_mutex_destroy(&dev->mutex);

netdev_dpdk_dealloc(struct netdev *netdev_)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_numa_id(const struct netdev *netdev_)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;

/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, the old configuration is not restored and
 * the error is simply returned to the caller. */
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;
    rte_free(netdev->tx_q);
    netdev_dpdk_alloc_txq(netdev, n_txq);
    err = dpdk_eth_dev_init(netdev);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);

netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)

netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
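
/* Transmits the packets queued on tx queue 'qid' of 'dev', dropping (and
 * counting in tx_dropped) any that the NIC does not accept. */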
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    while (nb_tx != txq->count) {
        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* Free the buffers we couldn't transmit one at a time (each packet
         * could come from a different mempool). */
        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += txq->count - nb_tx;
        ovs_mutex_unlock(&dev->mutex);

    txq->tsc = rte_get_timer_cycles();

dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {

    dpdk_queue_flush__(dev, qid);
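
/* Receives a batch of packets from rx queue 'rxq_', after first flushing
 * the tx queue that belongs to this core (queue ids match lcore ids). */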
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* There is only one tx queue for this core.  Do not flush other
     * queues. */
    if (rxq_->queue_id == rte_lcore_id()) {
        dpdk_queue_flush(dev, rxq_->queue_id);

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int)NETDEV_MAX_RX_BATCH,
                                 (int)MAX_RX_QUEUE_LEN));
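
/* Copies 'cnt' mbuf pointers onto tx queue 'qid', flushing the queue
 * whenever it fills up, 'flush_tx' is set, or the drain timeout expires. */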
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
            diff_tsc = rte_get_timer_cycles() - txq->tsc;
            if (diff_tsc >= DRAIN_TSC) {
                dpdk_queue_flush__(dev, qid);

/* Tx function.  Makes a copy of each packet into a freshly allocated DPDK
 * mbuf before queueing it for transmission. */
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
    OVS_NO_THREAD_SAFETY_ANALYSIS
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[cnt];

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache. */

    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {

        /* We have to do a copy for now. */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

    if (OVS_UNLIKELY(dropped)) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += dropped;
        ovs_mutex_unlock(&dev->mutex);

    dpdk_queue_pkts(dev, qid, mbufs, newcnt);
    dpdk_queue_flush(dev, qid);

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
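
/* Common send path for the "dpdk" and "dpdkr" classes.  Packets that cannot
 * be stolen or that do not come from a DPDK mempool go through
 * dpdk_do_tx_copy(); DPDK-allocated packets are queued directly, with
 * oversized packets dropped and counted. */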
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);
            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);

        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);

netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    ovs_mutex_unlock(&dev->mutex);
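
/* Changes the device MTU: allocates a mempool sized for the new MTU and
 * reinitializes the device with it; on failure the old mempool and MTU are
 * restored. */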
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct dpdk_mp *old_mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {

    mp = dpdk_mp_get(dev->socket_id, mtu);

    rte_eth_dev_stop(dev->port_id);

    old_mp = dev->dpdk_mp;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);

        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);

    netdev_change_seq_changed(netdev);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    stats->tx_dropped = dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);

    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;

netdev_dpdk_get_ifindex(const struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;

netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)

netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {

    *old_flagsp = dev->flags;

    if (dev->flags == *old_flagsp) {

    if (dev->flags & NETDEV_UP) {
        err = rte_eth_dev_start(dev->port_id);

    if (dev->flags & NETDEV_PROMISC) {
        rte_eth_promiscuous_enable(dev->port_id);

    if (!(dev->flags & NETDEV_UP)) {
        rte_eth_dev_stop(dev->port_id);

netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0)

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x",
                    dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x",
                    dev_info.pci_dev->id.device_id);
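
/* Brings the port administratively up or down by setting or clearing
 * NETDEV_UP through netdev_dpdk_update_flags__(). */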
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
    enum netdev_flags old_flags;

        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);

netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
    if (!strcasecmp(argv[argc - 1], "up")) {
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        unixctl_command_reply_error(conn, "Invalid Admin State");

        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);

        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        ovs_mutex_unlock(&dpdk_mutex);

    unixctl_command_reply(conn, "OK");

dpdk_common_init(void)
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
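
/* Creates the '<dev_name>_tx' and '<dev_name>_rx' rte_rings and an ethdev
 * backed by them, and records the mapping from the user-visible port number
 * to the resulting DPDK ethdev port id. */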
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
    struct dpdk_ring *ivshmem;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {

    err = snprintf(ring_name, 10, "%s_rx", dev_name);

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
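
/* Returns (in '*eth_port_id') the ethdev port id for the dpdkr device named
 * 'dev_name', creating its rings the first time the device is opened. */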
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
    struct dpdk_ring *ivshmem;
    unsigned int port_no;

    /* Names always start with "dpdkr". */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */

    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);

netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
                      struct dp_packet **pkts, int cnt, bool may_steal)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* DPDK rings have a single TX queue, so they need locking here. */
    rte_spinlock_lock(&dev->dpdkr_tx_lock);
    netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
    rte_spinlock_unlock(&dev->dpdkr_tx_lock);

netdev_dpdk_ring_construct(struct netdev *netdev)
    unsigned int port_no = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);

    err = netdev_dpdk_init(netdev, port_no);

    ovs_mutex_unlock(&dpdk_mutex);

#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, MULTIQ, SEND) \
    NULL, /* netdev_dpdk_run */ \
    NULL, /* netdev_dpdk_wait */ \
    netdev_dpdk_alloc, \
    netdev_dpdk_destruct, \
    netdev_dpdk_dealloc, \
    netdev_dpdk_get_config, \
    NULL, /* netdev_dpdk_set_config */ \
    NULL, /* get_tunnel_config */ \
    NULL, /* build header */ \
    NULL, /* push header */ \
    NULL, /* pop header */ \
    netdev_dpdk_get_numa_id, /* get_numa_id */ \
    MULTIQ, /* set_multiq */ \
    NULL, /* send_wait */ \
    netdev_dpdk_set_etheraddr, \
    netdev_dpdk_get_etheraddr, \
    netdev_dpdk_get_mtu, \
    netdev_dpdk_set_mtu, \
    netdev_dpdk_get_ifindex, \
    netdev_dpdk_get_carrier, \
    netdev_dpdk_get_carrier_resets, \
    netdev_dpdk_set_miimon, \
    netdev_dpdk_get_stats, \
    netdev_dpdk_get_features, \
    NULL, /* set_advertisements */ \
    NULL, /* set_policing */ \
    NULL, /* get_qos_types */ \
    NULL, /* get_qos_capabilities */ \
    NULL, /* get_qos */ \
    NULL, /* set_qos */ \
    NULL, /* get_queue */ \
    NULL, /* set_queue */ \
    NULL, /* delete_queue */ \
    NULL, /* get_queue_stats */ \
    NULL, /* queue_dump_start */ \
    NULL, /* queue_dump_next */ \
    NULL, /* queue_dump_done */ \
    NULL, /* dump_queue_stats */ \
    NULL, /* get_in4 */ \
    NULL, /* set_in4 */ \
    NULL, /* get_in6 */ \
    NULL, /* add_router */ \
    NULL, /* get_next_hop */ \
    netdev_dpdk_get_status, \
    NULL, /* arp_lookup */ \
    netdev_dpdk_update_flags, \
    netdev_dpdk_rxq_alloc, \
    netdev_dpdk_rxq_construct, \
    netdev_dpdk_rxq_destruct, \
    netdev_dpdk_rxq_dealloc, \
    netdev_dpdk_rxq_recv, \
    NULL, /* rx_wait */ \
    NULL, /* rxq_drain */ \
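
/* Parses the --dpdk EAL arguments from ovs-vswitchd's command line,
 * initializes the DPDK EAL with them, and clears rte_eal_init_ret so that
 * DPDK netdevs can be created afterwards. */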
dpdk_init(int argc, char **argv)
    if (argc < 2 || strcmp(argv[1], "--dpdk"))

    /* Make sure program name passed to rte_eal_init() is vswitchd. */

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
        ovs_abort(result, "Cannot init EAL\n");

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];

    /* We are called from the main thread here. */
    thread_set_nonpmd();

const struct netdev_class dpdk_class =
        netdev_dpdk_construct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send);

const struct netdev_class dpdk_ring_class =
        netdev_dpdk_ring_construct,
        netdev_dpdk_ring_send);

netdev_dpdk_register(void)
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {

    if (ovsthread_once_start(&once)) {
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        ovsthread_once_done(&once);
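
/* Pins the calling pmd thread to 'cpu' and uses that core id as the
 * thread's DPDK lcore id. */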
pmd_thread_setaffinity_cpu(int cpu)
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
        VLOG_ERR("Thread affinity error %d", err);

    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

thread_set_nonpmd(void)
    /* We have to use NON_PMD_CORE_ID to allow non-pmd threads to perform
     * certain DPDK operations, like rte_eth_dev_configure(). */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    return rte_lcore_id() != NON_PMD_CORE_ID;