/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#include "dpif-netdev.h"
#include "list.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "packet-dpif.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

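/* For example, with the default ETHER_MTU of 1500, MTU_TO_MAX_LEN(1500) is
 * 1518 (1500 bytes of payload plus the 14-byte Ethernet header and 4-byte
 * CRC), so each mbuf holds one maximum-size frame plus 512 bytes of
 * alignment slack, the rte_mbuf metadata and the standard headroom. */
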
/* TODO: mempool size should be based on system resources. */
#define NB_MBUF              (4096 * 64)
#define MP_CACHE_SZ          (256 * 2)

#define NON_PMD_THREAD_TX_QUEUE 0

#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

/* TODO: Needs per NIC value for these constants. */
#define RX_PTHRESH 32 /* Default value of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default value of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default value of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default value of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default value of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default value of TX write-back threshold reg. */

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0,
    .tx_rs_thresh = 0,
    .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS|ETH_TXQ_FLAGS_NOOFFLOADS,
};

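/* NOMULTSEGS and NOOFFLOADS promise the PMD that it will never see
 * multi-segment mbufs or offload requests on this queue, which lets drivers
 * that check these flags select their simpler (and faster) tx code path. */
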
enum { MAX_RX_QUEUE_LEN = 192 };
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DRAIN_TSC = 200000ULL };

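/* DRAIN_TSC is measured in TSC cycles, as returned by
 * rte_get_timer_cycles(): 200000 cycles is on the order of 100 us on a
 * 2 GHz core, so a partially filled tx burst buffer is flushed roughly that
 * often even when no new packets arrive. */
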
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct netdev_dpdk's. */
static struct list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_list);

static struct list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non-pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non-pmd thread should hold this mutex while calling them. */
struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;

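/* dpdk_do_tx_copy() below shows the intended pattern: a caller that is not a
 * pmd thread takes nonpmd_mempool_mutex around the whole
 * allocate/queue/flush sequence, because all non-pmd threads run as lcore 0
 * and therefore share a single mempool cache. */
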
/* A mempool, shared by all ports with the same MTU on the same NUMA
 * socket. */
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;
    int count;                  /* Number of packets queued so far. */
    uint64_t tsc;               /* TSC stamp of the last flush. */
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

/* dpdk has no way to remove dpdk ring ethernet devices
 * so we have to keep them around once they've been created. */

static struct list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name. */
    int eth_port_id;  /* Ethernet device port id. */
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;

    struct dpdk_tx_queue tx_q[NR_QUEUE];

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats_offset;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* In dpdk_list. */
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* TODO: use dpdk malloc for entire OVS. In fact, huge pages should be used
 * for all other segments too: data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

/* XXX this function should be called only by pmd threads (or by non-pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dpif_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}

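/* The initializer below lays each mempool element out so that the packet
 * data buffer begins just after a full 'struct dpif_packet' (which itself
 * starts with the rte_mbuf), letting the rest of OVS treat every mbuf in
 * the pool directly as a dpif_packet. */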
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dpif_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dpif_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *) m + sizeof(struct dpif_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dpif_packet);
    m->buf_len = (uint16_t) buf_len;

    /* keep some headroom between start of buffer and data */
    m->pkt.data = (char *) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM,
                                                 m->buf_len);

    /* init some constant fields */
    m->type = RTE_MBUF_PKT;
    m->pool = mp;
    m->pkt.nb_segs = 1;
    m->pkt.in_port = 0xff;
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    ofpbuf_init_dpdk((struct ofpbuf *) m, m->buf_len);
}

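/* Looks up (and references) an existing mempool for the given NUMA socket
 * and MTU, or creates and registers a new one.  Callers must hold
 * dpdk_mutex, since this walks the global dpdk_mp_list. */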
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    /* The name must be unique per (MTU, socket) pair, or
     * rte_mempool_create() will fail with EEXIST. */
    if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d",
                 dmp->mtu, dmp->socket_id) < 0) {
        return NULL;
    }

    dmp->mp = rte_mempool_create(mp_name, NB_MBUF, MBUF_SIZE(mtu),
                                 MP_CACHE_SZ,
                                 sizeof(struct rte_pktmbuf_pool_private),
                                 rte_pktmbuf_pool_init, NULL,
                                 ovs_rte_pktmbuf_init, NULL,
                                 socket_id, 0);
    if (dmp->mp == NULL) {
        return NULL;
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}

static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, NR_QUEUE, NR_QUEUE, &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return -diag;
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      &rx_conf, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}

static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;
    int i;

    ovs_mutex_init(&netdev->mutex);

    ovs_mutex_lock(&netdev->mutex);

    for (i = 0; i < NR_QUEUE; i++) {
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }

    netdev->port_id = port_no;

    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    /* TODO: need to discover device node at run time. */
    netdev->socket_id = SOCKET0;

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    err = dpdk_eth_dev_init(netdev);
    if (err) {
        goto unlock;
    }
    netdev_->n_rxq = NR_QUEUE;

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}

static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, 0, 0); /* string must be null terminated */
    return 0;
}

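/* For example, with prefix "dpdk" the netdev name "dpdk8" parses to
 * port_no 8; the ring devices below use the same helper with the "dpdkr"
 * prefix. */
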
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

    ovs_mutex_destroy(&dev->mutex);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    /* TODO: Allow to configure number of queues. */
    smap_add_format(args, "configured_rx_queues", "%u", netdev_->n_rxq);
    /* The tx queue count mirrors the rx queue count: both are NR_QUEUE. */
    smap_add_format(args, "configured_tx_queues", "%u", netdev_->n_rxq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}

static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx;

    nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
    }
    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    rte_spinlock_lock(&txq->tx_lock);
    dpdk_queue_flush__(dev, qid);
    rte_spinlock_unlock(&txq->tx_lock);
}

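/* The flush at the top of the receive path below is what keeps queued tx
 * packets from getting stranded: the thread that polls an rx queue also
 * drains the tx queue with the same queue id before receiving. */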
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dpif_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    dpdk_queue_flush(dev, rxq_->queue_id);

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int) NETDEV_MAX_RX_BATCH,
                                 (int) MAX_RX_QUEUE_LEN));
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}

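/* Packets handed to dpdk_queue_pkts() are buffered per tx queue and only
 * pushed to the NIC by dpdk_queue_flush__() once the burst buffer fills up
 * (MAX_TX_QUEUE_LEN) or DRAIN_TSC cycles have elapsed since the last flush,
 * whichever happens first. */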
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    rte_spinlock_lock(&txq->tx_lock);
    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
    rte_spinlock_unlock(&txq->tx_lock);
}

/* Tx function.  Transmits packets by copying them into freshly allocated
 * DPDK mbufs; used when the packets do not already live in DPDK memory or
 * may not be stolen from the caller. */
static void
dpdk_do_tx_copy(struct netdev *netdev, struct dpif_packet **pkts, int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[cnt];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non-pmd thread we have to use the mempool mutex,
     * because every non-pmd thread shares the same mempool cache. */
    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = ofpbuf_size(&pkts[i]->ofpbuf);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(mbufs[newcnt]->pkt.data, ofpbuf_data(&pkts[i]->ofpbuf), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += dropped;
        ovs_mutex_unlock(&dev->mutex);
    }

    dpdk_queue_pkts(dev, NON_PMD_THREAD_TX_QUEUE, mbufs, newcnt);
    dpdk_queue_flush(dev, NON_PMD_THREAD_TX_QUEUE);

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}

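/* Transmit path.  Packets that may be stolen and already live in DPDK mbufs
 * (OFPBUF_DPDK) are queued directly on the tx queue selected by the
 * caller's lcore id; everything else goes through the copying slow path
 * above.  Oversized packets are dropped and counted in either case. */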
static int
netdev_dpdk_send(struct netdev *netdev, struct dpif_packet **pkts, int cnt,
                 bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int i;

    if (!may_steal || pkts[0]->ofpbuf.source != OFPBUF_DPDK) {
        dpdk_do_tx_copy(netdev, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dpif_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;
        int qid;

        qid = rte_lcore_id() % NR_QUEUE;

        for (i = 0; i < cnt; i++) {
            int size = ofpbuf_size(&pkts[i]->ofpbuf);
            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **) &pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int) size, dev->max_packet_len);

                dpif_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **) &pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);
        }
    }

    return 0;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    /* Get (or create) a mempool sized for the new MTU. */
    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* Restore the original configuration and restart the device. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    *stats = dev->stats_offset;

    stats->rx_packets += rte_stats.ipackets;
    stats->tx_packets += rte_stats.opackets;
    stats->rx_bytes += rte_stats.ibytes;
    stats->tx_bytes += rte_stats.obytes;
    stats->rx_errors += rte_stats.ierrors;
    stats->tx_errors += rte_stats.oerrors;
    stats->multicast += rte_stats.imcasts;

    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dev->stats_offset = *stats;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}

static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}

static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->flags & NETDEV_UP) {
        err = rte_eth_dev_start(dev->port_id);
        if (err) {
            return -err;
        }
    }

    if (dev->flags & NETDEV_PROMISC) {
        rte_eth_promiscuous_enable(dev->port_id);
    }

    if (!(dev->flags & NETDEV_UP)) {
        rte_eth_dev_stop(dev->port_id);
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id <= 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

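/* With the command registered below, for example:
 *   ovs-appctl netdev-dpdk/set-admin-state dpdk0 down   (one port)
 *   ovs-appctl netdev-dpdk/set-admin-state up           (every DPDK port) */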
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}

static int
dpdk_class_init(void)
{
    int result;

    result = rte_pmd_init_all();
    if (result) {
        VLOG_ERR("Cannot init PMD");
        return -result;
    }

    result = rte_eal_pci_probe();
    if (result) {
        VLOG_ERR("Cannot probe PCI");
        return -result;
    }

    VLOG_INFO("Ethernet Device Count: %d", (int) rte_eth_dev_count());

    return 0;
}

static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    ivshmem->cring_tx = rte_ring_create(ring_name, MAX_RX_QUEUE_LEN, SOCKET0, 0);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    ivshmem->cring_rx = rte_ring_create(ring_name, MAX_RX_QUEUE_LEN, SOCKET0, 0);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(&ivshmem->cring_rx, 1, &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}

static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT)              \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    netdev_dpdk_destruct,                                     \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    NULL,                       /* netdev_dpdk_set_config */  \
    NULL,                       /* get_tunnel_config */       \
                                                              \
    netdev_dpdk_send,           /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    netdev_dpdk_get_carrier,                                  \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    netdev_dpdk_get_stats,                                    \
    netdev_dpdk_set_stats,                                    \
    netdev_dpdk_get_features,                                 \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    netdev_dpdk_get_status,                                   \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    netdev_dpdk_rxq_recv,                                     \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}

int
dpdk_init(int argc, char **argv)
{
    int result;

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Make sure program name passed to rte_eal_init() is vswitchd. */
    argv[1] = argv[0];

    argc--;
    argv++;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL\n");
    }

    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    thread_set_nonpmd();

    return result + 1;
}

const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        dpdk_class_init,
        netdev_dpdk_construct);

const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct);

void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        ovsthread_once_done(&once);
    }
}

int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* lcore_id 0 is reserved for use by non-pmd threads. */
    RTE_PER_LCORE(_lcore_id) = cpu + 1;

    return 0;
}

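/* A pmd thread pinned to cpu N therefore runs as DPDK lcore N + 1, so each
 * pmd thread gets a private per-lcore mempool cache, while every non-pmd
 * thread shares lcore 0 (see thread_set_nonpmd() below). */
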
void
thread_set_nonpmd(void)
{
    /* We cannot have RTE_MAX_LCORE pmd threads, because lcore_id 0 is
     * reserved for non-pmd threads. */
    BUILD_ASSERT(NR_PMD_THREADS < RTE_MAX_LCORE);

    /* We have to use 0 to allow non-pmd threads to perform certain DPDK
     * operations, like rte_eth_dev_configure(). */
    RTE_PER_LCORE(_lcore_id) = 0;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != 0;
}