/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "packet-dpif.h"
#include "unaligned.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We need to reserve extra space in the mbufs so that we can align the
 * DMA addresses to 4KB.
 */
#define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu) (MTU_TO_MAX_LEN(mtu) + (512) + \
                        sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

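/*
 * Worked example (a sketch; RTE_PKTMBUF_HEADROOM is typically 128 but is
 * configurable at DPDK build time): for the default ETHER_MTU of 1500,
 * with ETHER_HDR_LEN 14 and ETHER_CRC_LEN 4,
 *
 *     MTU_TO_MAX_LEN(1500) = 1500 + 14 + 4 = 1518
 *     MBUF_SIZE(1500)      = 1518 + 512 + sizeof(struct rte_mbuf) + 128
 *                          = 2158 + sizeof(struct rte_mbuf)
 */
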
/* TODO: mempool size should be based on system resources. */
#define NB_MBUF (4096 * 64)
#define MP_CACHE_SZ (256 * 2)

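/*
 * Back-of-the-envelope footprint (a sketch, not a measurement): NB_MBUF is
 * 4096 * 64 = 262144 mbufs, and at MBUF_SIZE(1500) of a little over 2KB
 * each, a single mempool occupies on the order of half a gigabyte of
 * hugepage memory, which is why the TODO above matters.
 */
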
#define NON_PMD_THREAD_TX_QUEUE 0

/* TODO: Need per-NIC values for these constants. */
#define RX_PTHRESH 32 /* Default value of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default value of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default value of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default value of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default value of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default value of TX write-back threshold reg. */

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0,
    .tx_rs_thresh = 0,
};

enum { MAX_RX_QUEUE_LEN = 64 };
enum { MAX_TX_QUEUE_LEN = 64 };
enum { DRAIN_TSC = 200000ULL };

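/*
 * Rough time scale (a sketch; the TSC frequency is CPU-dependent): on a
 * 2GHz core, DRAIN_TSC = 200000 cycles is about 100 microseconds, which
 * bounds how long a partially filled tx queue can sit before it is
 * flushed by dpdk_queue_pkts().
 */
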
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_list);

static struct list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_mp_list);

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;

    struct dpdk_tx_queue tx_q[NR_QUEUE];

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats_offset;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* In dpdk_list. */
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static int netdev_dpdk_construct(struct netdev *);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* TODO: use DPDK malloc for the entire OVS. In fact, huge pages should be
 * used for all the other segments as well: data, bss and text. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        ovs_abort(0, "%s", __func__);
    }
    return ptr;
}

void
free_dpdk_buf(struct dpif_packet *p)
{
    struct ofpbuf *ofp = &p->ofpbuf;
    struct rte_mbuf *pkt = (struct rte_mbuf *) ofp->dpdk_buf;

    rte_mempool_put(pkt->pool, pkt);
}

static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dpif_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dpif_packet));

    memset(m, 0, mp->elt_size);

    /* Start of buffer is just after the dpif_packet structure. */
    m->buf_addr = (char *) m + sizeof(struct dpif_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dpif_packet);
    m->buf_len = (uint16_t) buf_len;

    /* Keep some headroom between start of buffer and data. */
    m->pkt.data = (char *) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM,
                                                 m->buf_len);

    /* Init some constant fields. */
    m->type = RTE_MBUF_PKT;
    m->pool = mp;
    m->pkt.nb_segs = 1;
    m->pkt.in_port = 0xff;
}

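/*
 * Resulting mempool element layout (a sketch; exact offsets depend on the
 * build):
 *
 *     +--------------------+----------------------+------------------+
 *     | struct dpif_packet | RTE_PKTMBUF_HEADROOM | packet data ...  |
 *     | (rte_mbuf at start)|                      |                  |
 *     +--------------------+----------------------+------------------+
 *     ^ m                  ^ m->buf_addr          ^ m->pkt.data
 */
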
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    ofpbuf_init_dpdk((struct ofpbuf *) m, m->buf_len);
}

static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d", dmp->mtu);
    dmp->mp = rte_mempool_create(mp_name, NB_MBUF, MBUF_SIZE(mtu),
                                 MP_CACHE_SZ,
                                 sizeof(struct rte_pktmbuf_pool_private),
                                 rte_pktmbuf_pool_init, NULL,
                                 ovs_rte_pktmbuf_init, NULL,
                                 socket_id, 0);
    if (dmp->mp == NULL) {
        return NULL;
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}

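/*
 * Sharing sketch (hypothetical usage): two ports with the same MTU on the
 * same NUMA socket end up with the same refcounted mempool:
 *
 *     struct dpdk_mp *a = dpdk_mp_get(SOCKET0, ETHER_MTU);
 *     struct dpdk_mp *b = dpdk_mp_get(SOCKET0, ETHER_MTU);
 *     ovs_assert(a == b && a->refcount == 2);
 */
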
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy the mempool. */
    if (dmp->refcount == 0) {
        list_delete(&dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, NR_QUEUE, NR_QUEUE, &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return diag;
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, MAX_TX_QUEUE_LEN,
                                      dev->socket_id, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return diag;
        }
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, MAX_RX_QUEUE_LEN,
                                      dev->socket_id,
                                      &rx_conf, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}

static int
netdev_dpdk_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned int port_no;
    char *cport;
    int err;
    int i;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    cport = netdev_->name + 4; /* Names always start with "dpdk". */

    if (strncmp(netdev_->name, "dpdk", 4)) {
        err = ENODEV;
        goto unlock_dpdk;
    }

    port_no = strtol(cport, 0, 0); /* string must be null terminated */

    for (i = 0; i < NR_QUEUE; i++) {
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }

    ovs_mutex_init(&netdev->mutex);

    ovs_mutex_lock(&netdev->mutex);

    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    /* TODO: need to discover the device node at run time. */
    netdev->socket_id = SOCKET0;
    netdev->port_id = port_no;

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock_dev;
    }

    err = dpdk_eth_dev_init(netdev);
    if (err) {
        goto unlock_dev;
    }
    netdev_->n_rxq = NR_QUEUE;

    list_push_back(&dpdk_list, &netdev->list_node);

unlock_dev:
    ovs_mutex_unlock(&netdev->mutex);
unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

    ovs_mutex_destroy(&dev->mutex);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    /* TODO: Allow configuring the number of queues. */
    smap_add_format(args, "configured_rx_queues", "%u", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%u", netdev_->n_rxq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx;

    if (txq->count == 0) {
        return;
    }

    rte_spinlock_lock(&txq->tx_lock);
    nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
    if (nb_tx != txq->count) {
        /* Free any buffers that we could not transmit. */
        rte_mempool_put_bulk(dev->dpdk_mp->mp,
                             (void **) &txq->burst_pkts[nb_tx],
                             (txq->count - nb_tx));
    }
    txq->count = 0;
    rte_spinlock_unlock(&txq->tx_lock);
}

static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dpif_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    dpdk_queue_flush(dev, rxq_->queue_id);

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int) NETDEV_MAX_RX_BATCH,
                                 (int) MAX_RX_QUEUE_LEN));
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}

inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;
    uint64_t cur_tsc;
    uint32_t nb_tx;

    int i = 0;

    rte_spinlock_lock(&txq->tx_lock);
    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN) {
            goto flush;
        }
        cur_tsc = rte_get_timer_cycles();
        if (txq->count == 1) {
            txq->tsc = cur_tsc;
        }
        diff_tsc = cur_tsc - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            goto flush;
        }
        continue;

    flush:
        nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts,
                                 txq->count);
        if (nb_tx != txq->count) {
            /* Free any buffers that we could not transmit. */
            rte_mempool_put_bulk(dev->dpdk_mp->mp,
                                 (void **) &txq->burst_pkts[nb_tx],
                                 (txq->count - nb_tx));
        }
        txq->count = 0;
    }
    rte_spinlock_unlock(&txq->tx_lock);
}

/* Tx function used when the packets are not DPDK mbufs: copies each packet
 * into a freshly allocated mbuf and queues it on the non-pmd tx queue. */
static void
dpdk_do_tx_copy(struct netdev *netdev, struct dpif_packet **pkts, int cnt)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[cnt];
    int i, newcnt = 0;

    for (i = 0; i < cnt; i++) {
        int size = ofpbuf_size(&pkts[i]->ofpbuf);

        if (size > dev->max_packet_len) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         size, dev->max_packet_len);

            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped++;
            ovs_mutex_unlock(&dev->mutex);

            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped++;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }

        /* We have to do a copy for now. */
        memcpy(mbufs[newcnt]->pkt.data, ofpbuf_data(&pkts[i]->ofpbuf), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    dpdk_queue_pkts(dev, NON_PMD_THREAD_TX_QUEUE, mbufs, newcnt);
    dpdk_queue_flush(dev, NON_PMD_THREAD_TX_QUEUE);
}

static int
netdev_dpdk_send(struct netdev *netdev, struct dpif_packet **pkts, int cnt,
                 bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int i;

    if (!may_steal || pkts[0]->ofpbuf.source != OFPBUF_DPDK) {
        dpdk_do_tx_copy(netdev, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dpif_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;
        int qid;

        qid = rte_lcore_id() % NR_QUEUE;

        for (i = 0; i < cnt; i++) {
            int size = ofpbuf_size(&pkts[i]->ofpbuf);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **) &pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             size, dev->max_packet_len);

                dpif_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **) &pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);
        }
    }

    return 0;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* Reinitialization failed; restore the old configuration. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    *stats = dev->stats_offset;

    stats->rx_packets += rte_stats.ipackets;
    stats->tx_packets += rte_stats.opackets;
    stats->rx_bytes += rte_stats.ibytes;
    stats->tx_bytes += rte_stats.obytes;
    stats->rx_errors += rte_stats.ierrors;
    stats->tx_errors += rte_stats.oerrors;
    stats->multicast += rte_stats.imcasts;

    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dev->stats_offset = *stats;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}

static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}

static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->flags & NETDEV_UP) {
        err = rte_eth_dev_start(dev->port_id);
        if (err) {
            return err;
        }
    }

    if (dev->flags & NETDEV_PROMISC) {
        rte_eth_promiscuous_enable(dev->port_id);
    }

    if (!(dev->flags & NETDEV_UP)) {
        rte_eth_dev_stop(dev->port_id);
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);

        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

static int
dpdk_class_init(void)
{
    int result;

    if (rte_eal_init_ret) {
        return 0;
    }

    result = rte_pmd_init_all();
    if (result) {
        VLOG_ERR("Cannot init PMD");
        return result;
    }

    result = rte_eal_pci_probe();
    if (result) {
        VLOG_ERR("Cannot probe PCI");
        return result;
    }

    if (rte_eth_dev_count() < 1) {
        VLOG_ERR("No Ethernet devices found. Try assigning ports to UIO.");
    }

    VLOG_INFO("Ethernet Device Count: %d", (int) rte_eth_dev_count());

    list_init(&dpdk_list);
    list_init(&dpdk_mp_list);

    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

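    /*
     * Example invocations of the command registered above (illustrative):
     * bring one port down, or bring all DPDK ports up:
     *
     *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
     *     ovs-appctl netdev-dpdk/set-admin-state up
     */
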
    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

    return 0;
}

static struct netdev_class netdev_dpdk_class = {
    "dpdk",
    dpdk_class_init,            /* init */
    NULL,                       /* netdev_dpdk_run */
    NULL,                       /* netdev_dpdk_wait */

    netdev_dpdk_alloc,
    netdev_dpdk_construct,
    netdev_dpdk_destruct,
    netdev_dpdk_dealloc,
    netdev_dpdk_get_config,
    NULL,                       /* netdev_dpdk_set_config */
    NULL,                       /* get_tunnel_config */

    netdev_dpdk_send,           /* send */
    NULL,                       /* send_wait */

    netdev_dpdk_set_etheraddr,
    netdev_dpdk_get_etheraddr,
    netdev_dpdk_get_mtu,
    netdev_dpdk_set_mtu,
    netdev_dpdk_get_ifindex,
    netdev_dpdk_get_carrier,
    netdev_dpdk_get_carrier_resets,
    netdev_dpdk_set_miimon,
    netdev_dpdk_get_stats,
    netdev_dpdk_set_stats,
    netdev_dpdk_get_features,
    NULL,                       /* set_advertisements */

    NULL,                       /* set_policing */
    NULL,                       /* get_qos_types */
    NULL,                       /* get_qos_capabilities */
    NULL,                       /* get_qos */
    NULL,                       /* set_qos */
    NULL,                       /* get_queue */
    NULL,                       /* set_queue */
    NULL,                       /* delete_queue */
    NULL,                       /* get_queue_stats */
    NULL,                       /* queue_dump_start */
    NULL,                       /* queue_dump_next */
    NULL,                       /* queue_dump_done */
    NULL,                       /* dump_queue_stats */

    NULL,                       /* get_in4 */
    NULL,                       /* set_in4 */
    NULL,                       /* get_in6 */
    NULL,                       /* add_router */
    NULL,                       /* get_next_hop */
    netdev_dpdk_get_status,
    NULL,                       /* arp_lookup */

    netdev_dpdk_update_flags,

    netdev_dpdk_rxq_alloc,
    netdev_dpdk_rxq_construct,
    netdev_dpdk_rxq_destruct,
    netdev_dpdk_rxq_dealloc,
    netdev_dpdk_rxq_recv,
    NULL,                       /* rxq_wait */
    NULL,                       /* rxq_drain */
};

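/*
 * dpdk_init() below consumes a leading "--dpdk" plus the EAL arguments
 * that follow it, and returns the number of arguments it used.  A typical
 * invocation (illustrative; the exact EAL flags are system-dependent):
 *
 *     ovs-vswitchd --dpdk -c 0x1 -n 4 -- unix:$DB_SOCK --pidfile
 */
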
int
dpdk_init(int argc, char **argv)
{
    int result;

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Make sure program name passed to rte_eal_init() is vswitchd. */
    argv[1] = argv[0];

    argc--;
    argv++;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL\n");
    }

    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    return result + 1;
}

void
netdev_dpdk_register(void)
{
    netdev_register_provider(&netdev_dpdk_class);
}

int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
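    /* Setting the per-lcore id makes this pthread look like a DPDK lcore:
     * rte_lcore_id(), used e.g. for tx queue selection in
     * netdev_dpdk_send(), will return 'cpu' on this thread. */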
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}