2 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
30 #include <sys/types.h>
34 #include "dp-packet.h"
35 #include "dpif-netdev.h"
36 #include "fatal-signal.h"
38 #include "netdev-dpdk.h"
39 #include "netdev-provider.h"
40 #include "netdev-vport.h"
42 #include "ofp-print.h"
44 #include "ovs-thread.h"
50 #include "unaligned.h"
53 #include "openvswitch/vlog.h"
55 #include "rte_config.h"
57 #include "rte_meter.h"
58 #include "rte_virtio_net.h"
60 VLOG_DEFINE_THIS_MODULE(dpdk);
61 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
63 #define DPDK_PORT_WATCHDOG_INTERVAL 5
65 #define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
66 #define OVS_VPORT_DPDK "ovs_dpdk"
69 * We need to reserve extra space in the mbufs so that we can align their
70 * DMA addresses to 4KB.
71 * The minimum mbuf size is bounded below to avoid scatter behaviour and the
72 * resulting drop in performance at the standard Ethernet MTU.
74 #define ETHER_HDR_MAX_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + (2 * VLAN_HEADER_LEN))
75 #define MTU_TO_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
76 #define MTU_TO_MAX_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_MAX_LEN)
77 #define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN - ETHER_CRC_LEN)
78 #define MBUF_SIZE(mtu) ( MTU_TO_MAX_FRAME_LEN(mtu) \
79 + sizeof(struct dp_packet) \
80 + RTE_PKTMBUF_HEADROOM)
81 #define NETDEV_DPDK_MBUF_ALIGN 1024
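/* Editor's illustrative sketch (not in the original source): for the standard
 * Ethernet MTU of 1500, assuming DPDK's default RTE_PKTMBUF_HEADROOM of 128
 * bytes:
 *
 *   MTU_TO_FRAME_LEN(1500)     == 1500 + 14 + 4           == 1518
 *   MTU_TO_MAX_FRAME_LEN(1500) == 1500 + 14 + 4 + (2 * 4) == 1526
 *   MBUF_SIZE(1500)            == 1526 + sizeof(struct dp_packet) + 128
 *
 * dpdk_buf_size() below then rounds 1526 + 128 == 1654 up to the next
 * multiple of NETDEV_DPDK_MBUF_ALIGN (1024), giving a 2048-byte buffer. */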
83 /* Max and min number of packets in the mempool. OVS tries to allocate a
84 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
85 * enough hugepages) we keep halving the number until the allocation succeeds
86 * or we reach MIN_NB_MBUF. */
88 #define MAX_NB_MBUF (4096 * 64)
89 #define MIN_NB_MBUF (4096 * 4)
90 #define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE
92 /* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
93 BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);
95 /* The smallest possible NB_MBUF that we're going to try should be a multiple
96 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
97 BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
98 % MP_CACHE_SZ == 0);
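/* Editor's worked example (not in the original source): with the values
 * above, MAX_NB_MBUF/MIN_NB_MBUF == 16, so the allocation attempts in
 * dpdk_mp_get() are 262144, 131072, 65536, 32768 and finally 16384 mbufs;
 * 262144 % 16 == 0 and 16384 % MP_CACHE_SZ == 0, so both assertions hold
 * (assuming RTE_MEMPOOL_CACHE_MAX_SIZE is 512). */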
102 #define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
103 #define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n+32<=4096) */
105 #define OVS_VHOST_MAX_QUEUE_NUM 1024 /* Maximum number of vHost TX queues. */
107 static char *cuse_dev_name = NULL; /* Character device cuse_dev_name. */
108 static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets */
111 * Maximum amount of time in microseconds to try to enqueue to vhost.
113 #define VHOST_ENQ_RETRY_USECS 100
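/* Editor's illustrative sketch: __netdev_dpdk_vhost_send() converts this
 * deadline into TSC cycles as
 *
 *   timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
 *
 * so, assuming for example a 2.4 GHz timer, 100 us becomes 240000 cycles. */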
115 static const struct rte_eth_conf port_conf = {
117 .mq_mode = ETH_MQ_RX_RSS,
119 .header_split = 0, /* Header Split disabled */
120 .hw_ip_checksum = 0, /* IP checksum offload disabled */
121 .hw_vlan_filter = 0, /* VLAN filtering disabled */
122 .jumbo_frame = 0, /* Jumbo Frame Support disabled */
128 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
132 .mq_mode = ETH_MQ_TX_NONE,
136 enum { MAX_TX_QUEUE_LEN = 384 };
137 enum { DPDK_RING_SIZE = 256 };
138 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
139 enum { DRAIN_TSC = 200000ULL };
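/* Editor's note: DRAIN_TSC is compared against TSC deltas in
 * dpdk_queue_pkts(), so queued packets older than 200000 cycles (roughly
 * 100 us on an assumed 2 GHz TSC) are flushed out. */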
146 static int rte_eal_init_ret = ENODEV;
148 static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
150 /* Quality of Service */
152 /* An instance of a QoS configuration. Always associated with a particular
153 * netdev.
155 * Each QoS implementation subclasses this with whatever additional data it
156 * needs.
159 const struct dpdk_qos_ops *ops;
162 /* A particular implementation of dpdk QoS operations.
164 * The functions below return 0 if successful or a positive errno value on
165 * failure, except where otherwise noted. All of them must be provided, except
166 * where otherwise noted.
168 struct dpdk_qos_ops {
170 /* Name of the QoS type */
171 const char *qos_name;
173 /* Called to construct the QoS implementation on 'netdev'. The
174 * implementation should make the appropriate calls to configure QoS
175 * according to 'details'. The implementation may assume that any current
176 * QoS configuration already installed should be destroyed before
177 * constructing the new configuration.
179 * The contents of 'details' should be documented as valid for 'ovs_name'
180 * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
181 * (which is built as ovs-vswitchd.conf.db(8)).
183 * This function must return 0 if and only if it sets 'netdev->qos_conf'
184 * to an initialized 'struct qos_conf'.
186 * For all QoS implementations it should always be non-null.
188 int (*qos_construct)(struct netdev *netdev, const struct smap *details);
190 /* Destroys the data structures allocated by the implementation as part of
193 * For all QoS implementations it should always be non-null.
195 void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);
197 /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
199 * The contents of 'details' should be documented as valid for 'ovs_name'
200 * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
201 * (which is built as ovs-vswitchd.conf.db(8)).
203 int (*qos_get)(const struct netdev *netdev, struct smap *details);
205 /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
206 * required calls to complete the reconfiguration.
208 * The contents of 'details' should be documented as valid for 'ovs_name'
209 * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
210 * (which is built as ovs-vswitchd.conf.db(8)).
212 * This function may be null if 'qos_conf' is not configurable.
214 int (*qos_set)(struct netdev *netdev, const struct smap *details);
216 /* Modify an array of rte_mbufs. The modification is specific to
217 * each qos implementation.
219 * The function takes an array of mbufs and an int representing
220 * the current number of mbufs present in the array.
222 * After the function has performed a qos modification to the array of
223 * mbufs it returns an int representing the number of mbufs now present in
224 * the array. This value can then be passed to the port send function
225 * along with the modified array for transmission.
227 * For all QoS implementations it should always be non-null.
229 int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
233 /* dpdk_qos_ops for each type of user space QoS implementation */
234 static const struct dpdk_qos_ops egress_policer_ops;
237 * Array of dpdk_qos_ops, contains pointers to all supported QoS
240 static const struct dpdk_qos_ops *const qos_confs[] = {
245 /* Contains all 'struct dpdk_dev's. */
246 static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
247 = OVS_LIST_INITIALIZER(&dpdk_list);
249 static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
250 = OVS_LIST_INITIALIZER(&dpdk_mp_list);
252 /* This mutex must be used by non pmd threads when allocating or freeing
253 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
254 * use mempools, a non pmd thread should hold this mutex while calling them */
255 static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
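/* Editor's illustrative sketch of the locking rule above, mirroring what
 * dpdk_do_tx_copy() does around its mbuf allocations:
 *
 *   if (!dpdk_thread_is_pmd()) {
 *       ovs_mutex_lock(&nonpmd_mempool_mutex);
 *   }
 *   ... allocate or free mbufs ...
 *   if (!dpdk_thread_is_pmd()) {
 *       ovs_mutex_unlock(&nonpmd_mempool_mutex);
 *   }
 */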
258 struct rte_mempool *mp;
262 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
265 /* There should be one 'struct dpdk_tx_queue' created for
267 struct dpdk_tx_queue {
268 bool flush_tx; /* Set to true to flush queue every time */
269 /* pkts are queued. */
271 rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
272 * from concurrent access. It is used only
273 * if the queue is shared among different
274 * pmd threads (see 'txq_needs_locking'). */
275 int map; /* Mapping of configured vhost-user queues
276 * to queues enabled by the guest. */
278 struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
281 /* dpdk has no way to remove dpdk ring ethernet devices
282 so we have to keep them around once they've been created
285 static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
286 = OVS_LIST_INITIALIZER(&dpdk_ring_list);
289 /* For the client rings */
290 struct rte_ring *cring_tx;
291 struct rte_ring *cring_rx;
292 unsigned int user_port_id; /* User given port no, parsed from port name */
293 int eth_port_id; /* ethernet device port id */
294 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
301 enum dpdk_dev_type type;
303 struct dpdk_tx_queue *tx_q;
305 struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
307 struct dpdk_mp *dpdk_mp;
311 struct netdev_stats stats;
313 rte_spinlock_t stats_lock;
315 struct eth_addr hwaddr;
316 enum netdev_flags flags;
318 struct rte_eth_link link;
321 /* The user might request more txqs than the NIC has. We remap those
322 * ('up.n_txq') onto these ('real_n_txq').
323 * If the numbers match, 'txq_needs_locking' is false, otherwise it is
324 * true and we will take a spinlock on transmission */
327 bool txq_needs_locking;
329 /* virtio-net structure for vhost device */
330 OVSRCU_TYPE(struct virtio_net *) virtio_dev;
332 /* Identifier used to distinguish vhost devices from each other */
333 char vhost_id[PATH_MAX];
336 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
338 /* QoS configuration and lock for the device */
339 struct qos_conf *qos_conf;
340 rte_spinlock_t qos_lock;
344 struct netdev_rxq_dpdk {
345 struct netdev_rxq up;
349 static bool dpdk_thread_is_pmd(void);
351 static int netdev_dpdk_construct(struct netdev *);
353 struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
356 is_dpdk_class(const struct netdev_class *class)
358 return class->construct == netdev_dpdk_construct;
361 /* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
362 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
363 * value, insufficient buffers are allocated to accommodate the packet in its
364 * entirety. Furthermore, certain drivers need to ensure that there is also
365 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
366 * frames). If the RX buffer is too small, then the driver enables scatter RX
367 * behaviour, which reduces performance. To prevent this, use a buffer size that
368 * is closest to 'mtu', but which satisfies the aforementioned criteria.
371 dpdk_buf_size(int mtu)
373 return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
374 NETDEV_DPDK_MBUF_ALIGN);
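/* Editor's example (assuming the default 128-byte RTE_PKTMBUF_HEADROOM):
 * dpdk_buf_size(1500) == 2048, so netdev_dpdk_init() below sizes its mempool
 * for FRAME_LEN_TO_MTU(2048) == 2030 rather than for the raw MTU. */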
377 /* XXX: use dpdk malloc for entire OVS. In fact, huge pages should be used
378 * for all the other segments: data, bss and text. */
381 dpdk_rte_mzalloc(size_t sz)
385 ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
392 /* XXX this function should be called only by pmd threads (or by non pmd
393 * threads holding the nonpmd_mempool_mutex) */
395 free_dpdk_buf(struct dp_packet *p)
397 struct rte_mbuf *pkt = (struct rte_mbuf *) p;
399 rte_pktmbuf_free_seg(pkt);
403 ovs_rte_pktmbuf_init(struct rte_mempool *mp,
404 void *opaque_arg OVS_UNUSED,
406 unsigned i OVS_UNUSED)
408 struct rte_mbuf *m = _m;
410 rte_pktmbuf_init(mp, opaque_arg, _m, i);
412 dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
415 static struct dpdk_mp *
416 dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
418 struct dpdk_mp *dmp = NULL;
419 char mp_name[RTE_MEMPOOL_NAMESIZE];
421 struct rte_pktmbuf_pool_private mbp_priv;
423 LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
424 if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
430 dmp = dpdk_rte_mzalloc(sizeof *dmp);
431 dmp->socket_id = socket_id;
434 mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
435 mbp_priv.mbuf_priv_size = sizeof (struct dp_packet) - sizeof (struct rte_mbuf);
437 mp_size = MAX_NB_MBUF;
439 if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
440 dmp->mtu, dmp->socket_id, mp_size) < 0) {
444 dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
446 sizeof(struct rte_pktmbuf_pool_private),
447 rte_pktmbuf_pool_init, &mbp_priv,
448 ovs_rte_pktmbuf_init, NULL,
450 } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
452 if (dmp->mp == NULL) {
455 VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
458 list_push_back(&dpdk_mp_list, &dmp->list_node);
463 dpdk_mp_put(struct dpdk_mp *dmp)
471 ovs_assert(dmp->refcount >= 0);
474 /* I could not find any API to destroy mp. */
475 if (dmp->refcount == 0) {
476 list_delete(&dmp->list_node);
477 /* destroy mp-pool. */
483 check_link_status(struct netdev_dpdk *dev)
485 struct rte_eth_link link;
487 rte_eth_link_get_nowait(dev->port_id, &link);
489 if (dev->link.link_status != link.link_status) {
490 netdev_change_seq_changed(&dev->up);
492 dev->link_reset_cnt++;
494 if (dev->link.link_status) {
495 VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
496 dev->port_id, (unsigned)dev->link.link_speed,
497 (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
498 ("full-duplex") : ("half-duplex"));
500 VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
506 dpdk_watchdog(void *dummy OVS_UNUSED)
508 struct netdev_dpdk *dev;
510 pthread_detach(pthread_self());
513 ovs_mutex_lock(&dpdk_mutex);
514 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
515 ovs_mutex_lock(&dev->mutex);
516 check_link_status(dev);
517 ovs_mutex_unlock(&dev->mutex);
519 ovs_mutex_unlock(&dpdk_mutex);
520 xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
527 dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
532 /* A device may report more queues than it makes available (this has
533 * been observed for Intel xl710, which reserves some of them for
534 * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
535 * available. When this happens we can retry the configuration
536 * and request fewer queues. */
537 while (n_rxq && n_txq) {
539 VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
542 diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
547 for (i = 0; i < n_txq; i++) {
548 diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
549 dev->socket_id, NULL);
551 VLOG_INFO("Interface %s txq(%d) setup error: %s",
552 dev->up.name, i, rte_strerror(-diag));
558 /* Retry with fewer tx queues */
563 for (i = 0; i < n_rxq; i++) {
564 diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
565 dev->socket_id, NULL,
568 VLOG_INFO("Interface %s rxq(%d) setup error: %s",
569 dev->up.name, i, rte_strerror(-diag));
575 /* Retry with fewer rx queues */
580 dev->up.n_rxq = n_rxq;
581 dev->real_n_txq = n_txq;
591 dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
593 struct rte_pktmbuf_pool_private *mbp_priv;
594 struct rte_eth_dev_info info;
595 struct ether_addr eth_addr;
599 if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
603 rte_eth_dev_info_get(dev->port_id, &info);
605 n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
606 n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
608 diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
610 VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
611 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
615 diag = rte_eth_dev_start(dev->port_id);
617 VLOG_ERR("Interface %s start error: %s", dev->up.name,
618 rte_strerror(-diag));
622 rte_eth_promiscuous_enable(dev->port_id);
623 rte_eth_allmulticast_enable(dev->port_id);
625 memset(&eth_addr, 0x0, sizeof(eth_addr));
626 rte_eth_macaddr_get(dev->port_id, &eth_addr);
627 VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
628 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));
630 memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
631 rte_eth_link_get_nowait(dev->port_id, &dev->link);
633 mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
634 dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
636 dev->flags = NETDEV_UP | NETDEV_PROMISC;
640 static struct netdev_dpdk *
641 netdev_dpdk_cast(const struct netdev *netdev)
643 return CONTAINER_OF(netdev, struct netdev_dpdk, up);
646 static struct netdev *
647 netdev_dpdk_alloc(void)
649 struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
654 netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
658 netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
659 for (i = 0; i < n_txqs; i++) {
660 int numa_id = ovs_numa_get_numa_id(i);
662 if (!netdev->txq_needs_locking) {
663 /* Each index is considered as a cpu core id, since there should
664 * be one tx queue for each cpu core. If the corresponding core
665 * is on the same numa node as 'netdev', flag 'flush_tx'. */
667 netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
669 /* Queues are shared among CPUs. Always flush */
670 netdev->tx_q[i].flush_tx = true;
673 /* Initialize map for vhost devices. */
674 netdev->tx_q[i].map = -1;
675 rte_spinlock_init(&netdev->tx_q[i].tx_lock);
680 netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
681 enum dpdk_dev_type type)
682 OVS_REQUIRES(dpdk_mutex)
684 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
689 ovs_mutex_init(&netdev->mutex);
690 ovs_mutex_lock(&netdev->mutex);
692 rte_spinlock_init(&netdev->stats_lock);
694 /* If the 'sid' is negative, it means that the kernel failed
695 * to obtain the pci numa info. In that situation, always
696 * use 'SOCKET0'. */
697 if (type == DPDK_DEV_ETH) {
698 sid = rte_eth_dev_socket_id(port_no);
700 sid = rte_lcore_to_socket_id(rte_get_master_lcore());
703 netdev->socket_id = sid < 0 ? SOCKET0 : sid;
704 netdev->port_id = port_no;
707 netdev->mtu = ETHER_MTU;
708 netdev->max_packet_len = MTU_TO_FRAME_LEN(netdev->mtu);
710 buf_size = dpdk_buf_size(netdev->mtu);
711 netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, FRAME_LEN_TO_MTU(buf_size));
712 if (!netdev->dpdk_mp) {
717 /* Initialise QoS configuration to NULL and qos lock to unlocked */
718 netdev->qos_conf = NULL;
719 rte_spinlock_init(&netdev->qos_lock);
721 netdev_->n_txq = NR_QUEUE;
722 netdev_->n_rxq = NR_QUEUE;
723 netdev_->requested_n_rxq = NR_QUEUE;
724 netdev->real_n_txq = NR_QUEUE;
726 if (type == DPDK_DEV_ETH) {
727 netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
728 err = dpdk_eth_dev_init(netdev);
733 netdev_dpdk_alloc_txq(netdev, OVS_VHOST_MAX_QUEUE_NUM);
736 list_push_back(&dpdk_list, &netdev->list_node);
740 rte_free(netdev->tx_q);
742 ovs_mutex_unlock(&netdev->mutex);
746 /* dev_name must be the prefix followed by a positive decimal number.
747 * (no leading + or - signs are allowed) */
749 dpdk_dev_parse_name(const char dev_name[], const char prefix[],
750 unsigned int *port_no)
754 if (strncmp(dev_name, prefix, strlen(prefix))) {
758 cport = dev_name + strlen(prefix);
760 if (str_to_uint(cport, 10, port_no)) {
768 vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
770 if (rte_eal_init_ret) {
771 return rte_eal_init_ret;
774 return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
778 netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
780 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
783 ovs_mutex_lock(&dpdk_mutex);
784 strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
785 err = vhost_construct_helper(netdev_);
786 ovs_mutex_unlock(&dpdk_mutex);
791 netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
793 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
794 const char *name = netdev_->name;
797 /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
798 * the file system. '/' or '\' would traverse directories, so they're not
799 * acceptable in 'name'. */
800 if (strchr(name, '/') || strchr(name, '\\')) {
801 VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
802 "A valid name must not include '/' or '\\'",
807 ovs_mutex_lock(&dpdk_mutex);
808 /* Take the name of the vhost-user port and append it to the location where
809 * the socket is to be created, then register the socket.
811 snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
812 vhost_sock_dir, name);
814 err = rte_vhost_driver_register(netdev->vhost_id);
816 VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
819 fatal_signal_add_file_to_unlink(netdev->vhost_id);
820 VLOG_INFO("Socket %s created for vhost-user port %s\n",
821 netdev->vhost_id, name);
822 err = vhost_construct_helper(netdev_);
825 ovs_mutex_unlock(&dpdk_mutex);
830 netdev_dpdk_construct(struct netdev *netdev)
832 unsigned int port_no;
835 if (rte_eal_init_ret) {
836 return rte_eal_init_ret;
839 /* Names always start with "dpdk" */
840 err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
845 ovs_mutex_lock(&dpdk_mutex);
846 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
847 ovs_mutex_unlock(&dpdk_mutex);
852 netdev_dpdk_destruct(struct netdev *netdev_)
854 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
856 ovs_mutex_lock(&dev->mutex);
857 rte_eth_dev_stop(dev->port_id);
858 ovs_mutex_unlock(&dev->mutex);
860 ovs_mutex_lock(&dpdk_mutex);
862 list_remove(&dev->list_node);
863 dpdk_mp_put(dev->dpdk_mp);
864 ovs_mutex_unlock(&dpdk_mutex);
868 netdev_dpdk_vhost_destruct(struct netdev *netdev_)
870 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
872 /* Can't remove a port while a guest is attached to it. */
873 if (netdev_dpdk_get_virtio(dev) != NULL) {
874 VLOG_ERR("Cannot remove port, vhost device still attached");
878 if (rte_vhost_driver_unregister(dev->vhost_id)) {
879 VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
881 fatal_signal_remove_file_to_unlink(dev->vhost_id);
884 ovs_mutex_lock(&dpdk_mutex);
886 list_remove(&dev->list_node);
887 dpdk_mp_put(dev->dpdk_mp);
888 ovs_mutex_unlock(&dpdk_mutex);
892 netdev_dpdk_dealloc(struct netdev *netdev_)
894 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
900 netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
902 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
904 ovs_mutex_lock(&dev->mutex);
906 smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
907 smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
908 smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
909 smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
910 ovs_mutex_unlock(&dev->mutex);
916 netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
918 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
920 ovs_mutex_lock(&dev->mutex);
921 netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
922 netdev->requested_n_rxq), 1);
923 netdev_change_seq_changed(netdev);
924 ovs_mutex_unlock(&dev->mutex);
930 netdev_dpdk_get_numa_id(const struct netdev *netdev_)
932 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
934 return netdev->socket_id;
937 /* Sets the number of tx queues and rx queues for the dpdk interface.
938 * If the configuration fails, do not try to restore the old configuration;
939 * just return the error. */
941 netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
944 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
946 int old_rxq, old_txq;
948 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
952 ovs_mutex_lock(&dpdk_mutex);
953 ovs_mutex_lock(&netdev->mutex);
955 rte_eth_dev_stop(netdev->port_id);
957 old_txq = netdev->up.n_txq;
958 old_rxq = netdev->up.n_rxq;
959 netdev->up.n_txq = n_txq;
960 netdev->up.n_rxq = n_rxq;
962 rte_free(netdev->tx_q);
963 err = dpdk_eth_dev_init(netdev);
964 netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
966 /* If there has been an error, it means that the requested queues
967 * have not been created. Restore the old numbers. */
968 netdev->up.n_txq = old_txq;
969 netdev->up.n_rxq = old_rxq;
972 netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;
974 ovs_mutex_unlock(&netdev->mutex);
975 ovs_mutex_unlock(&dpdk_mutex);
981 netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev_, unsigned int n_txq,
984 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
987 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
991 ovs_mutex_lock(&dpdk_mutex);
992 ovs_mutex_lock(&netdev->mutex);
994 netdev->up.n_txq = n_txq;
995 netdev->real_n_txq = 1;
996 netdev->up.n_rxq = 1;
997 netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;
999 ovs_mutex_unlock(&netdev->mutex);
1000 ovs_mutex_unlock(&dpdk_mutex);
1006 netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
1009 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
1012 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
1016 ovs_mutex_lock(&dpdk_mutex);
1017 ovs_mutex_lock(&netdev->mutex);
1019 netdev->up.n_txq = n_txq;
1020 netdev->up.n_rxq = n_rxq;
1022 ovs_mutex_unlock(&netdev->mutex);
1023 ovs_mutex_unlock(&dpdk_mutex);
1028 static struct netdev_rxq *
1029 netdev_dpdk_rxq_alloc(void)
1031 struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);
1036 static struct netdev_rxq_dpdk *
1037 netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
1039 return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
1043 netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
1045 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
1046 struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);
1048 ovs_mutex_lock(&netdev->mutex);
1049 rx->port_id = netdev->port_id;
1050 ovs_mutex_unlock(&netdev->mutex);
1056 netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
1061 netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
1063 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
1069 dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
1071 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
1074 while (nb_tx != txq->count) {
1077 ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
1078 txq->count - nb_tx);
1086 if (OVS_UNLIKELY(nb_tx != txq->count)) {
1087 /* Free buffers that we couldn't transmit, one at a time (each
1088 * packet could come from a different mempool). */
1091 for (i = nb_tx; i < txq->count; i++) {
1092 rte_pktmbuf_free_seg(txq->burst_pkts[i]);
1094 rte_spinlock_lock(&dev->stats_lock);
1095 dev->stats.tx_dropped += txq->count - nb_tx;
1096 rte_spinlock_unlock(&dev->stats_lock);
1100 txq->tsc = rte_get_timer_cycles();
1104 dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
1106 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
1108 if (txq->count == 0) {
1111 dpdk_queue_flush__(dev, qid);
1115 is_vhost_running(struct virtio_net *dev)
1117 return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
1121 netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
1122 struct dp_packet **packets, int count)
1125 struct dp_packet *packet;
1127 stats->rx_packets += count;
1128 for (i = 0; i < count; i++) {
1129 packet = packets[i];
1131 if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
1132 /* This only protects the following multicast counting from
1133 * too short packets, but it does not stop the packet from
1134 * further processing. */
1136 stats->rx_length_errors++;
1140 struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
1141 if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
1145 stats->rx_bytes += dp_packet_size(packet);
1150 * The receive path for the vhost port is the TX path out from the guest.
1153 netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
1154 struct dp_packet **packets, int *c)
1156 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
1157 struct netdev *netdev = rx->up.netdev;
1158 struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
1159 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
1160 int qid = rxq_->queue_id;
1163 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
1167 if (rxq_->queue_id >= vhost_dev->real_n_rxq) {
1171 nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
1172 vhost_dev->dpdk_mp->mp,
1173 (struct rte_mbuf **)packets,
1179 rte_spinlock_lock(&vhost_dev->stats_lock);
1180 netdev_dpdk_vhost_update_rx_counters(&vhost_dev->stats, packets, nb_rx);
1181 rte_spinlock_unlock(&vhost_dev->stats_lock);
1188 netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
1191 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
1192 struct netdev *netdev = rx->up.netdev;
1193 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1196 /* There is only one tx queue for this core. Do not flush other
1197 * queues.
1198 * Do not flush the tx queue that is shared among CPUs,
1199 * since it is always flushed. */
1200 if (rxq_->queue_id == rte_lcore_id() &&
1201 OVS_LIKELY(!dev->txq_needs_locking)) {
1202 dpdk_queue_flush(dev, rxq_->queue_id);
1205 nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
1206 (struct rte_mbuf **) packets,
1218 netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
1221 struct netdev *netdev = &dev->up;
1223 if (dev->qos_conf != NULL) {
1224 rte_spinlock_lock(&dev->qos_lock);
1225 if (dev->qos_conf != NULL) {
1226 cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
1228 rte_spinlock_unlock(&dev->qos_lock);
1235 netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
1236 struct dp_packet **packets,
1241 int sent = attempted - dropped;
1243 stats->tx_packets += sent;
1244 stats->tx_dropped += dropped;
1246 for (i = 0; i < sent; i++) {
1247 stats->tx_bytes += dp_packet_size(packets[i]);
1252 __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
1253 struct dp_packet **pkts, int cnt,
1256 struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
1257 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
1258 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
1259 unsigned int total_pkts = cnt;
1260 unsigned int qos_pkts = cnt;
1263 qid = vhost_dev->tx_q[qid % vhost_dev->real_n_txq].map;
1265 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid == -1)) {
1266 rte_spinlock_lock(&vhost_dev->stats_lock);
1267 vhost_dev->stats.tx_dropped += cnt;
1268 rte_spinlock_unlock(&vhost_dev->stats_lock);
1272 rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);
1274 /* Check whether QoS has been configured for the netdev. */
1275 cnt = netdev_dpdk_qos_run__(vhost_dev, cur_pkts, cnt);
1279 int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
1280 unsigned int tx_pkts;
1282 tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
1284 if (OVS_LIKELY(tx_pkts)) {
1285 /* Packets have been sent. */
1287 /* Prepare for possible next iteration.*/
1288 cur_pkts = &cur_pkts[tx_pkts];
1290 uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
1291 unsigned int expired = 0;
1294 start = rte_get_timer_cycles();
1298 * Unable to enqueue packets to vhost interface.
1299 * Check available entries before retrying.
1301 while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
1302 if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
1308 /* break out of main loop. */
1314 rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);
1316 rte_spinlock_lock(&vhost_dev->stats_lock);
1318 netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
1320 rte_spinlock_unlock(&vhost_dev->stats_lock);
1326 for (i = 0; i < total_pkts; i++) {
1327 dp_packet_delete(pkts[i]);
1333 dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
1334 struct rte_mbuf **pkts, int cnt)
1336 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
1342 int freeslots = MAX_TX_QUEUE_LEN - txq->count;
1343 int tocopy = MIN(freeslots, cnt - i);
1345 memcpy(&txq->burst_pkts[txq->count], &pkts[i],
1346 tocopy * sizeof (struct rte_mbuf *));
1348 txq->count += tocopy;
1351 if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
1352 dpdk_queue_flush__(dev, qid);
1354 diff_tsc = rte_get_timer_cycles() - txq->tsc;
1355 if (diff_tsc >= DRAIN_TSC) {
1356 dpdk_queue_flush__(dev, qid);
1361 /* Tx function. Transmit packets indefinitely */
1363 dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
1365 OVS_NO_THREAD_SAFETY_ANALYSIS
1367 #if !defined(__CHECKER__) && !defined(_WIN32)
1368 const size_t PKT_ARRAY_SIZE = cnt;
1370 /* Sparse or MSVC doesn't like variable length array. */
1371 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
1373 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1374 struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
1379 /* If we are on a non pmd thread we have to use the mempool mutex, because
1380 * every non pmd thread shares the same mempool cache */
1382 if (!dpdk_thread_is_pmd()) {
1383 ovs_mutex_lock(&nonpmd_mempool_mutex);
1386 for (i = 0; i < cnt; i++) {
1387 int size = dp_packet_size(pkts[i]);
1389 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1390 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1391 (int)size, dev->max_packet_len);
1397 mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
1399 if (!mbufs[newcnt]) {
1404 /* We have to do a copy for now */
1405 memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]), size);
1407 rte_pktmbuf_data_len(mbufs[newcnt]) = size;
1408 rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;
1413 if (dev->type == DPDK_DEV_VHOST) {
1414 __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs, newcnt, true);
1416 unsigned int qos_pkts = newcnt;
1418 /* Check if QoS has been configured for this netdev. */
1419 newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);
1421 dropped += qos_pkts - newcnt;
1422 dpdk_queue_pkts(dev, qid, mbufs, newcnt);
1423 dpdk_queue_flush(dev, qid);
1426 if (OVS_UNLIKELY(dropped)) {
1427 rte_spinlock_lock(&dev->stats_lock);
1428 dev->stats.tx_dropped += dropped;
1429 rte_spinlock_unlock(&dev->stats_lock);
1432 if (!dpdk_thread_is_pmd()) {
1433 ovs_mutex_unlock(&nonpmd_mempool_mutex);
1438 netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
1439 int cnt, bool may_steal)
1441 if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
1444 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
1446 for (i = 0; i < cnt; i++) {
1447 dp_packet_delete(pkts[i]);
1451 __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
1457 netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
1458 struct dp_packet **pkts, int cnt, bool may_steal)
1462 if (OVS_UNLIKELY(dev->txq_needs_locking)) {
1463 qid = qid % dev->real_n_txq;
1464 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
1467 if (OVS_UNLIKELY(!may_steal ||
1468 pkts[0]->source != DPBUF_DPDK)) {
1469 struct netdev *netdev = &dev->up;
1471 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
1474 for (i = 0; i < cnt; i++) {
1475 dp_packet_delete(pkts[i]);
1479 int next_tx_idx = 0;
1481 unsigned int qos_pkts = 0;
1482 unsigned int temp_cnt = 0;
1484 for (i = 0; i < cnt; i++) {
1485 int size = dp_packet_size(pkts[i]);
1487 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1488 if (next_tx_idx != i) {
1489 temp_cnt = i - next_tx_idx;
1490 qos_pkts = temp_cnt;
1492 temp_cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts,
1494 dropped += qos_pkts - temp_cnt;
1495 dpdk_queue_pkts(dev, qid,
1496 (struct rte_mbuf **)&pkts[next_tx_idx],
1501 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1502 (int)size, dev->max_packet_len);
1504 dp_packet_delete(pkts[i]);
1506 next_tx_idx = i + 1;
1509 if (next_tx_idx != cnt) {
1513 cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts, cnt);
1514 dropped += qos_pkts - cnt;
1515 dpdk_queue_pkts(dev, qid, (struct rte_mbuf **)&pkts[next_tx_idx],
1519 if (OVS_UNLIKELY(dropped)) {
1520 rte_spinlock_lock(&dev->stats_lock);
1521 dev->stats.tx_dropped += dropped;
1522 rte_spinlock_unlock(&dev->stats_lock);
1526 if (OVS_UNLIKELY(dev->txq_needs_locking)) {
1527 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
1532 netdev_dpdk_eth_send(struct netdev *netdev, int qid,
1533 struct dp_packet **pkts, int cnt, bool may_steal)
1535 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1537 netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
1542 netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1544 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1546 ovs_mutex_lock(&dev->mutex);
1547 if (!eth_addr_equals(dev->hwaddr, mac)) {
1549 netdev_change_seq_changed(netdev);
1551 ovs_mutex_unlock(&dev->mutex);
1557 netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1559 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1561 ovs_mutex_lock(&dev->mutex);
1563 ovs_mutex_unlock(&dev->mutex);
1569 netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
1571 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1573 ovs_mutex_lock(&dev->mutex);
1575 ovs_mutex_unlock(&dev->mutex);
1581 netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
1583 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1584 int old_mtu, err, dpdk_mtu;
1585 struct dpdk_mp *old_mp;
1589 ovs_mutex_lock(&dpdk_mutex);
1590 ovs_mutex_lock(&dev->mutex);
1591 if (dev->mtu == mtu) {
1596 buf_size = dpdk_buf_size(mtu);
1597 dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);
1599 mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
1605 rte_eth_dev_stop(dev->port_id);
1608 old_mp = dev->dpdk_mp;
1611 dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
1613 err = dpdk_eth_dev_init(dev);
1617 dev->dpdk_mp = old_mp;
1618 dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
1619 dpdk_eth_dev_init(dev);
1623 dpdk_mp_put(old_mp);
1624 netdev_change_seq_changed(netdev);
1626 ovs_mutex_unlock(&dev->mutex);
1627 ovs_mutex_unlock(&dpdk_mutex);
1632 netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
1635 netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
1636 struct netdev_stats *stats)
1638 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1640 ovs_mutex_lock(&dev->mutex);
1641 memset(stats, 0, sizeof(*stats));
1642 /* Unsupported Stats */
1643 stats->collisions = UINT64_MAX;
1644 stats->rx_crc_errors = UINT64_MAX;
1645 stats->rx_fifo_errors = UINT64_MAX;
1646 stats->rx_frame_errors = UINT64_MAX;
1647 stats->rx_missed_errors = UINT64_MAX;
1648 stats->rx_over_errors = UINT64_MAX;
1649 stats->tx_aborted_errors = UINT64_MAX;
1650 stats->tx_carrier_errors = UINT64_MAX;
1651 stats->tx_errors = UINT64_MAX;
1652 stats->tx_fifo_errors = UINT64_MAX;
1653 stats->tx_heartbeat_errors = UINT64_MAX;
1654 stats->tx_window_errors = UINT64_MAX;
1655 stats->rx_dropped = UINT64_MAX;
1657 rte_spinlock_lock(&dev->stats_lock);
1658 /* Supported Stats */
1659 stats->rx_packets += dev->stats.rx_packets;
1660 stats->tx_packets += dev->stats.tx_packets;
1661 stats->tx_dropped += dev->stats.tx_dropped;
1662 stats->multicast = dev->stats.multicast;
1663 stats->rx_bytes = dev->stats.rx_bytes;
1664 stats->tx_bytes = dev->stats.tx_bytes;
1665 stats->rx_errors = dev->stats.rx_errors;
1666 stats->rx_length_errors = dev->stats.rx_length_errors;
1667 rte_spinlock_unlock(&dev->stats_lock);
1669 ovs_mutex_unlock(&dev->mutex);
1675 netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1677 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1678 struct rte_eth_stats rte_stats;
1681 netdev_dpdk_get_carrier(netdev, &gg);
1682 ovs_mutex_lock(&dev->mutex);
1683 rte_eth_stats_get(dev->port_id, &rte_stats);
1685 memset(stats, 0, sizeof(*stats));
1687 stats->rx_packets = rte_stats.ipackets;
1688 stats->tx_packets = rte_stats.opackets;
1689 stats->rx_bytes = rte_stats.ibytes;
1690 stats->tx_bytes = rte_stats.obytes;
1691 /* DPDK counts imissed as errors, but we count them here as dropped instead. */
1692 stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
1693 stats->tx_errors = rte_stats.oerrors;
1694 stats->multicast = rte_stats.imcasts;
1696 rte_spinlock_lock(&dev->stats_lock);
1697 stats->tx_dropped = dev->stats.tx_dropped;
1698 rte_spinlock_unlock(&dev->stats_lock);
1700 /* These are the available DPDK counters for packets not received due to
1701 * local resource constraints in DPDK and NIC respectively. */
1702 stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
1703 stats->collisions = UINT64_MAX;
1705 stats->rx_length_errors = UINT64_MAX;
1706 stats->rx_over_errors = UINT64_MAX;
1707 stats->rx_crc_errors = UINT64_MAX;
1708 stats->rx_frame_errors = UINT64_MAX;
1709 stats->rx_fifo_errors = UINT64_MAX;
1710 stats->rx_missed_errors = rte_stats.imissed;
1712 stats->tx_aborted_errors = UINT64_MAX;
1713 stats->tx_carrier_errors = UINT64_MAX;
1714 stats->tx_fifo_errors = UINT64_MAX;
1715 stats->tx_heartbeat_errors = UINT64_MAX;
1716 stats->tx_window_errors = UINT64_MAX;
1718 ovs_mutex_unlock(&dev->mutex);
1724 netdev_dpdk_get_features(const struct netdev *netdev_,
1725 enum netdev_features *current,
1726 enum netdev_features *advertised OVS_UNUSED,
1727 enum netdev_features *supported OVS_UNUSED,
1728 enum netdev_features *peer OVS_UNUSED)
1730 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1731 struct rte_eth_link link;
1733 ovs_mutex_lock(&dev->mutex);
1735 ovs_mutex_unlock(&dev->mutex);
1737 if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
1738 if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
1739 *current = NETDEV_F_AUTONEG;
1741 } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
1742 if (link.link_speed == ETH_LINK_SPEED_10) {
1743 *current = NETDEV_F_10MB_HD;
1745 if (link.link_speed == ETH_LINK_SPEED_100) {
1746 *current = NETDEV_F_100MB_HD;
1748 if (link.link_speed == ETH_LINK_SPEED_1000) {
1749 *current = NETDEV_F_1GB_HD;
1751 } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
1752 if (link.link_speed == ETH_LINK_SPEED_10) {
1753 *current = NETDEV_F_10MB_FD;
1755 if (link.link_speed == ETH_LINK_SPEED_100) {
1756 *current = NETDEV_F_100MB_FD;
1758 if (link.link_speed == ETH_LINK_SPEED_1000) {
1759 *current = NETDEV_F_1GB_FD;
1761 if (link.link_speed == ETH_LINK_SPEED_10000) {
1762 *current = NETDEV_F_10GB_FD;
1770 netdev_dpdk_get_ifindex(const struct netdev *netdev)
1772 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1775 ovs_mutex_lock(&dev->mutex);
1776 ifindex = dev->port_id;
1777 ovs_mutex_unlock(&dev->mutex);
1783 netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
1785 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1787 ovs_mutex_lock(&dev->mutex);
1788 check_link_status(dev);
1789 *carrier = dev->link.link_status;
1791 ovs_mutex_unlock(&dev->mutex);
1797 netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
1799 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1800 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
1802 ovs_mutex_lock(&dev->mutex);
1804 if (is_vhost_running(virtio_dev)) {
1810 ovs_mutex_unlock(&dev->mutex);
1815 static long long int
1816 netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
1818 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1819 long long int carrier_resets;
1821 ovs_mutex_lock(&dev->mutex);
1822 carrier_resets = dev->link_reset_cnt;
1823 ovs_mutex_unlock(&dev->mutex);
1825 return carrier_resets;
1829 netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
1830 long long int interval OVS_UNUSED)
1836 netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
1837 enum netdev_flags off, enum netdev_flags on,
1838 enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
1842 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1846 *old_flagsp = dev->flags;
1850 if (dev->flags == *old_flagsp) {
1854 if (dev->type == DPDK_DEV_ETH) {
1855 if (dev->flags & NETDEV_UP) {
1856 err = rte_eth_dev_start(dev->port_id);
1861 if (dev->flags & NETDEV_PROMISC) {
1862 rte_eth_promiscuous_enable(dev->port_id);
1865 if (!(dev->flags & NETDEV_UP)) {
1866 rte_eth_dev_stop(dev->port_id);
1874 netdev_dpdk_update_flags(struct netdev *netdev_,
1875 enum netdev_flags off, enum netdev_flags on,
1876 enum netdev_flags *old_flagsp)
1878 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
1881 ovs_mutex_lock(&netdev->mutex);
1882 error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
1883 ovs_mutex_unlock(&netdev->mutex);
1889 netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
1891 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1892 struct rte_eth_dev_info dev_info;
1894 if (dev->port_id < 0)
1897 ovs_mutex_lock(&dev->mutex);
1898 rte_eth_dev_info_get(dev->port_id, &dev_info);
1899 ovs_mutex_unlock(&dev->mutex);
1903 smap_add_format(args, "port_no", "%d", dev->port_id);
1904 smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
1905 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
1906 smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
1907 smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
1908 smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
1909 smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
1910 smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
1911 smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
1912 smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
1913 smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);
1915 if (dev_info.pci_dev) {
1916 smap_add_format(args, "pci-vendor_id", "0x%x",
1917 dev_info.pci_dev->id.vendor_id);
1918 smap_add_format(args, "pci-device_id", "0x%x",
1919 dev_info.pci_dev->id.device_id);
1926 netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
1927 OVS_REQUIRES(dev->mutex)
1929 enum netdev_flags old_flags;
1932 netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1934 netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1939 netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
1940 const char *argv[], void *aux OVS_UNUSED)
1944 if (!strcasecmp(argv[argc - 1], "up")) {
1946 } else if (!strcasecmp(argv[argc - 1], "down")) {
1949 unixctl_command_reply_error(conn, "Invalid Admin State");
1954 struct netdev *netdev = netdev_from_name(argv[1]);
1955 if (netdev && is_dpdk_class(netdev->netdev_class)) {
1956 struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);
1958 ovs_mutex_lock(&dpdk_dev->mutex);
1959 netdev_dpdk_set_admin_state__(dpdk_dev, up);
1960 ovs_mutex_unlock(&dpdk_dev->mutex);
1962 netdev_close(netdev);
1964 unixctl_command_reply_error(conn, "Not a DPDK Interface");
1965 netdev_close(netdev);
1969 struct netdev_dpdk *netdev;
1971 ovs_mutex_lock(&dpdk_mutex);
1972 LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
1973 ovs_mutex_lock(&netdev->mutex);
1974 netdev_dpdk_set_admin_state__(netdev, up);
1975 ovs_mutex_unlock(&netdev->mutex);
1977 ovs_mutex_unlock(&dpdk_mutex);
1979 unixctl_command_reply(conn, "OK");
1983 * Set virtqueue flags so that we do not receive interrupts.
1986 set_irq_status(struct virtio_net *dev)
1991 for (i = 0; i < dev->virt_qp_nb; i++) {
1992 idx = i * VIRTIO_QNUM;
1993 rte_vhost_enable_guest_notification(dev, idx + VIRTIO_RXQ, 0);
1994 rte_vhost_enable_guest_notification(dev, idx + VIRTIO_TXQ, 0);
1999 * Fixes mapping for vhost-user tx queues. Must be called after each
2000 * enabling/disabling of queues and real_n_txq modifications.
2003 netdev_dpdk_remap_txqs(struct netdev_dpdk *netdev)
2004 OVS_REQUIRES(netdev->mutex)
2006 int *enabled_queues, n_enabled = 0;
2007 int i, k, total_txqs = netdev->real_n_txq;
2009 enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);
2011 for (i = 0; i < total_txqs; i++) {
2012 /* Enabled queues are always mapped to themselves. */
2013 if (netdev->tx_q[i].map == i) {
2014 enabled_queues[n_enabled++] = i;
2018 if (n_enabled == 0 && total_txqs != 0) {
2019 enabled_queues[0] = -1;
2024 for (i = 0; i < total_txqs; i++) {
2025 if (netdev->tx_q[i].map != i) {
2026 netdev->tx_q[i].map = enabled_queues[k];
2027 k = (k + 1) % n_enabled;
2031 VLOG_DBG("TX queue mapping for %s\n", netdev->vhost_id);
2032 for (i = 0; i < total_txqs; i++) {
2033 VLOG_DBG("%2d --> %2d", i, netdev->tx_q[i].map);
2036 rte_free(enabled_queues);
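/* Editor's worked example (not in the original source): with real_n_txq == 4
 * and only queues 0 and 2 enabled by the guest, the loops above produce
 *
 *   tx_q[0..3].map == { 0, 0, 2, 2 }
 *
 * so sends aimed at the disabled queues 1 and 3 are redirected to the
 * enabled ones. */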
2040 netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev, struct virtio_net *dev)
2041 OVS_REQUIRES(netdev->mutex)
2045 qp_num = dev->virt_qp_nb;
2046 if (qp_num > netdev->up.n_rxq) {
2047 VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
2048 "too many queues %d > %d", dev->ifname, dev->device_fh,
2049 qp_num, netdev->up.n_rxq);
2053 netdev->real_n_rxq = qp_num;
2054 netdev->real_n_txq = qp_num;
2055 netdev->txq_needs_locking = true;
2057 netdev_dpdk_remap_txqs(netdev);
2063 * A new virtio-net device is added to a vhost port.
2066 new_device(struct virtio_net *dev)
2068 struct netdev_dpdk *netdev;
2069 bool exists = false;
2071 ovs_mutex_lock(&dpdk_mutex);
2072 /* Add device to the vhost port with the same name as that passed down. */
2073 LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
2074 if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
2075 ovs_mutex_lock(&netdev->mutex);
2076 if (netdev_dpdk_vhost_set_queues(netdev, dev)) {
2077 ovs_mutex_unlock(&netdev->mutex);
2078 ovs_mutex_unlock(&dpdk_mutex);
2081 ovsrcu_set(&netdev->virtio_dev, dev);
2083 dev->flags |= VIRTIO_DEV_RUNNING;
2084 /* Disable notifications. */
2085 set_irq_status(dev);
2086 ovs_mutex_unlock(&netdev->mutex);
2090 ovs_mutex_unlock(&dpdk_mutex);
2093 VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
2094 "found", dev->ifname, dev->device_fh);
2099 VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", dev->ifname,
2105 * Remove a virtio-net device from the specific vhost port. Use dev->remove
2106 * flag to stop any more packets from being sent or received to/from a VM and
2107 ensure all currently queued packets have been sent/received before removing
2108 the device. */
2111 destroy_device(volatile struct virtio_net *dev)
2113 struct netdev_dpdk *vhost_dev;
2114 bool exists = false;
2116 ovs_mutex_lock(&dpdk_mutex);
2117 LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
2118 if (netdev_dpdk_get_virtio(vhost_dev) == dev) {
2120 ovs_mutex_lock(&vhost_dev->mutex);
2121 dev->flags &= ~VIRTIO_DEV_RUNNING;
2122 ovsrcu_set(&vhost_dev->virtio_dev, NULL);
2124 ovs_mutex_unlock(&vhost_dev->mutex);
2129 ovs_mutex_unlock(&dpdk_mutex);
2131 if (exists) {
2133 * Wait for other threads to quiesce after setting the 'virtio_dev'
2134 * to NULL, before returning.
2136 ovsrcu_synchronize();
2138 * As call to ovsrcu_synchronize() will end the quiescent state,
2139 * put thread back into quiescent state before returning.
2141 ovsrcu_quiesce_start();
2142 VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed", dev->ifname,
2145 VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
2152 vring_state_changed(struct virtio_net *dev, uint16_t queue_id, int enable)
2154 struct netdev_dpdk *vhost_dev;
2155 bool exists = false;
2156 int qid = queue_id / VIRTIO_QNUM;
2158 if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
2162 ovs_mutex_lock(&dpdk_mutex);
2163 LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
2164 if (strncmp(dev->ifname, vhost_dev->vhost_id, IF_NAME_SZ) == 0) {
2165 ovs_mutex_lock(&vhost_dev->mutex);
2167 vhost_dev->tx_q[qid].map = qid;
2169 vhost_dev->tx_q[qid].map = -1;
2171 netdev_dpdk_remap_txqs(vhost_dev);
2173 ovs_mutex_unlock(&vhost_dev->mutex);
2177 ovs_mutex_unlock(&dpdk_mutex);
2180 VLOG_INFO("State of queue %d (tx_qid %d) of vhost device '%s' %"
2181 PRIu64" changed to '%s'", queue_id, qid, dev->ifname,
2182 dev->device_fh, (enable == 1) ? "enabled" : "disabled");
2184 VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
2193 netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
2195 return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
2199 * These callbacks allow virtio-net devices to be added to vhost ports when
2200 * configuration is fully complete.
2202 static const struct virtio_net_device_ops virtio_net_device_ops =
2204 .new_device = new_device,
2205 .destroy_device = destroy_device,
2206 .vring_state_changed = vring_state_changed
2210 start_vhost_loop(void *dummy OVS_UNUSED)
2212 pthread_detach(pthread_self());
2213 /* Put the cuse thread into quiescent state. */
2214 ovsrcu_quiesce_start();
2215 rte_vhost_driver_session_start();
2220 dpdk_vhost_class_init(void)
2222 rte_vhost_driver_callback_register(&virtio_net_device_ops);
2223 ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
2228 dpdk_vhost_cuse_class_init(void)
2233 /* Register CUSE device to handle IOCTLs.
2234 * Unless otherwise specified on the vswitchd command line, cuse_dev_name
2235 * is set to vhost-net.
2237 err = rte_vhost_driver_register(cuse_dev_name);
2240 VLOG_ERR("CUSE device setup failure.");
2244 dpdk_vhost_class_init();
2249 dpdk_vhost_user_class_init(void)
2251 dpdk_vhost_class_init();
2256 dpdk_common_init(void)
2258 unixctl_command_register("netdev-dpdk/set-admin-state",
2259 "[netdev] up|down", 1, 2,
2260 netdev_dpdk_set_admin_state, NULL);
2262 ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
2268 dpdk_ring_create(const char dev_name[], unsigned int port_no,
2269 unsigned int *eth_port_id)
2271 struct dpdk_ring *ivshmem;
2272 char ring_name[RTE_RING_NAMESIZE];
2275 ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
2276 if (ivshmem == NULL) {
2280 /* XXX: Add support for multiqueue ring. */
2281 err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
2286 /* Create single producer tx ring, netdev does explicit locking. */
2287 ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2289 if (ivshmem->cring_tx == NULL) {
2294 err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
2299 /* Create single consumer rx ring, netdev does explicit locking. */
2300 ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2302 if (ivshmem->cring_rx == NULL) {
2307 err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
2308 &ivshmem->cring_tx, 1, SOCKET0);
2315 ivshmem->user_port_id = port_no;
2316 ivshmem->eth_port_id = rte_eth_dev_count() - 1;
2317 list_push_back(&dpdk_ring_list, &ivshmem->list_node);
2319 *eth_port_id = ivshmem->eth_port_id;
2324 dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
2326 struct dpdk_ring *ivshmem;
2327 unsigned int port_no;
2330 /* Names always start with "dpdkr" */
2331 err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
2336 /* look through our list to find the device */
2337 LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
2338 if (ivshmem->user_port_id == port_no) {
2339 VLOG_INFO("Found dpdk ring device %s:", dev_name);
2340 *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
2344 /* Need to create the device rings */
2345 return dpdk_ring_create(dev_name, port_no, eth_port_id);
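/* Editor's example: opening a port named "dpdkr0" parses user port number 0
 * and, on first use, creates the single-producer/single-consumer rings
 * "dpdkr0_tx" and "dpdkr0_rx" via dpdk_ring_create() above. */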
2349 netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
2350 struct dp_packet **pkts, int cnt, bool may_steal)
2352 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
2355 /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that the
2356 * rss hash field is clear. This is because the same mbuf may be modified by
2357 * the consumer of the ring and returned into the datapath without
2358 * recalculating the rss hash field. */
2359 for (i = 0; i < cnt; i++) {
2360 dp_packet_rss_invalidate(pkts[i]);
2363 netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
2368 netdev_dpdk_ring_construct(struct netdev *netdev)
2370 unsigned int port_no = 0;
2373 if (rte_eal_init_ret) {
2374 return rte_eal_init_ret;
2377 ovs_mutex_lock(&dpdk_mutex);
2379 err = dpdk_ring_open(netdev->name, &port_no);
2384 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
2387 ovs_mutex_unlock(&dpdk_mutex);
2394 * Initialize QoS configuration operations.
2397 qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
2403 * Search existing QoS operations in qos_ops and compare each set of
2404 * operations' qos_name to name. Return a dpdk_qos_ops pointer to a match,
2405 * otherwise return NULL. */
2407 static const struct dpdk_qos_ops *
2408 qos_lookup_name(const char *name)
2410 const struct dpdk_qos_ops *const *opsp;
2412 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2413 const struct dpdk_qos_ops *ops = *opsp;
2414 if (!strcmp(name, ops->qos_name)) {
2422 * Call qos_destruct to clean up items associated with the netdev's
2423 * qos_conf. Set the netdev's qos_conf to NULL.
2426 qos_delete_conf(struct netdev *netdev_)
2428 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
2430 rte_spinlock_lock(&netdev->qos_lock);
2431 if (netdev->qos_conf) {
2432 if (netdev->qos_conf->ops->qos_destruct) {
2433 netdev->qos_conf->ops->qos_destruct(netdev_, netdev->qos_conf);
2435 netdev->qos_conf = NULL;
2437 rte_spinlock_unlock(&netdev->qos_lock);
2441 netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
2444 const struct dpdk_qos_ops *const *opsp;
2446 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2447 const struct dpdk_qos_ops *ops = *opsp;
2448 if (ops->qos_construct && ops->qos_name[0] != '\0') {
2449 sset_add(types, ops->qos_name);

static int
netdev_dpdk_get_qos(const struct netdev *netdev_,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error = 0;

    ovs_mutex_lock(&netdev->mutex);
    if (netdev->qos_conf) {
        *typep = netdev->qos_conf->ops->qos_name;
        error = (netdev->qos_conf->ops->qos_get
                 ? netdev->qos_conf->ops->qos_get(netdev_, details) : 0);
    }
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_set_qos(struct netdev *netdev_,
                    const char *type, const struct smap *details)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    const struct dpdk_qos_ops *new_ops = NULL;
    int error = 0;

    /* If 'type' is empty or unsupported then the current QoS configuration
     * for the dpdk-netdev can be destroyed. */
    new_ops = qos_lookup_name(type);

    if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
        qos_delete_conf(netdev_);
        return EOPNOTSUPP;
    }

    ovs_mutex_lock(&netdev->mutex);

    if (netdev->qos_conf) {
        if (new_ops == netdev->qos_conf->ops) {
            error = new_ops->qos_set ? new_ops->qos_set(netdev_, details) : 0;
        } else {
            /* Delete existing QoS configuration. */
            qos_delete_conf(netdev_);
            ovs_assert(netdev->qos_conf == NULL);

            /* Install new QoS configuration. */
            error = new_ops->qos_construct(netdev_, details);
            ovs_assert((error == 0) == (netdev->qos_conf != NULL));
        }
    } else {
        error = new_ops->qos_construct(netdev_, details);
        ovs_assert((error == 0) == (netdev->qos_conf != NULL));
    }

    ovs_mutex_unlock(&netdev->mutex);
    return error;
}
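
/* For illustration (not part of this file's API): an egress policer is
 * typically configured from the command line with something like
 *
 *   ovs-vsctl set port dpdk0 qos=@qos -- --id=@qos create qos \
 *       type=egress-policer other-config:cir=46000000 \
 *       other-config:cbs=2048
 *
 * which reaches this function with type="egress-policer" and 'details'
 * carrying the cir/cbs values from other-config. */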

/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};

static struct egress_policer *
egress_policer_get__(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    return CONTAINER_OF(netdev->qos_conf, struct egress_policer, qos_conf);
}

static int
egress_policer_qos_construct(struct netdev *netdev_,
                             const struct smap *details)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    rte_spinlock_lock(&netdev->qos_lock);
    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    netdev->qos_conf = &policer->qos_conf;
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    rte_spinlock_unlock(&netdev->qos_lock);

    return err;
}

static void
egress_policer_qos_destruct(struct netdev *netdev_ OVS_UNUSED,
                            struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}

static int
egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
{
    struct egress_policer *policer = egress_policer_get__(netdev);

    smap_add_format(details, "cir", "%llu",
                    1ULL * policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%llu",
                    1ULL * policer->app_srtcm_params.cbs);
    return 0;
}

static int
egress_policer_qos_set(struct netdev *netdev_, const struct smap *details)
{
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    policer = egress_policer_get__(netdev_);
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    return err;
}

static inline bool
egress_policer_pkt_handle__(struct rte_meter_srtcm *meter,
                            struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
           e_RTE_METER_GREEN;
}
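
/* Note on the meter semantics: because egress_policer_qos_construct() sets
 * ebs to 0, the srTCM meter effectively degenerates to a single token
 * bucket: a packet is GREEN (and kept) only while the committed bucket,
 * refilled at 'cir' bytes/sec up to a burst of 'cbs' bytes, has enough
 * tokens; everything else is dropped by egress_policer_run() below.  For
 * example, cir=46000000 with cbs=2048 limits egress to roughly 368 Mbit/s
 * with a 2 KB burst allowance. */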

static int
egress_policer_run(struct netdev *netdev_, struct rte_mbuf **pkts,
                   int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct egress_policer *policer = egress_policer_get__(netdev_);
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet: keep it if the meter says GREEN, drop it
         * otherwise. */
        if (egress_policer_pkt_handle__(&policer->egress_meter, pkt,
                                        current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}

static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_set,
};

#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    netdev_dpdk_set_config,                                   \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    netdev_dpdk_get_qos_types,                                \
    NULL,                       /* get_qos_capabilities */    \
    netdev_dpdk_get_qos,                                      \
    netdev_dpdk_set_qos,                                      \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}

static int
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
{
    int changed = 0;

    /* Depending on which version of vhost is in use, process the
     * vhost-specific flag if it is provided on the vswitchd command line,
     * otherwise resort to a default value.
     *
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location
     * of the vhost-user socket(s).
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of
     * the vhost-cuse character device. */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        changed = 1;
        *new_val = xstrdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
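
/* For illustration: with vhost-user enabled, a vswitchd command line such
 * as
 *
 *   ovs-vswitchd --dpdk -vhost_sock_dir /var/run/openvswitch -c 0x1 -n 4 ...
 *
 * causes dpdk_init() below to call this helper and pick up the custom
 * socket directory; without the flag, ovs_rundir() is used instead.  Note
 * that the vhost flag must immediately follow --dpdk, since only argv[1]
 * and argv[2] are examined here. */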

int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from arg list. */
    argc--;
    argv++;

    /* Reject --user option. */
    int i;
    for (i = 0; i < argc; i++) {
        if (!strcmp(argv[i], "--user")) {
            VLOG_ERR("Can not mix --dpdk and --user options, aborting.");
        }
    }

#ifdef VHOST_CUSE
    if (process_vhost_flags("-cuse_dev_name", xstrdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
#else
    if (process_vhost_flags("-vhost_sock_dir", xstrdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;
        int err;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
            return err;
        }
#endif
        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function. */
        argc -= 2;
        argv += 2;  /* Increment by two to bypass the vhost flag arguments. */
        base = 2;
    }

    /* Keep the program name argument as this is needed for the call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here. */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    return result + 1 + base;
}
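
/* For illustration, a typical invocation (per the OVS DPDK install guide)
 * looks like
 *
 *   ovs-vswitchd --dpdk -c 0x1 -n 4 -- unix:$DB_SOCK --pidfile --detach
 *
 * Everything between --dpdk and the bare "--" is consumed here and handed
 * to rte_eal_init(); the offset returned above lets the caller skip those
 * arguments and continue parsing the vswitchd options that follow. */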

static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_cuse_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,                   /* get_features */
        NULL,                   /* get_status */
        netdev_dpdk_vhost_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,                   /* get_features */
        NULL,                   /* get_status */
        netdev_dpdk_vhost_rxq_recv);

void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
        netdev_register_provider(&dpdk_vhost_cuse_class);
#else
        netdev_register_provider(&dpdk_vhost_user_class);
#endif
        ovsthread_once_done(&once);
    }
}

int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}