 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
#include <sys/types.h>
#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "rte_config.h"
#include "rte_meter.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
#define DPDK_PORT_WATCHDOG_INTERVAL 5
#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
#define ETHER_HDR_MAX_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu) (MTU_TO_MAX_FRAME_LEN(mtu) \
                        + sizeof(struct dp_packet) \
                        + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN 1024
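/* A worked example of the macros above for the standard Ethernet MTU of
 * 1500, assuming the usual values ETHER_HDR_LEN 14, ETHER_CRC_LEN 4,
 * VLAN_HEADER_LEN 4 and RTE_PKTMBUF_HEADROOM 128:
 *
 *   MTU_TO_FRAME_LEN(1500)     = 1500 + 14 + 4           = 1518
 *   MTU_TO_MAX_FRAME_LEN(1500) = 1500 + 14 + 4 + (2 * 4) = 1526
 *   MBUF_SIZE(1500)            = 1526 + sizeof(struct dp_packet) + 128
 */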
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF (4096 * 64)
#define MIN_NB_MBUF (4096 * 4)
#define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
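/* With the definitions above, MAX_NB_MBUF is 262144 and MIN_NB_MBUF is
 * 16384, so the allocation loop in dpdk_mp_get() below can try 262144,
 * 131072, 65536, 32768 and finally 16384 mbufs before giving up. */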
 * DPDK XSTATS Counter names definition
#define XSTAT_RX_64_PACKETS "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS "rx_size_1523_to_max_packets"
#define XSTAT_TX_64_PACKETS "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS "tx_size_1523_to_max_packets"
#define XSTAT_TX_MULTICAST_PACKETS "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS "rx_jabber_errors"
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096). */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096). */
#define OVS_VHOST_MAX_QUEUE_NUM 1024  /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED (-2) /* Queue was disabled by guest and not
                                       * yet mapped to another queue. */
static char *cuse_dev_name = NULL;  /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets. */
#define VHOST_ENQ_RETRY_NUM 8
static const struct rte_eth_conf port_conf = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame = 0, /* Jumbo Frame Support disabled */
        .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        .mq_mode = ETH_MQ_TX_NONE,
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
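/* A rough calibration note (not in the original source): on a 2 GHz TSC,
 * the DRAIN_TSC budget of 200000 cycles corresponds to about 100 us before
 * a partially filled tx queue is drained by dpdk_queue_pkts(). */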
static int rte_eal_init_ret = ENODEV;
static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Quality of Service */
/* An instance of a QoS configuration. Always associated with a particular
 * Each QoS implementation subclasses this with whatever additional data it
    const struct dpdk_qos_ops *ops;
/* A particular implementation of dpdk QoS operations.
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted.
struct dpdk_qos_ops {
    /* Name of the QoS type */
    const char *qos_name;
    /* Called to construct the QoS implementation on 'netdev'. The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'. The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     * For all QoS implementations it should always be non-null.
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);
    /* Destroys the data structures allocated by the implementation as part of
     * For all QoS implementations it should always be non-null.
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);
    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
    int (*qos_get)(const struct netdev *netdev, struct smap *details);
    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     * This function may be null if 'qos_conf' is not configurable.
    int (*qos_set)(struct netdev *netdev, const struct smap *details);
    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     * For all QoS implementations it should always be non-null.
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;
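/* A minimal sketch (not part of the real file) of what an implementation of
 * 'struct dpdk_qos_ops' looks like. The "null" policer below is hypothetical
 * and simply passes every packet through unchanged; a real implementation,
 * such as 'egress_policer_ops', must also provide the mandatory
 * qos_construct and qos_destruct callbacks described above. */
static int
null_qos_run(struct netdev *netdev OVS_UNUSED,
             struct rte_mbuf **pkts OVS_UNUSED, int pkt_cnt)
{
    /* No packets are dropped or reordered, so the original count is
     * returned unchanged. */
    return pkt_cnt;
}
static const struct dpdk_qos_ops null_qos_ops = {
    .qos_name = "null",        /* Hypothetical name for this sketch. */
    .qos_run = null_qos_run,
};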
 * Array of dpdk_qos_ops, contains pointers to all supported QoS
static const struct dpdk_qos_ops *const qos_confs[] = {
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);
static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);
/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
    struct rte_mempool *mp;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
/* There should be one 'struct dpdk_tx_queue' created for
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time */
                                   /* pkts are queued. */
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access. It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    int map;                       /* Mapping of configured vhost-user queues
                                    * to the queues enabled by the guest. */
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
/* DPDK has no way to remove dpdk ring ethernet devices,
   so we have to keep them around once they've been created
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id; /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
    enum dpdk_dev_type type;
    struct dpdk_tx_queue *tx_q;
    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
    struct dpdk_mp *dpdk_mp;
    struct netdev_stats stats;
    rte_spinlock_t stats_lock;
    struct eth_addr hwaddr;
    enum netdev_flags flags;
    struct rte_eth_link link;
    /* The user might request more txqs than the NIC has. We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission */
    bool txq_needs_locking;
    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;
    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    /* QoS configuration and lock for the device */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;
    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called */
    /* Socket ID detected when vHost device is brought up */
    int requested_socket_id;
    /* Ingress Policer */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
static bool dpdk_thread_is_pmd(void);
static int netdev_dpdk_construct(struct netdev *);
struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);
is_dpdk_class(const struct netdev_class *class)
    return class->construct == netdev_dpdk_construct;
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
dpdk_buf_size(int mtu)
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                    NETDEV_DPDK_MBUF_ALIGN);
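/* For example, dpdk_buf_size(1500) with the usual 128-byte
 * RTE_PKTMBUF_HEADROOM computes 1526 + 128 = 1654 and rounds it up to the
 * next multiple of NETDEV_DPDK_MBUF_ALIGN, i.e. a 2048-byte buffer. */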
/* XXX: use dpdk malloc for entire OVS. In fact, huge pages should be used
 * for all other segments: data, bss and text. */
dpdk_rte_mzalloc(size_t sz)
    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
free_dpdk_buf(struct dp_packet *p)
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;
    rte_pktmbuf_free(pkt);
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     unsigned i OVS_UNUSED)
    struct rte_mbuf *m = _m;
    rte_pktmbuf_init(mp, opaque_arg, _m, i);
    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    struct rte_pktmbuf_pool_private mbp_priv;
    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof (struct dp_packet) - sizeof (struct rte_mbuf);
    mp_size = MAX_NB_MBUF;
    if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                 dmp->mtu, dmp->socket_id, mp_size) < 0) {
        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
    if (dmp->mp == NULL) {
    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
    ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
dpdk_mp_put(struct dpdk_mp *dmp)
    ovs_assert(dmp->refcount >= 0);
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
check_link_status(struct netdev_dpdk *dev)
    struct rte_eth_link link;
    rte_eth_link_get_nowait(dev->port_id, &link);
    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);
        dev->link_reset_cnt++;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
dpdk_watchdog(void *dummy OVS_UNUSED)
    struct netdev_dpdk *dev;
    pthread_detach(pthread_self());
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request fewer queues */
    while (n_rxq && n_txq) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
            /* Retry with fewer tx queues */
        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
            /* Retry with fewer rx queues */
    dev->up.n_rxq = n_rxq;
    dev->real_n_txq = n_txq;
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
    diag = rte_eth_dev_start(dev->port_id);
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);
    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));
    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);
    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
    dev->flags = NETDEV_UP | NETDEV_PROMISC;
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
static struct netdev *
netdev_dpdk_alloc(void)
    struct netdev_dpdk *dev;
    if (!rte_eal_init_ret) { /* Only after successful initialization */
        dev = dpdk_rte_mzalloc(sizeof *dev);
netdev_dpdk_alloc_txq(struct netdev_dpdk *dev, unsigned int n_txqs)
    dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);
        if (!dev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core. If the corresponding core
             * is not on the same numa node as 'dev', flags the
            dev->tx_q[i].flush_tx = dev->socket_id == numa_id;
            /* Queues are shared among CPUs. Always flush */
            dev->tx_q[i].flush_tx = true;
        /* Initialize map for vhost devices. */
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
        rte_spinlock_init(&dev->tx_q[i].tx_lock);
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);
    rte_spinlock_init(&dev->stats_lock);
    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    buf_size = dpdk_buf_size(dev->mtu);
    dev->dpdk_mp = dpdk_mp_get(dev->socket_id, FRAME_LEN_TO_MTU(buf_size));
    /* Initialise QoS configuration to NULL and qos lock to unlocked */
    dev->qos_conf = NULL;
    rte_spinlock_init(&dev->qos_lock);
    /* Initialise rcu pointer for ingress policer to NULL */
    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;
    netdev->n_txq = NR_QUEUE;
    netdev->n_rxq = NR_QUEUE;
    dev->requested_n_rxq = NR_QUEUE;
    dev->requested_n_txq = NR_QUEUE;
    dev->real_n_txq = NR_QUEUE;
    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(dev, NR_QUEUE);
        err = dpdk_eth_dev_init(dev);
        netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);
        /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
        dev->flags = NETDEV_UP | NETDEV_PROMISC;
    ovs_list_push_back(&dpdk_list, &dev->list_node);
    ovs_mutex_unlock(&dev->mutex);
/* dev_name must be the prefix followed by a positive decimal number, e.g.
 * prefix "dpdk" matches "dpdk5" and yields port number 5. (No leading + or -
 * signs are allowed.) */
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
    if (strncmp(dev_name, prefix, strlen(prefix))) {
    cport = dev_name + strlen(prefix);
    if (str_to_uint(cport, 10, port_no)) {
vhost_construct_helper(struct netdev *netdev) OVS_REQUIRES(dpdk_mutex)
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    return netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    ovs_mutex_lock(&dpdk_mutex);
    strncpy(dev->vhost_id, netdev->name, sizeof(dev->vhost_id));
    err = vhost_construct_helper(netdev);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_vhost_user_construct(struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location where
     * the socket is to be created, then register the socket.
    snprintf(dev->vhost_id, sizeof(dev->vhost_id), "%s/%s",
             vhost_sock_dir, name);
    err = rte_vhost_driver_register(dev->vhost_id);
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
        err = vhost_construct_helper(netdev);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_construct(struct netdev *netdev)
    unsigned int port_no;
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_destruct(struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_lock(&dpdk_mutex);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_vhost_destruct(struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
        VLOG_ERR("To restore connectivity after re-adding of port, VM on socket"
                 " '%s' must be restarted.",
    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
        fatal_signal_remove_file_to_unlink(dev->vhost_id);
    ovs_mutex_lock(&dev->mutex);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_lock(&dpdk_mutex);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_dealloc(struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    new_n_rxq = MAX(smap_get_int(args, "n_rxq", dev->requested_n_rxq), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(netdev);
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_numa_id(const struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    return dev->socket_id;
/* Sets the number of tx queues for the dpdk interface. */
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    if (dev->requested_n_txq == n_txq) {
    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);
    ovs_mutex_unlock(&dev->mutex);
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);
static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    while (nb_tx != txq->count) {
        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free(txq->burst_pkts[i]);
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    txq->tsc = rte_get_timer_cycles();
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    if (txq->count == 0) {
    dpdk_queue_flush__(dev, qid);
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);
    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();
    for (i = 0; i < pkt_cnt; i++) {
        /* Handle current packet */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            rte_pktmbuf_free(pkt);
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
    rte_spinlock_lock(&policer->policer_lock);
    cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
    rte_spinlock_unlock(&policer->policer_lock);
is_vhost_running(struct virtio_net *virtio_dev)
    return (virtio_dev != NULL && (virtio_dev->flags & VIRTIO_DEV_RUNNING));
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
            stats->rx_65_to_127_packets++;
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
            stats->rx_512_to_1023_packets++;
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count,
    unsigned int packet_size;
    struct dp_packet *packet;
    stats->rx_packets += count;
    stats->rx_dropped += dropped;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);
        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_length_errors++;
        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);
        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
        stats->rx_bytes += packet_size;
 * The receive path for the vhost port is the TX path out from guest.
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet **packets, int *c)
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    int qid = rxq->queue_id;
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t dropped = 0;
    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev)
                     || !(dev->flags & NETDEV_UP))) {
    if (rxq->queue_id >= dev->real_n_rxq) {
    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    (struct rte_mbuf **)packets,
        nb_rx = ingress_policer_run(policer, (struct rte_mbuf **)packets, nb_rx);
    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, packets, nb_rx, dropped);
    rte_spinlock_unlock(&dev->stats_lock);
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    /* There is only one tx queue for this core. Do not flush other
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed */
    if (rxq->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq->queue_id);
    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) packets,
        nb_rx = ingress_policer_run(policer, (struct rte_mbuf **) packets, nb_rx);
    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
    struct netdev *netdev = &dev->up;
    if (dev->qos_conf != NULL) {
        rte_spinlock_lock(&dev->qos_lock);
        if (dev->qos_conf != NULL) {
            cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
        rte_spinlock_unlock(&dev->qos_lock);
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
    int sent = attempted - dropped;
    stats->tx_packets += sent;
    stats->tx_dropped += dropped;
    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt,
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int qos_pkts = cnt;
    qid = dev->tx_q[qid % dev->real_n_txq].map;
    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    /* Check whether QoS has been configured for the netdev. */
    cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;
        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            /* Prepare for possible retry.*/
            cur_pkts = &cur_pkts[tx_pkts];
            /* No packets sent - do not retry.*/
    } while (cnt && (retries++ < VHOST_ENQ_RETRY_NUM));
    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts, cnt);
    rte_spinlock_unlock(&dev->stats_lock);
    for (i = 0; i < total_pkts; i++) {
        dp_packet_delete(pkts[i]);
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);
        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));
        txq->count += tocopy;
        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
/* Tx function. Transmit packets indefinitely */
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
    OVS_NO_THREAD_SAFETY_ANALYSIS
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */
    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);
        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);
        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
        if (!mbufs[newcnt]) {
        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]),
               size);
        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;
    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
                                 newcnt, true);
        unsigned int qos_pkts = newcnt;
        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);
        dropped += qos_pkts - newcnt;
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
                       int cnt, bool may_steal)
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
        __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;
        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
        int next_tx_idx = 0;
        unsigned int qos_pkts = 0;
        unsigned int temp_cnt = 0;
        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);
            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    temp_cnt = i - next_tx_idx;
                    qos_pkts = temp_cnt;
                    temp_cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts,
                    dropped += qos_pkts - temp_cnt;
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);
                dp_packet_delete(pkts[i]);
                next_tx_idx = i + 1;
        if (next_tx_idx != cnt) {
            cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts, cnt);
            dropped += qos_pkts - cnt;
            dpdk_queue_pkts(dev, qid, (struct rte_mbuf **)&pkts[next_tx_idx],
        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        netdev_change_seq_changed(netdev);
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err, dpdk_mtu;
    struct dpdk_mp *old_mp;
    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
    buf_size = dpdk_buf_size(mtu);
    dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);
    mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
    rte_eth_dev_stop(dev->port_id);
    old_mp = dev->dpdk_mp;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    err = dpdk_eth_dev_init(dev);
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;
    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;
    rte_spinlock_unlock(&dev->stats_lock);
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstats *xstats,
                           const unsigned int size)
    /* XXX Current implementation is simple search through an array
     * to find hardcoded counter names. In future DPDK release (TBD)
     * XSTATS API will change so each counter will be represented by
     * unique ID instead of String. */
    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, xstats[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, xstats[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, xstats[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, xstats[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, xstats[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, xstats[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, xstats[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, xstats[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, xstats[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, xstats[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, xstats[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, xstats[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, xstats[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, xstats[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    struct rte_eth_xstats *rte_xstats;
    int rte_xstats_len, rte_xstats_ret;
    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
    rte_xstats_len = rte_eth_xstats_get(dev->port_id, NULL, 0);
    if (rte_xstats_len > 0) {
        rte_xstats = dpdk_rte_mzalloc(sizeof(*rte_xstats) * rte_xstats_len);
        memset(rte_xstats, 0xff, sizeof(*rte_xstats) * rte_xstats_len);
        rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
        if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
            netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_ret);
        rte_free(rte_xstats);
        VLOG_WARN("Can't get XSTATS counters for port: %i.", dev->port_id);
    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;
    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);
    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;
    ovs_mutex_lock(&dev->mutex);
    ovs_mutex_unlock(&dev->mutex);
    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);
    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000/8;
    burst_bytes = burst * 1000/8;
    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
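    /* As a worked example (not in the original source): a 10000 kbit/s rate
     * with an 8000 kbit burst becomes a CIR of 1250000 bytes/s and a CBS of
     * 1000000 bytes. */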
    err = rte_meter_srtcm_config(&policer->in_policer,
                                 &policer->app_srtcm_params);
        VLOG_ERR("Could not create rte meter for ingress policer");
netdev_dpdk_set_policing(struct netdev* netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;
    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value.
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
    ovs_mutex_lock(&dev->mutex);
    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);
    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
    /* Destroy any existing ingress policer for the device if one exists */
        ovsrcu_postpone(free, policer);
    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_ifindex(const struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    ovs_mutex_lock(&dev->mutex);
    if (is_vhost_running(virtio_dev)) {
    ovs_mutex_unlock(&dev->mutex);
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;
    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);
    return carrier_resets;
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
    *old_flagsp = dev->flags;
    if (dev->flags == *old_flagsp) {
    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
        struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(virtio_dev)) {
            netdev_change_seq_changed(&dev->up);
            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof(dev->stats));
                rte_spinlock_unlock(&dev->stats_lock);
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;
    if (dev->port_id < 0)
    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);
2157 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
2159 smap_add_format(args, "port_no", "%d", dev->port_id);
2160 smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
2161 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
2162 smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
2163 smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
2164 smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
2165 smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
2166 smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
2167 smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
2168 smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
2169 smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);
    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
    enum netdev_flags old_flags;
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
    if (!strcasecmp(argv[argc - 1], "up")) {
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);
            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);
            netdev_close(netdev);
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
        struct netdev_dpdk *netdev;
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        ovs_mutex_unlock(&dpdk_mutex);
    unixctl_command_reply(conn, "OK");
 * Set virtqueue flags so that we do not receive interrupts.
set_irq_status(struct virtio_net *virtio_dev)
    for (i = 0; i < virtio_dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_TXQ, 0);
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and real_n_txq modifications.
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->real_n_txq;
    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);
    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    rte_free(enabled_queues);
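/* An illustrative example (not in the original source): with four
 * configured txqs of which only 0 and 2 were enabled by the guest, the
 * remapping loop above leaves 0 and 2 mapped to themselves and assigns the
 * disabled queues round-robin, so 1 -> 0 and 3 -> 2. */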
2296 netdev_dpdk_vhost_set_queues(struct netdev_dpdk *dev, struct virtio_net *virtio_dev)
2297 OVS_REQUIRES(dev->mutex)
2301 qp_num = virtio_dev->virt_qp_nb;
2302 if (qp_num > dev->up.n_rxq) {
2303 VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
2304 "too many queues %d > %d", virtio_dev->ifname, virtio_dev->device_fh,
2305 qp_num, dev->up.n_rxq);
2309 dev->real_n_rxq = qp_num;
2310 dev->real_n_txq = qp_num;
2311 dev->txq_needs_locking = true;
2312 /* Enable TX queue 0 by default if it wasn't disabled. */
2313 if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
2314 dev->tx_q[0].map = 0;
2317 netdev_dpdk_remap_txqs(dev);
2323 * A new virtio-net device is added to a vhost port.
2326 new_device(struct virtio_net *virtio_dev)
2328 struct netdev_dpdk *dev;
2329 bool exists = false;
2333 ovs_mutex_lock(&dpdk_mutex);
2334 /* Add device to the vhost port with the same name as that passed down. */
2335 LIST_FOR_EACH(dev, list_node, &dpdk_list) {
2336 if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
2337 ovs_mutex_lock(&dev->mutex);
2338 if (netdev_dpdk_vhost_set_queues(dev, virtio_dev)) {
2339 ovs_mutex_unlock(&dev->mutex);
2340 ovs_mutex_unlock(&dpdk_mutex);
2343 ovsrcu_set(&dev->virtio_dev, virtio_dev);
2346 /* Get NUMA information */
2347 err = get_mempolicy(&newnode, NULL, 0, virtio_dev,
2348 MPOL_F_NODE | MPOL_F_ADDR);
2350 VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
2351 virtio_dev->ifname);
2352 newnode = dev->socket_id;
2353 } else if (newnode != dev->socket_id) {
2354 dev->requested_socket_id = newnode;
2355 netdev_request_reconfigure(&dev->up);
2358 virtio_dev->flags |= VIRTIO_DEV_RUNNING;
2359 /* Disable notifications. */
2360 set_irq_status(virtio_dev);
2361 netdev_change_seq_changed(&dev->up);
2362 ovs_mutex_unlock(&dev->mutex);
2366 ovs_mutex_unlock(&dpdk_mutex);
2369 VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
2370 "found", virtio_dev->ifname, virtio_dev->device_fh);
2375 VLOG_INFO("vHost Device '%s' %"PRIu64" has been added on numa node %i",
2376 virtio_dev->ifname, virtio_dev->device_fh, newnode);
2380 /* Clears mapping for all available queues of vhost interface. */
2382 netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
2383 OVS_REQUIRES(dev->mutex)
2387 for (i = 0; i < dev->real_n_txq; i++) {
2388 dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
 * Remove a virtio-net device from the specific vhost port. Clearing the
 * port's 'virtio_dev' pointer stops any more packets from being sent or
 * received to/from the VM, and the RCU synchronization below ensures that
 * all currently queued packets have been sent/received before the device is
 * removed.
2399 destroy_device(volatile struct virtio_net *virtio_dev)
2401 struct netdev_dpdk *dev;
2402 bool exists = false;
2404 ovs_mutex_lock(&dpdk_mutex);
2405 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
2406 if (netdev_dpdk_get_virtio(dev) == virtio_dev) {
2408 ovs_mutex_lock(&dev->mutex);
2409 virtio_dev->flags &= ~VIRTIO_DEV_RUNNING;
2410 ovsrcu_set(&dev->virtio_dev, NULL);
2411 netdev_dpdk_txq_map_clear(dev);
2413 netdev_change_seq_changed(&dev->up);
2414 ovs_mutex_unlock(&dev->mutex);
2419 ovs_mutex_unlock(&dpdk_mutex);
if (exists) {
2423 * Wait for other threads to quiesce after setting the 'virtio_dev'
2424 * to NULL, before returning.
2426 ovsrcu_synchronize();
2428 * As call to ovsrcu_synchronize() will end the quiescent state,
2429 * put thread back into quiescent state before returning.
2431 ovsrcu_quiesce_start();
2432 VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed",
2433 virtio_dev->ifname, virtio_dev->device_fh);
2435 VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
2436 virtio_dev->device_fh);
2441 vring_state_changed(struct virtio_net *virtio_dev, uint16_t queue_id,
2444 struct netdev_dpdk *dev;
2445 bool exists = false;
2446 int qid = queue_id / VIRTIO_QNUM;
2448 if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
2452 ovs_mutex_lock(&dpdk_mutex);
2453 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
2454 if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
2455 ovs_mutex_lock(&dev->mutex);
2457 dev->tx_q[qid].map = qid;
2459 dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
2461 netdev_dpdk_remap_txqs(dev);
2463 ovs_mutex_unlock(&dev->mutex);
2467 ovs_mutex_unlock(&dpdk_mutex);
2470 VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' %"
2471 PRIu64" changed to \'%s\'", queue_id, qid,
2472 virtio_dev->ifname, virtio_dev->device_fh,
2473 (enable == 1) ? "enabled" : "disabled");
2475 VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
2476 virtio_dev->device_fh);
2484 netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
2486 return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
2489 struct ingress_policer *
2490 netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
2492 return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
2499 static const struct virtio_net_device_ops virtio_net_device_ops =
2501 .new_device = new_device,
2502 .destroy_device = destroy_device,
2503 .vring_state_changed = vring_state_changed
2507 start_vhost_loop(void *dummy OVS_UNUSED)
2509 pthread_detach(pthread_self());
/* Put the vhost thread into quiescent state. */
2511 ovsrcu_quiesce_start();
2512 rte_vhost_driver_session_start();
2517 dpdk_vhost_class_init(void)
2519 rte_vhost_driver_callback_register(&virtio_net_device_ops);
2520 rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
2521 | 1ULL << VIRTIO_NET_F_HOST_TSO6
2522 | 1ULL << VIRTIO_NET_F_CSUM);
2524 ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
2529 dpdk_vhost_cuse_class_init(void)
2535 dpdk_vhost_user_class_init(void)
2541 dpdk_common_init(void)
2543 unixctl_command_register("netdev-dpdk/set-admin-state",
2544 "[netdev] up|down", 1, 2,
2545 netdev_dpdk_set_admin_state, NULL);
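/* For example, assuming a DPDK port named "dpdk0" exists, the command
 * registered above can be invoked per port:
 *
 *   ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 *
 * or, with the netdev argument omitted, applied to every DPDK port:
 *
 *   ovs-appctl netdev-dpdk/set-admin-state up
 */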
2552 dpdk_ring_create(const char dev_name[], unsigned int port_no,
2553 unsigned int *eth_port_id)
2555 struct dpdk_ring *ivshmem;
2556 char ring_name[RTE_RING_NAMESIZE];
2559 ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
2560 if (ivshmem == NULL) {
/* XXX: Add support for multiqueue rings. */
2565 err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
2570 /* Create single producer tx ring, netdev does explicit locking. */
2571 ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2573 if (ivshmem->cring_tx == NULL) {
2578 err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
2583 /* Create single consumer rx ring, netdev does explicit locking. */
2584 ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2586 if (ivshmem->cring_rx == NULL) {
2591 err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
2592 &ivshmem->cring_tx, 1, SOCKET0);
2599 ivshmem->user_port_id = port_no;
2600 ivshmem->eth_port_id = rte_eth_dev_count() - 1;
2601 ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);
2603 *eth_port_id = ivshmem->eth_port_id;
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
2610 struct dpdk_ring *ivshmem;
2611 unsigned int port_no;
2614 /* Names always start with "dpdkr" */
2615 err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
/* Look through our list to find the device. */
2621 LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
2622 if (ivshmem->user_port_id == port_no) {
2623 VLOG_INFO("Found dpdk ring device %s:", dev_name);
2624 *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
2628 /* Need to create the device rings */
2629 return dpdk_ring_create(dev_name, port_no, eth_port_id);
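/* For example, opening a port named "dpdkr0" parses to user port number 0;
 * if no ring with that number exists yet, dpdk_ring_create() above allocates
 * the single-producer tx ring "dpdkr0_tx" and single-consumer rx ring
 * "dpdkr0_rx" and binds them to a new ethdev via rte_eth_from_rings(). */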
2633 netdev_dpdk_ring_send(struct netdev *netdev, int qid,
2634 struct dp_packet **pkts, int cnt, bool may_steal)
2636 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
/* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
 * the rss hash field is clear. This is because the same mbuf may be
 * modified by the consumer of the ring and returned into the datapath
 * without recalculating the rss hash. */
2643 for (i = 0; i < cnt; i++) {
2644 dp_packet_rss_invalidate(pkts[i]);
2647 netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
2652 netdev_dpdk_ring_construct(struct netdev *netdev)
2654 unsigned int port_no = 0;
2657 if (rte_eal_init_ret) {
2658 return rte_eal_init_ret;
2661 ovs_mutex_lock(&dpdk_mutex);
2663 err = dpdk_ring_open(netdev->name, &port_no);
2668 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
2671 ovs_mutex_unlock(&dpdk_mutex);
2678 * Initialize QoS configuration operations.
2681 qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
 * Search the existing QoS operations in 'qos_confs' and compare each set of
 * operations' qos_name to 'name'. Return a pointer to the matching
 * dpdk_qos_ops, or NULL if no match is found.
2691 static const struct dpdk_qos_ops *
2692 qos_lookup_name(const char *name)
2694 const struct dpdk_qos_ops *const *opsp;
2696 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2697 const struct dpdk_qos_ops *ops = *opsp;
2698 if (!strcmp(name, ops->qos_name)) {
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf. Set the netdev's qos_conf to NULL.
2710 qos_delete_conf(struct netdev *netdev)
2712 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2714 rte_spinlock_lock(&dev->qos_lock);
2715 if (dev->qos_conf) {
2716 if (dev->qos_conf->ops->qos_destruct) {
2717 dev->qos_conf->ops->qos_destruct(netdev, dev->qos_conf);
2719 dev->qos_conf = NULL;
2721 rte_spinlock_unlock(&dev->qos_lock);
2725 netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
2728 const struct dpdk_qos_ops *const *opsp;
2730 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2731 const struct dpdk_qos_ops *ops = *opsp;
2732 if (ops->qos_construct && ops->qos_name[0] != '\0') {
2733 sset_add(types, ops->qos_name);
2740 netdev_dpdk_get_qos(const struct netdev *netdev,
2741 const char **typep, struct smap *details)
2743 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2746 ovs_mutex_lock(&dev->mutex);
2748 *typep = dev->qos_conf->ops->qos_name;
2749 error = (dev->qos_conf->ops->qos_get
2750 ? dev->qos_conf->ops->qos_get(netdev, details): 0);
2752 ovs_mutex_unlock(&dev->mutex);
2758 netdev_dpdk_set_qos(struct netdev *netdev,
2759 const char *type, const struct smap *details)
2761 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2762 const struct dpdk_qos_ops *new_ops = NULL;
/* If 'type' is empty or unsupported, then the current QoS configuration
 * for the dpdk-netdev can be destroyed. */
2767 new_ops = qos_lookup_name(type);
2769 if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
2770 qos_delete_conf(netdev);
2774 ovs_mutex_lock(&dev->mutex);
2776 if (dev->qos_conf) {
2777 if (new_ops == dev->qos_conf->ops) {
2778 error = new_ops->qos_set ? new_ops->qos_set(netdev, details) : 0;
2780 /* Delete existing QoS configuration. */
2781 qos_delete_conf(netdev);
2782 ovs_assert(dev->qos_conf == NULL);
2784 /* Install new QoS configuration. */
2785 error = new_ops->qos_construct(netdev, details);
2786 ovs_assert((error == 0) == (dev->qos_conf != NULL));
2789 error = new_ops->qos_construct(netdev, details);
2790 ovs_assert((error == 0) == (dev->qos_conf != NULL));
2793 ovs_mutex_unlock(&dev->mutex);
2797 /* egress-policer details */
2799 struct egress_policer {
2800 struct qos_conf qos_conf;
2801 struct rte_meter_srtcm_params app_srtcm_params;
2802 struct rte_meter_srtcm egress_meter;
2805 static struct egress_policer *
2806 egress_policer_get__(const struct netdev *netdev)
2808 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2809 return CONTAINER_OF(dev->qos_conf, struct egress_policer, qos_conf);
2813 egress_policer_qos_construct(struct netdev *netdev,
2814 const struct smap *details)
2816 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2817 struct egress_policer *policer;
2822 rte_spinlock_lock(&dev->qos_lock);
2823 policer = xmalloc(sizeof *policer);
2824 qos_conf_init(&policer->qos_conf, &egress_policer_ops);
2825 dev->qos_conf = &policer->qos_conf;
2826 cir_s = smap_get(details, "cir");
2827 cbs_s = smap_get(details, "cbs");
2828 policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
2829 policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
2830 policer->app_srtcm_params.ebs = 0;
2831 err = rte_meter_srtcm_config(&policer->egress_meter,
2832 &policer->app_srtcm_params);
2833 rte_spinlock_unlock(&dev->qos_lock);
2839 egress_policer_qos_destruct(struct netdev *netdev OVS_UNUSED,
2840 struct qos_conf *conf)
2842 struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
2848 egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
2850 struct egress_policer *policer = egress_policer_get__(netdev);
2851 smap_add_format(details, "cir", "%llu",
2852 1ULL * policer->app_srtcm_params.cir);
2853 smap_add_format(details, "cbs", "%llu",
2854 1ULL * policer->app_srtcm_params.cbs);
2860 egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
2862 struct egress_policer *policer;
2867 policer = egress_policer_get__(netdev);
2868 cir_s = smap_get(details, "cir");
2869 cbs_s = smap_get(details, "cbs");
2870 policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
2871 policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
2872 policer->app_srtcm_params.ebs = 0;
2873 err = rte_meter_srtcm_config(&policer->egress_meter,
2874 &policer->app_srtcm_params);
2880 egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts, int pkt_cnt)
2883 struct egress_policer *policer = egress_policer_get__(netdev);
2885 cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);
2890 static const struct dpdk_qos_ops egress_policer_ops = {
2891 "egress-policer", /* qos_name */
2892 egress_policer_qos_construct,
2893 egress_policer_qos_destruct,
2894 egress_policer_qos_get,
2895 egress_policer_qos_set,
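/* A minimal sketch of configuring the egress policer from the database
 * (the port name "vhost-user0" is illustrative; cir/cbs are in the units
 * expected by rte_meter's srTCM, i.e. bytes per second and bytes):
 *
 *   ovs-vsctl set port vhost-user0 qos=@newqos -- \
 *       --id=@newqos create qos type=egress-policer \
 *       other-config:cir=46000 other-config:cbs=2048
 */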
2900 netdev_dpdk_reconfigure(struct netdev *netdev)
2902 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2905 ovs_mutex_lock(&dpdk_mutex);
2906 ovs_mutex_lock(&dev->mutex);
2908 if (netdev->n_txq == dev->requested_n_txq
2909 && netdev->n_rxq == dev->requested_n_rxq) {
2910 /* Reconfiguration is unnecessary */
2915 rte_eth_dev_stop(dev->port_id);
2917 netdev->n_txq = dev->requested_n_txq;
2918 netdev->n_rxq = dev->requested_n_rxq;
2920 rte_free(dev->tx_q);
2921 err = dpdk_eth_dev_init(dev);
2922 netdev_dpdk_alloc_txq(dev, dev->real_n_txq);
2924 dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;
2928 ovs_mutex_unlock(&dev->mutex);
2929 ovs_mutex_unlock(&dpdk_mutex);
2935 netdev_dpdk_vhost_user_reconfigure(struct netdev *netdev)
2937 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2940 ovs_mutex_lock(&dpdk_mutex);
2941 ovs_mutex_lock(&dev->mutex);
2943 netdev->n_txq = dev->requested_n_txq;
2944 netdev->n_rxq = dev->requested_n_rxq;
2946 if (dev->requested_socket_id != dev->socket_id) {
2947 dev->socket_id = dev->requested_socket_id;
/* Change mempool to the new NUMA node. */
2949 dpdk_mp_put(dev->dpdk_mp);
2950 dev->dpdk_mp = dpdk_mp_get(dev->socket_id, dev->mtu);
2951 if (!dev->dpdk_mp) {
2956 ovs_mutex_unlock(&dev->mutex);
2957 ovs_mutex_unlock(&dpdk_mutex);
2963 netdev_dpdk_vhost_cuse_reconfigure(struct netdev *netdev)
2965 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2967 ovs_mutex_lock(&dpdk_mutex);
2968 ovs_mutex_lock(&dev->mutex);
2970 netdev->n_txq = dev->requested_n_txq;
2971 dev->real_n_txq = 1;
2973 dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;
2975 ovs_mutex_unlock(&dev->mutex);
2976 ovs_mutex_unlock(&dpdk_mutex);
2981 #define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, SEND, \
2982 GET_CARRIER, GET_STATS, GET_FEATURES, \
2983 GET_STATUS, RECONFIGURE, RXQ_RECV) \
2986 true, /* is_pmd */ \
2988 NULL, /* netdev_dpdk_run */ \
2989 NULL, /* netdev_dpdk_wait */ \
2991 netdev_dpdk_alloc, \
2994 netdev_dpdk_dealloc, \
2995 netdev_dpdk_get_config, \
2996 netdev_dpdk_set_config, \
2997 NULL, /* get_tunnel_config */ \
2998 NULL, /* build header */ \
2999 NULL, /* push header */ \
3000 NULL, /* pop header */ \
3001 netdev_dpdk_get_numa_id, /* get_numa_id */ \
3002 netdev_dpdk_set_tx_multiq, \
3005 NULL, /* send_wait */ \
3007 netdev_dpdk_set_etheraddr, \
3008 netdev_dpdk_get_etheraddr, \
3009 netdev_dpdk_get_mtu, \
3010 netdev_dpdk_set_mtu, \
3011 netdev_dpdk_get_ifindex, \
3013 netdev_dpdk_get_carrier_resets, \
3014 netdev_dpdk_set_miimon, \
3017 NULL, /* set_advertisements */ \
3019 netdev_dpdk_set_policing, \
3020 netdev_dpdk_get_qos_types, \
3021 NULL, /* get_qos_capabilities */ \
3022 netdev_dpdk_get_qos, \
3023 netdev_dpdk_set_qos, \
3024 NULL, /* get_queue */ \
3025 NULL, /* set_queue */ \
3026 NULL, /* delete_queue */ \
3027 NULL, /* get_queue_stats */ \
3028 NULL, /* queue_dump_start */ \
3029 NULL, /* queue_dump_next */ \
3030 NULL, /* queue_dump_done */ \
3031 NULL, /* dump_queue_stats */ \
3033 NULL, /* set_in4 */ \
3034 NULL, /* get_addr_list */ \
3035 NULL, /* add_router */ \
3036 NULL, /* get_next_hop */ \
3038 NULL, /* arp_lookup */ \
3040 netdev_dpdk_update_flags, \
3043 netdev_dpdk_rxq_alloc, \
3044 netdev_dpdk_rxq_construct, \
3045 netdev_dpdk_rxq_destruct, \
3046 netdev_dpdk_rxq_dealloc, \
3048 NULL, /* rx_wait */ \
3049 NULL, /* rxq_drain */ \
3053 process_vhost_flags(char *flag, char *default_val, int size,
3054 const struct smap *ovs_other_config,
3060 val = smap_get(ovs_other_config, flag);
/* Depending on which version of vhost is in use, process the vhost-specific
 * flag if it is provided; otherwise fall back to the default value.
3065 if (val && (strlen(val) <= size)) {
3067 *new_val = xstrdup(val);
3068 VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
3070 VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
3071 *new_val = default_val;
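/* For example, a user-supplied vhost socket directory is picked up here
 * from the database (the value shown is illustrative only):
 *
 *   ovs-vsctl set Open_vSwitch . other_config:vhost-sock-dir=dpdkvhost
 *
 * The value is then validated and appended to ovs_rundir() in dpdk_init__()
 * below. */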
3078 grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
3080 return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
3084 dpdk_option_extend(char ***argv, int argc, const char *option,
3087 char **newargv = grow_argv(argv, argc, 2);
3089 newargv[argc] = xstrdup(option);
newargv[argc + 1] = xstrdup(value);
3094 move_argv(char ***argv, size_t cur_size, char **src_argv, size_t src_argc)
3096 char **newargv = grow_argv(argv, cur_size, src_argc);
3097 while (src_argc--) {
newargv[cur_size + src_argc] = src_argv[src_argc];
3099 src_argv[src_argc] = NULL;
3105 extra_dpdk_args(const char *ovs_extra_config, char ***argv, int argc)
3108 char *release_tok = xstrdup(ovs_extra_config);
3109 char *tok = release_tok, *endptr = NULL;
3111 for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
3112 tok = strtok_r(NULL, " ", &endptr)) {
3113 char **newarg = grow_argv(argv, ret, 1);
3115 newarg[ret++] = xstrdup(tok);
3122 argv_contains(char **argv_haystack, const size_t argc_haystack,
3125 for (size_t i = 0; i < argc_haystack; ++i) {
3126 if (!strcmp(argv_haystack[i], needle))
3133 construct_dpdk_options(const struct smap *ovs_other_config,
3134 char ***argv, const int initial_size,
3135 char **extra_args, const size_t extra_argc)
3137 struct dpdk_options_map {
3138 const char *ovs_configuration;
3139 const char *dpdk_option;
3140 bool default_enabled;
3141 const char *default_value;
3143 {"dpdk-lcore-mask", "-c", false, NULL},
3144 {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
3147 int i, ret = initial_size;
/* First, construct from the flat options (non-mutually-exclusive). */
3150 for (i = 0; i < ARRAY_SIZE(opts); ++i) {
3151 const char *lookup = smap_get(ovs_other_config,
3152 opts[i].ovs_configuration);
3153 if (!lookup && opts[i].default_enabled) {
3154 lookup = opts[i].default_value;
3158 if (!argv_contains(extra_args, extra_argc, opts[i].dpdk_option)) {
3159 dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
3162 VLOG_WARN("Ignoring database defined option '%s' due to "
3163 "dpdk_extras config", opts[i].dpdk_option);
3171 #define MAX_DPDK_EXCL_OPTS 10
3174 construct_dpdk_mutex_options(const struct smap *ovs_other_config,
3175 char ***argv, const int initial_size,
3176 char **extra_args, const size_t extra_argc)
3178 struct dpdk_exclusive_options_map {
3179 const char *category;
3180 const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
3181 const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
3182 const char *default_value;
3186 {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
3187 {"-m", "--socket-mem", NULL,},
3192 int i, ret = initial_size;
3193 for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
3194 int found_opts = 0, scan, found_pos = -1;
3195 const char *found_value;
3196 struct dpdk_exclusive_options_map *popt = &excl_opts[i];
3198 for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
3199 && popt->ovs_dpdk_options[scan]; ++scan) {
3200 const char *lookup = smap_get(ovs_other_config,
3201 popt->ovs_dpdk_options[scan]);
3202 if (lookup && strlen(lookup)) {
3205 found_value = lookup;
3210 if (popt->default_option) {
3211 found_pos = popt->default_option;
3212 found_value = popt->default_value;
3218 if (found_opts > 1) {
3219 VLOG_ERR("Multiple defined options for %s. Please check your"
3220 " database settings and reconfigure if necessary.",
3224 if (!argv_contains(extra_args, extra_argc,
3225 popt->eal_dpdk_options[found_pos])) {
3226 dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
3230 VLOG_WARN("Ignoring database defined option '%s' due to "
3231 "dpdk_extras config", popt->eal_dpdk_options[found_pos]);
3239 get_dpdk_args(const struct smap *ovs_other_config, char ***argv,
3242 const char *extra_configuration;
3243 char **extra_args = NULL;
3245 size_t extra_argc = 0;
3247 extra_configuration = smap_get(ovs_other_config, "dpdk-extra");
3248 if (extra_configuration) {
3249 extra_argc = extra_dpdk_args(extra_configuration, &extra_args, 0);
3252 i = construct_dpdk_options(ovs_other_config, argv, argc, extra_args,
3254 i = construct_dpdk_mutex_options(ovs_other_config, argv, i, extra_args,
3257 if (extra_configuration) {
3258 *argv = move_argv(argv, i, extra_args, extra_argc);
3261 return i + extra_argc;
3264 static char **dpdk_argv;
3265 static int dpdk_argc;
3268 deferred_argv_release(void)
3271 for (result = 0; result < dpdk_argc; ++result) {
3272 free(dpdk_argv[result]);
3279 dpdk_init__(const struct smap *ovs_other_config)
3284 bool auto_determine = true;
3288 char *sock_dir_subcomponent;
3291 if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
3292 VLOG_INFO("DPDK Disabled - to change this requires a restart.\n");
3296 VLOG_INFO("DPDK Enabled, initializing");
3299 if (process_vhost_flags("cuse-dev-name", xstrdup("vhost-net"),
3300 PATH_MAX, ovs_other_config, &cuse_dev_name)) {
3302 if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
3303 NAME_MAX, ovs_other_config,
3304 &sock_dir_subcomponent)) {
3306 if (!strstr(sock_dir_subcomponent, "..")) {
3307 vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
3308 sock_dir_subcomponent);
3310 err = stat(vhost_sock_dir, &s);
3312 VLOG_ERR("vhost-user sock directory '%s' does not exist.",
3316 vhost_sock_dir = xstrdup(ovs_rundir());
3317 VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid"
3318 "characters '..' - using %s instead.",
3319 ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
3321 free(sock_dir_subcomponent);
3323 vhost_sock_dir = sock_dir_subcomponent;
3327 argv = grow_argv(&argv, 0, 1);
3329 argv[0] = xstrdup(ovs_get_program_name());
3330 argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);
3332 while (argc_tmp != argc) {
3333 if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
3334 auto_determine = false;
3342 * NOTE: This is an unsophisticated mechanism for determining the DPDK
3343 * lcore for the DPDK Master.
3345 if (auto_determine) {
3347 /* Get the main thread affinity */
3349 err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
3352 for (i = 0; i < CPU_SETSIZE; i++) {
3353 if (CPU_ISSET(i, &cpuset)) {
3354 argv = grow_argv(&argv, argc, 2);
3355 argv[argc++] = xstrdup("-c");
3356 argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
3361 VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
/* User did not set dpdk-lcore-mask and we are unable to get the current
 * thread affinity - default to core 0x1. */
3364 argv = grow_argv(&argv, argc, 2);
3365 argv[argc++] = xstrdup("-c");
3366 argv[argc++] = xasprintf("0x%X", 1);
3370 argv = grow_argv(&argv, argc, 1);
3375 if (VLOG_IS_INFO_ENABLED()) {
3379 ds_put_cstr(&eal_args, "EAL ARGS:");
3380 for (opt = 0; opt < argc; ++opt) {
3381 ds_put_cstr(&eal_args, " ");
3382 ds_put_cstr(&eal_args, argv[opt]);
3384 VLOG_INFO("%s", ds_cstr_ro(&eal_args));
3385 ds_destroy(&eal_args);
3388 /* Make sure things are initialized ... */
3389 result = rte_eal_init(argc, argv);
3391 ovs_abort(result, "Cannot init EAL");
/* Set the main thread affinity back to the pre-rte_eal_init() value. */
3395 if (auto_determine && !err) {
3396 err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
3399 VLOG_ERR("Thread setaffinity error %d", err);
3406 atexit(deferred_argv_release);
3408 rte_memzone_dump(stdout);
3409 rte_eal_init_ret = 0;
3411 /* We are called from the main thread here */
3412 RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
3414 ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
3417 /* Register CUSE device to handle IOCTLs.
3418 * Unless otherwise specified, cuse_dev_name is set to vhost-net.
3420 err = rte_vhost_driver_register(cuse_dev_name);
3423 VLOG_ERR("CUSE device setup failure.");
3428 dpdk_vhost_class_init();
3430 /* Finally, register the dpdk classes */
3431 netdev_dpdk_register();
3435 dpdk_init(const struct smap *ovs_other_config)
3437 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
3439 if (ovs_other_config && ovsthread_once_start(&once)) {
3440 dpdk_init__(ovs_other_config);
3441 ovsthread_once_done(&once);
3445 static const struct netdev_class dpdk_class =
3449 netdev_dpdk_construct,
3450 netdev_dpdk_destruct,
3451 netdev_dpdk_eth_send,
3452 netdev_dpdk_get_carrier,
3453 netdev_dpdk_get_stats,
3454 netdev_dpdk_get_features,
3455 netdev_dpdk_get_status,
3456 netdev_dpdk_reconfigure,
3457 netdev_dpdk_rxq_recv);
3459 static const struct netdev_class dpdk_ring_class =
3463 netdev_dpdk_ring_construct,
3464 netdev_dpdk_destruct,
3465 netdev_dpdk_ring_send,
3466 netdev_dpdk_get_carrier,
3467 netdev_dpdk_get_stats,
3468 netdev_dpdk_get_features,
3469 netdev_dpdk_get_status,
3470 netdev_dpdk_reconfigure,
3471 netdev_dpdk_rxq_recv);
3473 static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
3476 dpdk_vhost_cuse_class_init,
3477 netdev_dpdk_vhost_cuse_construct,
3478 netdev_dpdk_vhost_destruct,
3479 netdev_dpdk_vhost_send,
3480 netdev_dpdk_vhost_get_carrier,
3481 netdev_dpdk_vhost_get_stats,
3484 netdev_dpdk_vhost_cuse_reconfigure,
3485 netdev_dpdk_vhost_rxq_recv);
3487 static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
3490 dpdk_vhost_user_class_init,
3491 netdev_dpdk_vhost_user_construct,
3492 netdev_dpdk_vhost_destruct,
3493 netdev_dpdk_vhost_send,
3494 netdev_dpdk_vhost_get_carrier,
3495 netdev_dpdk_vhost_get_stats,
3498 netdev_dpdk_vhost_user_reconfigure,
3499 netdev_dpdk_vhost_rxq_recv);
3502 netdev_dpdk_register(void)
3505 netdev_register_provider(&dpdk_class);
3506 netdev_register_provider(&dpdk_ring_class);
3508 netdev_register_provider(&dpdk_vhost_cuse_class);
3510 netdev_register_provider(&dpdk_vhost_user_class);
3515 dpdk_set_lcore_id(unsigned cpu)
/* NON_PMD_CORE_ID is reserved for use by non-PMD threads. */
3518 ovs_assert(cpu != NON_PMD_CORE_ID);
3519 RTE_PER_LCORE(_lcore_id) = cpu;
3523 dpdk_thread_is_pmd(void)
3525 return rte_lcore_id() != NON_PMD_CORE_ID;