 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "unaligned.h"

#include "rte_config.h"
#include "rte_meter.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/* We need to reserve extra space in the mbufs so that we can align their
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and a drop in
 * performance for the standard Ethernet MTU. */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN \
                                     - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)   \
                                     + sizeof(struct dp_packet)  \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
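
/* Worked example (editor's note): for the standard Ethernet MTU of 1500,
 * and assuming ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4, VLAN_HEADER_LEN = 4
 * and the default RTE_PKTMBUF_HEADROOM of 128:
 *
 *   MTU_TO_FRAME_LEN(1500)     = 1500 + 14 + 4    = 1518
 *   ETHER_HDR_MAX_LEN          = 14 + 4 + (2 * 4) = 26
 *   MTU_TO_MAX_FRAME_LEN(1500) = 1500 + 26        = 1526
 *   MBUF_SIZE(1500)            = 1526 + sizeof(struct dp_packet) + 128
 *
 * i.e. each mempool element is sized for the largest possible frame plus
 * the OVS packet metadata and the DPDK headroom. */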
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by the DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
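
/* Editor's note, a worked example of the halving sequence: with
 * MAX_NB_MBUF = 4096 * 64 = 262144 and MIN_NB_MBUF = 4096 * 4 = 16384, the
 * allocator tries 262144, 131072, 65536, 32768 and finally 16384 mbufs.
 * The asserts above guarantee that the smallest value, 16384, is a multiple
 * of MP_CACHE_SZ (512, assuming DPDK's usual RTE_MEMPOOL_CACHE_MAX_SIZE);
 * since every other value in the sequence is a power-of-two multiple of it,
 * they all are. */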
/*
 * DPDK XSTATS Counter names definition.
 */
#define XSTAT_RX_64_PACKETS           "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS    "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS   "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS   "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS  "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS  "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS           "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS    "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS   "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS   "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS  "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS  "tx_size_1523_to_max_packets"

#define XSTAT_TX_MULTICAST_PACKETS    "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS    "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS    "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS    "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS      "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS    "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS        "rx_jabber_errors"
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue;
                                  * Max (n + 32 <= 4096). */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue;
                                  * Max (n + 32 <= 4096). */

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX
                                          * queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and
                                          * not yet mapped to another queue. */

static char *cuse_dev_name = NULL;  /* Name of the vhost-cuse character
                                     * device. */
static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets. */

#define VHOST_ENQ_RETRY_NUM 8
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled. */
        .hw_ip_checksum = 0, /* IP checksum offload disabled. */
        .hw_vlan_filter = 0, /* VLAN filtering disabled. */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled. */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure. All of them must be provided, except where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct the QoS implementation on 'netdev'. The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'. The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);

    /* Destroys the data structures allocated by the implementation as part of
     * 'qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);

    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)). */
    int (*qos_get)(const struct netdev *netdev, struct smap *details);

    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function may be null if 'qos_conf' is not configurable. */
    int (*qos_set)(struct netdev *netdev, const struct smap *details);

    /* Modifies an array of rte_mbufs. The modification is specific to
     * each QoS implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a QoS modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
                   int cnt);
};
/* dpdk_qos_ops for each type of user space QoS implementation. */
static const struct dpdk_qos_ops egress_policer_ops;
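
/* Editor's sketch (illustrative only, not part of the build): the minimal
 * shape of a QoS implementation that passes every packet through unchanged.
 * The 'noop_qos_*' names are hypothetical; the callbacks mirror the contract
 * documented in 'struct dpdk_qos_ops' above, using the same forward
 * declaration pattern as 'egress_policer_ops'. */
#if 0
static const struct dpdk_qos_ops noop_qos_ops;

static int
noop_qos_construct(struct netdev *netdev, const struct smap *details OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *conf = xmalloc(sizeof *conf);

    conf->ops = &noop_qos_ops;
    dev->qos_conf = conf;       /* Must be set when returning 0. */
    return 0;
}

static void
noop_qos_destruct(struct netdev *netdev OVS_UNUSED, struct qos_conf *conf)
{
    free(conf);
}

static int
noop_qos_get(const struct netdev *netdev OVS_UNUSED,
             struct smap *details OVS_UNUSED)
{
    return 0;                   /* Nothing configurable to report. */
}

static int
noop_qos_run(struct netdev *netdev OVS_UNUSED,
             struct rte_mbuf **pkts OVS_UNUSED, int cnt)
{
    return cnt;                 /* No packets modified or dropped. */
}

static const struct dpdk_qos_ops noop_qos_ops = {
    .qos_name = "noop",
    .qos_construct = noop_qos_construct,
    .qos_destruct = noop_qos_destruct,
    .qos_get = noop_qos_get,
    .qos_set = NULL,            /* Nothing to reconfigure. */
    .qos_run = noop_qos_run,
};
#endif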
/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;  /* Protects the members and the NIC queue
                              * from concurrent access.  It is used only
                              * if the queue is shared among different
                              * pmd threads (see 'txq_needs_locking'). */
    int map;                 /* Mapping of configured vhost-user queue
                              * to the queue enabled by the guest. */
};
/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name. */
    int eth_port_id;           /* Ethernet device port id. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* Caller of netdev_send() might want to use more txqs than the device
     * has.  For physical NICs, if 'requested_n_txq' is less than or equal to
     * 'up.n_txq', 'txq_needs_locking' is false, otherwise it is true and we
     * will take a spinlock on transmission.  For vhost devices,
     * 'txq_needs_locking' is always true. */
    bool txq_needs_locking;

    /* virtio-net structure for vhost device. */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device. */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;

    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called. */
    int requested_n_rxq;
    int requested_n_txq;

    /* Socket ID detected when vHost device is brought up. */
    int requested_socket_id;

    /* Ingress Policer. */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                    NETDEV_DPDK_MBUF_ALIGN);
}
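
/* Worked example (editor's note): with mtu = 1500 and the default
 * RTE_PKTMBUF_HEADROOM of 128, dpdk_buf_size() computes
 * ROUND_UP(1526 + 128, 1024) = ROUND_UP(1654, 1024) = 2048, i.e. the
 * smallest multiple of NETDEV_DPDK_MBUF_ALIGN that holds the frame. */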
/* XXX: use dpdk malloc for entire OVS. In fact huge page should be used
 * for all other segments data, bss and text. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;
    struct rte_pktmbuf_pool_private mbp_priv;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    } else {
        VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
    }

    ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
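
/* Editor's note: for example, a call with mtu = 1500 and socket_id = 0 first
 * tries to create a mempool named "ovs_mp_1500_0_262144"; on ENOMEM the
 * trailing count halves on each retry, down to "ovs_mp_1500_0_16384". */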
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        ovs_list_remove(&dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues. */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        if (diag) {
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues. */
            n_txq--;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues. */
            n_rxq--;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    if (!rte_eal_init_ret) { /* Only after successful initialization. */
        dev = dpdk_rte_mzalloc(sizeof *dev);
        return &dev->up;
    }
    return NULL;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *dev, unsigned int n_txqs)
{
    unsigned i;

    dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        /* Initialize map for vhost devices. */
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
        rte_spinlock_init(&dev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int sid;
    int err = 0;
    uint32_t buf_size;

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->flags = 0;
    dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    buf_size = dpdk_buf_size(dev->mtu);
    dev->dpdk_mp = dpdk_mp_get(dev->socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!dev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    /* Initialise QoS configuration to NULL and qos lock to unlocked. */
    dev->qos_conf = NULL;
    rte_spinlock_init(&dev->qos_lock);

    /* Initialise rcu pointer for ingress policer to NULL. */
    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = NR_QUEUE;
    netdev->n_txq = NR_QUEUE;
    dev->requested_n_rxq = netdev->n_rxq;
    dev->requested_n_txq = netdev->n_txq;

    if (type == DPDK_DEV_ETH) {
        err = dpdk_eth_dev_init(dev);
        if (err) {
            goto unlock;
        }
        netdev_dpdk_alloc_txq(dev, netdev->n_txq);
        dev->txq_needs_locking = netdev->n_txq < dev->requested_n_txq;
    } else {
        netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);
        dev->txq_needs_locking = true;
        /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
        dev->flags = NETDEV_UP | NETDEV_PROMISC;
    }

    ovs_list_push_back(&dpdk_list, &dev->list_node);

unlock:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (No leading + or - signs are allowed.) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
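
/* Editor's usage sketch for the parser above (hypothetical call, not part
 * of this file):
 *
 *     unsigned int port_no;
 *     if (!dpdk_dev_parse_name("dpdk7", "dpdk", &port_no)) {
 *         ... port_no is now 7; "dpdkx7" or "dpdk7x" would yield ENODEV ...
 *     }
 */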
static int
vhost_construct_helper(struct netdev *netdev) OVS_REQUIRES(dpdk_mutex)
{
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    return netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
}

static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(dev->vhost_id, netdev->name, sizeof(dev->vhost_id));
    err = vhost_construct_helper(netdev);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(dev->vhost_id, sizeof(dev->vhost_id), "%s/%s",
             vhost_sock_dir, name);

    err = rte_vhost_driver_register(dev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s",
                 dev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s",
                  dev->vhost_id, name);
        err = vhost_construct_helper(netdev);
    }

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk". */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.",
                 dev->vhost_id);
    }

    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
    } else {
        fatal_signal_remove_file_to_unlink(dev->vhost_id);
    }

    ovs_mutex_lock(&dev->mutex);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int new_n_rxq;

    ovs_mutex_lock(&dev->mutex);
    new_n_rxq = MAX(smap_get_int(args, "n_rxq", dev->requested_n_rxq), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}

/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
static inline void
netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
                         struct rte_mbuf **pkts, int cnt)
{
    uint32_t nb_tx = 0;

    while (nb_tx != cnt) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, cnt - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != cnt)) {
        /* Free buffers that we couldn't transmit, one at a time (each
         * packet could come from a different mempool). */
        int i;

        for (i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }
}
static inline bool
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}

static int
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet. */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}

static int
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
                    int pkt_cnt)
{
    int cnt = 0;

    rte_spinlock_lock(&policer->policer_lock);
    cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
    rte_spinlock_unlock(&policer->policer_lock);

    return cnt;
}
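
/* Editor's note, a worked example: if 32 mbufs arrive and the srTCM meter
 * colors 20 of them green, netdev_dpdk_policer_run() keeps those 20 at the
 * front of 'pkts', frees the other 12, and returns 20; the caller then
 * processes only the first 20 entries of the array. */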
static bool
is_vhost_running(struct virtio_net *virtio_dev)
{
    return (virtio_dev != NULL && (virtio_dev->flags & VIRTIO_DEV_RUNNING));
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
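
/* Editor's note, one concrete walk through the buckets above: a 1500-byte
 * packet is not < 256, not >= 1523, but is >= 1024, so it lands in
 * rx_1024_to_1522_packets; a 64-byte packet is < 256 and <= 64, so it lands
 * in rx_1_to_64_packets. */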
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count,
                                     int dropped)
{
    int i;
    unsigned int packet_size;
    struct dp_packet *packet;

    stats->rx_packets += count;
    stats->rx_dropped += dropped;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet **packets, int *c)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    int qid = rxq->queue_id;
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t dropped = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev)
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) packets, nb_rx);
        dropped -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, packets,
                                         nb_rx, dropped);
    rte_spinlock_unlock(&dev->stats_lock);

    *c = nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) packets, nb_rx);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets. */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    *c = nb_rx;
    return 0;
}
static inline int
netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                      int cnt)
{
    struct netdev *netdev = &dev->up;
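
    /* Editor's note: 'qos_conf' is deliberately tested once without the lock
     * so the common, unconfigured case pays nothing for locking, and then
     * re-tested under 'qos_lock' because a concurrent QoS reconfiguration
     * may have changed it in between. */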
    if (dev->qos_conf != NULL) {
        rte_spinlock_lock(&dev->qos_lock);
        if (dev->qos_conf != NULL) {
            cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
        }
        rte_spinlock_unlock(&dev->qos_lock);
    }

    return cnt;
}
static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt,
                         bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int qos_pkts = cnt;
    int retries = 0;

    qid = dev->tx_q[qid % netdev->n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    /* Check whether QoS has been configured for the netdev. */
    cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
    qos_pkts -= cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent. */
            cnt -= tx_pkts;
            /* Prepare for possible retry. */
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            /* No packets sent - do not retry. */
            break;
        }
    } while (cnt && (retries++ < VHOST_ENQ_RETRY_NUM));

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&dev->stats_lock);
    cnt += qos_pkts;
    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts, cnt);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
/* Tx function.  Copies the packets into freshly allocated DPDK mbufs and
 * transmits them. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache. */
    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         size, dev->max_packet_len);
            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* Cut the size so only the truncated size is copied. */
        size -= dp_packet_get_cutlen(pkts[i]);
        dp_packet_reset_cutlen(pkts[i]);

        /* We have to do a copy for now. */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
                                 newcnt, true);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);

        dropped += qos_pkts - newcnt;
        netdev_dpdk_eth_tx_burst(dev, qid, mbufs, newcnt);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
                       int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int i;

        for (i = 0; i < cnt; i++) {
            int cutlen = dp_packet_get_cutlen(pkts[i]);

            dp_packet_set_size(pkts[i], dp_packet_size(pkts[i]) - cutlen);
            dp_packet_reset_cutlen(pkts[i]);
        }
        __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->up.n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;
        unsigned int qos_pkts = 0;
        unsigned int temp_cnt = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            size -= dp_packet_get_cutlen(pkts[i]);
            dp_packet_set_size(pkts[i], size);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    temp_cnt = i - next_tx_idx;
                    qos_pkts = temp_cnt;

                    temp_cnt = netdev_dpdk_qos_run__(dev,
                                                     (struct rte_mbuf **) pkts,
                                                     temp_cnt);
                    dropped += qos_pkts - temp_cnt;
                    netdev_dpdk_eth_tx_burst(dev, qid,
                                             (struct rte_mbuf **)
                                             &pkts[next_tx_idx],
                                             temp_cnt);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            cnt -= next_tx_idx;
            qos_pkts = cnt;

            cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf **) pkts, cnt);
            dropped += qos_pkts - cnt;
            netdev_dpdk_eth_tx_burst(dev, qid,
                                     (struct rte_mbuf **) &pkts[next_tx_idx],
                                     cnt);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err, dpdk_mtu;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;
    uint32_t buf_size;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    buf_size = dpdk_buf_size(mtu);
    dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);

    mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);

static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstats *xstats,
                           const unsigned int size)
{
    /* XXX Current implementation is a simple search through an array
     * to find hardcoded counter names.  In a future DPDK release (TBD)
     * the XSTATS API will change so that each counter is represented by
     * a unique ID instead of a string. */
    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, xstats[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, xstats[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, xstats[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, xstats[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, xstats[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, xstats[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, xstats[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, xstats[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, xstats[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, xstats[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, xstats[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, xstats[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, xstats[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, xstats[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
        }
    }
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstats *rte_xstats;
    int rte_xstats_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    rte_xstats_len = rte_eth_xstats_get(dev->port_id, NULL, 0);
    if (rte_xstats_len > 0) {
        rte_xstats = dpdk_rte_mzalloc(sizeof(*rte_xstats) * rte_xstats_len);
        memset(rte_xstats, 0xff, sizeof(*rte_xstats) * rte_xstats_len);
        rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                            rte_xstats_len);
        if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
            netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_ret);
        }
        rte_free(rte_xstats);
    } else {
        VLOG_WARN("Can't get XSTATS counters for port: %i.", dev->port_id);
    }

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but we count them here as dropped
     * instead. */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

    return 0;
}
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
{
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);

    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000 / 8;
    burst_bytes = burst * 1000 / 8;
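
    /* Editor's note, a worked example of the conversion above: a configured
     * rate of 1000 kbit/s becomes 1000 * 1000 / 8 = 125000 bytes/s for the
     * srTCM committed information rate. */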
    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->in_policer,
                                 &policer->app_srtcm_params);
    if (err) {
        VLOG_ERR("Could not create rte meter for ingress policer");
        free(policer);
        return NULL;
    }

    return policer;
}
static int
netdev_dpdk_set_policing(struct netdev *netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value. */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);

    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists. */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */
        struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(virtio_dev)) {
            netdev_change_seq_changed(&dev->up);

            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof(dev->stats));
                rte_spinlock_unlock(&dev->stats_lock);
            }
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *virtio_dev)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < virtio_dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_TXQ, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->up.n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues are always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    rte_free(enabled_queues);
}
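
/* Editor's note, a worked example of the remapping above: with
 * total_txqs = 4 and only queues 0 and 2 enabled by the guest, the enabled
 * list is {0, 2}; queues 0 and 2 keep mapping to themselves, while the
 * disabled queues 1 and 3 are assigned round-robin from the enabled list,
 * giving map = {0, 0, 2, 2}. */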
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *virtio_dev)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int newnode = 0;
    long err = 0;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            uint32_t qp_num = virtio_dev->virt_qp_nb;

            ovs_mutex_lock(&dev->mutex);
            /* Get NUMA information. */
            err = get_mempolicy(&newnode, NULL, 0, virtio_dev,
                                MPOL_F_NODE | MPOL_F_ADDR);
            if (err) {
                VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
                          virtio_dev->ifname);
                newnode = dev->socket_id;
            }

            dev->requested_socket_id = newnode;
            dev->requested_n_rxq = qp_num;
            dev->requested_n_txq = qp_num;
            netdev_request_reconfigure(&dev->up);

            ovsrcu_set(&dev->virtio_dev, virtio_dev);
            exists = true;

            /* Disable notifications. */
            set_irq_status(virtio_dev);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
                  "found", virtio_dev->ifname, virtio_dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added on numa node %i",
              virtio_dev->ifname, virtio_dev->device_fh, newnode);

    return 0;
}
/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->up.n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}
 * Remove a virtio-net device from the specific vhost port. Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
2307 destroy_device(volatile struct virtio_net *virtio_dev)
2309 struct netdev_dpdk *dev;
2310 bool exists = false;
2312 ovs_mutex_lock(&dpdk_mutex);
2313 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
2314 if (netdev_dpdk_get_virtio(dev) == virtio_dev) {
2316 ovs_mutex_lock(&dev->mutex);
2317 virtio_dev->flags &= ~VIRTIO_DEV_RUNNING;
2318 ovsrcu_set(&dev->virtio_dev, NULL);
2319 /* Clear tx/rx queue settings. */
2320 netdev_dpdk_txq_map_clear(dev);
2321 dev->requested_n_rxq = NR_QUEUE;
2322 dev->requested_n_txq = NR_QUEUE;
2323 netdev_request_reconfigure(&dev->up);
2325 netdev_change_seq_changed(&dev->up);
2326 ovs_mutex_unlock(&dev->mutex);
2332 ovs_mutex_unlock(&dpdk_mutex);
if (exists) {
2336 * Wait for other threads to quiesce after setting the 'virtio_dev'
2337 * to NULL, before returning.
2339 ovsrcu_synchronize();
2341 * As call to ovsrcu_synchronize() will end the quiescent state,
2342 * put thread back into quiescent state before returning.
2344 ovsrcu_quiesce_start();
2345 VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed",
2346 virtio_dev->ifname, virtio_dev->device_fh);
2348 VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
2349 virtio_dev->device_fh);
2354 vring_state_changed(struct virtio_net *virtio_dev, uint16_t queue_id,
2357 struct netdev_dpdk *dev;
2358 bool exists = false;
2359 int qid = queue_id / VIRTIO_QNUM;
2361 if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
2365 ovs_mutex_lock(&dpdk_mutex);
2366 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
2367 if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
2368 ovs_mutex_lock(&dev->mutex);
2370 dev->tx_q[qid].map = qid;
2372 dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
2374 netdev_dpdk_remap_txqs(dev);
2376 ovs_mutex_unlock(&dev->mutex);
2380 ovs_mutex_unlock(&dpdk_mutex);
VLOG_INFO("State of queue %d (tx_qid %d) of vhost device '%s' %"
PRIu64" changed to '%s'", queue_id, qid,
2385 virtio_dev->ifname, virtio_dev->device_fh,
2386 (enable == 1) ? "enabled" : "disabled");
2388 VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
2389 virtio_dev->device_fh);
2397 netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
2399 return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
2402 struct ingress_policer *
2403 netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
2405 return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
2409 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration is fully complete.
2412 static const struct virtio_net_device_ops virtio_net_device_ops =
2414 .new_device = new_device,
2415 .destroy_device = destroy_device,
2416 .vring_state_changed = vring_state_changed
2420 start_vhost_loop(void *dummy OVS_UNUSED)
2422 pthread_detach(pthread_self());
2423 /* Put the cuse thread into quiescent state. */
2424 ovsrcu_quiesce_start();
2425 rte_vhost_driver_session_start();
2430 dpdk_vhost_class_init(void)
2432 rte_vhost_driver_callback_register(&virtio_net_device_ops);
2433 rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
2434 | 1ULL << VIRTIO_NET_F_HOST_TSO6
2435 | 1ULL << VIRTIO_NET_F_CSUM);
2437 ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
2442 dpdk_vhost_cuse_class_init(void)
2448 dpdk_vhost_user_class_init(void)
2454 dpdk_common_init(void)
2456 unixctl_command_register("netdev-dpdk/set-admin-state",
2457 "[netdev] up|down", 1, 2,
2458 netdev_dpdk_set_admin_state, NULL);
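
/* Example invocations ("dpdk0" is a hypothetical port name; omitting the
 * port applies the state to every DPDK port):
 *   ovs-appctl netdev-dpdk/set-admin-state dpdk0 up
 *   ovs-appctl netdev-dpdk/set-admin-state down
 */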
2465 dpdk_ring_create(const char dev_name[], unsigned int port_no,
2466 unsigned int *eth_port_id)
2468 struct dpdk_ring *ivshmem;
2469 char ring_name[RTE_RING_NAMESIZE];
2472 ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
2473 if (ivshmem == NULL) {
/* XXX: Add support for multiqueue ring. */
2478 err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
2483 /* Create single producer tx ring, netdev does explicit locking. */
2484 ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2486 if (ivshmem->cring_tx == NULL) {
2491 err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
2496 /* Create single consumer rx ring, netdev does explicit locking. */
2497 ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2499 if (ivshmem->cring_rx == NULL) {
2504 err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
2505 &ivshmem->cring_tx, 1, SOCKET0);
2512 ivshmem->user_port_id = port_no;
2513 ivshmem->eth_port_id = rte_eth_dev_count() - 1;
2514 ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);
2516 *eth_port_id = ivshmem->eth_port_id;
2521 dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
2523 struct dpdk_ring *ivshmem;
2524 unsigned int port_no;
/* Names always start with "dpdkr", e.g. "dpdkr0". */
2528 err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
2533 /* look through our list to find the device */
2534 LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
2535 if (ivshmem->user_port_id == port_no) {
2536 VLOG_INFO("Found dpdk ring device %s:", dev_name);
2537 *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
2541 /* Need to create the device rings */
2542 return dpdk_ring_create(dev_name, port_no, eth_port_id);
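
/* Example: a dpdkr port is added from the database like any other netdev
 * ("br0" and "dpdkr0" are hypothetical names):
 *   ovs-vsctl add-port br0 dpdkr0 -- set Interface dpdkr0 type=dpdkr
 */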
2546 netdev_dpdk_ring_send(struct netdev *netdev, int qid,
2547 struct dp_packet **pkts, int cnt, bool may_steal)
2549 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
/* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that the
 * rss hash field is clear. This is because the same mbuf may be modified by
 * the consumer of the ring and return into the datapath without recalculating
 * the RSS hash. */
2556 for (i = 0; i < cnt; i++) {
2557 dp_packet_rss_invalidate(pkts[i]);
2560 netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
2565 netdev_dpdk_ring_construct(struct netdev *netdev)
2567 unsigned int port_no = 0;
2570 if (rte_eal_init_ret) {
2571 return rte_eal_init_ret;
2574 ovs_mutex_lock(&dpdk_mutex);
2576 err = dpdk_ring_open(netdev->name, &port_no);
2581 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
2584 ovs_mutex_unlock(&dpdk_mutex);
2591 * Initialize QoS configuration operations.
2594 qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
 * Search existing QoS operations in qos_ops and compare each set of
 * operations' qos_name to name. Return a dpdk_qos_ops pointer to a match,
 * otherwise return NULL.
2604 static const struct dpdk_qos_ops *
2605 qos_lookup_name(const char *name)
2607 const struct dpdk_qos_ops *const *opsp;
2609 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2610 const struct dpdk_qos_ops *ops = *opsp;
2611 if (!strcmp(name, ops->qos_name)) {
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf. Set the netdev's qos_conf to NULL.
2623 qos_delete_conf(struct netdev *netdev)
2625 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2627 rte_spinlock_lock(&dev->qos_lock);
2628 if (dev->qos_conf) {
2629 if (dev->qos_conf->ops->qos_destruct) {
2630 dev->qos_conf->ops->qos_destruct(netdev, dev->qos_conf);
2632 dev->qos_conf = NULL;
2634 rte_spinlock_unlock(&dev->qos_lock);
2638 netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
2641 const struct dpdk_qos_ops *const *opsp;
2643 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2644 const struct dpdk_qos_ops *ops = *opsp;
2645 if (ops->qos_construct && ops->qos_name[0] != '\0') {
2646 sset_add(types, ops->qos_name);
2653 netdev_dpdk_get_qos(const struct netdev *netdev,
2654 const char **typep, struct smap *details)
2656 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2659 ovs_mutex_lock(&dev->mutex);
2661 *typep = dev->qos_conf->ops->qos_name;
2662 error = (dev->qos_conf->ops->qos_get
2663 ? dev->qos_conf->ops->qos_get(netdev, details): 0);
2665 ovs_mutex_unlock(&dev->mutex);
2671 netdev_dpdk_set_qos(struct netdev *netdev,
2672 const char *type, const struct smap *details)
2674 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2675 const struct dpdk_qos_ops *new_ops = NULL;
/* If type is empty or unsupported, the current QoS configuration
 * for the dpdk-netdev can be destroyed. */
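/* (e.g. "ovs-vsctl clear Port dpdk0 qos", with a hypothetical port name,
 * eventually reaches this function with an empty type.) */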
2680 new_ops = qos_lookup_name(type);
2682 if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
2683 qos_delete_conf(netdev);
2687 ovs_mutex_lock(&dev->mutex);
2689 if (dev->qos_conf) {
2690 if (new_ops == dev->qos_conf->ops) {
2691 error = new_ops->qos_set ? new_ops->qos_set(netdev, details) : 0;
2693 /* Delete existing QoS configuration. */
2694 qos_delete_conf(netdev);
2695 ovs_assert(dev->qos_conf == NULL);
2697 /* Install new QoS configuration. */
2698 error = new_ops->qos_construct(netdev, details);
2699 ovs_assert((error == 0) == (dev->qos_conf != NULL));
2702 error = new_ops->qos_construct(netdev, details);
2703 ovs_assert((error == 0) == (dev->qos_conf != NULL));
2706 ovs_mutex_unlock(&dev->mutex);
2710 /* egress-policer details */
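
/* Example configuration (hypothetical port name; cir is in bytes/second and
 * cbs in bytes, as consumed by rte_meter_srtcm_config() below):
 *   ovs-vsctl set port vhost-user0 qos=@qos -- --id=@qos create qos \
 *       type=egress-policer other-config:cir=46000 other-config:cbs=2048
 */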
2712 struct egress_policer {
2713 struct qos_conf qos_conf;
2714 struct rte_meter_srtcm_params app_srtcm_params;
2715 struct rte_meter_srtcm egress_meter;
2718 static struct egress_policer *
2719 egress_policer_get__(const struct netdev *netdev)
2721 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2722 return CONTAINER_OF(dev->qos_conf, struct egress_policer, qos_conf);
2726 egress_policer_qos_construct(struct netdev *netdev,
2727 const struct smap *details)
2729 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2730 struct egress_policer *policer;
2735 rte_spinlock_lock(&dev->qos_lock);
2736 policer = xmalloc(sizeof *policer);
2737 qos_conf_init(&policer->qos_conf, &egress_policer_ops);
2738 dev->qos_conf = &policer->qos_conf;
2739 cir_s = smap_get(details, "cir");
2740 cbs_s = smap_get(details, "cbs");
2741 policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
2742 policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
2743 policer->app_srtcm_params.ebs = 0;
2744 err = rte_meter_srtcm_config(&policer->egress_meter,
2745 &policer->app_srtcm_params);
2746 rte_spinlock_unlock(&dev->qos_lock);
2752 egress_policer_qos_destruct(struct netdev *netdev OVS_UNUSED,
2753 struct qos_conf *conf)
2755 struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
2761 egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
2763 struct egress_policer *policer = egress_policer_get__(netdev);
2764 smap_add_format(details, "cir", "%llu",
2765 1ULL * policer->app_srtcm_params.cir);
2766 smap_add_format(details, "cbs", "%llu",
2767 1ULL * policer->app_srtcm_params.cbs);
2773 egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
2775 struct egress_policer *policer;
2780 policer = egress_policer_get__(netdev);
2781 cir_s = smap_get(details, "cir");
2782 cbs_s = smap_get(details, "cbs");
2783 policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
2784 policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
2785 policer->app_srtcm_params.ebs = 0;
2786 err = rte_meter_srtcm_config(&policer->egress_meter,
2787 &policer->app_srtcm_params);
2793 egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts, int pkt_cnt)
2796 struct egress_policer *policer = egress_policer_get__(netdev);
2798 cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);
2803 static const struct dpdk_qos_ops egress_policer_ops = {
2804 "egress-policer", /* qos_name */
2805 egress_policer_qos_construct,
2806 egress_policer_qos_destruct,
2807 egress_policer_qos_get,
2808 egress_policer_qos_set,
2813 netdev_dpdk_reconfigure(struct netdev *netdev)
2815 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2818 ovs_mutex_lock(&dpdk_mutex);
2819 ovs_mutex_lock(&dev->mutex);
2821 if (netdev->n_txq == dev->requested_n_txq
2822 && netdev->n_rxq == dev->requested_n_rxq) {
2823 /* Reconfiguration is unnecessary */
2828 rte_eth_dev_stop(dev->port_id);
2830 netdev->n_txq = dev->requested_n_txq;
2831 netdev->n_rxq = dev->requested_n_rxq;
2833 rte_free(dev->tx_q);
2834 err = dpdk_eth_dev_init(dev);
2835 netdev_dpdk_alloc_txq(dev, netdev->n_txq);
2837 dev->txq_needs_locking = netdev->n_txq < dev->requested_n_txq;
2841 ovs_mutex_unlock(&dev->mutex);
2842 ovs_mutex_unlock(&dpdk_mutex);
2848 netdev_dpdk_vhost_user_reconfigure(struct netdev *netdev)
2850 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2851 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
2854 ovs_mutex_lock(&dpdk_mutex);
2855 ovs_mutex_lock(&dev->mutex);
2857 netdev->n_txq = dev->requested_n_txq;
2858 netdev->n_rxq = dev->requested_n_rxq;
2860 /* Enable TX queue 0 by default if it wasn't disabled. */
2861 if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
2862 dev->tx_q[0].map = 0;
2865 netdev_dpdk_remap_txqs(dev);
2867 if (dev->requested_socket_id != dev->socket_id) {
2868 dev->socket_id = dev->requested_socket_id;
2869 /* Change mempool to new NUMA Node */
2870 dpdk_mp_put(dev->dpdk_mp);
2871 dev->dpdk_mp = dpdk_mp_get(dev->socket_id, dev->mtu);
2872 if (!dev->dpdk_mp) {
2878 virtio_dev->flags |= VIRTIO_DEV_RUNNING;
2881 ovs_mutex_unlock(&dev->mutex);
2882 ovs_mutex_unlock(&dpdk_mutex);
2888 netdev_dpdk_vhost_cuse_reconfigure(struct netdev *netdev)
2890 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2892 ovs_mutex_lock(&dpdk_mutex);
2893 ovs_mutex_lock(&dev->mutex);
2895 netdev->n_txq = dev->requested_n_txq;
2898 ovs_mutex_unlock(&dev->mutex);
2899 ovs_mutex_unlock(&dpdk_mutex);
2904 #define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, \
2905 SET_CONFIG, SET_TX_MULTIQ, SEND, \
2906 GET_CARRIER, GET_STATS, \
2907 GET_FEATURES, GET_STATUS, \
2908 RECONFIGURE, RXQ_RECV) \
2911 true, /* is_pmd */ \
2913 NULL, /* netdev_dpdk_run */ \
2914 NULL, /* netdev_dpdk_wait */ \
2916 netdev_dpdk_alloc, \
2919 netdev_dpdk_dealloc, \
2920 netdev_dpdk_get_config, \
2922 NULL, /* get_tunnel_config */ \
2923 NULL, /* build header */ \
2924 NULL, /* push header */ \
2925 NULL, /* pop header */ \
2926 netdev_dpdk_get_numa_id, /* get_numa_id */ \
2930 NULL, /* send_wait */ \
2932 netdev_dpdk_set_etheraddr, \
2933 netdev_dpdk_get_etheraddr, \
2934 netdev_dpdk_get_mtu, \
2935 netdev_dpdk_set_mtu, \
2936 netdev_dpdk_get_ifindex, \
2938 netdev_dpdk_get_carrier_resets, \
2939 netdev_dpdk_set_miimon, \
2942 NULL, /* set_advertisements */ \
2944 netdev_dpdk_set_policing, \
2945 netdev_dpdk_get_qos_types, \
2946 NULL, /* get_qos_capabilities */ \
2947 netdev_dpdk_get_qos, \
2948 netdev_dpdk_set_qos, \
2949 NULL, /* get_queue */ \
2950 NULL, /* set_queue */ \
2951 NULL, /* delete_queue */ \
2952 NULL, /* get_queue_stats */ \
2953 NULL, /* queue_dump_start */ \
2954 NULL, /* queue_dump_next */ \
2955 NULL, /* queue_dump_done */ \
2956 NULL, /* dump_queue_stats */ \
2958 NULL, /* set_in4 */ \
2959 NULL, /* get_addr_list */ \
2960 NULL, /* add_router */ \
2961 NULL, /* get_next_hop */ \
2963 NULL, /* arp_lookup */ \
2965 netdev_dpdk_update_flags, \
2968 netdev_dpdk_rxq_alloc, \
2969 netdev_dpdk_rxq_construct, \
2970 netdev_dpdk_rxq_destruct, \
2971 netdev_dpdk_rxq_dealloc, \
2973 NULL, /* rx_wait */ \
2974 NULL, /* rxq_drain */ \
2978 process_vhost_flags(char *flag, char *default_val, int size,
2979 const struct smap *ovs_other_config,
2985 val = smap_get(ovs_other_config, flag);
/* Depending on which version of vhost is in use, process the vhost-specific
 * flag if it is provided, otherwise resort to the default value.
2990 if (val && (strlen(val) <= size)) {
2992 *new_val = xstrdup(val);
2993 VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
2995 VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
2996 *new_val = default_val;
3003 grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
3005 return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
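
/* Note: grow_argv() returns the possibly-moved array, like xrealloc();
 * callers must use the returned pointer, as dpdk_option_extend() does. */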
3009 dpdk_option_extend(char ***argv, int argc, const char *option,
3012 char **newargv = grow_argv(argv, argc, 2);
3014 newargv[argc] = xstrdup(option);
3015 newargv[argc+1] = xstrdup(value);
3019 move_argv(char ***argv, size_t cur_size, char **src_argv, size_t src_argc)
3021 char **newargv = grow_argv(argv, cur_size, src_argc);
3022 while (src_argc--) {
3023 newargv[cur_size+src_argc] = src_argv[src_argc];
3024 src_argv[src_argc] = NULL;
3030 extra_dpdk_args(const char *ovs_extra_config, char ***argv, int argc)
3033 char *release_tok = xstrdup(ovs_extra_config);
3034 char *tok = release_tok, *endptr = NULL;
3036 for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
3037 tok = strtok_r(NULL, " ", &endptr)) {
3038 char **newarg = grow_argv(argv, ret, 1);
3040 newarg[ret++] = xstrdup(tok);
3047 argv_contains(char **argv_haystack, const size_t argc_haystack,
3050 for (size_t i = 0; i < argc_haystack; ++i) {
3051 if (!strcmp(argv_haystack[i], needle))
3058 construct_dpdk_options(const struct smap *ovs_other_config,
3059 char ***argv, const int initial_size,
3060 char **extra_args, const size_t extra_argc)
3062 struct dpdk_options_map {
3063 const char *ovs_configuration;
3064 const char *dpdk_option;
3065 bool default_enabled;
3066 const char *default_value;
3068 {"dpdk-lcore-mask", "-c", false, NULL},
3069 {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
3072 int i, ret = initial_size;
/* First, construct from the flat options (i.e. non-mutually-exclusive). */
3075 for (i = 0; i < ARRAY_SIZE(opts); ++i) {
3076 const char *lookup = smap_get(ovs_other_config,
3077 opts[i].ovs_configuration);
3078 if (!lookup && opts[i].default_enabled) {
3079 lookup = opts[i].default_value;
3083 if (!argv_contains(extra_args, extra_argc, opts[i].dpdk_option)) {
3084 dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
3087 VLOG_WARN("Ignoring database defined option '%s' due to "
3088 "dpdk_extras config", opts[i].dpdk_option);
3096 #define MAX_DPDK_EXCL_OPTS 10
3099 construct_dpdk_mutex_options(const struct smap *ovs_other_config,
3100 char ***argv, const int initial_size,
3101 char **extra_args, const size_t extra_argc)
3103 struct dpdk_exclusive_options_map {
3104 const char *category;
3105 const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
3106 const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
3107 const char *default_value;
3111 {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
3112 {"-m", "--socket-mem", NULL,},
3117 int i, ret = initial_size;
3118 for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
3119 int found_opts = 0, scan, found_pos = -1;
3120 const char *found_value;
3121 struct dpdk_exclusive_options_map *popt = &excl_opts[i];
3123 for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
3124 && popt->ovs_dpdk_options[scan]; ++scan) {
3125 const char *lookup = smap_get(ovs_other_config,
3126 popt->ovs_dpdk_options[scan]);
3127 if (lookup && strlen(lookup)) {
3130 found_value = lookup;
3135 if (popt->default_option) {
3136 found_pos = popt->default_option;
3137 found_value = popt->default_value;
3143 if (found_opts > 1) {
3144 VLOG_ERR("Multiple defined options for %s. Please check your"
3145 " database settings and reconfigure if necessary.",
3149 if (!argv_contains(extra_args, extra_argc,
3150 popt->eal_dpdk_options[found_pos])) {
3151 dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
3155 VLOG_WARN("Ignoring database defined option '%s' due to "
3156 "dpdk_extras config", popt->eal_dpdk_options[found_pos]);
3164 get_dpdk_args(const struct smap *ovs_other_config, char ***argv,
3167 const char *extra_configuration;
3168 char **extra_args = NULL;
3170 size_t extra_argc = 0;
3172 extra_configuration = smap_get(ovs_other_config, "dpdk-extra");
3173 if (extra_configuration) {
3174 extra_argc = extra_dpdk_args(extra_configuration, &extra_args, 0);
3177 i = construct_dpdk_options(ovs_other_config, argv, argc, extra_args,
3179 i = construct_dpdk_mutex_options(ovs_other_config, argv, i, extra_args,
3182 if (extra_configuration) {
3183 *argv = move_argv(argv, i, extra_args, extra_argc);
3186 return i + extra_argc;
3189 static char **dpdk_argv;
3190 static int dpdk_argc;
3193 deferred_argv_release(void)
3196 for (result = 0; result < dpdk_argc; ++result) {
3197 free(dpdk_argv[result]);
3204 dpdk_init__(const struct smap *ovs_other_config)
3209 bool auto_determine = true;
3213 char *sock_dir_subcomponent;
3216 if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
VLOG_INFO("DPDK Disabled - to change this requires a restart.");
3221 VLOG_INFO("DPDK Enabled, initializing");
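
/* The relevant other_config keys are consumed below; e.g. (illustrative
 * values only):
 *   ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true
 *   ovs-vsctl set Open_vSwitch . other_config:dpdk-lcore-mask=0x2
 *   ovs-vsctl set Open_vSwitch . other_config:dpdk-socket-mem=1024,0
 *   ovs-vsctl set Open_vSwitch . other_config:vhost-sock-dir=myrundir
 */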
3224 if (process_vhost_flags("cuse-dev-name", xstrdup("vhost-net"),
3225 PATH_MAX, ovs_other_config, &cuse_dev_name)) {
3227 if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
3228 NAME_MAX, ovs_other_config,
3229 &sock_dir_subcomponent)) {
3231 if (!strstr(sock_dir_subcomponent, "..")) {
3232 vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
3233 sock_dir_subcomponent);
3235 err = stat(vhost_sock_dir, &s);
3237 VLOG_ERR("vhost-user sock directory '%s' does not exist.",
3241 vhost_sock_dir = xstrdup(ovs_rundir());
VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid "
"characters '..' - using %s instead.",
3244 ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
3246 free(sock_dir_subcomponent);
3248 vhost_sock_dir = sock_dir_subcomponent;
3252 argv = grow_argv(&argv, 0, 1);
3254 argv[0] = xstrdup(ovs_get_program_name());
3255 argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);
3257 while (argc_tmp != argc) {
3258 if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
3259 auto_determine = false;
3267 * NOTE: This is an unsophisticated mechanism for determining the DPDK
3268 * lcore for the DPDK Master.
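 *
 * e.g. if the first CPU in the main thread's affinity set is CPU 3, the
 * code below passes "-c 0x00000008" to the EAL.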
3270 if (auto_determine) {
3272 /* Get the main thread affinity */
3274 err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
3277 for (i = 0; i < CPU_SETSIZE; i++) {
3278 if (CPU_ISSET(i, &cpuset)) {
3279 argv = grow_argv(&argv, argc, 2);
3280 argv[argc++] = xstrdup("-c");
3281 argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
3286 VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
/* User did not set dpdk-lcore-mask and unable to get current
 * thread affinity - default to core 0x1. */
3289 argv = grow_argv(&argv, argc, 2);
3290 argv[argc++] = xstrdup("-c");
3291 argv[argc++] = xasprintf("0x%X", 1);
3295 argv = grow_argv(&argv, argc, 1);
3300 if (VLOG_IS_INFO_ENABLED()) {
3304 ds_put_cstr(&eal_args, "EAL ARGS:");
3305 for (opt = 0; opt < argc; ++opt) {
3306 ds_put_cstr(&eal_args, " ");
3307 ds_put_cstr(&eal_args, argv[opt]);
3309 VLOG_INFO("%s", ds_cstr_ro(&eal_args));
3310 ds_destroy(&eal_args);
3313 /* Make sure things are initialized ... */
3314 result = rte_eal_init(argc, argv);
3316 ovs_abort(result, "Cannot init EAL");
/* Set the main thread affinity back to its pre-rte_eal_init() value. */
3320 if (auto_determine && !err) {
3321 err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
3324 VLOG_ERR("Thread setaffinity error %d", err);
3331 atexit(deferred_argv_release);
3333 rte_memzone_dump(stdout);
3334 rte_eal_init_ret = 0;
3336 /* We are called from the main thread here */
3337 RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
3339 ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
3342 /* Register CUSE device to handle IOCTLs.
3343 * Unless otherwise specified, cuse_dev_name is set to vhost-net.
3345 err = rte_vhost_driver_register(cuse_dev_name);
3348 VLOG_ERR("CUSE device setup failure.");
3353 dpdk_vhost_class_init();
3355 /* Finally, register the dpdk classes */
3356 netdev_dpdk_register();
3360 dpdk_init(const struct smap *ovs_other_config)
3362 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
3364 if (ovs_other_config && ovsthread_once_start(&once)) {
3365 dpdk_init__(ovs_other_config);
3366 ovsthread_once_done(&once);
3370 static const struct netdev_class dpdk_class =
3374 netdev_dpdk_construct,
3375 netdev_dpdk_destruct,
3376 netdev_dpdk_set_config,
3377 netdev_dpdk_set_tx_multiq,
3378 netdev_dpdk_eth_send,
3379 netdev_dpdk_get_carrier,
3380 netdev_dpdk_get_stats,
3381 netdev_dpdk_get_features,
3382 netdev_dpdk_get_status,
3383 netdev_dpdk_reconfigure,
3384 netdev_dpdk_rxq_recv);
3386 static const struct netdev_class dpdk_ring_class =
3390 netdev_dpdk_ring_construct,
3391 netdev_dpdk_destruct,
3392 netdev_dpdk_set_config,
3393 netdev_dpdk_set_tx_multiq,
3394 netdev_dpdk_ring_send,
3395 netdev_dpdk_get_carrier,
3396 netdev_dpdk_get_stats,
3397 netdev_dpdk_get_features,
3398 netdev_dpdk_get_status,
3399 netdev_dpdk_reconfigure,
3400 netdev_dpdk_rxq_recv);
3402 static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
3405 dpdk_vhost_cuse_class_init,
3406 netdev_dpdk_vhost_cuse_construct,
3407 netdev_dpdk_vhost_destruct,
3410 netdev_dpdk_vhost_send,
3411 netdev_dpdk_vhost_get_carrier,
3412 netdev_dpdk_vhost_get_stats,
3415 netdev_dpdk_vhost_cuse_reconfigure,
3416 netdev_dpdk_vhost_rxq_recv);
3418 static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
3421 dpdk_vhost_user_class_init,
3422 netdev_dpdk_vhost_user_construct,
3423 netdev_dpdk_vhost_destruct,
3426 netdev_dpdk_vhost_send,
3427 netdev_dpdk_vhost_get_carrier,
3428 netdev_dpdk_vhost_get_stats,
3431 netdev_dpdk_vhost_user_reconfigure,
3432 netdev_dpdk_vhost_rxq_recv);
3435 netdev_dpdk_register(void)
3438 netdev_register_provider(&dpdk_class);
3439 netdev_register_provider(&dpdk_ring_class);
3441 netdev_register_provider(&dpdk_vhost_cuse_class);
3443 netdev_register_provider(&dpdk_vhost_user_class);
3448 dpdk_set_lcore_id(unsigned cpu)
/* NON_PMD_CORE_ID is reserved for use by non-PMD threads. */
3451 ovs_assert(cpu != NON_PMD_CORE_ID);
3452 RTE_PER_LCORE(_lcore_id) = cpu;
3456 dpdk_thread_is_pmd(void)
3458 return rte_lcore_id() != NON_PMD_CORE_ID;