/*
- * Copyright (c) 2014 Nicira, Inc.
+ * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <config.h>
-#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <sched.h>
#include <unistd.h>
#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include "dirs.h"
#include "dp-packet.h"
#include "dpif-netdev.h"
+#include "fatal-signal.h"
#include "list.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
/*
* need to reserve tons of extra space in the mbufs so we can align the
* DMA addresses to 4KB.
+ * The mbuf size is bounded below so that, for a standard Ethernet MTU,
+ * received frames never scatter across multiple mbufs, which would hurt
+ * performance.
*/
-
#define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
-#define MBUF_SIZE(mtu) (MTU_TO_MAX_LEN(mtu) + (512) + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_SIZE_MTU(mtu) (MTU_TO_MAX_LEN(mtu) \
+ + sizeof(struct dp_packet) \
+ + RTE_PKTMBUF_HEADROOM)
+#define MBUF_SIZE_DRIVER (2048 \
+ + sizeof(struct rte_mbuf) \
+ + RTE_PKTMBUF_HEADROOM)
+#define MBUF_SIZE(mtu) MAX(MBUF_SIZE_MTU(mtu), MBUF_SIZE_DRIVER)
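
As a sanity check on the sizing macros above, here is a small standalone sketch (not part of the patch) that mirrors the arithmetic. ETHER_HDR_LEN, ETHER_CRC_LEN and RTE_PKTMBUF_HEADROOM use their usual DPDK defaults, and the struct sizes are stand-in values rather than real sizeof results:

    #include <stdio.h>

    /* Assumed defaults; the real values come from DPDK/OVS headers. */
    #define ETHER_HDR_LEN 14
    #define ETHER_CRC_LEN 4
    #define RTE_PKTMBUF_HEADROOM 128
    #define SIZEOF_DP_PACKET 128   /* stand-in for sizeof(struct dp_packet) */
    #define SIZEOF_RTE_MBUF 128    /* stand-in for sizeof(struct rte_mbuf) */
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    #define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
    #define MBUF_SIZE_MTU(mtu) (MTU_TO_MAX_LEN(mtu) + SIZEOF_DP_PACKET \
                                + RTE_PKTMBUF_HEADROOM)
    #define MBUF_SIZE_DRIVER (2048 + SIZEOF_RTE_MBUF + RTE_PKTMBUF_HEADROOM)
    #define MBUF_SIZE(mtu) MAX(MBUF_SIZE_MTU(mtu), MBUF_SIZE_DRIVER)

    int
    main(void)
    {
        /* For a standard 1500-byte MTU the 2048-byte driver floor wins,
         * keeping receives in a single mbuf; for jumbo MTUs the
         * MTU-derived size takes over. */
        printf("MBUF_SIZE(1500) = %d\n", MBUF_SIZE(1500));
        printf("MBUF_SIZE(9000) = %d\n", MBUF_SIZE(9000));
        return 0;
    }
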
/* Max and min number of packets in the mempool. OVS tries to allocate a
* mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
#define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue, Max (n+32<=4096)*/
#define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n+32<=4096)*/
-/* Character device cuse_dev_name. */
-static char *cuse_dev_name = NULL;
+static char *cuse_dev_name = NULL;  /* Name of vhost-cuse character device. */
+static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets. */
/*
- * Maximum amount of time in micro seconds to try and enqueue to vhost.
+ * Maximum amount of time in microseconds to try and enqueue to vhost.
enum dpdk_dev_type {
DPDK_DEV_ETH = 0,
- DPDK_DEV_VHOST = 1
+ DPDK_DEV_VHOST = 1,
};
static int rte_eal_init_ret = ENODEV;
-bool flush_tx; /* Set to true to flush queue everytime */
+bool flush_tx; /* Set to true to flush queue every time */
/* pkts are queued. */
int count;
+ rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
+ * from concurrent access. It is used only
+ * if the queue is shared among different
+ * pmd threads (see 'txq_needs_locking'). */
uint64_t tsc;
struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* Protects stats */
rte_spinlock_t stats_lock;
- uint8_t hwaddr[ETH_ADDR_LEN];
+ struct eth_addr hwaddr;
enum netdev_flags flags;
struct rte_eth_link link;
int link_reset_cnt;
+ /* The user might request more txqs than the NIC has. We remap those
+ * ('up.n_txq') onto these ('real_n_txq').
+ * If the numbers match, 'txq_needs_locking' is false; otherwise it is
+ * true and we take a spinlock on transmission. */
+ int real_n_txq;
+ int real_n_rxq;
+ bool txq_needs_locking;
+
/* virtio-net structure for vhost device */
OVSRCU_TYPE(struct virtio_net *) virtio_dev;
+ /* Identifier used to distinguish vhost devices from each other */
+ char vhost_id[PATH_MAX];
+
/* In dpdk_list. */
struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
- rte_spinlock_t txq_lock;
};
struct netdev_rxq_dpdk {
int port_id;
};
-static bool thread_is_pmd(void);
+static bool dpdk_thread_is_pmd(void);
static int netdev_dpdk_construct(struct netdev *);
return NULL;
}
+static int
+dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
+{
+ int diag = 0;
+ int i;
+
+ /* A device may report more queues than it makes available (this has
+ * been observed for Intel xl710, which reserves some of them for
+ * SR-IOV): rte_eth_*_queue_setup will fail if a queue is not
+ * available. When this happens we can retry the configuration
+ * and request fewer queues. */
+ while (n_rxq && n_txq) {
+ if (diag) {
+ VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
+ }
+
+ diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
+ if (diag) {
+ break;
+ }
+
+ for (i = 0; i < n_txq; i++) {
+ diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
+ dev->socket_id, NULL);
+ if (diag) {
+ VLOG_INFO("Interface %s txq(%d) setup error: %s",
+ dev->up.name, i, rte_strerror(-diag));
+ break;
+ }
+ }
+
+ if (i != n_txq) {
+ /* Retry with fewer tx queues */
+ n_txq = i;
+ continue;
+ }
+
+ for (i = 0; i < n_rxq; i++) {
+ diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
+ dev->socket_id, NULL,
+ dev->dpdk_mp->mp);
+ if (diag) {
+ VLOG_INFO("Interface %s rxq(%d) setup error: %s",
+ dev->up.name, i, rte_strerror(-diag));
+ break;
+ }
+ }
+
+ if (i != n_rxq) {
+ /* Retry with fewer rx queues */
+ n_rxq = i;
+ continue;
+ }
+
+ dev->up.n_rxq = n_rxq;
+ dev->real_n_txq = n_txq;
+
+ return 0;
+ }
+
+ return diag;
+}
+
+
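
The loop above shrinks the queue request to whatever the hardware actually granted and retries. A standalone sketch of the same control flow (not part of the patch; fake_queue_setup() is an invented stand-in for rte_eth_tx/rx_queue_setup(), and the 4-queue limit is made up):

    #include <stdio.h>

    /* Invented stand-in: pretend only 4 queues can actually be set up. */
    static int
    fake_queue_setup(int queue_idx)
    {
        return queue_idx < 4 ? 0 : -1;
    }

    int
    main(void)
    {
        int n_q = 8;    /* requested queue count */
        int diag = 0;
        int i;

        while (n_q) {
            if (diag) {
                printf("Retrying setup with %d queues\n", n_q);
            }
            for (i = 0; i < n_q; i++) {
                diag = fake_queue_setup(i);
                if (diag) {
                    break;
                }
            }
            if (i != n_q) {
                n_q = i;    /* retry with only the queues that succeeded */
                continue;
            }
            printf("Configured %d queues\n", n_q);
            return 0;
        }
        return 1;       /* no queues at all */
    }
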
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_eth_dev_info info;
struct ether_addr eth_addr;
int diag;
- int i;
+ int n_rxq, n_txq;
if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
return ENODEV;
}
- diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
- &port_conf);
- if (diag) {
- VLOG_ERR("eth dev config error %d",diag);
- return -diag;
- }
+ rte_eth_dev_info_get(dev->port_id, &info);
- for (i = 0; i < dev->up.n_txq; i++) {
- diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
- dev->socket_id, NULL);
- if (diag) {
- VLOG_ERR("eth dev tx queue setup error %d",diag);
- return -diag;
- }
- }
+ n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
+ n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
- for (i = 0; i < dev->up.n_rxq; i++) {
- diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
- dev->socket_id,
- NULL, dev->dpdk_mp->mp);
- if (diag) {
- VLOG_ERR("eth dev rx queue setup error %d",diag);
- return -diag;
- }
+ diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
+ if (diag) {
+ VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
+ dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
+ return -diag;
}
diag = rte_eth_dev_start(dev->port_id);
if (diag) {
- VLOG_ERR("eth dev start error %d",diag);
+ VLOG_ERR("Interface %s start error: %s", dev->up.name,
+ rte_strerror(-diag));
return -diag;
}
memset(&eth_addr, 0x0, sizeof(eth_addr));
rte_eth_macaddr_get(dev->port_id, &eth_addr);
VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
- dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));
+ dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));
- memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
+ memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
rte_eth_link_get_nowait(dev->port_id, &dev->link);
mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
unsigned i;
netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
- /* Each index is considered as a cpu core id, since there should
- * be one tx queue for each cpu core. */
for (i = 0; i < n_txqs; i++) {
int numa_id = ovs_numa_get_numa_id(i);
- /* If the corresponding core is not on the same numa node
- * as 'netdev', flags the 'flush_tx'. */
- netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
+ if (!netdev->txq_needs_locking) {
+ /* Each index is considered a CPU core id, since there should
+ * be one tx queue for each CPU core. If the corresponding core
+ * is not on the same NUMA node as 'netdev', set 'flush_tx'. */
+ netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
+ } else {
+ /* Queues are shared among CPUs: always flush. */
+ netdev->tx_q[i].flush_tx = true;
+ }
+ rte_spinlock_init(&netdev->tx_q[i].tx_lock);
}
}
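
When 'txq_needs_locking' is set, several pmd threads can land on the same hardware queue, so each send remaps its qid with 'qid % real_n_txq' and takes that queue's lock. A standalone sketch of the remapping (pthread mutexes stand in for rte_spinlock_t; the queue and thread counts are made up):

    #include <pthread.h>
    #include <stdio.h>

    #define REAL_N_TXQ 2           /* assumed usable hardware tx queues */
    #define N_SENDERS  5           /* assumed pmd threads, more than queues */

    static pthread_mutex_t tx_lock[REAL_N_TXQ] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };
    static int tx_count[REAL_N_TXQ];

    static void *
    sender(void *arg)
    {
        int qid = (int) (long) arg % REAL_N_TXQ;  /* same remap as the patch */

        pthread_mutex_lock(&tx_lock[qid]);
        tx_count[qid]++;           /* stands in for the actual NIC transmit */
        pthread_mutex_unlock(&tx_lock[qid]);
        return NULL;
    }

    int
    main(void)
    {
        pthread_t threads[N_SENDERS];
        int i;

        for (i = 0; i < N_SENDERS; i++) {
            pthread_create(&threads[i], NULL, sender, (void *) (long) i);
        }
        for (i = 0; i < N_SENDERS; i++) {
            pthread_join(threads[i], NULL);
        }
        for (i = 0; i < REAL_N_TXQ; i++) {
            printf("tx queue %d served %d senders\n", i, tx_count[i]);
        }
        return 0;
    }
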
netdev->flags = 0;
netdev->mtu = ETHER_MTU;
netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
- rte_spinlock_init(&netdev->txq_lock);
netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
if (!netdev->dpdk_mp) {
netdev_->n_txq = NR_QUEUE;
netdev_->n_rxq = NR_QUEUE;
+ netdev_->requested_n_rxq = NR_QUEUE;
+ netdev->real_n_txq = NR_QUEUE;
if (type == DPDK_DEV_ETH) {
netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
}
static int
-netdev_dpdk_vhost_construct(struct netdev *netdev_)
+vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
{
- int err;
-
if (rte_eal_init_ret) {
return rte_eal_init_ret;
}
+ return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
+}
+
+static int
+netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
+{
+ struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
+ int err;
+
ovs_mutex_lock(&dpdk_mutex);
- err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
+ strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
+ err = vhost_construct_helper(netdev_);
ovs_mutex_unlock(&dpdk_mutex);
+ return err;
+}
+
+static int
+netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
+{
+ struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
+ int err;
+ ovs_mutex_lock(&dpdk_mutex);
+ /* Take the name of the vhost-user port and append it to the location where
+ * the socket is to be created, then register the socket.
+ */
+ snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
+ vhost_sock_dir, netdev_->name);
+ err = rte_vhost_driver_register(netdev->vhost_id);
+ if (err) {
+ VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
+ netdev->vhost_id);
+ } else {
+ fatal_signal_add_file_to_unlink(netdev->vhost_id);
+ VLOG_INFO("Socket %s created for vhost-user port %s\n",
+ netdev->vhost_id, netdev_->name);
+ err = vhost_construct_helper(netdev_);
+ }
+
+ ovs_mutex_unlock(&dpdk_mutex);
return err;
}
return;
}
+ if (rte_vhost_driver_unregister(dev->vhost_id)) {
+ VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
+ } else {
+ fatal_signal_remove_file_to_unlink(dev->vhost_id);
+ }
+
ovs_mutex_lock(&dpdk_mutex);
list_remove(&dev->list_node);
dpdk_mp_put(dev->dpdk_mp);
}
static int
-netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
+netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
- struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
+ struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
ovs_mutex_lock(&dev->mutex);
- smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
- smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
+ smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
+ smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
+ smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
+ smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
+ ovs_mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static int
+netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
+{
+ struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+
+ ovs_mutex_lock(&dev->mutex);
+ netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
+ netdev->requested_n_rxq), 1);
+ netdev_change_seq_changed(netdev);
ovs_mutex_unlock(&dev->mutex);
return 0;
{
struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
int err = 0;
+ int old_rxq, old_txq;
if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
return err;
rte_eth_dev_stop(netdev->port_id);
+ old_txq = netdev->up.n_txq;
+ old_rxq = netdev->up.n_rxq;
netdev->up.n_txq = n_txq;
netdev->up.n_rxq = n_rxq;
rte_free(netdev->tx_q);
- netdev_dpdk_alloc_txq(netdev, n_txq);
err = dpdk_eth_dev_init(netdev);
+ netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
+ if (err) {
+ /* If there has been an error, it means that the requested queues
+ * have not been created. Restore the old numbers. */
+ netdev->up.n_txq = old_txq;
+ netdev->up.n_rxq = old_rxq;
+ }
+
+ netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;
+
+ ovs_mutex_unlock(&netdev->mutex);
+ ovs_mutex_unlock(&dpdk_mutex);
+
+ return err;
+}
+
+static int
+netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev_, unsigned int n_txq,
+ unsigned int n_rxq)
+{
+ struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
+ int err = 0;
+
+ if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
+ return err;
+ }
+
+ ovs_mutex_lock(&dpdk_mutex);
+ ovs_mutex_lock(&netdev->mutex);
+
+ netdev->up.n_txq = n_txq;
+ netdev->real_n_txq = 1;
+ netdev->up.n_rxq = 1;
+ netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;
ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_unlock(&dpdk_mutex);
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
- unsigned int n_rxq)
+ unsigned int n_rxq)
{
struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
int err = 0;
ovs_mutex_lock(&dpdk_mutex);
ovs_mutex_lock(&netdev->mutex);
+ rte_free(netdev->tx_q);
netdev->up.n_txq = n_txq;
netdev->up.n_rxq = n_rxq;
+ netdev_dpdk_alloc_txq(netdev, netdev->up.n_txq);
ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_unlock(&dpdk_mutex);
return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
+static inline void
+netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
+ struct dp_packet **packets, int count)
+{
+ int i;
+ struct dp_packet *packet;
+
+ stats->rx_packets += count;
+ for (i = 0; i < count; i++) {
+ packet = packets[i];
+
+ if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
+ /* This only protects the following multicast counting from
+ * reading too-short packets; it does not stop such packets
+ * from being processed further. */
+ stats->rx_errors++;
+ stats->rx_length_errors++;
+ continue;
+ }
+
+ struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
+ if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
+ stats->multicast++;
+ }
+
+ stats->rx_bytes += dp_packet_size(packet);
+ }
+}
+
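
The multicast accounting above hinges on the Ethernet I/G bit: a destination is multicast (or broadcast) exactly when the least-significant bit of the first address octet is set, which is what eth_addr_is_multicast() is assumed to test. A minimal standalone illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed equivalent of OVS's eth_addr_is_multicast(): test the I/G
     * bit, the low-order bit of the first address octet. */
    static bool
    mac_is_multicast(const uint8_t mac[6])
    {
        return mac[0] & 1;
    }

    int
    main(void)
    {
        const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        const uint8_t ucast[6] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };

        printf("broadcast is multicast: %d\n", mac_is_multicast(bcast));
        printf("unicast is multicast:   %d\n", mac_is_multicast(ucast));
        return 0;
    }
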
/*
* The receive path for the vhost port is the TX path out from guest.
*/
struct netdev *netdev = rx->up.netdev;
struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
- int qid = 1;
+ int qid = rxq_->queue_id;
uint16_t nb_rx = 0;
if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
return EAGAIN;
}
- nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
+ if (rxq_->queue_id >= vhost_dev->real_n_rxq) {
+ return EOPNOTSUPP;
+ }
+
+ nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
vhost_dev->dpdk_mp->mp,
(struct rte_mbuf **)packets,
NETDEV_MAX_BURST);
}
rte_spinlock_lock(&vhost_dev->stats_lock);
- vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
+ netdev_dpdk_vhost_update_rx_counters(&vhost_dev->stats, packets, nb_rx);
rte_spinlock_unlock(&vhost_dev->stats_lock);
*c = (int) nb_rx;
int nb_rx;
/* There is only one tx queue for this core. Do not flush other
- * queueus. */
- if (rxq_->queue_id == rte_lcore_id()) {
+ * queues.
+ * Do not flush a tx queue that is shared among CPUs,
+ * since it is always flushed. */
+ if (rxq_->queue_id == rte_lcore_id() &&
+ OVS_LIKELY(!dev->txq_needs_locking)) {
dpdk_queue_flush(dev, rxq_->queue_id);
}
return 0;
}
+static inline void
+netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
+ struct dp_packet **packets,
+ int attempted,
+ int dropped)
+{
+ int i;
+ int sent = attempted - dropped;
+
+ stats->tx_packets += sent;
+ stats->tx_dropped += dropped;
+
+ for (i = 0; i < sent; i++) {
+ stats->tx_bytes += dp_packet_size(packets[i]);
+ }
+}
+
static void
-__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
- int cnt, bool may_steal)
+__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
+ struct dp_packet **pkts, int cnt,
+ bool may_steal)
{
struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
goto out;
}
- /* There is vHost TX single queue, So we need to lock it for TX. */
- rte_spinlock_lock(&vhost_dev->txq_lock);
+ if (vhost_dev->txq_needs_locking) {
+ qid = qid % vhost_dev->real_n_txq;
+ rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);
+ }
do {
+ int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
unsigned int tx_pkts;
- tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
+ tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
cur_pkts, cnt);
if (OVS_LIKELY(tx_pkts)) {
/* Packets have been sent.*/
* Unable to enqueue packets to vhost interface.
* Check available entries before retrying.
*/
- while (!rte_vring_available_entries(virtio_dev, VIRTIO_RXQ)) {
+ while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
expired = 1;
break;
}
} while (cnt);
+ if (vhost_dev->txq_needs_locking) {
+ rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);
+ }
+
rte_spinlock_lock(&vhost_dev->stats_lock);
- vhost_dev->stats.tx_packets += (total_pkts - cnt);
- vhost_dev->stats.tx_dropped += cnt;
+ netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
+ cnt);
rte_spinlock_unlock(&vhost_dev->stats_lock);
- rte_spinlock_unlock(&vhost_dev->txq_lock);
out:
if (may_steal) {
/* If we are on a non pmd thread we have to use the mempool mutex, because
* every non pmd thread shares the same mempool cache */
- if (!thread_is_pmd()) {
+ if (!dpdk_thread_is_pmd()) {
ovs_mutex_lock(&nonpmd_mempool_mutex);
}
}
if (dev->type == DPDK_DEV_VHOST) {
- __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs, newcnt, true);
+ __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
+ newcnt, true);
} else {
dpdk_queue_pkts(dev, qid, mbufs, newcnt);
dpdk_queue_flush(dev, qid);
}
- if (!thread_is_pmd()) {
+ if (!dpdk_thread_is_pmd()) {
ovs_mutex_unlock(&nonpmd_mempool_mutex);
}
}
static int
-netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED, struct dp_packet **pkts,
+netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
int cnt, bool may_steal)
{
if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
}
}
} else {
- __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
+ __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
}
return 0;
}
{
int i;
+ if (OVS_UNLIKELY(dev->txq_needs_locking)) {
+ qid = qid % dev->real_n_txq;
+ rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+ }
+
if (OVS_UNLIKELY(!may_steal ||
pkts[0]->source != DPBUF_DPDK)) {
struct netdev *netdev = &dev->up;
rte_spinlock_unlock(&dev->stats_lock);
}
}
+
+ if (OVS_UNLIKELY(dev->txq_needs_locking)) {
+ rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
+ }
}
static int
}
static int
-netdev_dpdk_set_etheraddr(struct netdev *netdev,
- const uint8_t mac[ETH_ADDR_LEN])
+netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
ovs_mutex_lock(&dev->mutex);
if (!eth_addr_equals(dev->hwaddr, mac)) {
- memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
+ dev->hwaddr = mac;
netdev_change_seq_changed(netdev);
}
ovs_mutex_unlock(&dev->mutex);
}
static int
-netdev_dpdk_get_etheraddr(const struct netdev *netdev,
- uint8_t mac[ETH_ADDR_LEN])
+netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
ovs_mutex_lock(&dev->mutex);
- memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
+ *mac = dev->hwaddr;
ovs_mutex_unlock(&dev->mutex);
return 0;
ovs_mutex_lock(&dev->mutex);
memset(stats, 0, sizeof(*stats));
/* Unsupported Stats */
- stats->rx_errors = UINT64_MAX;
- stats->tx_errors = UINT64_MAX;
- stats->multicast = UINT64_MAX;
stats->collisions = UINT64_MAX;
stats->rx_crc_errors = UINT64_MAX;
stats->rx_fifo_errors = UINT64_MAX;
stats->rx_frame_errors = UINT64_MAX;
- stats->rx_length_errors = UINT64_MAX;
stats->rx_missed_errors = UINT64_MAX;
stats->rx_over_errors = UINT64_MAX;
stats->tx_aborted_errors = UINT64_MAX;
stats->tx_fifo_errors = UINT64_MAX;
stats->tx_heartbeat_errors = UINT64_MAX;
stats->tx_window_errors = UINT64_MAX;
- stats->rx_bytes += UINT64_MAX;
stats->rx_dropped += UINT64_MAX;
- stats->tx_bytes += UINT64_MAX;
rte_spinlock_lock(&dev->stats_lock);
/* Supported Stats */
stats->rx_packets += dev->stats.rx_packets;
stats->tx_packets += dev->stats.tx_packets;
stats->tx_dropped += dev->stats.tx_dropped;
+ stats->multicast = dev->stats.multicast;
+ stats->rx_bytes = dev->stats.rx_bytes;
+ stats->tx_bytes = dev->stats.tx_bytes;
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_length_errors = dev->stats.rx_length_errors;
rte_spinlock_unlock(&dev->stats_lock);
+
ovs_mutex_unlock(&dev->mutex);
return 0;
stats->tx_packets = rte_stats.opackets;
stats->rx_bytes = rte_stats.ibytes;
stats->tx_bytes = rte_stats.obytes;
- stats->rx_errors = rte_stats.ierrors;
+ /* DPDK counts imissed as errors, but we count them here as dropped
+ * instead. */
+ stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
stats->tx_errors = rte_stats.oerrors;
stats->multicast = rte_stats.imcasts;
rte_spinlock_lock(&dev->stats_lock);
stats->tx_dropped = dev->stats.tx_dropped;
rte_spinlock_unlock(&dev->stats_lock);
+
+ /* These are the available DPDK counters for packets not received due to
+ * local resource constraints in DPDK and the NIC, respectively. */
+ stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
+ stats->collisions = UINT64_MAX;
+
+ stats->rx_length_errors = UINT64_MAX;
+ stats->rx_over_errors = UINT64_MAX;
+ stats->rx_crc_errors = UINT64_MAX;
+ stats->rx_frame_errors = UINT64_MAX;
+ stats->rx_fifo_errors = UINT64_MAX;
+ stats->rx_missed_errors = rte_stats.imissed;
+
+ stats->tx_aborted_errors = UINT64_MAX;
+ stats->tx_carrier_errors = UINT64_MAX;
+ stats->tx_fifo_errors = UINT64_MAX;
+ stats->tx_heartbeat_errors = UINT64_MAX;
+ stats->tx_window_errors = UINT64_MAX;
+
ovs_mutex_unlock(&dev->mutex);
return 0;
smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);
- smap_add_format(args, "pci-vendor_id", "0x%u", dev_info.pci_dev->id.vendor_id);
- smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);
+ if (dev_info.pci_dev) {
+ smap_add_format(args, "pci-vendor_id", "0x%u",
+ dev_info.pci_dev->id.vendor_id);
+ smap_add_format(args, "pci-device_id", "0x%x",
+ dev_info.pci_dev->id.device_id);
+ }
return 0;
}
static void
set_irq_status(struct virtio_net *dev)
{
- dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
- dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
+ uint32_t i;
+ uint64_t idx;
+
+ for (i = 0; i < dev->virt_qp_nb; i++) {
+ idx = i * VIRTIO_QNUM;
+ rte_vhost_enable_guest_notification(dev, idx + VIRTIO_RXQ, 0);
+ rte_vhost_enable_guest_notification(dev, idx + VIRTIO_TXQ, 0);
+ }
+}
+
+
+static int
+netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev, struct virtio_net *dev)
+{
+ uint32_t qp_num;
+
+ qp_num = dev->virt_qp_nb;
+ if (qp_num > netdev->up.n_rxq) {
+ VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
+ "too many queues %d > %d", dev->ifname, dev->device_fh,
+ qp_num, netdev->up.n_rxq);
+ return -1;
+ }
+
+ netdev->real_n_rxq = qp_num;
+ netdev->real_n_txq = qp_num;
+ if (netdev->up.n_txq > netdev->real_n_txq) {
+ netdev->txq_needs_locking = true;
+ } else {
+ netdev->txq_needs_locking = false;
+ }
+
+ return 0;
}
/*
ovs_mutex_lock(&dpdk_mutex);
/* Add device to the vhost port with the same name as that passed down. */
LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
- if (strncmp(dev->ifname, netdev->up.name, IFNAMSIZ) == 0) {
+ if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
ovs_mutex_lock(&netdev->mutex);
+ if (netdev_dpdk_vhost_set_queues(netdev, dev)) {
+ ovs_mutex_unlock(&netdev->mutex);
+ ovs_mutex_unlock(&dpdk_mutex);
+ return -1;
+ }
ovsrcu_set(&netdev->virtio_dev, dev);
- ovs_mutex_unlock(&netdev->mutex);
exists = true;
dev->flags |= VIRTIO_DEV_RUNNING;
/* Disable notifications. */
set_irq_status(dev);
+ ovs_mutex_unlock(&netdev->mutex);
break;
}
}
ovs_mutex_unlock(&dpdk_mutex);
if (!exists) {
- VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
- dev->ifname, dev->device_fh);
+ VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
+ "found", dev->ifname, dev->device_fh);
return -1;
}
- VLOG_INFO("vHost Device '%s' (%ld) has been added",
- dev->ifname, dev->device_fh);
+ VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", dev->ifname,
+ dev->device_fh);
return 0;
}
destroy_device(volatile struct virtio_net *dev)
{
struct netdev_dpdk *vhost_dev;
+ bool exists = false;
ovs_mutex_lock(&dpdk_mutex);
LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
ovs_mutex_lock(&vhost_dev->mutex);
dev->flags &= ~VIRTIO_DEV_RUNNING;
ovsrcu_set(&vhost_dev->virtio_dev, NULL);
+ exists = true;
ovs_mutex_unlock(&vhost_dev->mutex);
-
- /*
- * Wait for other threads to quiesce before
- * setting the virtio_dev to NULL.
- */
- ovsrcu_synchronize();
- /*
- * As call to ovsrcu_synchronize() will end the quiescent state,
- * put thread back into quiescent state before returning.
- */
- ovsrcu_quiesce_start();
+ break;
}
}
+
ovs_mutex_unlock(&dpdk_mutex);
- VLOG_INFO("vHost Device '%s' (%ld) has been removed",
- dev->ifname, dev->device_fh);
+ if (exists) {
+ /*
+ * Wait for other threads to quiesce after setting the 'virtio_dev'
+ * to NULL, before returning.
+ */
+ ovsrcu_synchronize();
+ /*
+ * As call to ovsrcu_synchronize() will end the quiescent state,
+ * put thread back into quiescent state before returning.
+ */
+ ovsrcu_quiesce_start();
+ VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed", dev->ifname,
+ dev->device_fh);
+ } else {
+ VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
+ dev->device_fh);
+ }
}
struct virtio_net *
};
static void *
-start_cuse_session_loop(void *dummy OVS_UNUSED)
+start_vhost_loop(void *dummy OVS_UNUSED)
{
pthread_detach(pthread_self());
- /* Put the cuse thread into quiescent state. */
+ /* Put the vhost thread into quiescent state. */
static int
dpdk_vhost_class_init(void)
+{
+ rte_vhost_driver_callback_register(&virtio_net_device_ops);
+ ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
+ return 0;
+}
+
+static int
+dpdk_vhost_cuse_class_init(void)
{
int err = -1;
- rte_vhost_driver_callback_register(&virtio_net_device_ops);
/* Register CUSE device to handle IOCTLs.
* Unless otherwise specified on the vswitchd command line, cuse_dev_name
return -1;
}
- ovs_thread_create("cuse_thread", start_cuse_session_loop, NULL);
+ dpdk_vhost_class_init();
+ return 0;
+}
+
+static int
+dpdk_vhost_user_class_init(void)
+{
+ dpdk_vhost_class_init();
return 0;
}
return -err;
}
- /* Create single consumer/producer rings, netdev does explicit locking. */
+ /* Create single producer tx ring, netdev does explicit locking. */
ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
- RING_F_SP_ENQ | RING_F_SC_DEQ);
+ RING_F_SP_ENQ);
if (ivshmem->cring_tx == NULL) {
rte_free(ivshmem);
return ENOMEM;
return -err;
}
- /* Create single consumer/producer rings, netdev does explicit locking. */
+ /* Create single consumer rx ring, netdev does explicit locking. */
ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
- RING_F_SP_ENQ | RING_F_SC_DEQ);
+ RING_F_SC_DEQ);
if (ivshmem->cring_rx == NULL) {
rte_free(ivshmem);
return ENOMEM;
}
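
The flag changes above work because rte_ring only needs its lock-free multi-producer/multi-consumer machinery on the sides that are actually shared; a side declared single-producer (RING_F_SP_ENQ) or single-consumer (RING_F_SC_DEQ) can use plain index updates, with OVS providing the explicit locking. As background, a toy single-producer/single-consumer ring in C11 atomics (not DPDK code; rte_ring itself is considerably more elaborate):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Minimal SPSC ring: no locks are needed when exactly one thread
     * writes each index. */
    #define RING_SZ 8                  /* power of two */

    static int slots[RING_SZ];
    static _Atomic unsigned head;      /* written only by the producer */
    static _Atomic unsigned tail;      /* written only by the consumer */

    static int
    ring_enqueue(int v)
    {
        unsigned h = atomic_load_explicit(&head, memory_order_relaxed);
        unsigned t = atomic_load_explicit(&tail, memory_order_acquire);

        if (h - t == RING_SZ) {
            return -1;                 /* full */
        }
        slots[h % RING_SZ] = v;
        atomic_store_explicit(&head, h + 1, memory_order_release);
        return 0;
    }

    static int
    ring_dequeue(int *v)
    {
        unsigned t = atomic_load_explicit(&tail, memory_order_relaxed);
        unsigned h = atomic_load_explicit(&head, memory_order_acquire);

        if (h == t) {
            return -1;                 /* empty */
        }
        *v = slots[t % RING_SZ];
        atomic_store_explicit(&tail, t + 1, memory_order_release);
        return 0;
    }

    int
    main(void)
    {
        int v;

        ring_enqueue(42);
        ring_enqueue(43);
        while (!ring_dequeue(&v)) {
            printf("dequeued %d\n", v);
        }
        return 0;
    }
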
static int
-netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
+netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
struct dp_packet **pkts, int cnt, bool may_steal)
{
- struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+ struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
unsigned i;
/* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that the
* the consumer of the ring and return into the datapath without recalculating
* the RSS hash. */
for (i = 0; i < cnt; i++) {
- dp_packet_set_rss_hash(pkts[i], 0);
+ dp_packet_rss_invalidate(pkts[i]);
}
- /* DPDK Rings have a single TX queue, Therefore needs locking. */
- rte_spinlock_lock(&dev->txq_lock);
- netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
- rte_spinlock_unlock(&dev->txq_lock);
+ netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
return 0;
}
DESTRUCT, \
netdev_dpdk_dealloc, \
netdev_dpdk_get_config, \
- NULL, /* netdev_dpdk_set_config */ \
+ netdev_dpdk_set_config, \
NULL, /* get_tunnel_config */ \
NULL, /* build header */ \
NULL, /* push header */ \
NULL, /* rxq_drain */ \
}
+static int
+process_vhost_flags(char *flag, char *default_val, int size,
+ char **argv, char **new_val)
+{
+ int changed = 0;
+
+ /* Depending on which version of vhost is in use, process the vhost-specific
+ * flag if it is provided on the vswitchd command line, otherwise resort to
+ * a default value.
+ *
+ * For vhost-user: Process "-vhost_sock_dir" to set the custom location of
+ * the vhost-user socket(s).
+ * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of the
+ * vhost-cuse character device.
+ */
+ if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= (size_t) size)) {
+ changed = 1;
+ *new_val = xstrdup(argv[2]);
+ VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
+ } else {
+ VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
+ *new_val = default_val;
+ }
+
+ return changed;
+}
+
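
For illustration, a hypothetical driver showing how process_vhost_flags() is expected to consume the argument vector passed along after --dpdk. The argv contents, the 255-byte length cap, and the simplified re-implementation below are all invented for the example; the VLOG calls are dropped:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *
    xstrdup_(const char *s)        /* stand-in for OVS's xstrdup() */
    {
        char *copy = strdup(s);
        if (!copy) {
            abort();
        }
        return copy;
    }

    /* Simplified re-implementation of process_vhost_flags() for the demo. */
    static int
    process_vhost_flags(char *flag, char *default_val, int size,
                        char **argv, char **new_val)
    {
        if (!strcmp(argv[1], flag) && strlen(argv[2]) <= (size_t) size) {
            *new_val = xstrdup_(argv[2]);
            return 1;
        }
        *new_val = default_val;
        return 0;
    }

    int
    main(void)
    {
        /* Mimics what ovs-vswitchd passes along after --dpdk. */
        char *argv[] = { "ovs-vswitchd", "-vhost_sock_dir", "/tmp/vhost",
                         NULL };
        char *dir;

        if (process_vhost_flags("-vhost_sock_dir",
                                xstrdup_("/var/run/openvswitch"), 255,
                                argv, &dir)) {
            printf("user-provided socket dir: %s\n", dir);
        } else {
            printf("default socket dir: %s\n", dir);
        }
        return 0;
    }
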
int
dpdk_init(int argc, char **argv)
{
argc--;
argv++;
- /* If the cuse_dev_name parameter has been provided, set 'cuse_dev_name' to
- * this string if it meets the correct criteria. Otherwise, set it to the
- * default (vhost-net).
- */
- if (!strcmp(argv[1], "--cuse_dev_name") &&
- (strlen(argv[2]) <= NAME_MAX)) {
+ /* Reject --user option */
+ int i;
+ for (i = 0; i < argc; i++) {
+ if (!strcmp(argv[i], "--user")) {
+ VLOG_ERR("Can not mix --dpdk and --user options, aborting.");
+ }
+ }
- cuse_dev_name = strdup(argv[2]);
+#ifdef VHOST_CUSE
+ if (process_vhost_flags("-cuse_dev_name", xstrdup("vhost-net"),
+ PATH_MAX, argv, &cuse_dev_name)) {
+#else
+ if (process_vhost_flags("-vhost_sock_dir", xstrdup(ovs_rundir()),
+ NAME_MAX, argv, &vhost_sock_dir)) {
+ struct stat s;
+ int err;
- /* Remove the cuse_dev_name configuration parameters from the argument
+ err = stat(vhost_sock_dir, &s);
+ if (err) {
+ VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
+ vhost_sock_dir);
+ return err;
+ }
+#endif
+ /* Remove the vhost flag configuration parameters from the argument
* list, so that the correct elements are passed to the DPDK
* initialization function
*/
argc -= 2;
- argv += 2; /* Increment by two to bypass the cuse_dev_name arguments */
+ argv += 2; /* Increment by two to bypass the vhost flag arguments */
base = 2;
-
- VLOG_ERR("User-provided cuse_dev_name in use: /dev/%s", cuse_dev_name);
- } else {
- cuse_dev_name = "vhost-net";
- VLOG_INFO("No cuse_dev_name provided - defaulting to /dev/vhost-net");
}
/* Keep the program name argument as this is needed for call to
NULL,
netdev_dpdk_ring_construct,
netdev_dpdk_destruct,
- NULL,
+ netdev_dpdk_set_multiq,
netdev_dpdk_ring_send,
netdev_dpdk_get_carrier,
netdev_dpdk_get_stats,
netdev_dpdk_get_status,
netdev_dpdk_rxq_recv);
-static const struct netdev_class dpdk_vhost_class =
+static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
+ NETDEV_DPDK_CLASS(
+ "dpdkvhostcuse",
+ dpdk_vhost_cuse_class_init,
+ netdev_dpdk_vhost_cuse_construct,
+ netdev_dpdk_vhost_destruct,
+ netdev_dpdk_vhost_cuse_set_multiq,
+ netdev_dpdk_vhost_send,
+ netdev_dpdk_vhost_get_carrier,
+ netdev_dpdk_vhost_get_stats,
+ NULL,
+ NULL,
+ netdev_dpdk_vhost_rxq_recv);
+
+static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
NETDEV_DPDK_CLASS(
- "dpdkvhost",
- dpdk_vhost_class_init,
- netdev_dpdk_vhost_construct,
+ "dpdkvhostuser",
+ dpdk_vhost_user_class_init,
+ netdev_dpdk_vhost_user_construct,
netdev_dpdk_vhost_destruct,
netdev_dpdk_vhost_set_multiq,
netdev_dpdk_vhost_send,
dpdk_common_init();
netdev_register_provider(&dpdk_class);
netdev_register_provider(&dpdk_ring_class);
- netdev_register_provider(&dpdk_vhost_class);
+#ifdef VHOST_CUSE
+ netdev_register_provider(&dpdk_vhost_cuse_class);
+#else
+ netdev_register_provider(&dpdk_vhost_user_class);
+#endif
ovsthread_once_done(&once);
}
}
}
static bool
-thread_is_pmd(void)
+dpdk_thread_is_pmd(void)
{
return rte_lcore_id() != NON_PMD_CORE_ID;
}