bool flush_tx; /* Set to true to flush queue every time */
/* pkts are queued. */
int count;
+ rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
+ * from concurrent access. It is used only
+ * if the queue is shared among different
+ * pmd threads (see 'txq_needs_locking'). */
uint64_t tsc;
struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
struct rte_eth_link link;
int link_reset_cnt;
+ /* The user might request more txqs than the NIC has.  We remap the
+ * requested queues ('up.n_txq') onto the available ones ('real_n_txq').
+ * If the numbers match, 'txq_needs_locking' is false; otherwise it is
+ * true and we take a spinlock on transmission. */
+ int real_n_txq;
+ bool txq_needs_locking;
+
+ /* Spinlock for vhost transmission.  Other DPDK devices use the
+ * per-queue spinlocks in 'dpdk_tx_queue'. */
+ rte_spinlock_t vhost_tx_lock;
+
/* virtio-net structure for vhost device */
OVSRCU_TYPE(struct virtio_net *) virtio_dev;
/* In dpdk_list. */
struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
- rte_spinlock_t txq_lock;
};
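To make the remapping above concrete, here is a small standalone sketch (hypothetical queue counts, plain C, no DPDK needed) of how requested queue ids wrap onto the queues the NIC really has; once two ids collide, the per-queue spinlock becomes necessary:

    #include <stdio.h>

    int
    main(void)
    {
        int n_txq = 8;       /* hypothetical: queues requested by the user */
        int real_n_txq = 4;  /* hypothetical: queues the NIC supports */
        int qid;

        /* Same remapping as 'qid % dev->real_n_txq' on the send path:
         * ids 4..7 collide with 0..3, so those queues are shared and
         * must be protected by 'tx_lock'. */
        for (qid = 0; qid < n_txq; qid++) {
            printf("requested txq %d -> physical txq %d\n",
                   qid, qid % real_n_txq);
        }
        return 0;
    }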
struct netdev_rxq_dpdk {
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_eth_dev_info info;
struct ether_addr eth_addr;
int diag;
int i;
return ENODEV;
}
- diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
+ rte_eth_dev_info_get(dev->port_id, &info);
+ dev->up.n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
+ dev->real_n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
+
+ diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->real_n_txq,
&port_conf);
if (diag) {
- VLOG_ERR("eth dev config error %d",diag);
+ VLOG_ERR("eth dev config error %d. rxq:%d txq:%d", diag, dev->up.n_rxq,
+ dev->real_n_txq);
return -diag;
}
- for (i = 0; i < dev->up.n_txq; i++) {
+ for (i = 0; i < dev->real_n_txq; i++) {
diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
dev->socket_id, NULL);
if (diag) {
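The clamping step can be sketched in isolation.  The following compiles without DPDK by stubbing out the device-info struct; in the real code 'info' is filled in by rte_eth_dev_info_get() before rte_eth_dev_configure() runs:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Stand-in for the fields of struct rte_eth_dev_info used above. */
    struct dev_info_stub {
        uint16_t max_rx_queues;
        uint16_t max_tx_queues;
    };

    int
    main(void)
    {
        struct dev_info_stub info = { 16, 4 };  /* hypothetical NIC limits */
        int n_rxq = 8, n_txq = 8;               /* hypothetical user request */
        int real_n_txq;

        n_rxq = MIN(info.max_rx_queues, n_rxq);       /* stays 8 */
        real_n_txq = MIN(info.max_tx_queues, n_txq);  /* clamped to 4 */

        printf("rxq:%d txq:%d\n", n_rxq, real_n_txq);
        return 0;
    }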
unsigned i;
netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
- /* Each index is considered as a cpu core id, since there should
- * be one tx queue for each cpu core. */
for (i = 0; i < n_txqs; i++) {
int numa_id = ovs_numa_get_numa_id(i);
- /* If the corresponding core is not on the same numa node
- * as 'netdev', flags the 'flush_tx'. */
- netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
+ if (!netdev->txq_needs_locking) {
+ /* Each index is considered a cpu core id, since there should
+ * be one tx queue for each cpu core.  If the corresponding core
+ * is not on the same numa node as 'netdev', flag
+ * 'flush_tx'. */
+ netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
+ } else {
+ /* Queues are shared among CPUs.  Always flush. */
+ netdev->tx_q[i].flush_tx = true;
+ }
+ rte_spinlock_init(&netdev->tx_q[i].tx_lock);
}
}
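For context, a hedged sketch of how the 'flush_tx' flag set above is consumed on the transmit path (simplified stand-ins; in netdev-dpdk.c the batching lives in the dpdk_queue_* helpers and a flush hands 'burst_pkts' to rte_eth_tx_burst()):

    #include <stdbool.h>

    #define MAX_TX_QUEUE_LEN 384   /* value assumed for this sketch */

    struct pkt;                    /* stand-in for struct rte_mbuf */

    struct tx_queue {              /* pared-down struct dpdk_tx_queue */
        bool flush_tx;
        int count;
        struct pkt *burst_pkts[MAX_TX_QUEUE_LEN];
    };

    void
    queue_flush(struct tx_queue *txq)
    {
        /* The real code transmits txq->burst_pkts[0..count) here. */
        txq->count = 0;
    }

    void
    queue_pkts(struct tx_queue *txq, struct pkt **pkts, int cnt)
    {
        int i;

        for (i = 0; i < cnt; i++) {
            txq->burst_pkts[txq->count++] = pkts[i];
            /* Drain immediately when full, or on every call if the
             * queue was flagged with 'flush_tx' during allocation. */
            if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
                queue_flush(txq);
            }
        }
    }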
netdev->flags = 0;
netdev->mtu = ETHER_MTU;
netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
- rte_spinlock_init(&netdev->txq_lock);
netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
if (!netdev->dpdk_mp) {
netdev_->n_txq = NR_QUEUE;
netdev_->n_rxq = NR_QUEUE;
+ netdev->real_n_txq = NR_QUEUE;
if (type == DPDK_DEV_ETH) {
netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
static int
netdev_dpdk_vhost_construct(struct netdev *netdev_)
{
+ struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
int err;
if (rte_eal_init_ret) {
err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
ovs_mutex_unlock(&dpdk_mutex);
+ rte_spinlock_init(&netdev->vhost_tx_lock);
+
return err;
}
ovs_mutex_lock(&dev->mutex);
smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
- smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
+ smap_add_format(args, "requested_tx_queues", "%d", netdev_->n_txq);
+ smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
ovs_mutex_unlock(&dev->mutex);
return 0;
netdev->up.n_rxq = n_rxq;
rte_free(netdev->tx_q);
- netdev_dpdk_alloc_txq(netdev, n_txq);
err = dpdk_eth_dev_init(netdev);
+ netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
+
+ netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;
ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_unlock(&dpdk_mutex);
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
- unsigned int n_rxq)
+ unsigned int n_rxq)
{
struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
int err = 0;
ovs_mutex_lock(&netdev->mutex);
netdev->up.n_txq = n_txq;
- netdev->up.n_rxq = n_rxq;
+ netdev->real_n_txq = 1;
+ netdev->up.n_rxq = 1;
ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_unlock(&dpdk_mutex);
}
/* vHost has a single TX queue, so we need to lock it for TX. */
- rte_spinlock_lock(&vhost_dev->txq_lock);
+ rte_spinlock_lock(&vhost_dev->vhost_tx_lock);
do {
unsigned int tx_pkts;
}
}
} while (cnt);
+ rte_spinlock_unlock(&vhost_dev->vhost_tx_lock);
rte_spinlock_lock(&vhost_dev->stats_lock);
vhost_dev->stats.tx_packets += (total_pkts - cnt);
vhost_dev->stats.tx_dropped += cnt;
rte_spinlock_unlock(&vhost_dev->stats_lock);
- rte_spinlock_unlock(&vhost_dev->txq_lock);
out:
if (may_steal) {
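Since vhost exposes a single queue toward the guest, every sender serializes on 'vhost_tx_lock' around the whole retry loop.  A minimal standalone sketch of that shape (a pthread mutex stands in for rte_spinlock_t, and a stub stands in for rte_vhost_enqueue_burst(), so the sketch builds without DPDK):

    #include <pthread.h>

    static pthread_mutex_t vhost_tx_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stub standing in for rte_vhost_enqueue_burst(): returns how many
     * packets the guest ring accepted this round (here: all of them). */
    unsigned int
    enqueue_burst(void **pkts, unsigned int cnt)
    {
        (void) pkts;
        return cnt;
    }

    void
    vhost_send(void **pkts, unsigned int cnt)
    {
        pthread_mutex_lock(&vhost_tx_lock);   /* one queue, one lock */
        do {
            unsigned int tx = enqueue_burst(pkts, cnt);

            pkts += tx;                       /* retry whatever is left */
            cnt -= tx;
        } while (cnt);
        pthread_mutex_unlock(&vhost_tx_lock);
    }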
{
int i;
+ if (OVS_UNLIKELY(dev->txq_needs_locking)) {
+ qid = qid % dev->real_n_txq;
+ rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+ }
+
if (OVS_UNLIKELY(!may_steal ||
pkts[0]->source != DPBUF_DPDK)) {
struct netdev *netdev = &dev->up;
rte_spinlock_unlock(&dev->stats_lock);
}
}
+
+ if (OVS_UNLIKELY(dev->txq_needs_locking)) {
+ rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
+ }
}
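Putting the pieces together, the locking policy on the generic send path looks like this in isolation (simplified types; OVS uses rte_spinlock_t and OVS_UNLIKELY(), pthread mutexes stand in here so the sketch builds anywhere):

    #include <pthread.h>
    #include <stdbool.h>

    #define MAX_TXQ 8   /* hypothetical upper bound for the sketch */

    struct dev {
        bool txq_needs_locking;   /* true iff real_n_txq < up.n_txq */
        int real_n_txq;
        pthread_mutex_t tx_lock[MAX_TXQ];
    };

    void
    send_batch(struct dev *dev, int qid /*, struct dp_packet **pkts ... */)
    {
        if (dev->txq_needs_locking) {
            /* Several threads may map onto the same physical queue,
             * so wrap 'qid' and take that queue's lock. */
            qid %= dev->real_n_txq;
            pthread_mutex_lock(&dev->tx_lock[qid]);
        }

        /* ... queue and transmit the batch on physical queue 'qid' ... */

        if (dev->txq_needs_locking) {
            pthread_mutex_unlock(&dev->tx_lock[qid]);
        }
    }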
static int
}
static int
-netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
+netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
struct dp_packet **pkts, int cnt, bool may_steal)
{
- struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+ struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
unsigned i;
/* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that the
dp_packet_set_rss_hash(pkts[i], 0);
}
- /* DPDK Rings have a single TX queue, Therefore needs locking. */
- rte_spinlock_lock(&dev->txq_lock);
- netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
- rte_spinlock_unlock(&dev->txq_lock);
+ netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
return 0;
}
NULL,
netdev_dpdk_ring_construct,
netdev_dpdk_destruct,
- NULL,
+ netdev_dpdk_set_multiq,
netdev_dpdk_ring_send,
netdev_dpdk_get_carrier,
netdev_dpdk_get_stats,