+/* NOTE(review): presumably the per-core mempool cache size -- confirm
+ * against rte_mempool_create() usage. */
#define MP_CACHE_SZ (256 * 2)
+/* Fallback NUMA socket id, used when the real socket of a port cannot
+ * be determined (rte_eth_dev_socket_id() returns a negative value). */
#define SOCKET0 0
-#define NON_PMD_THREAD_TX_QUEUE 0
-
#define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue, Max (n+32<=4096)*/
#define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n+32<=4096)*/
}
static void
-netdev_dpdk_set_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
+netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
+/* Initializes the 'flush_tx' flag of each of the 'n_txqs' tx queues of
+ * 'netdev' based on its NUMA placement relative to 'netdev->socket_id'. */
int i;
/* Each index is considered as a cpu core id, since there should
* be one tx queue for each cpu core. */
for (i = 0; i < n_txqs; i++) {
-int core_id = ovs_numa_get_numa_id(i);
+int numa_id = ovs_numa_get_numa_id(i);
/* If the corresponding core is not on the same numa node
* as 'netdev', flags the 'flush_tx'. */
-netdev->tx_q[i].flush_tx = netdev->socket_id == core_id;
+netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
+/* NOTE(review): the comment above says 'flush_tx' is set on a numa
+ * MISMATCH, but the assignment sets it when the socket ids MATCH --
+ * one of the two appears inverted; confirm the intended semantics
+ * of 'flush_tx' before relying on this comment. */
}
}
OVS_REQUIRES(dpdk_mutex)
{
struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
+ int sid;
int err = 0;
ovs_mutex_init(&netdev->mutex);
ovs_mutex_lock(&netdev->mutex);
- netdev->socket_id = rte_eth_dev_socket_id(port_no);
- netdev_dpdk_set_txq(netdev, NR_QUEUE);
+ /* If the 'sid' is negative, it means that the kernel fails
+ * to obtain the pci numa info. In that situation, always
+ * use 'SOCKET0'. */
+ sid = rte_eth_dev_socket_id(port_no);
+ netdev->socket_id = sid < 0 ? SOCKET0 : sid;
+ netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
netdev->port_id = port_no;
netdev->flags = 0;
netdev->mtu = ETHER_MTU;
ovs_mutex_lock(&dev->mutex);
- /* XXX: Allow to configure number of queues. */
- smap_add_format(args, "configured_rx_queues", "%u", netdev_->n_rxq);
- smap_add_format(args, "configured_tx_queues", "%u", netdev_->n_rxq);
+ smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
+ smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
ovs_mutex_unlock(&dev->mutex);
return 0;
return err;
}
+ ovs_mutex_lock(&dpdk_mutex);
ovs_mutex_lock(&netdev->mutex);
+
rte_eth_dev_stop(netdev->port_id);
+
netdev->up.n_txq = n_txq;
netdev->up.n_rxq = n_rxq;
+ rte_free(netdev->tx_q);
+ netdev_dpdk_alloc_txq(netdev, n_txq);
err = dpdk_eth_dev_init(netdev);
- if (!err && netdev->up.n_txq != n_txq) {
- rte_free(netdev->tx_q);
- netdev_dpdk_set_txq(netdev, n_txq);
- }
+
ovs_mutex_unlock(&netdev->mutex);
+ ovs_mutex_unlock(&dpdk_mutex);
return err;
}
/* Tx function. Transmit packets indefinitely */
static void
-dpdk_do_tx_copy(struct netdev *netdev, struct dpif_packet ** pkts, int cnt)
+dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dpif_packet ** pkts,
+ int cnt)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
ovs_mutex_unlock(&dev->mutex);
}
- dpdk_queue_pkts(dev, NON_PMD_THREAD_TX_QUEUE, mbufs, newcnt);
- dpdk_queue_flush(dev, NON_PMD_THREAD_TX_QUEUE);
+ dpdk_queue_pkts(dev, qid, mbufs, newcnt);
+ dpdk_queue_flush(dev, qid);
if (!thread_is_pmd()) {
ovs_mutex_unlock(&nonpmd_mempool_mutex);
int i;
if (!may_steal || pkts[0]->ofpbuf.source != OFPBUF_DPDK) {
- dpdk_do_tx_copy(netdev, pkts, cnt);
+ dpdk_do_tx_copy(netdev, qid, pkts, cnt);
if (may_steal) {
for (i = 0; i < cnt; i++) {