/* There should be one 'struct dpdk_tx_queue' created for
* each cpu core. */
struct dpdk_tx_queue {
- rte_spinlock_t tx_lock;
bool flush_tx; /* Set to true to flush the queue every time
                * pkts are queued. */
int count;
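With the spinlock removed, the per-core queue carries only the flush flag, the packet count, and the queued packet array. A minimal sketch of the resulting layout; the 'burst_pkts' field and the MAX_TX_QUEUE_LEN bound follow the surrounding netdev-dpdk.c but do not appear in this hunk and are assumptions here:

#define MAX_TX_QUEUE_LEN 384    /* Assumed queue depth, not taken from this hunk. */

struct dpdk_tx_queue {
    bool flush_tx;              /* Set to true to flush the queue every time
                                 * pkts are queued. */
    int count;                  /* Number of packets currently queued. */
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];   /* Assumed field name. */
};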
for (i = 0; i < n_txqs; i++) {
int core_id = ovs_numa_get_numa_id(i);
- rte_spinlock_init(&netdev->tx_q[i].tx_lock);
/* If the corresponding core is not on the same numa node
 * as 'netdev', flag 'flush_tx'. */
netdev->tx_q[i].flush_tx = netdev->socket_id == core_id;
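For context, the allocation loop might read as follows once the per-queue lock init is gone; the netdev_dpdk_alloc_txq() name and the dpdk_rte_mzalloc() helper follow the surrounding file but are assumptions here, not part of the hunk:

static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    int i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int core_id = ovs_numa_get_numa_id(i);

        /* Decide per queue whether every enqueue should also flush,
         * based on the numa placement of the queue's core. */
        netdev->tx_q[i].flush_tx = netdev->socket_id == core_id;
    }
}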
if (txq->count == 0) {
return;
}
- rte_spinlock_lock(&txq->tx_lock);
dpdk_queue_flush__(dev, qid);
- rte_spinlock_unlock(&txq->tx_lock);
}
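With one queue per core there is a single producer per queue, so the flush wrapper needs no synchronization at all. A sketch assuming the dev->tx_q[] layout above and the existing dpdk_queue_flush__() helper:

static void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    /* Only the owning core touches this queue, so 'count' can be
     * checked and the queue flushed without taking a lock. */
    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}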
static int
int i = 0;
- rte_spinlock_lock(&txq->tx_lock);
while (i < cnt) {
int freeslots = MAX_TX_QUEUE_LEN - txq->count;
int tocopy = MIN(freeslots, cnt-i);
dpdk_queue_flush__(dev, qid);
}
}
- rte_spinlock_unlock(&txq->tx_lock);
}
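The enqueue path follows the same reasoning: the copy into the burst array and the count update are only ever done by the owning core. A sketch under the same assumptions; the dpdk_queue_pkts() signature, field names, and the MIN() and MAX_TX_QUEUE_LEN macros mirror the surrounding file rather than this hunk:

static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof *pkts);
        txq->count += tocopy;
        i += tocopy;

        /* Flush when the queue is full or 'flush_tx' requests it. */
        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}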
/* Tx function. Transmit packets indefinitely */