}
while (!list_empty(&pch->completed_list)) {
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
desc = list_first_entry(&pch->completed_list,
struct dma_pl330_desc, node);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
dma_descriptor_unmap(&desc->txd);
- if (callback) {
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irqrestore(&pch->lock, flags);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&pch->lock, flags);
}
}
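
The hunk above replaces the open-coded callback/callback_param pair with the dmaengine_desc_callback helpers from drivers/dma/dmaengine.h. The helpers snapshot the descriptor's callback fields while pch->lock is held, so the descriptor itself can be recycled before the callback runs; the lock is then dropped around the invocation because the callback may re-enter the dmaengine API (for example, to resubmit). A simplified sketch of those helpers, for reference only — the real definitions also carry the newer callback_result/dmaengine_result variant:

struct dmaengine_desc_callback {
	dma_async_tx_callback callback;
	void *callback_param;
};

/* Snapshot the callback fields under the channel lock. */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *txd,
			    struct dmaengine_desc_callback *cb)
{
	cb->callback = txd->callback;
	cb->callback_param = txd->callback_param;
}

static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
	return cb->callback != NULL;
}

/* Invoke the saved callback; the NULL result at the call site above
 * means no dmaengine_result is reported for these transfers. */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
			       const struct dmaengine_result *result)
{
	if (cb->callback)
		cb->callback(cb->callback_param);
}
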
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
enum dma_status ret;
unsigned long flags;
- struct dma_pl330_desc *desc, *running = NULL;
+ struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
unsigned int transferred, residual = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		goto out;

spin_lock_irqsave(&pch->lock, flags);
+ spin_lock(&pch->thread->dmac->lock);
if (pch->thread->req_running != -1)
running = pch->thread->req[pch->thread->req_running].desc;
+ last_enq = pch->thread->req[pch->thread->lstenq].desc;
+
/* Check in pending list */
list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == DONE)
			transferred = desc->bytes_requested;
else if (running && desc == running)
transferred =
pl330_get_current_xferred_count(pch, desc);
+		else if (desc->status == BUSY) {
+			/*
+			 * Busy but not running means either just enqueued,
+			 * or finished and not yet marked done.
+			 */
+			if (desc == last_enq)
+				transferred = 0;
+			else
+				transferred = desc->bytes_requested;
+		}
else
transferred = 0;
residual += desc->bytes_requested - transferred;
if (desc->last)
residual = 0;
}
+ spin_unlock(&pch->thread->dmac->lock);
spin_unlock_irqrestore(&pch->lock, flags);
out:
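
The second hunk reads the pl330's two-slot request queue while computing the residue: req_running indexes the slot the DMA thread is currently executing and lstenq the most recently enqueued slot. Both are owned by the controller-wide dmac->lock, which is why it is taken nested inside pch->lock and released in reverse order. A BUSY descriptor that is not the running one has therefore either not started yet (it is the last-enqueued slot, so nothing has transferred) or has already finished and simply has not been marked DONE (everything has transferred). A distilled, stand-alone sketch of that accounting, using made-up names (xfer_desc, XFER_*, bytes_transferred, pending_residue) rather than the driver's real types:

#include <stdbool.h>
#include <stddef.h>

enum xfer_status { XFER_PREP, XFER_BUSY, XFER_DONE };

struct xfer_desc {
	enum xfer_status status;
	unsigned int bytes_requested;
	bool last;			/* closes one logical transaction */
};

/* How many bytes a descriptor is credited with, mirroring the
 * DONE / running / BUSY / default ladder in the hunk above. */
static unsigned int
bytes_transferred(const struct xfer_desc *desc,
		  const struct xfer_desc *running,
		  const struct xfer_desc *last_enq,
		  unsigned int hw_count)
{
	if (desc->status == XFER_DONE)
		return desc->bytes_requested;	/* fully moved */
	if (desc == running)
		return hw_count;		/* read from hardware */
	if (desc->status == XFER_BUSY)		/* enqueued vs. finished */
		return desc == last_enq ? 0 : desc->bytes_requested;
	return 0;				/* still PREP */
}

/* Residue over the pending list; as in the snippet, the running total
 * is discarded at each completed-transaction boundary (desc->last) so
 * that only the unfinished transaction contributes. */
static unsigned int
pending_residue(const struct xfer_desc *descs, size_t n,
		const struct xfer_desc *running,
		const struct xfer_desc *last_enq,
		unsigned int hw_count)
{
	unsigned int residual = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		residual += descs[i].bytes_requested -
			bytes_transferred(&descs[i], running,
					  last_enq, hw_count);
		if (descs[i].last)
			residual = 0;
	}
	return residual;
}

In the driver itself the walk additionally stops at the descriptor matching the queried cookie, so the boundary reset only clears transactions that completed earlier in the list.
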