/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#define TSI721_DMA_TX_QUEUE_SZ	16	/* number of transaction descriptors */

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64 *sts_ptr;
	dma_addr_t bd_phys;
	dma_addr_t sts_phys;
	int sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);
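
	/*
	 * The engine reports descriptor completions through a status FIFO
	 * in host memory (drained in tsi721_clr_stat); it must hold at
	 * least TSI721_DMA_MINSTSSZ entries and be a power of two in size,
	 * hence the sizing below.
	 */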
	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				&sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	dev_dbg(dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);
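
	/*
	 * The extra element appended to the ring is a DTYPE3 "link"
	 * descriptor that points back at the ring base, so the engine
	 * wraps to the first descriptor automatically.
	 */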
	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);
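
	/* Read back to flush the posted writes before the channel is used */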
	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;
		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);
		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n",
				bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;
		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);
		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n",
				bdma_chan->id);
			free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				 (void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;

	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}
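
/*
 * Bottom-half dispatch for BDMA channel interrupts: mask further channel
 * interrupts and defer processing to the tasklet, which re-enables them
 * when it finishes.
 */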
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	tsi721_bdma_handler(bdma_chan);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start DMA with no BDs ready\n");
		return;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id,
		bdma_chan->wr_count_next);
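
	/*
	 * Writing the new descriptor write count to DWRCNT acts as the
	 * doorbell: the channel processes descriptors up to that index.
	 * The read back flushes the posted write.
	 */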
	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (bd_ptr == NULL)
		return -EINVAL;
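
	/*
	 * Build a DTYPE1 (data block) descriptor: request type and
	 * destination ID are packed into the header, the RapidIO address
	 * (up to 66 bits: rio_addr plus two bits of rio_addr_u) is split
	 * across raddr_lo/raddr_hi and the top bits of bcount, and the
	 * local buffer pointer comes from the SG entry.
	 */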
	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
				(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static void
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (bd_ptr == NULL)
		return;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}
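
/*
 * Drain the descriptor status FIFO: clear completed entries in blocks of
 * eight 64-bit words, then publish the new read pointer to the DSRP
 * register so the hardware can reuse the slots.
 */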
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to use non-idle channel\n");
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n",
		__func__, rd_idx, idx);
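
	/*
	 * rd_idx (hardware read count) and idx (next write slot) together
	 * detect a full ring: if the write index catches up with the read
	 * index, the remaining SG entries are deferred to a later submit.
	 */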
	for_each_sg(desc->sg, sg, desc->sg_len, i) {
		dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n",
			i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			dev_err(dchan->device->dev,
				"%s: SG entry %d is too large\n", __func__, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev,
				"%s: prev desc final len: %d\n",
				__func__, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			dev_dbg(dchan->device->dev,
				"%s: HW descriptor ring is full @ %d\n",
				__func__, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			dev_err(dchan->device->dev,
				"Failed to build desc: err=%d\n", err);
			break;
		}

		dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n",
			bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev,
				"%s: last desc final len: %d\n",
				__func__, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}

static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */
	if (desc == NULL && bdma_chan->active_tx == NULL &&
	    !list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init((&desc->desc_node));
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			dev_dbg(bdma_chan->dchan.device->dev,
				"ERR: tsi721_submit_sg failed with err=%d\n",
				err);
		}
	}

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__);
}

static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
		__func__, bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
			__func__, bdma_chan->id, dmac_sts);

		spin_lock(&bdma_chan->lock);
		bdma_chan->active_tx = NULL;
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMAC%d descriptor status FIFO is full\n",
			__func__, bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			spin_unlock(&bdma_chan->lock);
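			/* Run the completion callback without the lock held */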
			if (callback)
				callback(param);
			spin_lock(&bdma_chan->lock);
		}

		tsi721_advance_work(bdma_chan, bdma_chan->active_tx);
		spin_unlock(&bdma_chan->lock);
	}

	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: wrong state of descriptor %p\n", __func__, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	int i;

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base)
		return TSI721_DMA_TX_QUEUE_SZ;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		dev_err(dchan->device->dev, "Unable to initialize data DMA"
			" channel %d, aborting\n", bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
		       GFP_KERNEL);
	if (!desc) {
		dev_err(dchan->device->dev,
			"Failed to allocate logical descriptors\n");
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;
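
	/* Seed the channel's free list with the preallocated descriptors */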
	for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return TSI721_DMA_TX_QUEUE_SZ;
}

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base == NULL)
		return;
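
	/*
	 * Teardown order matters: mask channel interrupts and mark the
	 * channel inactive before synchronizing IRQs and killing the
	 * tasklet, so nothing can re-schedule work during cleanup.
	 */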
	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		tsi721_advance_work(bdma_chan, NULL);
	}
	spin_unlock_bh(&bdma_chan->lock);
}

static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
		return NULL;
	}

	dev_dbg(dchan->device->dev, "%s: %s\n", __func__,
		(dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		dev_err(dchan->device->dev,
			"%s: Unsupported DMA direction option\n", __func__);
		return NULL;
	}
	spin_lock_bh(&bdma_chan->lock);

	list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->desc_node);
			desc->destid = rext->destid;
			desc->rio_addr = rext->rio_addr;
			desc->rio_addr_u = 0;
			desc->rtype = rtype;
			desc->sg_len = sg_len;
			desc->sg = sgl;
			txd = &desc->txd;
			txd->flags = flags;
			break;
		}
	}

	spin_unlock_bh(&bdma_chan->lock);

	return txd;
}

static int tsi721_terminate_all(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	u32 dmac_int;
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
	}

	if (bdma_chan->active_tx)
		list_add(&bdma_chan->active_tx->desc_node, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
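
		/* The maintenance channel is not exposed via the DMA engine */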
		if (i == TSI721_DMACH_MAINT)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		bdma_chan->active_tx = NULL;
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

	return err;
}