rapidio/tsi721_dma: fix pending transaction queue handling
drivers/rapidio/devices/tsi721_dma.c
/*
 * DMA Engine support for Tsi721 PCI Express-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#define TSI721_DMA_TX_QUEUE_SZ	16	/* number of transaction descriptors */

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

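/*
 * Note (assuming the mainline Makefile, which links this file into the
 * tsi721_mport module): the hardware descriptor ring size can be set at
 * load time, e.g. a hypothetical invocation:
 *
 *	modprobe tsi721_mport dma_desc_per_channel=256
 *
 * S_IWUSR also exposes the parameter for runtime updates through
 * /sys/module/tsi721_mport/parameters/dma_desc_per_channel; channels that
 * are already initialized keep their current ring size until their
 * resources are re-allocated.
 */
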
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

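/*
 * tsi721_bdma_ch_init() lays out bd_num data descriptors followed by one
 * extra DTYPE3 "link" descriptor whose next-pointer targets the start of
 * the ring, so the channel walks its buffer descriptors as a circular
 * list:
 *
 *	[BD0][BD1] ... [BDn-1][LINK -> BD0]
 *
 * The descriptor status FIFO is sized to at least TSI721_DMA_MINSTSSZ
 * entries and rounded up to a power of two; tsi721_clr_stat() below
 * relies on that for its wrap-around arithmetic.
 */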
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64		*sts_ptr;
	dma_addr_t	bd_phys;
	dma_addr_t	sts_phys;
	int		sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
				  bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	dev_dbg(dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n",
				bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n",
				bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				(void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	tsi721_bdma_handler(bdma_chan);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

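/*
 * Descriptor fetch is driven by the write-count register: the channel
 * keeps fetching and executing buffer descriptors until its internal
 * read count (read back via TSI721_DMAC_DRDCNT in tsi721_submit_sg())
 * catches up with the write count posted to TSI721_DMAC_DWRCNT.  Writing
 * wr_count_next below is therefore the "doorbell" that starts whatever
 * tsi721_submit_sg() has staged in the ring.
 */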
/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start DMA with no BDs ready\n");
		return;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id,
		bdma_chan->wr_count_next);

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

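/*
 * A DTYPE1 descriptor carries a 66-bit RapidIO address split across
 * fields: the two low bits of the 64-bit rio_addr go into bits 31:30 of
 * the bcount word, while the remaining bits (plus the two rio_addr_u
 * bits) form the raddr_lo/raddr_hi pair shifted right by two.  The byte
 * count itself is OR-ed in later by tsi721_desc_fill_end(), once
 * scatterlist merging in tsi721_submit_sg() has settled the final size.
 */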
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (bd_ptr == NULL)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (bd_ptr == NULL)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}

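/*
 * Completion status is written by the hardware into 64-byte status
 * blocks (struct tsi721_dma_sts, eight u64 words each).
 * tsi721_clr_stat() walks the blocks starting at sts_rdptr, zeroes every
 * consumed word, and posts the new read pointer through TSI721_DMAC_DSRP
 * so the channel can reuse the entries.  sts_size is a power of two,
 * which keeps the wrap to a simple modulo.
 */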
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

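/*
 * tsi721_submit_sg() converts the transaction's scatterlist into hardware
 * buffer descriptors, merging physically contiguous SG entries into a
 * single descriptor where possible.  If the BD ring fills up before the
 * scatterlist is exhausted, the remaining sg pointer and count are saved
 * back into the tx descriptor so the tasklet can resume the same
 * transaction once the queued part completes; sg_len reaching zero is
 * what marks the transaction as fully submitted.
 */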
/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to use non-idle channel\n");
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n",
		__func__, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {
		dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n",
			i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			dev_err(dchan->device->dev,
				"%s: SG entry %d is too large\n", __func__, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev,
				"%s: prev desc final len: %d\n",
				__func__, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			dev_dbg(dchan->device->dev,
				"%s: HW descriptor ring is full @ %d\n",
				__func__, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			dev_err(dchan->device->dev,
				"Failed to build desc: err=%d\n", err);
			break;
		}

		dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n",
			bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev,
				"%s: last desc final len: %d\n",
				__func__, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}

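/*
 * Only one transaction is in flight per channel at any time (active_tx);
 * everything else waits on the pending queue.  tsi721_advance_work()
 * either resumes the transaction passed in by the caller (a partially
 * submitted active_tx) or, when the channel is idle with no active
 * transaction, promotes the head of the pending queue to active_tx and
 * submits it.
 */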
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */
	if (desc == NULL && bdma_chan->active_tx == NULL &&
					!list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init(&desc->desc_node);
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			dev_dbg(bdma_chan->dchan.device->dev,
				"ERR: tsi721_submit_sg failed with err=%d\n",
				err);
		}
	}

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__);
}

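/*
 * Channel tasklet: runs with channel interrupts masked (they are disabled
 * in tsi721_bdma_handler() and re-enabled at the end of this function).
 * On an error interrupt the current transaction is simply dropped from
 * active_tx; on completion the transaction's callback is invoked with the
 * channel lock released, and the engine is then advanced to either the
 * remainder of the active transaction or the next queued one.
 */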
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
		__func__, bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
			__func__, bdma_chan->id, dmac_sts);

		spin_lock(&bdma_chan->lock);
		bdma_chan->active_tx = NULL;
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMAC%d descriptor status FIFO is full\n",
			__func__, bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		/*
		 * active_tx may already be NULL here if an error interrupt
		 * was handled above in the same pass; guard the dereference.
		 */
		if (desc && desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
			spin_lock(&bdma_chan->lock);
		}

		tsi721_advance_work(bdma_chan, bdma_chan->active_tx);
		spin_unlock(&bdma_chan->lock);
	}

	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: wrong state of descriptor %p\n", __func__, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

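/*
 * Two descriptor pools exist per channel: TSI721_DMA_TX_QUEUE_SZ logical
 * transaction descriptors (struct tsi721_tx_desc, one per client request)
 * allocated here, and dma_desc_per_channel hardware buffer descriptors
 * (struct tsi721_dma_desc) allocated by tsi721_bdma_ch_init().  A single
 * transaction can consume many hardware descriptors, and can span several
 * rounds through the BD ring when it is larger than the ring itself.
 */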
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	int i;

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base)
		return TSI721_DMA_TX_QUEUE_SZ;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		dev_err(dchan->device->dev,
			"Unable to initialize data DMA channel %d, aborting\n",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
			GFP_KERNEL);
	if (!desc) {
		dev_err(dchan->device->dev,
			"Failed to allocate logical descriptors\n");
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return TSI721_DMA_TX_QUEUE_SZ;
}

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base == NULL)
		return;

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active)
		tsi721_advance_work(bdma_chan, NULL);
	spin_unlock_bh(&bdma_chan->lock);
}

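/*
 * Client-side sketch (illustrative, not part of this driver: dchan, sgl,
 * sg_len, destid, and xfer_done_cb are hypothetical caller state; only
 * the rio_dma_ext fields consumed by tsi721_prep_rio_sg() are real):
 *
 *	struct rio_dma_ext rext = {
 *		.destid   = destid,		// target RapidIO device ID
 *		.rio_addr = 0x1000,		// 64-bit RapidIO address
 *		.wr_type  = RDW_ALL_NWRITE_R,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
 *			DMA_MEM_TO_DEV,
 *			DMA_PREP_INTERRUPT | DMA_CTRL_ACK, &rext);
 *	if (tx) {
 *		tx->callback = xfer_done_cb;
 *		dmaengine_submit(tx);		// ends up in tsi721_tx_submit()
 *		dma_async_issue_pending(dchan);	// tsi721_issue_pending()
 *	}
 */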
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
		return NULL;
	}

	dev_dbg(dchan->device->dev, "%s: %s\n", __func__,
		(dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		dev_err(dchan->device->dev,
			"%s: Unsupported DMA direction option\n", __func__);
		return NULL;
	}

	spin_lock_bh(&bdma_chan->lock);

	list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->desc_node);
			desc->destid = rext->destid;
			desc->rio_addr = rext->rio_addr;
			desc->rio_addr_u = 0;
			desc->rtype = rtype;
			desc->sg_len	= sg_len;
			desc->sg	= sgl;
			txd		= &desc->txd;
			txd->flags	= flags;
			break;
		}
	}

	spin_unlock_bh(&bdma_chan->lock);

	return txd;
}

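/*
 * Termination path: if the channel is busy it is suspended via
 * TSI721_DMAC_CTL_SUSP, then all transactions (active and queued) are
 * failed back to their owners through tsi721_dma_tx_err().  Note the
 * suspend wait below polls TSI721_DMAC_INT without a timeout; it relies
 * on the hardware always raising TSI721_DMAC_INT_SUSP once the suspend
 * request takes effect.
 */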
static int tsi721_terminate_all(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	u32 dmac_int;
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
	}

	if (bdma_chan->active_tx)
		list_add(&bdma_chan->active_tx->desc_node, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if (i == TSI721_DMACH_MAINT)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		bdma_chan->active_tx = NULL;
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

	return err;
}