dmaengine: edma: Use dev_dbg instead pr_debug
[cascardo/linux.git] drivers/dma/edma.c
1 /*
2  * TI EDMA DMA engine driver
3  *
4  * Copyright 2012 Texas Instruments
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15
16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/edma.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/list.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/slab.h>
26 #include <linux/spinlock.h>
27 #include <linux/of.h>
28 #include <linux/of_dma.h>
29 #include <linux/of_irq.h>
30 #include <linux/of_address.h>
31 #include <linux/of_device.h>
32 #include <linux/pm_runtime.h>
33
34 #include <linux/platform_data/edma.h>
35
36 #include "dmaengine.h"
37 #include "virt-dma.h"
38
39 /* Offsets matching "struct edmacc_param" */
40 #define PARM_OPT                0x00
41 #define PARM_SRC                0x04
42 #define PARM_A_B_CNT            0x08
43 #define PARM_DST                0x0c
44 #define PARM_SRC_DST_BIDX       0x10
45 #define PARM_LINK_BCNTRLD       0x14
46 #define PARM_SRC_DST_CIDX       0x18
47 #define PARM_CCNT               0x1c
48
49 #define PARM_SIZE               0x20
50
51 /* Offsets for EDMA CC global channel registers and their shadows */
52 #define SH_ER                   0x00    /* 64 bits */
53 #define SH_ECR                  0x08    /* 64 bits */
54 #define SH_ESR                  0x10    /* 64 bits */
55 #define SH_CER                  0x18    /* 64 bits */
56 #define SH_EER                  0x20    /* 64 bits */
57 #define SH_EECR                 0x28    /* 64 bits */
58 #define SH_EESR                 0x30    /* 64 bits */
59 #define SH_SER                  0x38    /* 64 bits */
60 #define SH_SECR                 0x40    /* 64 bits */
61 #define SH_IER                  0x50    /* 64 bits */
62 #define SH_IECR                 0x58    /* 64 bits */
63 #define SH_IESR                 0x60    /* 64 bits */
64 #define SH_IPR                  0x68    /* 64 bits */
65 #define SH_ICR                  0x70    /* 64 bits */
66 #define SH_IEVAL                0x78
67 #define SH_QER                  0x80
68 #define SH_QEER                 0x84
69 #define SH_QEECR                0x88
70 #define SH_QEESR                0x8c
71 #define SH_QSER                 0x90
72 #define SH_QSECR                0x94
73 #define SH_SIZE                 0x200
74
75 /* Offsets for EDMA CC global registers */
76 #define EDMA_REV                0x0000
77 #define EDMA_CCCFG              0x0004
78 #define EDMA_QCHMAP             0x0200  /* 8 registers */
79 #define EDMA_DMAQNUM            0x0240  /* 8 registers (4 on OMAP-L1xx) */
80 #define EDMA_QDMAQNUM           0x0260
81 #define EDMA_QUETCMAP           0x0280
82 #define EDMA_QUEPRI             0x0284
83 #define EDMA_EMR                0x0300  /* 64 bits */
84 #define EDMA_EMCR               0x0308  /* 64 bits */
85 #define EDMA_QEMR               0x0310
86 #define EDMA_QEMCR              0x0314
87 #define EDMA_CCERR              0x0318
88 #define EDMA_CCERRCLR           0x031c
89 #define EDMA_EEVAL              0x0320
90 #define EDMA_DRAE               0x0340  /* 4 x 64 bits */
91 #define EDMA_QRAE               0x0380  /* 4 registers */
92 #define EDMA_QUEEVTENTRY        0x0400  /* 2 x 16 registers */
93 #define EDMA_QSTAT              0x0600  /* 2 registers */
94 #define EDMA_QWMTHRA            0x0620
95 #define EDMA_QWMTHRB            0x0624
96 #define EDMA_CCSTAT             0x0640
97
98 #define EDMA_M                  0x1000  /* global channel registers */
99 #define EDMA_ECR                0x1008
100 #define EDMA_ECRH               0x100C
101 #define EDMA_SHADOW0            0x2000  /* 4 shadow regions */
102 #define EDMA_PARM               0x4000  /* PaRAM entries */
103
104 #define PARM_OFFSET(param_no)   (EDMA_PARM + ((param_no) << 5))
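/* Each PaRAM slot is PARM_SIZE (0x20) bytes long, hence the shift by 5 */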
105
106 #define EDMA_DCHMAP             0x0100  /* 64 registers */
107
108 /* CCCFG register */
109 #define GET_NUM_DMACH(x)        (x & 0x7) /* bits 0-2 */
110 #define GET_NUM_PAENTRY(x)      ((x & 0x7000) >> 12) /* bits 12-14 */
111 #define GET_NUM_EVQUE(x)        ((x & 0x70000) >> 16) /* bits 16-18 */
112 #define GET_NUM_REGN(x)         ((x & 0x300000) >> 20) /* bits 20-21 */
113 #define CHMAP_EXIST             BIT(24)
114
115 /*
116  * Max of 20 segments per channel to conserve PaRAM slots.
117  * Also note that MAX_NR_SG should be at least the number of periods
118  * that are required for ASoC, otherwise DMA prep calls will
119  * fail. Today davinci-pcm is the only user of this driver and
120  * requires at least 17 slots, so we set the default to 20.
121  */
122 #define MAX_NR_SG               20
123 #define EDMA_MAX_SLOTS          MAX_NR_SG
124 #define EDMA_DESCRIPTORS        16
125
126 #define EDMA_CHANNEL_ANY                -1      /* for edma_alloc_channel() */
127 #define EDMA_SLOT_ANY                   -1      /* for edma_alloc_slot() */
128 #define EDMA_CONT_PARAMS_ANY             1001
129 #define EDMA_CONT_PARAMS_FIXED_EXACT     1002
130 #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
131
132 /* PaRAM slots are laid out like this */
133 struct edmacc_param {
134         u32 opt;
135         u32 src;
136         u32 a_b_cnt;
137         u32 dst;
138         u32 src_dst_bidx;
139         u32 link_bcntrld;
140         u32 src_dst_cidx;
141         u32 ccnt;
142 } __packed;
143
144 /* fields in edmacc_param.opt */
145 #define SAM             BIT(0)
146 #define DAM             BIT(1)
147 #define SYNCDIM         BIT(2)
148 #define STATIC          BIT(3)
149 #define EDMA_FWID       (0x07 << 8)
150 #define TCCMODE         BIT(11)
151 #define EDMA_TCC(t)     ((t) << 12)
152 #define TCINTEN         BIT(20)
153 #define ITCINTEN        BIT(21)
154 #define TCCHEN          BIT(22)
155 #define ITCCHEN         BIT(23)
156
157 /* Possible values for the ch_status parameter of the callback function */
158 #define EDMA_DMA_COMPLETE 1
159 #define EDMA_DMA_CC_ERROR 2
160 #define EDMA_DMA_TC1_ERROR 3
161 #define EDMA_DMA_TC2_ERROR 4
162
163 struct edma_pset {
164         u32                             len;
165         dma_addr_t                      addr;
166         struct edmacc_param             param;
167 };
168
169 struct edma_desc {
170         struct virt_dma_desc            vdesc;
171         struct list_head                node;
172         enum dma_transfer_direction     direction;
173         int                             cyclic;
174         int                             absync;
175         int                             pset_nr;
176         struct edma_chan                *echan;
177         int                             processed;
178
179         /*
180          * The following 4 elements are used for residue accounting.
181          *
182          * - processed_stat: the number of SG elements we have traversed
183          * so far for accounting purposes. This is updated to match processed
184          * during edma_callback and is always <= processed, because processed
185          * refers to the number of transfers programmed to the EDMA
186          * controller, whereas processed_stat counts the transfers
187          * accounted for so far.
188          *
189          * - residue: the number of bytes left to transfer for this descriptor
190          *
191          * - residue_stat: the residue in bytes of the data we have covered
192          * so far for accounting. This is updated to match residue
193          * during callbacks to keep it current.
194          *
195          * - sg_len: tracks the length of the current intermediate transfer;
196          * this is required to update the residue in the intermediate transfer
197          * completion callback.
198          */
199         int                             processed_stat;
200         u32                             sg_len;
201         u32                             residue;
202         u32                             residue_stat;
203
204         struct edma_pset                pset[0];
205 };
206
207 struct edma_cc;
208
209 struct edma_chan {
210         struct virt_dma_chan            vchan;
211         struct list_head                node;
212         struct edma_desc                *edesc;
213         struct edma_cc                  *ecc;
214         int                             ch_num;
215         bool                            alloced;
216         int                             slot[EDMA_MAX_SLOTS];
217         int                             missed;
218         struct dma_slave_config         cfg;
219 };
220
221 struct edma_cc {
222         struct device                   *dev;
223         struct edma_soc_info            *info;
224         void __iomem                    *base;
225         int                             id;
226
227         /* eDMA3 resource information */
228         unsigned                        num_channels;
229         unsigned                        num_region;
230         unsigned                        num_slots;
231         unsigned                        num_tc;
232         enum dma_event_q                default_queue;
233
234         bool                            unused_chan_list_done;
235         /* The edma_inuse bit for each PaRAM slot is clear unless the
236          * channel is in use ... by ARM or DSP, for QDMA, or whatever.
237          */
238         unsigned long *edma_inuse;
239
240         /* The edma_unused bit for each channel is set unless the
241          * channel is in use on this platform. Populating it relies on
242          * a bit of SoC-specific initialization code.
243          */
244         unsigned long *edma_unused;
245
246         struct dma_interrupt_data {
247                 void (*callback)(unsigned channel, unsigned short ch_status,
248                                  void *data);
249                 void *data;
250         } *intr_data;
251
252         struct dma_device               dma_slave;
253         struct edma_chan                *slave_chans;
254         int                             dummy_slot;
255 };
256
257 /* dummy param set used to (re)initialize parameter RAM slots */
258 static const struct edmacc_param dummy_paramset = {
259         .link_bcntrld = 0xffff,
260         .ccnt = 1,
261 };
262
263 static const struct of_device_id edma_of_ids[] = {
264         { .compatible = "ti,edma3", },
265         {}
266 };
267
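/*
 * Register access helpers: all eDMA CC registers are accessed as 32-bit
 * values at an offset from the memory-mapped base in ecc->base.
 */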
268 static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
269 {
270         return (unsigned int)__raw_readl(ecc->base + offset);
271 }
272
273 static inline void edma_write(struct edma_cc *ecc, int offset, int val)
274 {
275         __raw_writel(val, ecc->base + offset);
276 }
277
278 static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
279                                unsigned or)
280 {
281         unsigned val = edma_read(ecc, offset);
282
283         val &= and;
284         val |= or;
285         edma_write(ecc, offset, val);
286 }
287
288 static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
289 {
290         unsigned val = edma_read(ecc, offset);
291
292         val &= and;
293         edma_write(ecc, offset, val);
294 }
295
296 static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
297 {
298         unsigned val = edma_read(ecc, offset);
299
300         val |= or;
301         edma_write(ecc, offset, val);
302 }
303
304 static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
305                                            int i)
306 {
307         return edma_read(ecc, offset + (i << 2));
308 }
309
310 static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
311                                     unsigned val)
312 {
313         edma_write(ecc, offset + (i << 2), val);
314 }
315
316 static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
317                                      unsigned and, unsigned or)
318 {
319         edma_modify(ecc, offset + (i << 2), and, or);
320 }
321
322 static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
323                                  unsigned or)
324 {
325         edma_or(ecc, offset + (i << 2), or);
326 }
327
328 static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
329                                   unsigned or)
330 {
331         edma_or(ecc, offset + ((i * 2 + j) << 2), or);
332 }
333
334 static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
335                                      int j, unsigned val)
336 {
337         edma_write(ecc, offset + ((i * 2 + j) << 2), val);
338 }
339
340 static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
341 {
342         return edma_read(ecc, EDMA_SHADOW0 + offset);
343 }
344
345 static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
346                                                    int offset, int i)
347 {
348         return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
349 }
350
351 static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
352                                       unsigned val)
353 {
354         edma_write(ecc, EDMA_SHADOW0 + offset, val);
355 }
356
357 static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
358                                             int i, unsigned val)
359 {
360         edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
361 }
362
363 static inline unsigned int edma_parm_read(struct edma_cc *ecc, int offset,
364                                           int param_no)
365 {
366         return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
367 }
368
369 static inline void edma_parm_write(struct edma_cc *ecc, int offset,
370                                    int param_no, unsigned val)
371 {
372         edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
373 }
374
375 static inline void edma_parm_modify(struct edma_cc *ecc, int offset,
376                                     int param_no, unsigned and, unsigned or)
377 {
378         edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
379 }
380
381 static inline void edma_parm_and(struct edma_cc *ecc, int offset, int param_no,
382                                  unsigned and)
383 {
384         edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
385 }
386
387 static inline void edma_parm_or(struct edma_cc *ecc, int offset, int param_no,
388                                 unsigned or)
389 {
390         edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
391 }
392
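/* Set or clear @len consecutive bits starting at @offset in bitmap @p */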
393 static inline void set_bits(int offset, int len, unsigned long *p)
394 {
395         for (; len > 0; len--)
396                 set_bit(offset + (len - 1), p);
397 }
398
399 static inline void clear_bits(int offset, int len, unsigned long *p)
400 {
401         for (; len > 0; len--)
402                 clear_bit(offset + (len - 1), p);
403 }
404
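/*
 * Map DMA channel @ch_no to event queue @queue_no. Each DMAQNUM register
 * holds the 4-bit queue number for eight channels.
 */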
405 static void edma_map_dmach_to_queue(struct edma_cc *ecc, unsigned ch_no,
406                                     enum dma_event_q queue_no)
407 {
408         int bit = (ch_no & 0x7) * 4;
409
410         /* default to low priority queue */
411         if (queue_no == EVENTQ_DEFAULT)
412                 queue_no = ecc->default_queue;
413
414         queue_no &= 7;
415         edma_modify_array(ecc, EDMA_DMAQNUM, (ch_no >> 3), ~(0x7 << bit),
416                           queue_no << bit);
417 }
418
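/* Set the priority (0-7) of event queue @queue_no via the QUEPRI register */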
419 static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
420                                           int priority)
421 {
422         int bit = queue_no * 4;
423
424         edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
425 }
426
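/* Map each DMA channel to the PaRAM set with the same number via DCHMAP */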
427 static void edma_direct_dmach_to_param_mapping(struct edma_cc *ecc)
428 {
429         int i;
430
431         for (i = 0; i < ecc->num_channels; i++)
432                 edma_write_array(ecc, EDMA_DCHMAP, i, (i << 5));
433 }
434
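/*
 * bus_for_each_dev() callback: clear the edma_unused bit of every channel
 * that a platform device claims (via "dmas" in DT, or IORESOURCE_DMA
 * resources otherwise), so that only channels with no event association
 * are handed out for EDMA_CHANNEL_ANY requests.
 */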
435 static int prepare_unused_channel_list(struct device *dev, void *data)
436 {
437         struct platform_device *pdev = to_platform_device(dev);
438         struct edma_cc *ecc = data;
439         int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0);
440         int dma_req_max = dma_req_min + ecc->num_channels;
441         int i, count;
442         struct of_phandle_args  dma_spec;
443
444         if (dev->of_node) {
445                 struct platform_device *dma_pdev;
446
447                 count = of_property_count_strings(dev->of_node, "dma-names");
448                 if (count < 0)
449                         return 0;
450                 for (i = 0; i < count; i++) {
451                         if (of_parse_phandle_with_args(dev->of_node, "dmas",
452                                                        "#dma-cells", i,
453                                                        &dma_spec))
454                                 continue;
455
456                         if (!of_match_node(edma_of_ids, dma_spec.np)) {
457                                 of_node_put(dma_spec.np);
458                                 continue;
459                         }
460
461                         dma_pdev = of_find_device_by_node(dma_spec.np);
462                         if (&dma_pdev->dev != ecc->dev)
463                                 continue;
464
465                         clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
466                                   ecc->edma_unused);
467                         of_node_put(dma_spec.np);
468                 }
469                 return 0;
470         }
471
472         /* For non-OF case */
473         for (i = 0; i < pdev->num_resources; i++) {
474                 struct resource *res = &pdev->resource[i];
475                 int dma_req;
476
477                 if (!(res->flags & IORESOURCE_DMA))
478                         continue;
479
480                 dma_req = (int)res->start;
481                 if (dma_req >= dma_req_min && dma_req < dma_req_max)
482                         clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
483                                   ecc->edma_unused);
484         }
485
486         return 0;
487 }
488
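/*
 * Register or remove a completion callback for @lch and enable or disable
 * its completion interrupt in shadow region 0 accordingly.
 */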
489 static void edma_setup_interrupt(struct edma_cc *ecc, unsigned lch,
490         void (*callback)(unsigned channel, u16 ch_status, void *data),
491         void *data)
492 {
493         lch = EDMA_CHAN_SLOT(lch);
494
495         if (!callback)
496                 edma_shadow0_write_array(ecc, SH_IECR, lch >> 5,
497                                          BIT(lch & 0x1f));
498
499         ecc->intr_data[lch].callback = callback;
500         ecc->intr_data[lch].data = data;
501
502         if (callback) {
503                 edma_shadow0_write_array(ecc, SH_ICR, lch >> 5,
504                                          BIT(lch & 0x1f));
505                 edma_shadow0_write_array(ecc, SH_IESR, lch >> 5,
506                                          BIT(lch & 0x1f));
507         }
508 }
509
510 /*
511  * PaRAM management functions
512  */
513
514 /**
515  * edma_write_slot - write parameter RAM data for slot
516  * @ecc: pointer to edma_cc struct
517  * @slot: number of parameter RAM slot being modified
518  * @param: data to be written into parameter RAM slot
519  *
520  * Use this to assign all parameters of a transfer at once.  This
521  * allows more efficient setup of transfers than issuing multiple
522  * calls to set up those parameters in small pieces, and provides
523  * complete control over all transfer options.
524  */
525 static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
526                             const struct edmacc_param *param)
527 {
528         slot = EDMA_CHAN_SLOT(slot);
529         if (slot >= ecc->num_slots)
530                 return;
531         memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
532 }
533
534 /**
535  * edma_read_slot - read parameter RAM data from slot
536  * @ecc: pointer to edma_cc struct
537  * @slot: number of parameter RAM slot being copied
538  * @param: where to store copy of parameter RAM data
539  *
540  * Use this to read data from a parameter RAM slot, perhaps to
541  * save it as a template for later reuse.
542  */
543 static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
544                            struct edmacc_param *param)
545 {
546         slot = EDMA_CHAN_SLOT(slot);
547         if (slot >= ecc->num_slots)
548                 return;
549         memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
550 }
551
552 /**
553  * edma_alloc_slot - allocate DMA parameter RAM
554  * @ecc: pointer to edma_cc struct
555  * @slot: specific slot to allocate; negative for "any unused slot"
556  *
557  * This allocates a parameter RAM slot, initializing it to hold a
558  * dummy transfer.  Slots allocated using this routine have not been
559  * mapped to a hardware DMA channel, and will normally be used by
560  * linking to them from a slot associated with a DMA channel.
561  *
562  * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
563  * slots may be allocated on behalf of DSP firmware.
564  *
565  * Returns the number of the slot, else negative errno.
566  */
567 static int edma_alloc_slot(struct edma_cc *ecc, int slot)
568 {
569         if (slot > 0)
570                 slot = EDMA_CHAN_SLOT(slot);
571         if (slot < 0) {
572                 slot = ecc->num_channels;
573                 for (;;) {
574                         slot = find_next_zero_bit(ecc->edma_inuse,
575                                                   ecc->num_slots,
576                                                   slot);
577                         if (slot == ecc->num_slots)
578                                 return -ENOMEM;
579                         if (!test_and_set_bit(slot, ecc->edma_inuse))
580                                 break;
581                 }
582         } else if (slot < ecc->num_channels || slot >= ecc->num_slots) {
583                 return -EINVAL;
584         } else if (test_and_set_bit(slot, ecc->edma_inuse)) {
585                 return -EBUSY;
586         }
587
588         edma_write_slot(ecc, slot, &dummy_paramset);
589
590         return EDMA_CTLR_CHAN(ecc->id, slot);
591 }
592
593 /**
594  * edma_free_slot - deallocate DMA parameter RAM
595  * @ecc: pointer to edma_cc struct
596  * @slot: parameter RAM slot returned from edma_alloc_slot()
597  *
598  * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
599  * Callers are responsible for ensuring the slot is inactive, and will
600  * not be activated.
601  */
602 static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
603 {
604         slot = EDMA_CHAN_SLOT(slot);
605         if (slot < ecc->num_channels || slot >= ecc->num_slots)
606                 return;
607
608         edma_write_slot(ecc, slot, &dummy_paramset);
609         clear_bit(slot, ecc->edma_inuse);
610 }
611
612 /**
613  * edma_link - link one parameter RAM slot to another
614  * @ecc: pointer to edma_cc struct
615  * @from: parameter RAM slot originating the link
616  * @to: parameter RAM slot which is the link target
617  *
618  * The originating slot should not be part of any active DMA transfer.
619  */
620 static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
621 {
622         from = EDMA_CHAN_SLOT(from);
623         to = EDMA_CHAN_SLOT(to);
624         if (from >= ecc->num_slots || to >= ecc->num_slots)
625                 return;
626
627         edma_parm_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
628                          PARM_OFFSET(to));
629 }
630
631 /**
632  * edma_get_position - returns the current transfer point
633  * @ecc: pointer to edma_cc struct
634  * @slot: parameter RAM slot being examined
635  * @dst:  true selects the dest position, false the source
636  *
637  * Returns the position of the current active slot
638  */
639 static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
640                                     bool dst)
641 {
642         u32 offs;
643
644         slot = EDMA_CHAN_SLOT(slot);
645         offs = PARM_OFFSET(slot);
646         offs += dst ? PARM_DST : PARM_SRC;
647
648         return edma_read(ecc, offs);
649 }
650
651 /*-----------------------------------------------------------------------*/
652 /**
653  * edma_start - start dma on a channel
654  * @ecc: pointer to edma_cc struct
655  * @channel: channel being activated
656  *
657  * Channels with event associations will be triggered by their hardware
658  * events, and channels without such associations will be triggered by
659  * software.  (At this writing there is no interface for using software
660  * triggers except with channels that don't support hardware triggers.)
661  *
662  * Returns zero on success, else negative errno.
663  */
664 static int edma_start(struct edma_cc *ecc, unsigned channel)
665 {
666         if (ecc->id != EDMA_CTLR(channel)) {
667                 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
668                         ecc->id, EDMA_CTLR(channel));
669                 return -EINVAL;
670         }
671         channel = EDMA_CHAN_SLOT(channel);
672
673         if (channel < ecc->num_channels) {
674                 int j = channel >> 5;
675                 unsigned int mask = BIT(channel & 0x1f);
676
677                 /* EDMA channels without event association */
678                 if (test_bit(channel, ecc->edma_unused)) {
679                         dev_dbg(ecc->dev, "ESR%d %08x\n", j,
680                                 edma_shadow0_read_array(ecc, SH_ESR, j));
681                         edma_shadow0_write_array(ecc, SH_ESR, j, mask);
682                         return 0;
683                 }
684
685                 /* EDMA channel with event association */
686                 dev_dbg(ecc->dev, "ER%d %08x\n", j,
687                         edma_shadow0_read_array(ecc, SH_ER, j));
688                 /* Clear any pending event or error */
689                 edma_write_array(ecc, EDMA_ECR, j, mask);
690                 edma_write_array(ecc, EDMA_EMCR, j, mask);
691                 /* Clear any SER */
692                 edma_shadow0_write_array(ecc, SH_SECR, j, mask);
693                 edma_shadow0_write_array(ecc, SH_EESR, j, mask);
694                 dev_dbg(ecc->dev, "EER%d %08x\n", j,
695                         edma_shadow0_read_array(ecc, SH_EER, j));
696                 return 0;
697         }
698
699         return -EINVAL;
700 }
701
702 /**
703  * edma_stop - stops dma on the channel passed
704  * @ecc: pointer to edma_cc struct
705  * @channel: channel being deactivated
706  *
707  * Any active transfer on @channel is paused and
708  * all pending hardware events are cleared.  The current transfer
709  * may not be resumed, and the channel's Parameter RAM should be
710  * reinitialized before being reused.
711  */
712 static void edma_stop(struct edma_cc *ecc, unsigned channel)
713 {
714         if (ecc->id != EDMA_CTLR(channel)) {
715                 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
716                         ecc->id, EDMA_CTLR(channel));
717                 return;
718         }
719         channel = EDMA_CHAN_SLOT(channel);
720
721         if (channel < ecc->num_channels) {
722                 int j = channel >> 5;
723                 unsigned int mask = BIT(channel & 0x1f);
724
725                 edma_shadow0_write_array(ecc, SH_EECR, j, mask);
726                 edma_shadow0_write_array(ecc, SH_ECR, j, mask);
727                 edma_shadow0_write_array(ecc, SH_SECR, j, mask);
728                 edma_write_array(ecc, EDMA_EMCR, j, mask);
729
730                 /* clear possibly pending completion interrupt */
731                 edma_shadow0_write_array(ecc, SH_ICR, j, mask);
732
733                 dev_dbg(ecc->dev, "EER%d %08x\n", j,
734                         edma_shadow0_read_array(ecc, SH_EER, j));
735
736                 /* REVISIT:  consider guarding against inappropriate event
737                  * chaining by overwriting with dummy_paramset.
738                  */
739         }
740 }
741
742 /**
743  * edma_pause - pause dma on a channel
744  * @ecc: pointer to edma_cc struct
745  * @channel: on which edma_start() has been called
746  *
747  * This temporarily disables EDMA hardware events on the specified channel,
748  * preventing them from triggering new transfers on its behalf.
749  */
750 static void edma_pause(struct edma_cc *ecc, unsigned channel)
751 {
752         if (ecc->id != EDMA_CTLR(channel)) {
753                 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
754                         ecc->id, EDMA_CTLR(channel));
755                 return;
756         }
757         channel = EDMA_CHAN_SLOT(channel);
758
759         if (channel < ecc->num_channels) {
760                 unsigned int mask = BIT(channel & 0x1f);
761
762                 edma_shadow0_write_array(ecc, SH_EECR, channel >> 5, mask);
763         }
764 }
765
766 /**
767  * edma_resume - resumes dma on a paused channel
768  * @ecc: pointer to edma_cc struct
769  * @channel: on which edma_pause() has been called
770  *
771  * This re-enables EDMA hardware events on the specified channel.
772  */
773 static void edma_resume(struct edma_cc *ecc, unsigned channel)
774 {
775         if (ecc->id != EDMA_CTLR(channel)) {
776                 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
777                         ecc->id, EDMA_CTLR(channel));
778                 return;
779         }
780         channel = EDMA_CHAN_SLOT(channel);
781
782         if (channel < ecc->num_channels) {
783                 unsigned int mask = BIT(channel & 0x1f);
784
785                 edma_shadow0_write_array(ecc, SH_EESR, channel >> 5, mask);
786         }
787 }
788
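/* Manually trigger @channel by setting its bit in the shadow region 0 ESR */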
789 static int edma_trigger_channel(struct edma_cc *ecc, unsigned channel)
790 {
791         unsigned int mask;
792
793         if (ecc->id != EDMA_CTLR(channel)) {
794                 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
795                         ecc->id, EDMA_CTLR(channel));
796                 return -EINVAL;
797         }
798         channel = EDMA_CHAN_SLOT(channel);
799         mask = BIT(channel & 0x1f);
800
801         edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
802
803         dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
804                 edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
805         return 0;
806 }
807
808 /******************************************************************************
809  *
810  * It cleans the PaRAM entry and brings EDMA back to its initial state if the
811  * media has been removed before EDMA has finished. It is useful for removable
812  * media.
813  * Arguments:
814  *      channel   - channel number
815  *
816  * FIXME this should not be needed ... edma_stop() should suffice.
817  *
818  *****************************************************************************/
820
821 static void edma_clean_channel(struct edma_cc *ecc, unsigned channel)
822 {
823         if (ecc->id != EDMA_CTLR(channel)) {
824                 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
825                         ecc->id, EDMA_CTLR(channel));
826                 return;
827         }
828         channel = EDMA_CHAN_SLOT(channel);
829
830         if (channel < ecc->num_channels) {
831                 int j = (channel >> 5);
832                 unsigned int mask = BIT(channel & 0x1f);
833
834                 dev_dbg(ecc->dev, "EMR%d %08x\n", j,
835                         edma_read_array(ecc, EDMA_EMR, j));
836                 edma_shadow0_write_array(ecc, SH_ECR, j, mask);
837                 /* Clear the corresponding EMR bits */
838                 edma_write_array(ecc, EDMA_EMCR, j, mask);
839                 /* Clear any SER */
840                 edma_shadow0_write_array(ecc, SH_SECR, j, mask);
841                 edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
842         }
843 }
844
845 /**
846  * edma_alloc_channel - allocate DMA channel and paired parameter RAM
847  * @ecc: pointer to edma_cc struct
848  * @channel: specific channel to allocate; negative for "any unmapped channel"
849  * @callback: optional; to be issued on DMA completion or errors
850  * @data: passed to callback
851  * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
852  *      Controller (TC) executes requests using this channel.  Use
853  *      EVENTQ_DEFAULT unless you really need a high priority queue.
854  *
855  * This allocates a DMA channel and its associated parameter RAM slot.
856  * The parameter RAM is initialized to hold a dummy transfer.
857  *
858  * Normal use is to pass a specific channel number as @channel, to make
859  * use of hardware events mapped to that channel.  When the channel will
860  * be used only for software triggering or event chaining, channels not
861  * mapped to hardware events (or mapped to unused events) are preferable.
862  *
863  * DMA transfers start from a channel using edma_start(), or by
864  * chaining.  When the transfer described in that channel's parameter RAM
865  * slot completes, that slot's data may be reloaded through a link.
866  *
867  * DMA errors are only reported to the @callback associated with the
868  * channel driving that transfer, but transfer completion callbacks can
869  * be sent to another channel under control of the TCC field in
870  * the option word of the transfer's parameter RAM set.  Drivers must not
871  * use DMA transfer completion callbacks for channels they did not allocate.
872  * (The same applies to TCC codes used in transfer chaining.)
873  *
874  * Returns the number of the channel, else negative errno.
875  */
876 static int edma_alloc_channel(struct edma_cc *ecc, int channel,
877                 void (*callback)(unsigned channel, u16 ch_status, void *data),
878                 void *data,
879                 enum dma_event_q eventq_no)
880 {
881         unsigned done = 0;
882         int ret = 0;
883
884         if (!ecc->unused_chan_list_done) {
885                 /*
886                  * Scan all the platform devices to find out the EDMA channels
887                  * used and clear them in the unused list, making the rest
888                  * available for ARM usage.
889                  */
890                 ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
891                                        prepare_unused_channel_list);
892                 if (ret < 0)
893                         return ret;
894
895                 ecc->unused_chan_list_done = true;
896         }
897
898         if (channel >= 0) {
899                 if (ecc->id != EDMA_CTLR(channel)) {
900                         dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n",
901                                 __func__, ecc->id, EDMA_CTLR(channel));
902                         return -EINVAL;
903                 }
904                 channel = EDMA_CHAN_SLOT(channel);
905         }
906
907         if (channel < 0) {
908                 channel = 0;
909                 for (;;) {
910                         channel = find_next_bit(ecc->edma_unused,
911                                                 ecc->num_channels, channel);
912                         if (channel == ecc->num_channels)
913                                 break;
914                         if (!test_and_set_bit(channel, ecc->edma_inuse)) {
915                                 done = 1;
916                                 break;
917                         }
918                         channel++;
919                 }
920                 if (!done)
921                         return -ENOMEM;
922         } else if (channel >= ecc->num_channels) {
923                 return -EINVAL;
924         } else if (test_and_set_bit(channel, ecc->edma_inuse)) {
925                 return -EBUSY;
926         }
927
928         /* ensure access through shadow region 0 */
929         edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
930
931         /* ensure no events are pending */
932         edma_stop(ecc, EDMA_CTLR_CHAN(ecc->id, channel));
933         edma_write_slot(ecc, channel, &dummy_paramset);
934
935         if (callback)
936                 edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, channel),
937                                      callback, data);
938
939         edma_map_dmach_to_queue(ecc, channel, eventq_no);
940
941         return EDMA_CTLR_CHAN(ecc->id, channel);
942 }
943
944 /**
945  * edma_free_channel - deallocate DMA channel
946  * @ecc: pointer to edma_cc struct
947  * @channel: dma channel returned from edma_alloc_channel()
948  *
949  * This deallocates the DMA channel and associated parameter RAM slot
950  * allocated by edma_alloc_channel().
951  *
952  * Callers are responsible for ensuring the channel is inactive, and
953  * will not be reactivated by linking, chaining, or software calls to
954  * edma_start().
955  */
956 static void edma_free_channel(struct edma_cc *ecc, unsigned channel)
957 {
958         if (ecc->id != EDMA_CTLR(channel)) {
959                 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
960                         ecc->id, EDMA_CTLR(channel));
961                 return;
962         }
963         channel = EDMA_CHAN_SLOT(channel);
964
965         if (channel >= ecc->num_channels)
966                 return;
967
968         edma_setup_interrupt(ecc, channel, NULL, NULL);
969         /* REVISIT should probably take out of shadow region 0 */
970
971         memcpy_toio(ecc->base + PARM_OFFSET(channel), &dummy_paramset,
972                     PARM_SIZE);
973         clear_bit(channel, ecc->edma_inuse);
974 }
975
976 /*
977  * edma_assign_channel_eventq - move given channel to desired eventq
978  * Arguments:
979  *      channel - channel number
980  *      eventq_no - queue to move the channel to
981  *
982  * Can be used to move a channel to a selected event queue.
983  */
984 static void edma_assign_channel_eventq(struct edma_cc *ecc, unsigned channel,
985                                        enum dma_event_q eventq_no)
986 {
987         if (ecc->id != EDMA_CTLR(channel)) {
988                 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
989                         ecc->id, EDMA_CTLR(channel));
990                 return;
991         }
992         channel = EDMA_CHAN_SLOT(channel);
993
994         if (channel >= ecc->num_channels)
995                 return;
996
997         /* default to low priority queue */
998         if (eventq_no == EVENTQ_DEFAULT)
999                 eventq_no = ecc->default_queue;
1000         if (eventq_no >= ecc->num_tc)
1001                 return;
1002
1003         edma_map_dmach_to_queue(ecc, channel, eventq_no);
1004 }
1005
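/******************************************************************************
 *
 * DMA transfer completion interrupt handler
 *
 *****************************************************************************/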
1006 static irqreturn_t dma_irq_handler(int irq, void *data)
1007 {
1008         struct edma_cc *ecc = data;
1009         int ctlr;
1010         u32 sh_ier;
1011         u32 sh_ipr;
1012         u32 bank;
1013
1014         ctlr = ecc->id;
1015         if (ctlr < 0)
1016                 return IRQ_NONE;
1017
1018         dev_dbg(ecc->dev, "dma_irq_handler\n");
1019
1020         sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
1021         if (!sh_ipr) {
1022                 sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
1023                 if (!sh_ipr)
1024                         return IRQ_NONE;
1025                 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
1026                 bank = 1;
1027         } else {
1028                 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
1029                 bank = 0;
1030         }
1031
1032         do {
1033                 u32 slot;
1034                 u32 channel;
1035
1036                 dev_dbg(ecc->dev, "IPR%d %08x\n", bank, sh_ipr);
1037
1038                 slot = __ffs(sh_ipr);
1039                 sh_ipr &= ~(BIT(slot));
1040
1041                 if (sh_ier & BIT(slot)) {
1042                         channel = (bank << 5) | slot;
1043                         /* Clear the corresponding IPR bits */
1044                         edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
1045                         if (ecc->intr_data[channel].callback)
1046                                 ecc->intr_data[channel].callback(
1047                                                 EDMA_CTLR_CHAN(ctlr, channel),
1048                                                 EDMA_DMA_COMPLETE,
1049                                                 ecc->intr_data[channel].data);
1050                 }
1051         } while (sh_ipr);
1052
1053         edma_shadow0_write(ecc, SH_IEVAL, 1);
1054         return IRQ_HANDLED;
1055 }
1056
1057 /******************************************************************************
1058  *
1059  * DMA error interrupt handler
1060  *
1061  *****************************************************************************/
1062 static irqreturn_t dma_ccerr_handler(int irq, void *data)
1063 {
1064         struct edma_cc *ecc = data;
1065         int i;
1066         int ctlr;
1067         unsigned int cnt = 0;
1068
1069         ctlr = ecc->id;
1070         if (ctlr < 0)
1071                 return IRQ_NONE;
1072
1073         dev_dbg(ecc->dev, "dma_ccerr_handler\n");
1074
1075         if ((edma_read_array(ecc, EDMA_EMR, 0) == 0) &&
1076             (edma_read_array(ecc, EDMA_EMR, 1) == 0) &&
1077             (edma_read(ecc, EDMA_QEMR) == 0) &&
1078             (edma_read(ecc, EDMA_CCERR) == 0))
1079                 return IRQ_NONE;
1080
1081         while (1) {
1082                 int j = -1;
1083
1084                 if (edma_read_array(ecc, EDMA_EMR, 0))
1085                         j = 0;
1086                 else if (edma_read_array(ecc, EDMA_EMR, 1))
1087                         j = 1;
1088                 if (j >= 0) {
1089                         dev_dbg(ecc->dev, "EMR%d %08x\n", j,
1090                                 edma_read_array(ecc, EDMA_EMR, j));
1091                         for (i = 0; i < 32; i++) {
1092                                 int k = (j << 5) + i;
1093
1094                                 if (edma_read_array(ecc, EDMA_EMR, j) &
1095                                                         BIT(i)) {
1096                                         /* Clear the corresponding EMR bits */
1097                                         edma_write_array(ecc, EDMA_EMCR, j,
1098                                                          BIT(i));
1099                                         /* Clear any SER */
1100                                         edma_shadow0_write_array(ecc, SH_SECR,
1101                                                                  j, BIT(i));
1102                                         if (ecc->intr_data[k].callback) {
1103                                                 ecc->intr_data[k].callback(
1104                                                         EDMA_CTLR_CHAN(ctlr, k),
1105                                                         EDMA_DMA_CC_ERROR,
1106                                                         ecc->intr_data[k].data);
1107                                         }
1108                                 }
1109                         }
1110                 } else if (edma_read(ecc, EDMA_QEMR)) {
1111                         dev_dbg(ecc->dev, "QEMR %02x\n",
1112                                 edma_read(ecc, EDMA_QEMR));
1113                         for (i = 0; i < 8; i++) {
1114                                 if (edma_read(ecc, EDMA_QEMR) & BIT(i)) {
1115                                         /* Clear the corresponding IPR bits */
1116                                         edma_write(ecc, EDMA_QEMCR, BIT(i));
1117                                         edma_shadow0_write(ecc, SH_QSECR,
1118                                                            BIT(i));
1119
1120                                         /* NOTE:  not reported!! */
1121                                 }
1122                         }
1123                 } else if (edma_read(ecc, EDMA_CCERR)) {
1124                         dev_dbg(ecc->dev, "CCERR %08x\n",
1125                                 edma_read(ecc, EDMA_CCERR));
1126                         /* FIXME:  CCERR.BIT(16) ignored!  much better
1127                          * to just write CCERRCLR with CCERR value...
1128                          */
1129                         for (i = 0; i < 8; i++) {
1130                                 if (edma_read(ecc, EDMA_CCERR) & BIT(i)) {
1131                                         /* Clear the corresponding IPR bits */
1132                                         edma_write(ecc, EDMA_CCERRCLR, BIT(i));
1133
1134                                         /* NOTE:  not reported!! */
1135                                 }
1136                         }
1137                 }
1138                 if ((edma_read_array(ecc, EDMA_EMR, 0) == 0) &&
1139                     (edma_read_array(ecc, EDMA_EMR, 1) == 0) &&
1140                     (edma_read(ecc, EDMA_QEMR) == 0) &&
1141                     (edma_read(ecc, EDMA_CCERR) == 0))
1142                         break;
1143                 cnt++;
1144                 if (cnt > 10)
1145                         break;
1146         }
1147         edma_write(ecc, EDMA_EEVAL, 1);
1148         return IRQ_HANDLED;
1149 }
1150
1151 static inline struct edma_cc *to_edma_cc(struct dma_device *d)
1152 {
1153         return container_of(d, struct edma_cc, dma_slave);
1154 }
1155
1156 static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
1157 {
1158         return container_of(c, struct edma_chan, vchan.chan);
1159 }
1160
1161 static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
1162 {
1163         return container_of(tx, struct edma_desc, vdesc.tx);
1164 }
1165
1166 static void edma_desc_free(struct virt_dma_desc *vdesc)
1167 {
1168         kfree(container_of(vdesc, struct edma_desc, vdesc));
1169 }
1170
1171 /* Dispatch a queued descriptor to the controller (caller holds lock) */
1172 static void edma_execute(struct edma_chan *echan)
1173 {
1174         struct edma_cc *ecc = echan->ecc;
1175         struct virt_dma_desc *vdesc;
1176         struct edma_desc *edesc;
1177         struct device *dev = echan->vchan.chan.device->dev;
1178         int i, j, left, nslots;
1179
1180         if (!echan->edesc) {
1181                 /* Setup is needed for the first transfer */
1182                 vdesc = vchan_next_desc(&echan->vchan);
1183                 if (!vdesc)
1184                         return;
1185                 list_del(&vdesc->node);
1186                 echan->edesc = to_edma_desc(&vdesc->tx);
1187         }
1188
1189         edesc = echan->edesc;
1190
1191         /* Find out how many left */
1192         left = edesc->pset_nr - edesc->processed;
1193         nslots = min(MAX_NR_SG, left);
1194         edesc->sg_len = 0;
1195
1196         /* Write descriptor PaRAM set(s) */
1197         for (i = 0; i < nslots; i++) {
1198                 j = i + edesc->processed;
1199                 edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
1200                 edesc->sg_len += edesc->pset[j].len;
1201                 dev_vdbg(dev,
1202                          "\n pset[%d]:\n"
1203                          "  chnum\t%d\n"
1204                          "  slot\t%d\n"
1205                          "  opt\t%08x\n"
1206                          "  src\t%08x\n"
1207                          "  dst\t%08x\n"
1208                          "  abcnt\t%08x\n"
1209                          "  ccnt\t%08x\n"
1210                          "  bidx\t%08x\n"
1211                          "  cidx\t%08x\n"
1212                          "  lkrld\t%08x\n",
1213                          j, echan->ch_num, echan->slot[i],
1214                          edesc->pset[j].param.opt,
1215                          edesc->pset[j].param.src,
1216                          edesc->pset[j].param.dst,
1217                          edesc->pset[j].param.a_b_cnt,
1218                          edesc->pset[j].param.ccnt,
1219                          edesc->pset[j].param.src_dst_bidx,
1220                          edesc->pset[j].param.src_dst_cidx,
1221                          edesc->pset[j].param.link_bcntrld);
1222                 /* Link to the previous slot if not the last set */
1223                 if (i != (nslots - 1))
1224                         edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
1225         }
1226
1227         edesc->processed += nslots;
1228
1229         /*
1230          * If this is the last set in a set of SG-list transactions
1231          * then set up a link to the dummy slot; this results in all future
1232          * events being absorbed and that's OK because we're done.
1233          */
1234         if (edesc->processed == edesc->pset_nr) {
1235                 if (edesc->cyclic)
1236                         edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
1237                 else
1238                         edma_link(ecc, echan->slot[nslots - 1],
1239                                   echan->ecc->dummy_slot);
1240         }
1241
1242         if (echan->missed) {
1243                 /*
1244                  * This happens due to setup times between intermediate
1245                  * transfers in long SG lists which have to be broken up into
1246                  * transfers of MAX_NR_SG
1247                  */
1248                 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
1249                 edma_clean_channel(ecc, echan->ch_num);
1250                 edma_stop(ecc, echan->ch_num);
1251                 edma_start(ecc, echan->ch_num);
1252                 edma_trigger_channel(ecc, echan->ch_num);
1253                 echan->missed = 0;
1254         } else if (edesc->processed <= MAX_NR_SG) {
1255                 dev_dbg(dev, "first transfer starting on channel %d\n",
1256                         echan->ch_num);
1257                 edma_start(ecc, echan->ch_num);
1258         } else {
1259                 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
1260                         echan->ch_num, edesc->processed);
1261                 edma_resume(ecc, echan->ch_num);
1262         }
1263 }
1264
1265 static int edma_terminate_all(struct dma_chan *chan)
1266 {
1267         struct edma_chan *echan = to_edma_chan(chan);
1268         unsigned long flags;
1269         LIST_HEAD(head);
1270
1271         spin_lock_irqsave(&echan->vchan.lock, flags);
1272
1273         /*
1274          * Stop DMA activity: we assume the callback will not be called
1275          * after edma_stop() returns (even if it does, it will see
1276          * echan->edesc is NULL and exit.)
1277          */
1278         if (echan->edesc) {
1279                 edma_stop(echan->ecc, echan->ch_num);
1280                 /* Move the cyclic channel back to default queue */
1281                 if (echan->edesc->cyclic)
1282                         edma_assign_channel_eventq(echan->ecc, echan->ch_num,
1283                                                    EVENTQ_DEFAULT);
1284                 /*
1285                  * free the running request descriptor
1286                  * since it is not in any of the vdesc lists
1287                  */
1288                 edma_desc_free(&echan->edesc->vdesc);
1289                 echan->edesc = NULL;
1290         }
1291
1292         vchan_get_all_descriptors(&echan->vchan, &head);
1293         spin_unlock_irqrestore(&echan->vchan.lock, flags);
1294         vchan_dma_desc_free_list(&echan->vchan, &head);
1295
1296         return 0;
1297 }
1298
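/* Cache the dmaengine slave configuration; 8 byte bus widths are rejected */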
1299 static int edma_slave_config(struct dma_chan *chan,
1300         struct dma_slave_config *cfg)
1301 {
1302         struct edma_chan *echan = to_edma_chan(chan);
1303
1304         if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1305             cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1306                 return -EINVAL;
1307
1308         memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
1309
1310         return 0;
1311 }
1312
1313 static int edma_dma_pause(struct dma_chan *chan)
1314 {
1315         struct edma_chan *echan = to_edma_chan(chan);
1316
1317         if (!echan->edesc)
1318                 return -EINVAL;
1319
1320         edma_pause(echan->ecc, echan->ch_num);
1321         return 0;
1322 }
1323
1324 static int edma_dma_resume(struct dma_chan *chan)
1325 {
1326         struct edma_chan *echan = to_edma_chan(chan);
1327
1328         edma_resume(echan->ecc, echan->ch_num);
1329         return 0;
1330 }
1331
1332 /*
1333  * A PaRAM set configuration abstraction used by other modes
1334  * @chan: Channel whose PaRAM set we're configuring
1335  * @pset: PaRAM set to initialize and set up
1336  * @src_addr: Source address of the DMA
1337  * @dst_addr: Destination address of the DMA
1338  * @burst: In units of dev_width, how much to send
1339  * @dev_width: Width of each device word (enum dma_slave_buswidth)
1340  * @dma_length: Total length of the DMA transfer
1341  * @direction: Direction of the transfer
1342  */
1343 static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
1344                             dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
1345                             enum dma_slave_buswidth dev_width,
1346                             unsigned int dma_length,
1347                             enum dma_transfer_direction direction)
1348 {
1349         struct edma_chan *echan = to_edma_chan(chan);
1350         struct device *dev = chan->device->dev;
1351         struct edmacc_param *param = &epset->param;
1352         int acnt, bcnt, ccnt, cidx;
1353         int src_bidx, dst_bidx, src_cidx, dst_cidx;
1354         int absync;
1355
1356         acnt = dev_width;
1357
1358         /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
1359         if (!burst)
1360                 burst = 1;
1361         /*
1362          * If the maxburst is equal to the fifo width, use
1363          * A-synced transfers. This allows for large contiguous
1364          * buffer transfers using only one PaRAM set.
1365          */
1366         if (burst == 1) {
1367                 /*
1368                  * For the A-sync case, bcnt and ccnt are the remainder
1369                  * and quotient respectively of the division of:
1370                  * (dma_length / acnt) by (SZ_64K - 1). This is so
1371                  * that in case bcnt overflows, we have ccnt to use.
1372                  * Note: In A-sync transfers only, bcntrld is used, but it
1373                  * only applies for sg_dma_len(sg) >= SZ_64K.
1374                  * In this case, the approach adopted is: bcnt for the
1375                  * first frame will be the remainder below. Then for
1376                  * every successive frame, bcnt will be SZ_64K - 1. This
1377                  * is assured as bcntrld = 0xffff at the end of this function.
1378                  */
1379                 absync = false;
1380                 ccnt = dma_length / acnt / (SZ_64K - 1);
1381                 bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
1382                 /*
1383                  * If bcnt is non-zero, we have a remainder and hence an
1384                  * extra frame to transfer, so increment ccnt.
1385                  */
1386                 if (bcnt)
1387                         ccnt++;
1388                 else
1389                         bcnt = SZ_64K - 1;
1390                 cidx = acnt;
1391         } else {
1392                 /*
1393                  * If maxburst is greater than the fifo address_width,
1394                  * use AB-synced transfers where A count is the fifo
1395                  * address_width and B count is the maxburst. In this
1396                  * case, we are limited to transfers of C count frames
1397                  * of (address_width * maxburst) where C count is limited
1398                  * to SZ_64K-1. This places an upper bound on the length
1399                  * of an SG segment that can be handled.
1400                  */
1401                 absync = true;
1402                 bcnt = burst;
1403                 ccnt = dma_length / (acnt * bcnt);
1404                 if (ccnt > (SZ_64K - 1)) {
1405                         dev_err(dev, "Exceeded max SG segment size\n");
1406                         return -EINVAL;
1407                 }
1408                 cidx = acnt * bcnt;
1409         }
1410
1411         epset->len = dma_length;
1412
1413         if (direction == DMA_MEM_TO_DEV) {
1414                 src_bidx = acnt;
1415                 src_cidx = cidx;
1416                 dst_bidx = 0;
1417                 dst_cidx = 0;
1418                 epset->addr = src_addr;
1419         } else if (direction == DMA_DEV_TO_MEM)  {
1420                 src_bidx = 0;
1421                 src_cidx = 0;
1422                 dst_bidx = acnt;
1423                 dst_cidx = cidx;
1424                 epset->addr = dst_addr;
1425         } else if (direction == DMA_MEM_TO_MEM)  {
1426                 src_bidx = acnt;
1427                 src_cidx = cidx;
1428                 dst_bidx = acnt;
1429                 dst_cidx = cidx;
1430         } else {
1431                 dev_err(dev, "%s: direction not implemented yet\n", __func__);
1432                 return -EINVAL;
1433         }
1434
1435         param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
1436         /* Configure A or AB synchronized transfers */
1437         if (absync)
1438                 param->opt |= SYNCDIM;
1439
1440         param->src = src_addr;
1441         param->dst = dst_addr;
1442
1443         param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
1444         param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
1445
1446         param->a_b_cnt = bcnt << 16 | acnt;
1447         param->ccnt = ccnt;
1448         /*
1449          * The only time (bcntrld) auto reload is required is in the
1450          * A-sync case, and there a reload value of SZ_64K - 1 is all
1451          * that is needed. 'link' is initially set to NULL and will be
1452          * populated later by edma_execute.
1453          */
1454         param->link_bcntrld = 0xffffffff;
1455         return absync;
1456 }
1457
1458 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
1459         struct dma_chan *chan, struct scatterlist *sgl,
1460         unsigned int sg_len, enum dma_transfer_direction direction,
1461         unsigned long tx_flags, void *context)
1462 {
1463         struct edma_chan *echan = to_edma_chan(chan);
1464         struct device *dev = chan->device->dev;
1465         struct edma_desc *edesc;
1466         dma_addr_t src_addr = 0, dst_addr = 0;
1467         enum dma_slave_buswidth dev_width;
1468         u32 burst;
1469         struct scatterlist *sg;
1470         int i, nslots, ret;
1471
1472         if (unlikely(!echan || !sgl || !sg_len))
1473                 return NULL;
1474
1475         if (direction == DMA_DEV_TO_MEM) {
1476                 src_addr = echan->cfg.src_addr;
1477                 dev_width = echan->cfg.src_addr_width;
1478                 burst = echan->cfg.src_maxburst;
1479         } else if (direction == DMA_MEM_TO_DEV) {
1480                 dst_addr = echan->cfg.dst_addr;
1481                 dev_width = echan->cfg.dst_addr_width;
1482                 burst = echan->cfg.dst_maxburst;
1483         } else {
1484                 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1485                 return NULL;
1486         }
1487
1488         if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1489                 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1490                 return NULL;
1491         }
1492
1493         edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
1494                         GFP_ATOMIC);
1495         if (!edesc) {
1496                 dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
1497                 return NULL;
1498         }
1499
1500         edesc->pset_nr = sg_len;
1501         edesc->residue = 0;
1502         edesc->direction = direction;
1503         edesc->echan = echan;
1504
1505         /* Allocate a PaRAM slot, if needed */
1506         nslots = min_t(unsigned, MAX_NR_SG, sg_len);
1507
1508         for (i = 0; i < nslots; i++) {
1509                 if (echan->slot[i] < 0) {
1510                         echan->slot[i] =
1511                                 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1512                         if (echan->slot[i] < 0) {
1513                                 kfree(edesc);
1514                                 dev_err(dev, "%s: Failed to allocate slot\n",
1515                                         __func__);
1516                                 return NULL;
1517                         }
1518                 }
1519         }
1520
1521         /* Configure PaRAM sets for each SG */
1522         for_each_sg(sgl, sg, sg_len, i) {
1523                 /* Get address for each SG */
1524                 if (direction == DMA_DEV_TO_MEM)
1525                         dst_addr = sg_dma_address(sg);
1526                 else
1527                         src_addr = sg_dma_address(sg);
1528
1529                 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1530                                        dst_addr, burst, dev_width,
1531                                        sg_dma_len(sg), direction);
1532                 if (ret < 0) {
1533                         kfree(edesc);
1534                         return NULL;
1535                 }
1536
1537                 edesc->absync = ret;
1538                 edesc->residue += sg_dma_len(sg);
1539
1540                 /* If this is the last in the current SG set of transactions,
1541                    enable interrupts so that the next set is processed */
1542                 if (!((i+1) % MAX_NR_SG))
1543                         edesc->pset[i].param.opt |= TCINTEN;
1544
1545                 /* If this is the last set, enable completion interrupt flag */
1546                 if (i == sg_len - 1)
1547                         edesc->pset[i].param.opt |= TCINTEN;
1548         }
1549         edesc->residue_stat = edesc->residue;
1550
1551         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1552 }
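
/*
 * Usage sketch (hypothetical client code; chan, sgl, nents, fifo_phys and
 * my_dma_done are placeholder names): a slave driver would normally reach
 * edma_prep_slave_sg() through the generic dmaengine API, roughly as:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = my_dma_done;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */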
1553
1554 static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1555         struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1556         size_t len, unsigned long tx_flags)
1557 {
1558         int ret;
1559         struct edma_desc *edesc;
1560         struct device *dev = chan->device->dev;
1561         struct edma_chan *echan = to_edma_chan(chan);
1562
1563         if (unlikely(!echan || !len))
1564                 return NULL;
1565
1566         edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
1567         if (!edesc) {
1568                 dev_dbg(dev, "Failed to allocate a descriptor\n");
1569                 return NULL;
1570         }
1571
1572         edesc->pset_nr = 1;
1573
1574         ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
1575                                DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
1576         if (ret < 0) {
                     kfree(edesc);          /* don't leak the descriptor */
1577                 return NULL;
             }
1578
1579         edesc->absync = ret;
1580
1581         /*
1582          * Enable intermediate transfer chaining to re-trigger channel
1583          * on completion of every TR, and enable transfer-completion
1584          * interrupt on completion of the whole transfer.
1585          */
1586         edesc->pset[0].param.opt |= ITCCHEN;
1587         edesc->pset[0].param.opt |= TCINTEN;
1588
1589         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1590 }
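
/*
 * Usage sketch for the memcpy path (hypothetical client code; chan, dst_dma,
 * src_dma and len are placeholders).  Since the pset above is programmed
 * with a 4-byte ACNT, callers are expected to honour the advertised
 * copy_align when choosing len:
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					 DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */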
1591
1592 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1593         struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1594         size_t period_len, enum dma_transfer_direction direction,
1595         unsigned long tx_flags)
1596 {
1597         struct edma_chan *echan = to_edma_chan(chan);
1598         struct device *dev = chan->device->dev;
1599         struct edma_desc *edesc;
1600         dma_addr_t src_addr, dst_addr;
1601         enum dma_slave_buswidth dev_width;
1602         u32 burst;
1603         int i, ret, nslots;
1604
1605         if (unlikely(!echan || !buf_len || !period_len))
1606                 return NULL;
1607
1608         if (direction == DMA_DEV_TO_MEM) {
1609                 src_addr = echan->cfg.src_addr;
1610                 dst_addr = buf_addr;
1611                 dev_width = echan->cfg.src_addr_width;
1612                 burst = echan->cfg.src_maxburst;
1613         } else if (direction == DMA_MEM_TO_DEV) {
1614                 src_addr = buf_addr;
1615                 dst_addr = echan->cfg.dst_addr;
1616                 dev_width = echan->cfg.dst_addr_width;
1617                 burst = echan->cfg.dst_maxburst;
1618         } else {
1619                 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1620                 return NULL;
1621         }
1622
1623         if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1624                 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1625                 return NULL;
1626         }
1627
1628         if (unlikely(buf_len % period_len)) {
1629                 dev_err(dev, "Buffer length should be a multiple of the period length\n");
1630                 return NULL;
1631         }
1632
1633         nslots = (buf_len / period_len) + 1;
1634
1635         /*
1636          * Cyclic DMA users such as audio cannot tolerate delays introduced
1637          * by cases where the number of periods is more than the maximum
1638          * number of SGs the EDMA driver can handle at a time. For DMA types
1639          * such as Slave SGs, such delays are tolerable and synchronized,
1640          * but the synchronization is difficult to achieve with Cyclic and
1641          * cannot be guaranteed, so we error out early.
1642          */
1643         if (nslots > MAX_NR_SG)
1644                 return NULL;
1645
1646         edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
1647                         GFP_ATOMIC);
1648         if (!edesc) {
1649                 dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
1650                 return NULL;
1651         }
1652
1653         edesc->cyclic = 1;
1654         edesc->pset_nr = nslots;
1655         edesc->residue = edesc->residue_stat = buf_len;
1656         edesc->direction = direction;
1657         edesc->echan = echan;
1658
1659         dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
1660                 __func__, echan->ch_num, nslots, period_len, buf_len);
1661
1662         for (i = 0; i < nslots; i++) {
1663                 /* Allocate a PaRAM slot, if needed */
1664                 if (echan->slot[i] < 0) {
1665                         echan->slot[i] =
1666                                 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1667                         if (echan->slot[i] < 0) {
1668                                 kfree(edesc);
1669                                 dev_err(dev, "%s: Failed to allocate slot\n",
1670                                         __func__);
1671                                 return NULL;
1672                         }
1673                 }
1674
1675                 if (i == nslots - 1) {
1676                         memcpy(&edesc->pset[i], &edesc->pset[0],
1677                                sizeof(edesc->pset[0]));
1678                         break;
1679                 }
1680
1681                 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1682                                        dst_addr, burst, dev_width, period_len,
1683                                        direction);
1684                 if (ret < 0) {
1685                         kfree(edesc);
1686                         return NULL;
1687                 }
1688
1689                 if (direction == DMA_DEV_TO_MEM)
1690                         dst_addr += period_len;
1691                 else
1692                         src_addr += period_len;
1693
1694                 dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
1695                 dev_vdbg(dev,
1696                         "\n pset[%d]:\n"
1697                         "  chnum\t%d\n"
1698                         "  slot\t%d\n"
1699                         "  opt\t%08x\n"
1700                         "  src\t%08x\n"
1701                         "  dst\t%08x\n"
1702                         "  abcnt\t%08x\n"
1703                         "  ccnt\t%08x\n"
1704                         "  bidx\t%08x\n"
1705                         "  cidx\t%08x\n"
1706                         "  lkrld\t%08x\n",
1707                         i, echan->ch_num, echan->slot[i],
1708                         edesc->pset[i].param.opt,
1709                         edesc->pset[i].param.src,
1710                         edesc->pset[i].param.dst,
1711                         edesc->pset[i].param.a_b_cnt,
1712                         edesc->pset[i].param.ccnt,
1713                         edesc->pset[i].param.src_dst_bidx,
1714                         edesc->pset[i].param.src_dst_cidx,
1715                         edesc->pset[i].param.link_bcntrld);
1716
1717                 edesc->absync = ret;
1718
1719                 /*
1720                  * Enable period interrupt only if it is requested
1721                  */
1722                 if (tx_flags & DMA_PREP_INTERRUPT)
1723                         edesc->pset[i].param.opt |= TCINTEN;
1724         }
1725
1726         /* Place the cyclic channel to highest priority queue */
1727         edma_assign_channel_eventq(echan->ecc, echan->ch_num, EVENTQ_0);
1728
1729         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1730 }
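
/*
 * Usage sketch for the cyclic path (hypothetical client code, e.g. an audio
 * driver; chan, dma_buf, buf_len, period_len and my_period_elapsed are
 * placeholders).  With DMA_PREP_INTERRUPT set, the callback runs once per
 * elapsed period via vchan_cyclic_callback() in edma_callback() below:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, dma_buf, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = my_period_elapsed;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */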
1731
1732 static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
1733 {
1734         struct edma_chan *echan = data;
1735         struct edma_cc *ecc = echan->ecc;
1736         struct device *dev = echan->vchan.chan.device->dev;
1737         struct edma_desc *edesc;
1738         struct edmacc_param p;
1739
1740         edesc = echan->edesc;
1741
1742         spin_lock(&echan->vchan.lock);
1743         switch (ch_status) {
1744         case EDMA_DMA_COMPLETE:
1745                 if (edesc) {
1746                         if (edesc->cyclic) {
1747                                 vchan_cyclic_callback(&edesc->vdesc);
1748                                 goto out;
1749                         } else if (edesc->processed == edesc->pset_nr) {
1750                                 dev_dbg(dev,
1751                                         "Transfer completed on channel %d\n",
1752                                         ch_num);
1753                                 edesc->residue = 0;
1754                                 edma_stop(ecc, echan->ch_num);
1755                                 vchan_cookie_complete(&edesc->vdesc);
1756                                 echan->edesc = NULL;
1757                         } else {
1758                                 dev_dbg(dev,
1759                                         "Sub transfer completed on channel %d\n",
1760                                         ch_num);
1761
1762                                 edma_pause(ecc, echan->ch_num);
1763
1764                                 /* Update statistics for tx_status */
1765                                 edesc->residue -= edesc->sg_len;
1766                                 edesc->residue_stat = edesc->residue;
1767                                 edesc->processed_stat = edesc->processed;
1768                         }
1769                         edma_execute(echan);
1770                 }
1771                 break;
1772         case EDMA_DMA_CC_ERROR:
1773                 edma_read_slot(ecc, echan->slot[0], &p);
1774
1775                 /*
1776                  * Defer the issue by setting the missed flag; it is
1777                  * guaranteed to be handled later because either:
1778                  * (1) we finished transmitting an intermediate slot and
1779                  *     edma_execute is coming up, or
1780                  * (2) we finished the current transfer and issue_pending
1781                  *     will call edma_execute.
1782                  *
1783                  * Important note: issuing can be dangerous here and
1784                  * lead to some nasty recursion when we are in a NULL
1785                  * slot. So we avoid doing so and set the missed flag.
1786                  */
1787                 if (p.a_b_cnt == 0 && p.ccnt == 0) {
1788                         dev_dbg(dev, "Error on null slot, setting miss\n");
1789                         echan->missed = 1;
1790                 } else {
1791                         /*
1792                          * The slot is already programmed but the event got
1793                          * missed, so it's safe to issue it here.
1794                          */
1795                         dev_dbg(dev, "Missed event, TRIGGERING\n");
1796                         edma_clean_channel(ecc, echan->ch_num);
1797                         edma_stop(ecc, echan->ch_num);
1798                         edma_start(ecc, echan->ch_num);
1799                         edma_trigger_channel(ecc, echan->ch_num);
1800                 }
1801                 break;
1802         default:
1803                 break;
1804         }
1805 out:
1806         spin_unlock(&echan->vchan.lock);
1807 }
1808
1809 /* Alloc channel resources */
1810 static int edma_alloc_chan_resources(struct dma_chan *chan)
1811 {
1812         struct edma_chan *echan = to_edma_chan(chan);
1813         struct device *dev = chan->device->dev;
1814         int ret;
1815         int a_ch_num;
1816         LIST_HEAD(descs);
1817
1818         a_ch_num = edma_alloc_channel(echan->ecc, echan->ch_num,
1819                                       edma_callback, echan, EVENTQ_DEFAULT);
1820
1821         if (a_ch_num < 0) {
1822                 ret = -ENODEV;
1823                 goto err_no_chan;
1824         }
1825
1826         if (a_ch_num != echan->ch_num) {
1827                 dev_err(dev, "failed to allocate requested channel %u:%u\n",
1828                         EDMA_CTLR(echan->ch_num),
1829                         EDMA_CHAN_SLOT(echan->ch_num));
1830                 ret = -ENODEV;
1831                 goto err_wrong_chan;
1832         }
1833
1834         echan->alloced = true;
1835         echan->slot[0] = echan->ch_num;
1836
1837         dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
1838                 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
1839
1840         return 0;
1841
1842 err_wrong_chan:
1843         edma_free_channel(echan->ecc, a_ch_num);
1844 err_no_chan:
1845         return ret;
1846 }
1847
1848 /* Free channel resources */
1849 static void edma_free_chan_resources(struct dma_chan *chan)
1850 {
1851         struct edma_chan *echan = to_edma_chan(chan);
1852         int i;
1853
1854         /* Terminate transfers */
1855         edma_stop(echan->ecc, echan->ch_num);
1856
1857         vchan_free_chan_resources(&echan->vchan);
1858
1859         /* Free EDMA PaRAM slots */
1860         for (i = 1; i < EDMA_MAX_SLOTS; i++) {
1861                 if (echan->slot[i] >= 0) {
1862                         edma_free_slot(echan->ecc, echan->slot[i]);
1863                         echan->slot[i] = -1;
1864                 }
1865         }
1866
1867         /* Free EDMA channel */
1868         if (echan->alloced) {
1869                 edma_free_channel(echan->ecc, echan->ch_num);
1870                 echan->alloced = false;
1871         }
1872
1873         dev_dbg(chan->device->dev, "freeing channel for %u\n", echan->ch_num);
1874 }
1875
1876 /* Send pending descriptor to hardware */
1877 static void edma_issue_pending(struct dma_chan *chan)
1878 {
1879         struct edma_chan *echan = to_edma_chan(chan);
1880         unsigned long flags;
1881
1882         spin_lock_irqsave(&echan->vchan.lock, flags);
1883         if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
1884                 edma_execute(echan);
1885         spin_unlock_irqrestore(&echan->vchan.lock, flags);
1886 }
1887
1888 static u32 edma_residue(struct edma_desc *edesc)
1889 {
1890         bool dst = edesc->direction == DMA_DEV_TO_MEM;
1891         struct edma_pset *pset = edesc->pset;
1892         dma_addr_t done, pos;
1893         int i;
1894
1895         /*
1896          * We always read the dst/src position from the first PaRAM
1897          * pset. That's the one which is active now.
1898          */
1899         pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
1900
1901         /*
1902          * Cyclic is simple. Just subtract pset[0].addr from pos.
1903          *
1904          * We never update edesc->residue in the cyclic case, so we
1905          * can report the remaining room to the end of the circular
1906          * buffer.
1907          */
1908         if (edesc->cyclic) {
1909                 done = pos - pset->addr;
1910                 edesc->residue_stat = edesc->residue - done;
1911                 return edesc->residue_stat;
1912         }
1913
1914         /*
1915          * For SG operation we catch up with the last processed
1916          * status.
1917          */
1918         pset += edesc->processed_stat;
1919
1920         for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
1921                 /*
1922                  * If we are inside this pset address range, we know
1923                  * this is the active one. Get the current delta and
1924                  * stop walking the psets.
1925                  */
1926                 if (pos >= pset->addr && pos < pset->addr + pset->len)
1927                         return edesc->residue_stat - (pos - pset->addr);
1928
1929                 /* Otherwise mark it done and update residue_stat. */
1930                 edesc->processed_stat++;
1931                 edesc->residue_stat -= pset->len;
1932         }
1933         return edesc->residue_stat;
1934 }
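
/*
 * Worked example of the SG residue walk above (hypothetical numbers):
 * with three 4 KiB psets, residue_stat still at the full 12 KiB and the
 * current position 1 KiB into pset[1], the loop marks pset[0] done
 * (residue_stat drops to 8 KiB) and then returns 8 KiB - 1 KiB = 7 KiB
 * left to transfer.
 */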
1935
1936 /* Check request completion status */
1937 static enum dma_status edma_tx_status(struct dma_chan *chan,
1938                                       dma_cookie_t cookie,
1939                                       struct dma_tx_state *txstate)
1940 {
1941         struct edma_chan *echan = to_edma_chan(chan);
1942         struct virt_dma_desc *vdesc;
1943         enum dma_status ret;
1944         unsigned long flags;
1945
1946         ret = dma_cookie_status(chan, cookie, txstate);
1947         if (ret == DMA_COMPLETE || !txstate)
1948                 return ret;
1949
1950         spin_lock_irqsave(&echan->vchan.lock, flags);
1951         if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
1952                 txstate->residue = edma_residue(echan->edesc);
1953         else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
1954                 txstate->residue = to_edma_desc(&vdesc->tx)->residue;
1955         spin_unlock_irqrestore(&echan->vchan.lock, flags);
1956
1957         return ret;
1958 }
1959
1960 static void __init edma_chan_init(struct edma_cc *ecc, struct dma_device *dma,
1961                                   struct edma_chan *echans)
1962 {
1963         int i, j;
1964
1965         for (i = 0; i < ecc->num_channels; i++) {
1966                 struct edma_chan *echan = &echans[i];
1967                 echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
1968                 echan->ecc = ecc;
1969                 echan->vchan.desc_free = edma_desc_free;
1970
1971                 vchan_init(&echan->vchan, dma);
1972
1973                 INIT_LIST_HEAD(&echan->node);
1974                 for (j = 0; j < EDMA_MAX_SLOTS; j++)
1975                         echan->slot[j] = -1;
1976         }
1977 }
1978
1979 #define EDMA_DMA_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
1980                                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1981                                  BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
1982                                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1983
1984 static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
1985                           struct device *dev)
1986 {
1987         dma->device_prep_slave_sg = edma_prep_slave_sg;
1988         dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
1989         dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
1990         dma->device_alloc_chan_resources = edma_alloc_chan_resources;
1991         dma->device_free_chan_resources = edma_free_chan_resources;
1992         dma->device_issue_pending = edma_issue_pending;
1993         dma->device_tx_status = edma_tx_status;
1994         dma->device_config = edma_slave_config;
1995         dma->device_pause = edma_dma_pause;
1996         dma->device_resume = edma_dma_resume;
1997         dma->device_terminate_all = edma_terminate_all;
1998
1999         dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
2000         dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
2001         dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2002         dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2003
2004         dma->dev = dev;
2005
2006         /*
2007          * Code using dma memcpy must make sure the length is
2008          * aligned to the dma->copy_align boundary.
2009          */
2010         dma->copy_align = DMAENGINE_ALIGN_4_BYTES;
2011
2012         INIT_LIST_HEAD(&dma->channels);
2013 }
2014
2015 static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
2016                               struct edma_cc *ecc)
2017 {
2018         int i;
2019         u32 value, cccfg;
2020         s8 (*queue_priority_map)[2];
2021
2022         /* Decode the eDMA3 configuration from CCCFG register */
2023         cccfg = edma_read(ecc, EDMA_CCCFG);
2024
2025         value = GET_NUM_REGN(cccfg);
2026         ecc->num_region = BIT(value);
2027
2028         value = GET_NUM_DMACH(cccfg);
2029         ecc->num_channels = BIT(value + 1);
2030
2031         value = GET_NUM_PAENTRY(cccfg);
2032         ecc->num_slots = BIT(value + 4);
2033
2034         value = GET_NUM_EVQUE(cccfg);
2035         ecc->num_tc = value + 1;
2036
2037         dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
2038         dev_dbg(dev, "num_region: %u\n", ecc->num_region);
2039         dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
2040         dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
2041         dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
2042
2043         /* Nothing needs to be done if a queue priority mapping is provided */
2044         if (pdata->queue_priority_mapping)
2045                 return 0;
2046
2047         /*
2048          * Configure TC/queue priority as follows:
2049          * Q0 - priority 0
2050          * Q1 - priority 1
2051          * Q2 - priority 2
2052          * ...
2053          * The meaning of priority numbers: 0 highest priority, 7 lowest
2054          * priority. So Q0 is the highest priority queue and the last queue has
2055          * the lowest priority.
2056          */
2057         queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
2058                                           sizeof(queue_priority_map[0]), GFP_KERNEL);
2059         if (!queue_priority_map)
2060                 return -ENOMEM;
2061
2062         for (i = 0; i < ecc->num_tc; i++) {
2063                 queue_priority_map[i][0] = i;
2064                 queue_priority_map[i][1] = i;
2065         }
2066         queue_priority_map[i][0] = -1;
2067         queue_priority_map[i][1] = -1;
2068
2069         pdata->queue_priority_mapping = queue_priority_map;
2070         /* Default queue has the lowest priority */
2071         pdata->default_queue = i - 1;
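        /*
         * Example (hypothetical): with num_tc = 3 this yields the map
         * { {0, 0}, {1, 1}, {2, 2}, {-1, -1} } and default_queue = 2,
         * i.e. the lowest-priority queue.
         */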
2072
2073         return 0;
2074 }
2075
2076 #if IS_ENABLED(CONFIG_OF)
2077 static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
2078                                size_t sz)
2079 {
2080         const char pname[] = "ti,edma-xbar-event-map";
2081         struct resource res;
2082         void __iomem *xbar;
2083         s16 (*xbar_chans)[2];
2084         size_t nelm = sz / sizeof(s16);
2085         u32 shift, offset, mux;
2086         int ret, i;
2087
2088         xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
2089         if (!xbar_chans)
2090                 return -ENOMEM;
2091
2092         ret = of_address_to_resource(dev->of_node, 1, &res);
2093         if (ret)
2094                 return -ENOMEM;
2095
2096         xbar = devm_ioremap(dev, res.start, resource_size(&res));
2097         if (!xbar)
2098                 return -ENOMEM;
2099
2100         ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
2101                                          nelm);
2102         if (ret)
2103                 return -EIO;
2104
2105         /* Invalidate last entry for the other user of this mess */
2106         nelm >>= 1;
2107         xbar_chans[nelm][0] = -1;
2108         xbar_chans[nelm][1] = -1;
2109
2110         for (i = 0; i < nelm; i++) {
2111                 shift = (xbar_chans[i][1] & 0x03) << 3;
2112                 offset = xbar_chans[i][1] & 0xfffffffc;
2113                 mux = readl(xbar + offset);
2114                 mux &= ~(0xff << shift);
2115                 mux |= xbar_chans[i][0] << shift;
2116                 writel(mux, (xbar + offset));
2117         }
2118
2119         pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
2120         return 0;
2121 }
2122
2123 static int edma_of_parse_dt(struct device *dev, struct edma_soc_info *pdata)
2124 {
2125         int ret = 0;
2126         struct property *prop;
2127         size_t sz;
2128         struct edma_rsv_info *rsv_info;
2129
2130         rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
2131         if (!rsv_info)
2132                 return -ENOMEM;
2133         pdata->rsv = rsv_info;
2134
2135         prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz);
2136         if (prop)
2137                 ret = edma_xbar_event_map(dev, pdata, sz);
2138
2139         return ret;
2140 }
2141
2142 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
2143 {
2144         struct edma_soc_info *info;
2145         int ret;
2146
2147         info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
2148         if (!info)
2149                 return ERR_PTR(-ENOMEM);
2150
2151         ret = edma_of_parse_dt(dev, info);
2152         if (ret)
2153                 return ERR_PTR(ret);
2154
2155         return info;
2156 }
2157 #else
2158 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
2159 {
2160         return ERR_PTR(-EINVAL);
2161 }
2162 #endif
2163
2164 static int edma_probe(struct platform_device *pdev)
2165 {
2166         struct edma_soc_info    *info = pdev->dev.platform_data;
2167         s8                      (*queue_priority_mapping)[2];
2168         int                     i, off, ln;
2169         const s16               (*rsv_chans)[2];
2170         const s16               (*rsv_slots)[2];
2171         const s16               (*xbar_chans)[2];
2172         int                     irq;
2173         char                    *irq_name;
2174         struct resource         *mem;
2175         struct device_node      *node = pdev->dev.of_node;
2176         struct device           *dev = &pdev->dev;
2177         struct edma_cc          *ecc;
2178         int ret;
2179
2180         if (node) {
2181                 info = edma_setup_info_from_dt(dev);
2182                 if (IS_ERR(info)) {
2183                         dev_err(dev, "failed to get DT data\n");
2184                         return PTR_ERR(info);
2185                 }
2186         }
2187
2188         if (!info)
2189                 return -ENODEV;
2190
2191         pm_runtime_enable(dev);
2192         ret = pm_runtime_get_sync(dev);
2193         if (ret < 0) {
2194                 dev_err(dev, "pm_runtime_get_sync() failed\n");
2195                 return ret;
2196         }
2197
2198         ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2199         if (ret)
2200                 return ret;
2201
2202         ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
2203         if (!ecc) {
2204                 dev_err(dev, "Can't allocate controller\n");
2205                 return -ENOMEM;
2206         }
2207
2208         ecc->dev = dev;
2209         ecc->id = pdev->id;
2210         /* When booting with DT the pdev->id is -1 */
2211         if (ecc->id < 0)
2212                 ecc->id = 0;
2213
2214         mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
2215         if (!mem) {
2216                 dev_dbg(dev, "mem resource not found, using index 0\n");
2217                 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2218                 if (!mem) {
2219                         dev_err(dev, "no mem resource?\n");
2220                         return -ENODEV;
2221                 }
2222         }
2223         ecc->base = devm_ioremap_resource(dev, mem);
2224         if (IS_ERR(ecc->base))
2225                 return PTR_ERR(ecc->base);
2226
2227         platform_set_drvdata(pdev, ecc);
2228
2229         /* Get eDMA3 configuration from IP */
2230         ret = edma_setup_from_hw(dev, info, ecc);
2231         if (ret)
2232                 return ret;
2233
2234         /* Allocate memory based on the information we got from the IP */
2235         ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
2236                                         sizeof(*ecc->slave_chans), GFP_KERNEL);
2237         if (!ecc->slave_chans)
2238                 return -ENOMEM;
2239
2240         ecc->intr_data = devm_kcalloc(dev, ecc->num_channels,
2241                                       sizeof(*ecc->intr_data), GFP_KERNEL);
2242         if (!ecc->intr_data)
2243                 return -ENOMEM;
2244
2245         ecc->edma_unused = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_channels),
2246                                         sizeof(unsigned long), GFP_KERNEL);
2247         if (!ecc->edma_unused)
2248                 return -ENOMEM;
2249
2250         ecc->edma_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
2251                                        sizeof(unsigned long), GFP_KERNEL);
2252         if (!ecc->edma_inuse)
2253                 return -ENOMEM;
2254
2255         ecc->default_queue = info->default_queue;
2256
2257         for (i = 0; i < ecc->num_slots; i++)
2258                 edma_write_slot(ecc, i, &dummy_paramset);
2259
2260         /* Mark all channels as unused */
2261         bitmap_fill(ecc->edma_unused, ecc->num_channels);
2262
2263         if (info->rsv) {
2264                 /* Clear the reserved channels in unused list */
2265                 rsv_chans = info->rsv->rsv_chans;
2266                 if (rsv_chans) {
2267                         for (i = 0; rsv_chans[i][0] != -1; i++) {
2268                                 off = rsv_chans[i][0];
2269                                 ln = rsv_chans[i][1];
2270                                 clear_bits(off, ln, ecc->edma_unused);
2271                         }
2272                 }
2273
2274                 /* Set the reserved slots in inuse list */
2275                 rsv_slots = info->rsv->rsv_slots;
2276                 if (rsv_slots) {
2277                         for (i = 0; rsv_slots[i][0] != -1; i++) {
2278                                 off = rsv_slots[i][0];
2279                                 ln = rsv_slots[i][1];
2280                                 set_bits(off, ln, ecc->edma_inuse);
2281                         }
2282                 }
2283         }
2284
2285         /* Clear the xbar mapped channels in unused list */
2286         xbar_chans = info->xbar_chans;
2287         if (xbar_chans) {
2288                 for (i = 0; xbar_chans[i][1] != -1; i++) {
2289                         off = xbar_chans[i][1];
2290                         clear_bits(off, 1, ecc->edma_unused);
2291                 }
2292         }
2293
2294         irq = platform_get_irq_byname(pdev, "edma3_ccint");
2295         if (irq < 0 && node)
2296                 irq = irq_of_parse_and_map(node, 0);
2297
2298         if (irq >= 0) {
2299                 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
2300                                           dev_name(dev));
2301                 ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
2302                                        ecc);
2303                 if (ret) {
2304                         dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
2305                         return ret;
2306                 }
2307         }
2308
2309         irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
2310         if (irq < 0 && node)
2311                 irq = irq_of_parse_and_map(node, 2);
2312
2313         if (irq >= 0) {
2314                 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
2315                                           dev_name(dev));
2316                 ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
2317                                        ecc);
2318                 if (ret) {
2319                         dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
2320                         return ret;
2321                 }
2322         }
2323
2324         for (i = 0; i < ecc->num_channels; i++)
2325                 edma_map_dmach_to_queue(ecc, i, info->default_queue);
2326
2327         queue_priority_mapping = info->queue_priority_mapping;
2328
2329         /* Event queue priority mapping */
2330         for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2331                 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2332                                               queue_priority_mapping[i][1]);
2333
2334         /* Map the channel to a PaRAM entry if channel mapping logic exists */
2335         if (edma_read(ecc, EDMA_CCCFG) & CHMAP_EXIST)
2336                 edma_direct_dmach_to_param_mapping(ecc);
2337
2338         for (i = 0; i < ecc->num_region; i++) {
2339                 edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
2340                 edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
2341                 edma_write_array(ecc, EDMA_QRAE, i, 0x0);
2342         }
2343         ecc->info = info;
2344
2345         ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
2346         if (ecc->dummy_slot < 0) {
2347                 dev_err(dev, "Can't allocate PaRAM dummy slot\n");
2348                 return ecc->dummy_slot;
2349         }
2350
2351         dma_cap_zero(ecc->dma_slave.cap_mask);
2352         dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
2353         dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
2354         dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
2355
2356         edma_dma_init(ecc, &ecc->dma_slave, dev);
2357
2358         edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);
2359
2360         ret = dma_async_device_register(&ecc->dma_slave);
2361         if (ret)
2362                 goto err_reg1;
2363
2364         if (node)
2365                 of_dma_controller_register(node, of_dma_xlate_by_chan_id,
2366                                            &ecc->dma_slave);
2367
2368         dev_info(dev, "TI EDMA DMA engine driver\n");
2369
2370         return 0;
2371
2372 err_reg1:
2373         edma_free_slot(ecc, ecc->dummy_slot);
2374         return ret;
2375 }
2376
2377 static int edma_remove(struct platform_device *pdev)
2378 {
2379         struct device *dev = &pdev->dev;
2380         struct edma_cc *ecc = dev_get_drvdata(dev);
2381
2382         if (dev->of_node)
2383                 of_dma_controller_free(dev->of_node);
2384         dma_async_device_unregister(&ecc->dma_slave);
2385         edma_free_slot(ecc, ecc->dummy_slot);
2386
2387         return 0;
2388 }
2389
2390 #ifdef CONFIG_PM_SLEEP
2391 static int edma_pm_resume(struct device *dev)
2392 {
2393         struct edma_cc *ecc = dev_get_drvdata(dev);
2394         int i;
2395         s8 (*queue_priority_mapping)[2];
2396
2397         queue_priority_mapping = ecc->info->queue_priority_mapping;
2398
2399         /* Event queue priority mapping */
2400         for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2401                 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2402                                               queue_priority_mapping[i][1]);
2403
2404         /* Map the channel to a PaRAM entry if channel mapping logic exists */
2405         if (edma_read(ecc, EDMA_CCCFG) & CHMAP_EXIST)
2406                 edma_direct_dmach_to_param_mapping(ecc);
2407
2408         for (i = 0; i < ecc->num_channels; i++) {
2409                 if (test_bit(i, ecc->edma_inuse)) {
2410                         /* ensure access through shadow region 0 */
2411                         edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
2412                                        BIT(i & 0x1f));
2413
2414                         edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, i),
2415                                              ecc->intr_data[i].callback,
2416                                              ecc->intr_data[i].data);
2417                 }
2418         }
2419
2420         return 0;
2421 }
2422 #endif
2423
2424 static const struct dev_pm_ops edma_pm_ops = {
2425         SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
2426 };
2427
2428 static struct platform_driver edma_driver = {
2429         .probe          = edma_probe,
2430         .remove         = edma_remove,
2431         .driver = {
2432                 .name   = "edma",
2433                 .pm     = &edma_pm_ops,
2434                 .of_match_table = edma_of_ids,
2435         },
2436 };
2437
2438 bool edma_filter_fn(struct dma_chan *chan, void *param)
2439 {
2440         if (chan->device->dev->driver == &edma_driver.driver) {
2441                 struct edma_chan *echan = to_edma_chan(chan);
2442                 unsigned ch_req = *(unsigned *)param;
2443                 return ch_req == echan->ch_num;
2444         }
2445         return false;
2446 }
2447 EXPORT_SYMBOL(edma_filter_fn);
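
/*
 * Usage sketch (hypothetical, legacy non-DT client): a board or peripheral
 * driver would request a specific channel with this filter roughly as
 * follows, where the controller/channel numbers are only examples:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch_num = EDMA_CTLR_CHAN(0, 12);
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */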
2448
2449 static int edma_init(void)
2450 {
2451         return platform_driver_register(&edma_driver);
2452 }
2453 subsys_initcall(edma_init);
2454
2455 static void __exit edma_exit(void)
2456 {
2457         platform_driver_unregister(&edma_driver);
2458 }
2459 module_exit(edma_exit);
2460
2461 MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
2462 MODULE_DESCRIPTION("TI EDMA DMA engine driver");
2463 MODULE_LICENSE("GPL v2");