/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER           0x00
#define CPDMA_TXCONTROL         0x04
#define CPDMA_TXTEARDOWN        0x08
#define CPDMA_RXIDVER           0x10
#define CPDMA_RXCONTROL         0x14
#define CPDMA_SOFTRESET         0x1c
#define CPDMA_RXTEARDOWN        0x18
#define CPDMA_TXINTSTATRAW      0x80
#define CPDMA_TXINTSTATMASKED   0x84
#define CPDMA_TXINTMASKSET      0x88
#define CPDMA_TXINTMASKCLEAR    0x8c
#define CPDMA_MACINVECTOR       0x90
#define CPDMA_MACEOIVECTOR      0x94
#define CPDMA_RXINTSTATRAW      0xa0
#define CPDMA_RXINTSTATMASKED   0xa4
#define CPDMA_RXINTMASKSET      0xa8
#define CPDMA_RXINTMASKCLEAR    0xac
#define CPDMA_DMAINTSTATRAW     0xb0
#define CPDMA_DMAINTSTATMASKED  0xb4
#define CPDMA_DMAINTMASKSET     0xb8
#define CPDMA_DMAINTMASKCLEAR   0xbc
#define CPDMA_DMAINT_HOSTERR    BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL        0x20
#define CPDMA_DMASTATUS         0x24
#define CPDMA_RXBUFFOFS         0x28
#define CPDMA_EM_CONTROL        0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP          BIT(31)
#define CPDMA_DESC_EOP          BIT(30)
#define CPDMA_DESC_OWNER        BIT(29)
#define CPDMA_DESC_EOQ          BIT(28)
#define CPDMA_DESC_TD_COMPLETE  BIT(27)
#define CPDMA_DESC_PASS_CRC     BIT(26)
#define CPDMA_DESC_TO_PORT_EN   BIT(20)
#define CPDMA_TO_PORT_SHIFT     16
#define CPDMA_DESC_PORT_MASK    (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN      4

#define CPDMA_TEARDOWN_VALUE    0xfffffffc

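/*
 * DMA descriptor as seen by the hardware.  The four hw_* words mirror the
 * in-memory descriptor layout the DMA engine walks; the sw_* fields are
 * host-only bookkeeping.  Descriptors may live in device memory, so they
 * are only accessed through the desc_read()/desc_write() helpers below.
 */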
struct cpdma_desc {
        /* hardware fields */
        u32                     hw_next;
        u32                     hw_buffer;
        u32                     hw_len;
        u32                     hw_mode;
        /* software fields */
        void                    *sw_token;
        u32                     sw_buffer;
        u32                     sw_len;
};

struct cpdma_desc_pool {
        phys_addr_t             phys;
        dma_addr_t              hw_addr;
        void __iomem            *iomap;         /* ioremap map */
        void                    *cpumap;        /* dma_alloc map */
        int                     desc_size, mem_size;
        int                     num_desc, used_desc;
        struct device           *dev;
        struct gen_pool         *gen_pool;
};

enum cpdma_state {
        CPDMA_STATE_IDLE,
        CPDMA_STATE_ACTIVE,
        CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
        enum cpdma_state        state;
        struct cpdma_params     params;
        struct device           *dev;
        struct cpdma_desc_pool  *pool;
        spinlock_t              lock;
        struct cpdma_chan       *channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
        struct cpdma_desc __iomem       *head, *tail;
        void __iomem                    *hdp, *cp, *rxfree;
        enum cpdma_state                state;
        struct cpdma_ctlr               *ctlr;
        int                             chan_num;
        spinlock_t                      lock;
        int                             count;
        u32                             desc_num;
        u32                             mask;
        cpdma_handler_fn                handler;
        enum dma_data_direction         dir;
        struct cpdma_chan_stats         stats;
        /* offsets into dmaregs */
        int     int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs         params.dmaregs
#define num_chan        params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)         __raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)            __raw_readl((chan)->fld)
#define desc_read(desc, fld)            __raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)     __raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)        __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)        __raw_writel((u32)(v), &(desc)->fld)

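/*
 * On tx descriptors, a "directed" packet is steered to a specific slave
 * port (1 or 2): the port number is folded into the descriptor mode word
 * together with the TO_PORT enable bit.  Rx descriptors are never directed.
 */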
#define cpdma_desc_to_port(chan, mode, directed)                        \
        do {                                                            \
                if (!is_rx_chan(chan) && ((directed == 1) ||            \
                                          (directed == 2)))             \
                        mode |= (CPDMA_DESC_TO_PORT_EN |                \
                                 (directed << CPDMA_TO_PORT_SHIFT));    \
        } while (0)

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
        if (!pool)
                return;

        WARN_ON(pool->used_desc);
        if (pool->cpumap)
                dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
        else
                iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
                                int size, int align)
{
        struct cpdma_desc_pool *pool;
        int ret;

        pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto gen_pool_create_fail;

        pool->dev       = dev;
        pool->mem_size  = size;
        pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
        pool->num_desc  = size / pool->desc_size;

        pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
                                              "cpdma");
        if (IS_ERR(pool->gen_pool)) {
                dev_err(dev, "pool create failed %ld\n",
                        PTR_ERR(pool->gen_pool));
                goto gen_pool_create_fail;
        }

        if (phys) {
                pool->phys  = phys;
                pool->iomap = ioremap(phys, size); /* should be memremap? */
                pool->hw_addr = hw_addr;
        } else {
                pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
                                                  GFP_KERNEL);
                pool->iomap = (void __iomem __force *)pool->cpumap;
                pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }

        if (!pool->iomap)
                goto gen_pool_create_fail;

        ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
                                pool->phys, pool->mem_size, -1);
        if (ret < 0) {
                dev_err(dev, "pool add failed %d\n", ret);
                goto gen_pool_add_virt_fail;
        }

        return pool;

gen_pool_add_virt_fail:
        cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
        return NULL;
}

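/*
 * Convert between the CPU-visible descriptor pointer (inside pool->iomap)
 * and the bus address the hardware uses in hw_next/hdp/cp.  The two views
 * may differ when the pool sits in dedicated on-chip descriptor RAM.
 */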
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
                  struct cpdma_desc __iomem *desc)
{
        if (!desc)
                return 0;
        return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
        return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
        struct cpdma_desc __iomem *desc = NULL;

        desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
                                                           pool->desc_size);
        if (desc)
                pool->used_desc++;

        return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
                            struct cpdma_desc __iomem *desc, int num_desc)
{
        gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
        pool->used_desc--;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
        struct cpdma_ctlr *ctlr;

        ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;

        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
        spin_lock_init(&ctlr->lock);

        ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
                                            ctlr->params.desc_mem_phys,
                                            ctlr->params.desc_hw_addr,
                                            ctlr->params.desc_mem_size,
                                            ctlr->params.desc_align);
        if (!ctlr->pool)
                return NULL;

        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
        return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

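/*
 * Typical call sequence for a client driver (sketch only; the cpdma_params
 * fields and the tx_chan_num()/rx_chan_num() helpers are declared in
 * davinci_cpdma.h and must be filled in from the client's register layout
 * and platform data):
 *
 *      struct cpdma_params params = { .dev = dev, ... };
 *      struct cpdma_ctlr *ctlr = cpdma_ctlr_create(&params);
 *      struct cpdma_chan *tx = cpdma_chan_create(ctlr, tx_chan_num(0),
 *                                                 tx_handler);
 *      struct cpdma_chan *rx = cpdma_chan_create(ctlr, rx_chan_num(0),
 *                                                 rx_handler);
 *
 *      cpdma_ctlr_start(ctlr);
 *      cpdma_chan_submit(tx, skb, skb->data, skb->len, 0);
 *      cpdma_chan_process(rx, budget);         (e.g. from a NAPI poll)
 *      cpdma_ctlr_stop(ctlr);
 *      cpdma_ctlr_destroy(ctlr);
 */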
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned timeout = 10 * 100;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                while (timeout) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                        udelay(10);
                        timeout--;
                }
                WARN_ON(!timeout);
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                __raw_writel(0, ctlr->params.txhdp + 4 * i);
                __raw_writel(0, ctlr->params.rxhdp + 4 * i);
                __raw_writel(0, ctlr->params.txcp + 4 * i);
                __raw_writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_start(ctlr->channels[i]);
        }
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
        int ret = 0, i;

        if (!ctlr)
                return -EINVAL;

        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                cpdma_chan_destroy(ctlr->channels[i]);

        cpdma_desc_pool_destroy(ctlr->pool);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i, reg;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
        dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

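/*
 * chan_num encodes both direction and channel index: tx channels occupy
 * slots [0, CPDMA_MAX_CHANNELS) of ctlr->channels[] and rx channels the
 * slots above that (see is_rx_chan()/__chan_linear() in davinci_cpdma.h).
 * The per-channel register offset below is derived from the linear
 * channel number.
 */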
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler)
{
        struct cpdma_chan *chan;
        int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
        unsigned long flags;

        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return NULL;

        chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return ERR_PTR(-ENOMEM);

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->channels[chan_num]) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                devm_kfree(ctlr->dev, chan);
                return ERR_PTR(-EBUSY);
        }

        chan->ctlr      = ctlr;
        chan->state     = CPDMA_STATE_IDLE;
        chan->chan_num  = chan_num;
        chan->handler   = handler;
        chan->desc_num = ctlr->pool->num_desc / 2;

        if (is_rx_chan(chan)) {
                chan->hdp       = ctlr->params.rxhdp + offset;
                chan->cp        = ctlr->params.rxcp + offset;
                chan->rxfree    = ctlr->params.rxfree + offset;
                chan->int_set   = CPDMA_RXINTMASKSET;
                chan->int_clear = CPDMA_RXINTMASKCLEAR;
                chan->td        = CPDMA_RXTEARDOWN;
                chan->dir       = DMA_FROM_DEVICE;
        } else {
                chan->hdp       = ctlr->params.txhdp + offset;
                chan->cp        = ctlr->params.txcp + offset;
                chan->int_set   = CPDMA_TXINTMASKSET;
                chan->int_clear = CPDMA_TXINTMASKCLEAR;
                chan->td        = CPDMA_TXTEARDOWN;
                chan->dir       = DMA_TO_DEVICE;
        }
        chan->mask = BIT(chan_linear(chan));

        spin_lock_init(&chan->lock);

        ctlr->channels[chan_num] = chan;
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
{
        return ctlr->pool->num_desc / 2;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr;
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        ctlr = chan->ctlr;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats)
{
        unsigned long flags;
        if (!chan)
                return -EINVAL;
        spin_lock_irqsave(&chan->lock, flags);
        memcpy(stats, &chan->stats, sizeof(*stats));
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

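/*
 * Chain a prepared descriptor onto the channel.  If the channel was idle
 * the descriptor becomes both head and tail and hdp is written to kick
 * the engine; otherwise it is linked behind the current tail.  If the
 * previous tail had already completed with EOQ set (the "misqueue" race),
 * the engine stopped before seeing the new link, so hdp is rewritten here
 * to restart it.
 */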
static void __cpdma_chan_submit(struct cpdma_chan *chan,
                                struct cpdma_desc __iomem *desc)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *prev = chan->tail;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        u32                             mode;

        desc_dma = desc_phys(pool, desc);

        /* simple case - idle channel */
        if (!chan->head) {
                chan->stats.head_enqueue++;
                chan->head = desc;
                chan->tail = desc;
                if (chan->state == CPDMA_STATE_ACTIVE)
                        chan_write(chan, hdp, desc_dma);
                return;
        }

        /* first chain the descriptor at the tail of the list */
        desc_write(prev, hw_next, desc_dma);
        chan->tail = desc;
        chan->stats.tail_enqueue++;

        /* next check if EOQ has been triggered already */
        mode = desc_read(prev, hw_mode);
        if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
            (chan->state == CPDMA_STATE_ACTIVE)) {
                desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
                chan_write(chan, hdp, desc_dma);
                chan->stats.misqueued++;
        }
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, int directed)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        dma_addr_t                      buffer;
        unsigned long                   flags;
        u32                             mode;
        int                             ret = 0;

        spin_lock_irqsave(&chan->lock, flags);

        if (chan->state == CPDMA_STATE_TEARDOWN) {
                ret = -EINVAL;
                goto unlock_ret;
        }

        if (chan->count >= chan->desc_num) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        desc = cpdma_desc_alloc(ctlr->pool);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        if (len < ctlr->params.min_packet_size) {
                len = ctlr->params.min_packet_size;
                chan->stats.runt_transmit_buff++;
        }

        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
        ret = dma_mapping_error(ctlr->dev, buffer);
        if (ret) {
                cpdma_desc_free(ctlr->pool, desc, 1);
                ret = -EINVAL;
                goto unlock_ret;
        }

        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, directed);

        desc_write(desc, hw_next,   0);
        desc_write(desc, hw_buffer, buffer);
        desc_write(desc, hw_len,    len);
        desc_write(desc, hw_mode,   mode | len);
        desc_write(desc, sw_token,  token);
        desc_write(desc, sw_buffer, buffer);
        desc_write(desc, sw_len,    len);

        __cpdma_chan_submit(chan, desc);

        if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
                chan_write(chan, rxfree, 1);

        chan->count++;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        bool                    free_tx_desc;
        unsigned long           flags;

        spin_lock_irqsave(&chan->lock, flags);
        free_tx_desc = (chan->count < chan->desc_num) &&
                         gen_pool_avail(pool->gen_pool);
        spin_unlock_irqrestore(&chan->lock, flags);
        return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      buff_dma;
        int                             origlen;
        void                            *token;

        token      = (void *)desc_read(desc, sw_token);
        buff_dma   = desc_read(desc, sw_buffer);
        origlen    = desc_read(desc, sw_len);

        dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)(token, outlen, status);
}

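/*
 * Reap one completed descriptor from the head of the channel.  Returns
 * -ENOENT when the queue is empty and -EBUSY when the head is still owned
 * by the hardware; otherwise the descriptor is acked through the cp
 * register, the head advances, and hdp is rewritten if the engine stopped
 * on EOQ.  The completion callback runs without the channel lock held.
 */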
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        int                             status, outlen;
        int                             cb_status = 0;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        unsigned long                   flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = chan->head;
        if (!desc) {
                chan->stats.empty_dequeue++;
                status = -ENOENT;
                goto unlock_ret;
        }
        desc_dma = desc_phys(pool, desc);

        status  = __raw_readl(&desc->hw_mode);
        outlen  = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
                status = -EBUSY;
                goto unlock_ret;
        }

        if (status & CPDMA_DESC_PASS_CRC)
                outlen -= CPDMA_DESC_CRC_LEN;

        status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
                            CPDMA_DESC_PORT_MASK);

        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
        chan->count--;
        chan->stats.good_dequeue++;

        if (status & CPDMA_DESC_EOQ) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
                cb_status = -ENOSYS;
        else
                cb_status = status;

        __cpdma_chan_free(chan, desc, outlen, cb_status);
        return status;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
        int used = 0, ret = 0;

        if (chan->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        while (used < quota) {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
                used++;
        }
        return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long           flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

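/*
 * Tear a channel down: mask its interrupt, request teardown, poll the cp
 * register for the CPDMA_TEARDOWN_VALUE acknowledgement, drain whatever
 * completed in the meantime, and finally release the remaining queued
 * descriptors with a -ENOSYS status so clients can reclaim their buffers.
 */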
int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long           flags;
        int                     ret;
        unsigned                timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan_linear(chan));

        /* wait for teardown complete */
        timeout = 100 * 100; /* 100 ms */
        while (timeout) {
                u32 cp = chan_read(chan, cp);
                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                udelay(10);
                timeout--;
        }
        WARN_ON(!timeout);
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets */
        spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
        spin_lock_irqsave(&chan->lock, flags);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->count--;
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

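/*
 * Table-driven access to the bit fields of the extended registers
 * (DMACONTROL, DMASTATUS, RXBUFFOFS).  cpdma_control_get()/_set() below
 * look a field up by its CPDMA_* index, enforce its RO/WO/RW access mode
 * and shift/mask the value in place, e.g.
 * cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, 1).  Only usable when
 * cpdma_params.has_ext_regs is set.
 */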
struct cpdma_control_info {
        u32             reg;
        u32             shift, mask;
        int             access;
#define ACCESS_RO       BIT(0)
#define ACCESS_WO       BIT(1)
#define ACCESS_RW       (ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
        [CPDMA_CMD_IDLE]          = {CPDMA_DMACONTROL,  3,  1,      ACCESS_WO},
        [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,  4,  1,      ACCESS_RW},
        [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,  2,  1,      ACCESS_RW},
        [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,  1,  1,      ACCESS_RW},
        [CPDMA_TX_PRIO_FIXED]     = {CPDMA_DMACONTROL,  0,  1,      ACCESS_RW},
        [CPDMA_STAT_IDLE]         = {CPDMA_DMASTATUS,   31, 1,      ACCESS_RO},
        [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,   20, 0xf,    ACCESS_RW},
        [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,   16, 0x7,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,   12, 0xf,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,   8,  0x7,    ACCESS_RW},
        [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,   0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        unsigned long flags;
        struct cpdma_control_info *info = &controls[control];
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;

        ret = -EPERM;
        if ((info->access & ACCESS_RO) != ACCESS_RO)
                goto unlock_ret;

        ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        unsigned long flags;
        struct cpdma_control_info *info = &controls[control];
        int ret;
        u32 val;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;

        ret = -EPERM;
        if ((info->access & ACCESS_WO) != ACCESS_WO)
                goto unlock_ret;

        val  = dma_reg_read(ctlr, info->reg);
        val &= ~(info->mask << info->shift);
        val |= (value & info->mask) << info->shift;
        dma_reg_write(ctlr, info->reg, val);
        ret = 0;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");