dmaengine: omap-dma: provide a hook to get the underlying DMA platform ops
drivers/dma/omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        struct tasklet_struct task;
        struct list_head pending;
        struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
        struct virt_dma_chan vc;
        struct list_head node;
        struct omap_system_dma_plat_info *plat;

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
        bool paused;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};

struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
};

struct omap_desc {
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;

        int16_t fi;             /* for OMAP_DMA_SYNC_PACKET */
        uint8_t es;             /* OMAP_DMA_DATA_TYPE_xxx */
        uint8_t sync_mode;      /* OMAP_DMA_SYNC_xxx */
        uint8_t sync_type;      /* OMAP_DMA_xxx_SYNC* */
        uint8_t periph_port;    /* Peripheral port */

        unsigned sglen;
        struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
        [OMAP_DMA_DATA_TYPE_S8] = 1,
        [OMAP_DMA_DATA_TYPE_S16] = 2,
        [OMAP_DMA_DATA_TYPE_S32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
        .filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct omap_desc, vd));
}

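/*
 * Program the memory-side address for one scatterlist entry, set the
 * transfer parameters, and start the hardware channel.  The device
 * side was programmed by omap_dma_start_desc().
 */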
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        unsigned idx)
{
        struct omap_sg *sg = d->sg + idx;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
        else
                omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

        omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
                d->sync_mode, c->dma_sig, d->sync_type);

        omap_start_dma(c->dma_ch);
}

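/*
 * Take the next descriptor off the virtual channel, program the
 * constant device-side address, and kick off the first scatterlist
 * entry.  Called with c->vc.lock held.
 */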
static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_src_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
        else
                omap_set_dma_dest_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

        omap_dma_start_sg(c, d, 0);
}

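/*
 * Interrupt callback from the legacy OMAP DMA API: advance to the next
 * scatterlist entry, or complete the descriptor and start the next one.
 * Cyclic transfers just signal the period callback.
 */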
static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (!c->cyclic) {
                        if (++c->sgidx < d->sglen) {
                                omap_dma_start_sg(c, d, c->sgidx);
                        } else {
                                omap_dma_start_desc(c);
                                vchan_cookie_complete(&d->vd);
                        }
                } else {
                        vchan_cyclic_callback(&d->vd);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with the case where all channels are
 * in use.
 */
static void omap_dma_sched(unsigned long data)
{
        struct omap_dmadev *d = (struct omap_dmadev *)data;
        LIST_HEAD(head);

        spin_lock_irq(&d->lock);
        list_splice_tail_init(&d->pending, &head);
        spin_unlock_irq(&d->lock);

        while (!list_empty(&head)) {
                struct omap_chan *c = list_first_entry(&head,
                        struct omap_chan, node);

                spin_lock_irq(&c->vc.lock);
                list_del_init(&c->node);
                omap_dma_start_desc(c);
                spin_unlock_irq(&c->vc.lock);
        }
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

        return omap_request_dma(c->dma_sig, "DMA engine",
                omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

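/* Size of one sg entry in elements: EN elements per frame, FN frames. */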
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }
        return size;
}

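/*
 * Report cookie status and residue.  For the descriptor currently on
 * the hardware, the residue is computed from the current source or
 * destination position.
 */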
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct omap_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_get_dma_src_pos(c->dma_ch);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = omap_get_dma_dst_pos(c->dma_ch);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

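/*
 * Submit issued descriptors: non-cyclic channels are queued and started
 * from the tasklet, cyclic (audio) channels are started immediately.
 */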
static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
                /*
                 * c->cyclic is used only by audio, and in that case the
                 * DMA needs to be started without delay.
                 */
                if (!c->cyclic) {
                        struct omap_dmadev *d = to_omap_dma_dev(chan->device);
                        spin_lock(&d->lock);
                        if (list_empty(&c->node))
                                list_add_tail(&c->node, &d->pending);
                        spin_unlock(&d->lock);
                        tasklet_schedule(&d->task);
                } else {
                        omap_dma_start_desc(c);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

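/*
 * Prepare a slave scatter-gather transfer.  Each sg entry becomes one
 * set of frames: e.g. a 4-byte bus width with maxburst 16 gives
 * 64-byte frames, so a 4096-byte segment is programmed as EN = 16,
 * FN = 64.
 */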
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, j = 0, es, en, frame_bytes, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }
        /* Now allocate and set up the descriptor. */
        d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;
        d->sync_mode = OMAP_DMA_SYNC_FRAME;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_TIPB;

        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN).  Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: DMA engine defines burst to be the number of dev-width
         * transfers.
         */
        en = burst;
        frame_bytes = es_bytes[es] * en;
        for_each_sg(sgl, sgent, sglen, i) {
                d->sg[j].addr = sg_dma_address(sgent);
                d->sg[j].en = en;
                d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
                j++;
        }

        d->sglen = j;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

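/*
 * Prepare a cyclic (e.g. audio) transfer: a single sg entry in which
 * each frame is one period, with the channel linked back to itself so
 * the hardware loops over the buffer.
 */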
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
        void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }
        /* Now allocate and set up the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->fi = burst;
        d->es = es;
        if (burst)
                d->sync_mode = OMAP_DMA_SYNC_PACKET;
        else
                d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_MPUI;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;

        if (!c->cyclic) {
                c->cyclic = true;
                omap_dma_link_lch(c->dma_ch, c->dma_ch);

                if (flags & DMA_PREP_INTERRUPT)
                        omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);

                omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
        }

        if (dma_omap2plus()) {
                omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
                omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
        }

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}

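/* Stop the channel and free all queued and in-flight descriptors. */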
static int omap_dma_terminate_all(struct omap_chan *c)
{
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_stop_dma() returns (even if it does, it will see
         * that c->desc is NULL and exit).
         */
        if (c->desc) {
                c->desc = NULL;
                /* Avoid stopping the dma twice */
                if (!c->paused)
                        omap_stop_dma(c->dma_ch);
        }

        if (c->cyclic) {
                c->cyclic = false;
                c->paused = false;
                omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (!c->paused) {
                omap_stop_dma(c->dma_ch);
                c->paused = true;
        }

        return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (c->paused) {
                omap_start_dma(c->dma_ch);
                c->paused = false;
        }

        return 0;
}

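/* dmaengine device_control entry point: dispatch slave commands. */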
static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;

        switch (cmd) {
        case DMA_SLAVE_CONFIG:
                ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
                break;

        case DMA_TERMINATE_ALL:
                ret = omap_dma_terminate_all(c);
                break;

        case DMA_PAUSE:
                ret = omap_dma_pause(c);
                break;

        case DMA_RESUME:
                ret = omap_dma_resume(c);
                break;

        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->plat = od->plat;
        c->dma_sig = dma_sig;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);
        INIT_LIST_HEAD(&c->node);

        od->ddev.chancnt++;

        return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
        tasklet_kill(&od->task);
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
}

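/*
 * Probe: fetch the underlying DMA platform ops via omap_get_plat_info(),
 * deferring probe until they are available, then register the dmaengine
 * device and its channels.
 */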
static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        int rc, i;

        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        od->plat = omap_get_plat_info();
        if (!od->plat)
                return -EPROBE_DEFER;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_control = omap_dma_control;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
        spin_lock_init(&od->lock);

        tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

        for (i = 0; i < 127; i++) {
                rc = omap_dma_chan_init(od, i);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
                return rc;
        }

        platform_set_drvdata(pdev, od);

        if (pdev->dev.of_node) {
                omap_dma_info.dma_cap = od->ddev.cap_mask;

                /* Device-tree DMA controller registration */
                rc = of_dma_controller_register(pdev->dev.of_node,
                                of_dma_simple_xlate, &omap_dma_info);
                if (rc) {
                        pr_warn("OMAP-DMA: failed to register DMA controller\n");
                        dma_async_device_unregister(&od->ddev);
                        omap_dma_free(od);
                }
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver\n");

        return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);

        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);

        dma_async_device_unregister(&od->ddev);
        omap_dma_free(od);

        return 0;
}

static const struct of_device_id omap_dma_match[] = {
        { .compatible = "ti,omap2420-sdma", },
        { .compatible = "ti,omap2430-sdma", },
        { .compatible = "ti,omap3430-sdma", },
        { .compatible = "ti,omap3630-sdma", },
        { .compatible = "ti,omap4430-sdma", },
        {},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
        .probe  = omap_dma_probe,
        .remove = omap_dma_remove,
        .driver = {
                .name = "omap-dma-engine",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(omap_dma_match),
        },
};

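/*
 * Match a channel on this device by DMA request line; used both by
 * legacy dma_request_channel() clients and, via omap_dma_info, by the
 * device-tree xlate.
 */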
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                return req == c->dma_sig;
        }
        return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
        return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");