/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>		/* of_find_node_by_phandle(), used below */
#include <linux/clk.h>		/* clk_get()/clk_enable(), used below */

#include "dmaengine.h"

#define NR_DEFAULT_DESC	16
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/* Allocated to some channel during prep_xxx;
	 * may also be sitting on the work_list.
	 */
	PREP,
	/* Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/* Sitting on the channel work_list but xfer done
	 * by PL330 core.
	 */
	DONE,
};
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of to-be-xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel;
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC;
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz;	/* the peripheral fifo width */
	int burst_len;	/* the number of bursts */
	dma_addr_t fifo_addr;

	/* For cyclic capability */
	bool cyclic;
};
struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Operation clock of the DMAC */
	struct clk *clk;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals;	/* keep at end */
};
struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
/* Forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}
static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;

		callback = desc->txd.callback;
		param = desc->txd.callback_param;
		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

	if (chan->device->dev->of_node) {
		const __be32 *prop_value;
		phandle phandle;
		struct device_node *node;

		prop_value = ((struct property *)param)->value;
		phandle = be32_to_cpup(prop_value++);
		node = of_find_node_by_phandle(phandle);
		return ((chan->private == node) &&
				(chan->chan_id == be32_to_cpup(prop_value)));
	}

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);
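
/*
 * Example usage (a sketch, not taken from this file): on a non-DT
 * platform a client would typically grab a channel through the generic
 * dmaengine API, passing the desired peripheral id (the value 4 below
 * is purely hypothetical) for this filter to match:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)4);
 */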
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command.\n");
		return -ENXIO;
	}

	return 0;
}
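
/*
 * Example client configuration (a sketch; the FIFO address and widths
 * are hypothetical and depend on the peripheral): the fifo_addr,
 * burst_sz and burst_len handled above normally arrive through the
 * generic wrapper, which lands in pl330_control():
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= peri_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */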
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->work_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
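
/*
 * Typical client flow against this hook (a sketch, error handling
 * elided): a descriptor returned by one of the prep_* hooks below is
 * queued with dmaengine_submit(), which ends up in pl330_tx_submit(),
 * and the transfer is kicked via dma_async_issue_pending():
 *
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */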
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
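
/*
 * Worked example (values assumed): with a 64-bit data bus (8 bytes),
 * a 16-line data buffer and brst_size = 2 (4-byte beats), burst_len
 * starts at (8 * 16) >> 2 = 32, is clamped to 16, and is then
 * decremented until burst_len << 2 bytes evenly divides len.
 */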
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_DEV_TO_MEM:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}
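
/*
 * Example (a sketch; buffer and period sizes are hypothetical): a
 * client such as an audio driver would set up a cyclic transfer over
 * a 4 KiB buffer in 1 KiB periods, using the signature above:
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_dma,
 *					4096, 1024, DMA_MEM_TO_DEV);
 */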
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
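
/*
 * Example (a sketch; dst, src and len are hypothetical DMA addresses
 * and size): a memcpy client would obtain and fire a descriptor so:
 *
 *	desc = chan->device->device_prep_dma_memcpy(chan, dst, src,
 *					len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */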
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
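
/*
 * Example (a sketch; assumes 'sgl' was mapped with dma_map_sg() and
 * the channel was configured via dmaengine_slave_config() earlier):
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *					DMA_DEV_TO_MEM, 0);
 */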
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifndef CONFIG_PM_RUNTIME
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
			(u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif
	clk_put(pdmac->clk);
probe_err1:
	iounmap(pi->base);
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}
static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif
	clk_put(pdmac->clk);

	kfree(pdmac);

	return 0;
}
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};
static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};
static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");