Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 10 Oct 2012 02:10:41 +0000 (11:10 +0900)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 10 Oct 2012 02:10:41 +0000 (11:10 +0900)
Pull slave-dmaengine updates from Vinod Koul:
 "This time we have Andy updates on dw_dmac which is attempting to make
  this IP block available as PCI and platform device though not fully
  complete this time.

  We also have TI EDMA moving the dma driver to use dmaengine APIs, also
  have a new driver for mmp-tdma, along with bunch of small updates.

  Now for your excitement the merge is little unusual here, while
  merging the auto merge on linux-next picks wrong choice for pl330
  (drivers/dma/pl330.c) and this causes build failure.  The correct
  resolution is in linux-next.  (DMA: PL330: Fix build error) I didn't
  back merge your tree this time as you are better than me so no point
  in doing that for me :)"

Fixed the pl330 conflict as in linux-next, along with trivial header
file conflicts due to changed includes.

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (29 commits)
  dma: tegra: fix interrupt name issue with apb dma.
  dw_dmac: fix a regression in dwc_prep_dma_memcpy
  dw_dmac: introduce software emulation of LLP transfers
  dw_dmac: autoconfigure data_width or get it via platform data
  dw_dmac: autoconfigure block_size or use platform data
  dw_dmac: get number of channels from hardware if possible
  dw_dmac: fill optional encoded parameters in register structure
  dw_dmac: mark dwc_dump_chan_regs as inline
  DMA: PL330: return ENOMEM instead of 0 from pl330_alloc_chan_resources
  DMA: PL330: Remove redundant runtime_suspend/resume functions
  DMA: PL330: Remove controller clock enable/disable
  dmaengine: use kmem_cache_zalloc instead of kmem_cache_alloc/memset
  DMA: PL330: Set the capability of pdm0 and pdm1 as DMA_PRIVATE
  ARM: EXYNOS: Set the capability of pdm0 and pdm1 as DMA_PRIVATE
  dma: tegra: use list_move_tail instead of list_del/list_add_tail
  mxs/dma: Enlarge the CCW descriptor area to 4 pages
  dw_dmac: utilize slave_id to pass request line
  dmaengine: mmp_tdma: add dt support
  dmaengine: mmp-pdma support
  spi: davici - make davinci select edma
  ...

1  2 
arch/arm/mach-spear13xx/spear13xx.c
drivers/dma/mmp_tdma.c
drivers/dma/mxs-dma.c
drivers/dma/pl330.c
drivers/dma/sirf-dma.c
drivers/dma/ste_dma40.c
drivers/dma/tegra20-apb-dma.c
drivers/spi/Kconfig
drivers/spi/spi-davinci.c

@@@ -78,6 -78,9 +78,9 @@@ struct dw_dma_platform_data dmac_plat_d
        .nr_channels = 8,
        .chan_allocation_order = CHAN_ALLOCATION_DESCENDING,
        .chan_priority = CHAN_PRIORITY_DESCENDING,
+       .block_size = 4095U,
+       .nr_masters = 2,
+       .data_width = { 3, 3, 0, 0 },
  };
  
  void __init spear13xx_l2x0_init(void)
   */
  struct map_desc spear13xx_io_desc[] __initdata = {
        {
 -              .virtual        = VA_PERIP_GRP2_BASE,
 +              .virtual        = (unsigned long)VA_PERIP_GRP2_BASE,
                .pfn            = __phys_to_pfn(PERIP_GRP2_BASE),
                .length         = SZ_16M,
                .type           = MT_DEVICE
        }, {
 -              .virtual        = VA_PERIP_GRP1_BASE,
 +              .virtual        = (unsigned long)VA_PERIP_GRP1_BASE,
                .pfn            = __phys_to_pfn(PERIP_GRP1_BASE),
                .length         = SZ_16M,
                .type           = MT_DEVICE
        }, {
 -              .virtual        = VA_A9SM_AND_MPMC_BASE,
 +              .virtual        = (unsigned long)VA_A9SM_AND_MPMC_BASE,
                .pfn            = __phys_to_pfn(A9SM_AND_MPMC_BASE),
                .length         = SZ_16M,
                .type           = MT_DEVICE
diff --combined drivers/dma/mmp_tdma.c
@@@ -19,7 -19,8 +19,8 @@@
  #include <linux/platform_device.h>
  #include <linux/device.h>
  #include <mach/regs-icu.h>
 -#include <mach/sram.h>
 +#include <linux/platform_data/dma-mmp_tdma.h>
+ #include <linux/of_device.h>
  
  #include "dmaengine.h"
  
@@@ -127,7 -128,6 +128,6 @@@ struct mmp_tdma_device 
        void __iomem                    *base;
        struct dma_device               device;
        struct mmp_tdma_chan            *tdmac[TDMA_CHANNEL_NUM];
-       int                             irq;
  };
  
  #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
@@@ -358,7 -358,7 +358,7 @@@ struct mmp_tdma_desc *mmp_tdma_alloc_de
  static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
 -              void *context)
 +              unsigned long flags, void *context)
  {
        struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
        struct mmp_tdma_desc *desc;
@@@ -492,7 -492,7 +492,7 @@@ static int __devinit mmp_tdma_chan_init
                return -ENOMEM;
        }
        if (irq)
-               tdmac->irq = irq + idx;
+               tdmac->irq = irq;
        tdmac->dev         = tdev->dev;
        tdmac->chan.device = &tdev->device;
        tdmac->idx         = idx;
        /* add the channel to tdma_chan list */
        list_add_tail(&tdmac->chan.device_node,
                        &tdev->device.channels);
        return 0;
  }
  
+ static struct of_device_id mmp_tdma_dt_ids[] = {
+       { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
+       { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
+       {}
+ };
+ MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
  static int __devinit mmp_tdma_probe(struct platform_device *pdev)
  {
-       const struct platform_device_id *id = platform_get_device_id(pdev);
-       enum mmp_tdma_type type = id->driver_data;
+       enum mmp_tdma_type type;
+       const struct of_device_id *of_id;
        struct mmp_tdma_device *tdev;
        struct resource *iores;
        int i, ret;
-       int irq = 0;
+       int irq = 0, irq_num = 0;
        int chan_num = TDMA_CHANNEL_NUM;
  
+       of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
+       if (of_id)
+               type = (enum mmp_tdma_type) of_id->data;
+       else
+               type = platform_get_device_id(pdev)->driver_data;
        /* always have couple channels */
        tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
        if (!tdev)
                return -ENOMEM;
  
        tdev->dev = &pdev->dev;
-       iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!iores)
-               return -EINVAL;
  
-       if (resource_size(iores) != chan_num)
-               tdev->irq = iores->start;
-       else
-               irq = iores->start;
+       for (i = 0; i < chan_num; i++) {
+               if (platform_get_irq(pdev, i) > 0)
+                       irq_num++;
+       }
  
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!iores)
        if (!tdev->base)
                return -EADDRNOTAVAIL;
  
-       if (tdev->irq) {
-               ret = devm_request_irq(&pdev->dev, tdev->irq,
+       INIT_LIST_HEAD(&tdev->device.channels);
+       if (irq_num != chan_num) {
+               irq = platform_get_irq(pdev, 0);
+               ret = devm_request_irq(&pdev->dev, irq,
                        mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
                if (ret)
                        return ret;
        }
  
-       dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
-       dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
-       INIT_LIST_HEAD(&tdev->device.channels);
        /* initialize channel parameters */
        for (i = 0; i < chan_num; i++) {
+               irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
                ret = mmp_tdma_chan_init(tdev, i, irq, type);
                if (ret)
                        return ret;
        }
  
+       dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
+       dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
        tdev->device.dev = &pdev->dev;
        tdev->device.device_alloc_chan_resources =
                                        mmp_tdma_alloc_chan_resources;
@@@ -595,6 -605,7 +605,7 @@@ static struct platform_driver mmp_tdma_
        .driver         = {
                .name   = "mmp-tdma",
                .owner  = THIS_MODULE,
+               .of_match_table = mmp_tdma_dt_ids,
        },
        .id_table       = mmp_tdma_id_table,
        .probe          = mmp_tdma_probe,
diff --combined drivers/dma/mxs-dma.c
@@@ -101,7 -101,8 +101,8 @@@ struct mxs_dma_ccw 
        u32             pio_words[MXS_PIO_WORDS];
  };
  
- #define NUM_CCW       (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
+ #define CCW_BLOCK_SIZE        (4 * PAGE_SIZE)
+ #define NUM_CCW       (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
  
  struct mxs_dma_chan {
        struct mxs_dma_engine           *mxs_dma;
@@@ -354,14 -355,15 +355,15 @@@ static int mxs_dma_alloc_chan_resources
  
        mxs_chan->chan_irq = data->chan_irq;
  
-       mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
-                               &mxs_chan->ccw_phys, GFP_KERNEL);
+       mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
+                               CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
+                               GFP_KERNEL);
        if (!mxs_chan->ccw) {
                ret = -ENOMEM;
                goto err_alloc;
        }
  
-       memset(mxs_chan->ccw, 0, PAGE_SIZE);
+       memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE);
  
        if (mxs_chan->chan_irq != NO_IRQ) {
                ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
  err_clk:
        free_irq(mxs_chan->chan_irq, mxs_dma);
  err_irq:
-       dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+       dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
                        mxs_chan->ccw, mxs_chan->ccw_phys);
  err_alloc:
        return ret;
@@@ -402,7 -404,7 +404,7 @@@ static void mxs_dma_free_chan_resources
  
        free_irq(mxs_chan->chan_irq, mxs_dma);
  
-       dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+       dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
                        mxs_chan->ccw, mxs_chan->ccw_phys);
  
        clk_disable_unprepare(mxs_dma->clk);
@@@ -531,7 -533,7 +533,7 @@@ err_out
  static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
 -              void *context)
 +              unsigned long flags, void *context)
  {
        struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
diff --combined drivers/dma/pl330.c
@@@ -23,7 -23,6 +23,6 @@@
  #include <linux/dmaengine.h>
  #include <linux/amba/bus.h>
  #include <linux/amba/pl330.h>
- #include <linux/pm_runtime.h>
  #include <linux/scatterlist.h>
  #include <linux/of.h>
  
@@@ -522,7 -521,7 +521,7 @@@ enum desc_status 
        /* In the DMAC pool */
        FREE,
        /*
 -       * Allocted to some channel during prep_xxx
 +       * Allocated to some channel during prep_xxx
         * Also may be sitting on the work_list.
         */
        PREP,
@@@ -586,8 -585,6 +585,6 @@@ struct dma_pl330_dmac 
  
        /* Peripheral channels connected to this DMAC */
        struct dma_pl330_chan *peripherals; /* keep at end */
-       struct clk *clk;
  };
  
  struct dma_pl330_desc {
@@@ -1567,19 -1564,17 +1564,19 @@@ static int pl330_submit_req(void *ch_id
                goto xfer_exit;
        }
  
 -      /* Prefer Secure Channel */
 -      if (!_manager_ns(thrd))
 -              r->cfg->nonsecure = 0;
 -      else
 -              r->cfg->nonsecure = 1;
  
        /* Use last settings, if not provided */
 -      if (r->cfg)
 +      if (r->cfg) {
 +              /* Prefer Secure Channel */
 +              if (!_manager_ns(thrd))
 +                      r->cfg->nonsecure = 0;
 +              else
 +                      r->cfg->nonsecure = 1;
 +
                ccr = _prepare_ccr(r->cfg);
 -      else
 +      } else {
                ccr = readl(regs + CC(thrd->id));
 +      }
  
        /* If this req doesn't have valid xfer settings */
        if (!_is_valid(ccr)) {
@@@ -2395,7 -2390,7 +2392,7 @@@ static int pl330_alloc_chan_resources(s
        pch->pl330_chid = pl330_request_channel(&pdmac->pif);
        if (!pch->pl330_chid) {
                spin_unlock_irqrestore(&pch->lock, flags);
-               return 0;
+               return -ENOMEM;
        }
  
        tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
@@@ -2685,7 -2680,7 +2682,7 @@@ static inline int get_burst_len(struct 
  static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
                size_t period_len, enum dma_transfer_direction direction,
 -              void *context)
 +              unsigned long flags, void *context)
  {
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = to_pchan(chan);
@@@ -2889,29 -2884,17 +2886,17 @@@ pl330_probe(struct amba_device *adev, c
                goto probe_err1;
        }
  
-       pdmac->clk = clk_get(&adev->dev, "dma");
-       if (IS_ERR(pdmac->clk)) {
-               dev_err(&adev->dev, "Cannot get operation clock.\n");
-               ret = -EINVAL;
-               goto probe_err2;
-       }
        amba_set_drvdata(adev, pdmac);
  
- #ifndef CONFIG_PM_RUNTIME
-       /* enable dma clk */
-       clk_enable(pdmac->clk);
- #endif
        irq = adev->irq[0];
        ret = request_irq(irq, pl330_irq_handler, 0,
                        dev_name(&adev->dev), pi);
        if (ret)
-               goto probe_err3;
+               goto probe_err2;
  
        ret = pl330_add(pi);
        if (ret)
-               goto probe_err4;
+               goto probe_err3;
  
        INIT_LIST_HEAD(&pdmac->desc_pool);
        spin_lock_init(&pdmac->pool_lock);
                num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
  
        pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
-               goto probe_err5;
 +      if (!pdmac->peripherals) {
 +              ret = -ENOMEM;
 +              dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
++              goto probe_err4;
 +      }
  
        for (i = 0; i < num_chan; i++) {
                pch = &pdmac->peripherals[i];
                if (pi->pcfg.num_peri) {
                        dma_cap_set(DMA_SLAVE, pd->cap_mask);
                        dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+                       dma_cap_set(DMA_PRIVATE, pd->cap_mask);
                }
        }
  
        ret = dma_async_device_register(pd);
        if (ret) {
                dev_err(&adev->dev, "unable to register DMAC\n");
-               goto probe_err5;
+               goto probe_err4;
        }
  
        dev_info(&adev->dev,
  
        return 0;
  
- probe_err5:
-       pl330_del(pi);
  probe_err4:
-       free_irq(irq, pi);
+       pl330_del(pi);
  probe_err3:
- #ifndef CONFIG_PM_RUNTIME
-       clk_disable(pdmac->clk);
- #endif
-       clk_put(pdmac->clk);
+       free_irq(irq, pi);
  probe_err2:
        iounmap(pi->base);
  probe_err1:
@@@ -3044,10 -3018,6 +3025,6 @@@ static int __devexit pl330_remove(struc
        res = &adev->res;
        release_mem_region(res->start, resource_size(res));
  
- #ifndef CONFIG_PM_RUNTIME
-       clk_disable(pdmac->clk);
- #endif
        kfree(pdmac);
  
        return 0;
@@@ -3063,49 -3033,10 +3040,10 @@@ static struct amba_id pl330_ids[] = 
  
  MODULE_DEVICE_TABLE(amba, pl330_ids);
  
- #ifdef CONFIG_PM_RUNTIME
- static int pl330_runtime_suspend(struct device *dev)
- {
-       struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
-       if (!pdmac) {
-               dev_err(dev, "failed to get dmac\n");
-               return -ENODEV;
-       }
-       clk_disable(pdmac->clk);
-       return 0;
- }
- static int pl330_runtime_resume(struct device *dev)
- {
-       struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
-       if (!pdmac) {
-               dev_err(dev, "failed to get dmac\n");
-               return -ENODEV;
-       }
-       clk_enable(pdmac->clk);
-       return 0;
- }
- #else
- #define pl330_runtime_suspend NULL
- #define pl330_runtime_resume  NULL
- #endif /* CONFIG_PM_RUNTIME */
- static const struct dev_pm_ops pl330_pm_ops = {
-       .runtime_suspend = pl330_runtime_suspend,
-       .runtime_resume = pl330_runtime_resume,
- };
  static struct amba_driver pl330_driver = {
        .drv = {
                .owner = THIS_MODULE,
                .name = "dma-pl330",
-               .pm = &pl330_pm_ops,
        },
        .id_table = pl330_ids,
        .probe = pl330_probe,
diff --combined drivers/dma/sirf-dma.c
@@@ -489,7 -489,7 +489,7 @@@ err_dir
  static struct dma_async_tx_descriptor *
  sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
        size_t buf_len, size_t period_len,
 -      enum dma_transfer_direction direction, void *context)
 +      enum dma_transfer_direction direction, unsigned long flags, void *context)
  {
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc = NULL;
@@@ -570,21 -570,19 +570,19 @@@ static int __devinit sirfsoc_dma_probe(
  
        if (of_property_read_u32(dn, "cell-index", &id)) {
                dev_err(dev, "Fail to get DMAC index\n");
-               ret = -ENODEV;
-               goto free_mem;
+               return -ENODEV;
        }
  
        sdma->irq = irq_of_parse_and_map(dn, 0);
        if (sdma->irq == NO_IRQ) {
                dev_err(dev, "Error mapping IRQ!\n");
-               ret = -EINVAL;
-               goto free_mem;
+               return -EINVAL;
        }
  
        ret = of_address_to_resource(dn, 0, &res);
        if (ret) {
                dev_err(dev, "Error parsing memory region!\n");
-               goto free_mem;
+               goto irq_dispose;
        }
  
        regs_start = res.start;
                goto irq_dispose;
        }
  
-       ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
-               sdma);
+       ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
        if (ret) {
                dev_err(dev, "Error requesting IRQ!\n");
                ret = -EINVAL;
-               goto unmap_mem;
+               goto irq_dispose;
        }
  
        dma = &sdma->dma;
        return 0;
  
  free_irq:
-       devm_free_irq(dev, sdma->irq, sdma);
+       free_irq(sdma->irq, sdma);
  irq_dispose:
        irq_dispose_mapping(sdma->irq);
- unmap_mem:
-       iounmap(sdma->base);
- free_mem:
-       devm_kfree(dev, sdma);
        return ret;
  }
  
@@@ -668,10 -661,8 +661,8 @@@ static int __devexit sirfsoc_dma_remove
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
  
        dma_async_device_unregister(&sdma->dma);
-       devm_free_irq(dev, sdma->irq, sdma);
+       free_irq(sdma->irq, sdma);
        irq_dispose_mapping(sdma->irq);
-       iounmap(sdma->base);
-       devm_kfree(dev, sdma);
        return 0;
  }
  
diff --combined drivers/dma/ste_dma40.c
@@@ -2347,8 -2347,7 +2347,8 @@@ static struct dma_async_tx_descriptor *
  static struct dma_async_tx_descriptor *
  dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                     size_t buf_len, size_t period_len,
 -                   enum dma_transfer_direction direction, void *context)
 +                   enum dma_transfer_direction direction, unsigned long flags,
 +                   void *context)
  {
        unsigned int periods = buf_len / period_len;
        struct dma_async_tx_descriptor *txd;
@@@ -2921,19 -2920,23 +2921,23 @@@ static struct d40_base * __init d40_hw_
        struct d40_base *base = NULL;
        int num_log_chans = 0;
        int num_phy_chans;
+       int clk_ret = -EINVAL;
        int i;
        u32 pid;
        u32 cid;
        u8 rev;
  
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
                d40_err(&pdev->dev, "No matching clock found\n");
                goto failure;
        }
  
-       clk_enable(clk);
+       clk_ret = clk_prepare_enable(clk);
+       if (clk_ret) {
+               d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
+               goto failure;
+       }
  
        /* Get IO for DMAC base address */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
        return base;
  
  failure:
-       if (!IS_ERR(clk)) {
-               clk_disable(clk);
+       if (!clk_ret)
+               clk_disable_unprepare(clk);
+       if (!IS_ERR(clk))
                clk_put(clk);
-       }
        if (virtbase)
                iounmap(virtbase);
        if (res)
@@@ -169,6 -169,7 +169,7 @@@ typedef void (*dma_isr_handler)(struct 
  /* tegra_dma_channel: Channel specific information */
  struct tegra_dma_channel {
        struct dma_chan         dma_chan;
+       char                    name[30];
        bool                    config_init;
        int                     id;
        int                     irq;
@@@ -201,7 -202,7 +202,7 @@@ struct tegra_dma 
        struct clk                      *dma_clk;
        spinlock_t                      global_lock;
        void __iomem                    *base_addr;
 -      struct tegra_dma_chip_data      *chip_data;
 +      const struct tegra_dma_chip_data *chip_data;
  
        /* Some register need to be cache before suspend */
        u32                             reg_gen;
@@@ -475,8 -476,7 +476,7 @@@ static void tegra_dma_abort_all(struct 
        while (!list_empty(&tdc->pending_sg_req)) {
                sgreq = list_first_entry(&tdc->pending_sg_req,
                                                typeof(*sgreq), node);
-               list_del(&sgreq->node);
-               list_add_tail(&sgreq->node, &tdc->free_sg_req);
+               list_move_tail(&sgreq->node, &tdc->free_sg_req);
                if (sgreq->last_sg) {
                        dma_desc = sgreq->dma_desc;
                        dma_desc->dma_status = DMA_ERROR;
@@@ -570,8 -570,7 +570,7 @@@ static void handle_cont_sngl_cycle_dma_
  
        /* If not last req then put at end of pending list */
        if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
-               list_del(&sgreq->node);
-               list_add_tail(&sgreq->node, &tdc->pending_sg_req);
+               list_move_tail(&sgreq->node, &tdc->pending_sg_req);
                sgreq->configured = false;
                st = handle_continuous_head_request(tdc, sgreq, to_terminate);
                if (!st)
@@@ -990,7 -989,7 +989,7 @@@ static struct dma_async_tx_descriptor *
  struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
        struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
 -      void *context)
 +      unsigned long flags, void *context)
  {
        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
        struct tegra_dma_desc *dma_desc = NULL;
@@@ -1173,14 -1172,14 +1172,14 @@@ static void tegra_dma_free_chan_resourc
  }
  
  /* Tegra20 specific DMA controller information */
 -static struct tegra_dma_chip_data tegra20_dma_chip_data = {
 +static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
        .nr_channels            = 16,
        .max_dma_count          = 1024UL * 64,
  };
  
  #if defined(CONFIG_OF)
  /* Tegra30 specific DMA controller information */
 -static struct tegra_dma_chip_data tegra30_dma_chip_data = {
 +static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
        .nr_channels            = 32,
        .max_dma_count          = 1024UL * 64,
  };
@@@ -1204,7 -1203,7 +1203,7 @@@ static int __devinit tegra_dma_probe(st
        struct tegra_dma *tdma;
        int ret;
        int i;
 -      struct tegra_dma_chip_data *cdata = NULL;
 +      const struct tegra_dma_chip_data *cdata = NULL;
  
        if (pdev->dev.of_node) {
                const struct of_device_id *match;
        INIT_LIST_HEAD(&tdma->dma_dev.channels);
        for (i = 0; i < cdata->nr_channels; i++) {
                struct tegra_dma_channel *tdc = &tdma->channels[i];
-               char irq_name[30];
  
                tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
                                        i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
                        goto err_irq;
                }
                tdc->irq = res->start;
-               snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i);
+               snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
                ret = devm_request_irq(&pdev->dev, tdc->irq,
-                               tegra_dma_isr, 0, irq_name, tdc);
+                               tegra_dma_isr, 0, tdc->name, tdc);
                if (ret) {
                        dev_err(&pdev->dev,
                                "request_irq failed with err %d channel %d\n",
diff --combined drivers/spi/Kconfig
@@@ -134,6 -134,7 +134,7 @@@ config SPI_DAVINC
        tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
        depends on ARCH_DAVINCI
        select SPI_BITBANG
+       select TI_EDMA
        help
          SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
  
@@@ -237,13 -238,6 +238,13 @@@ config SPI_OC_TIN
        help
          This is the driver for OpenCores tiny SPI master controller.
  
 +config SPI_OCTEON
 +      tristate "Cavium OCTEON SPI controller"
 +      depends on CPU_CAVIUM_OCTEON
 +      help
 +        SPI host driver for the hardware found on some Cavium OCTEON
 +        SOCs.
 +
  config SPI_OMAP_UWIRE
        tristate "OMAP1 MicroWire"
        depends on ARCH_OMAP1
@@@ -332,12 -326,6 +333,12 @@@ config SPI_S3C64X
        help
          SPI driver for Samsung S3C64XX and newer SoCs.
  
 +config SPI_SC18IS602
 +      tristate "NXP SC18IS602/602B/603 I2C to SPI bridge"
 +      depends on I2C
 +      help
 +        SPI driver for NXP SC18IS602/602B/603 I2C to SPI bridge.
 +
  config SPI_SH_MSIOF
        tristate "SuperH MSIOF SPI controller"
        depends on SUPERH && HAVE_CLK
@@@ -377,12 -365,11 +378,12 @@@ config SPI_STMP3XX
        help
          SPI driver for Freescale STMP37xx/378x SoC SSP interface
  
 -config SPI_TEGRA
 -      tristate "Nvidia Tegra SPI controller"
 -      depends on ARCH_TEGRA && (TEGRA_SYSTEM_DMA || TEGRA20_APB_DMA)
 +config SPI_MXS
 +      tristate "Freescale MXS SPI controller"
 +      depends on ARCH_MXS
 +      select STMP_DEVICE
        help
 -        SPI driver for NVidia Tegra SoCs
 +        SPI driver for Freescale MXS devices.
  
  config SPI_TI_SSP
        tristate "TI Sequencer Serial Port - SPI Support"
  #include <linux/platform_device.h>
  #include <linux/err.h>
  #include <linux/clk.h>
+ #include <linux/dmaengine.h>
  #include <linux/dma-mapping.h>
+ #include <linux/edma.h>
  #include <linux/spi/spi.h>
  #include <linux/spi/spi_bitbang.h>
  #include <linux/slab.h>
  
 -#include <mach/spi.h>
 +#include <linux/platform_data/spi-davinci.h>
- #include <mach/edma.h>
  
  #define SPI_NO_RESOURCE               ((resource_size_t)-1)
  
  #define SPIDEF                0x4c
  #define SPIFMT0               0x50
  
- /* We have 2 DMA channels per CS, one for RX and one for TX */
- struct davinci_spi_dma {
-       int                     tx_channel;
-       int                     rx_channel;
-       int                     dummy_param_slot;
-       enum dma_event_q        eventq;
- };
  /* SPI Controller driver's private data. */
  struct davinci_spi {
        struct spi_bitbang      bitbang;
  
        const void              *tx;
        void                    *rx;
- #define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1)
-       u8                      rx_tmp_buf[SPI_TMP_BUFSZ];
        int                     rcount;
        int                     wcount;
-       struct davinci_spi_dma  dma;
+       struct dma_chan         *dma_rx;
+       struct dma_chan         *dma_tx;
+       int                     dma_rx_chnum;
+       int                     dma_tx_chnum;
        struct davinci_spi_platform_data *pdata;
  
        void                    (*get_rx)(u32 rx_data, struct davinci_spi *);
@@@ -496,21 -492,23 +492,23 @@@ out
        return errors;
  }
  
- static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
+ static void davinci_spi_dma_rx_callback(void *data)
  {
-       struct davinci_spi *dspi = data;
-       struct davinci_spi_dma *dma = &dspi->dma;
+       struct davinci_spi *dspi = (struct davinci_spi *)data;
  
-       edma_stop(lch);
+       dspi->rcount = 0;
  
-       if (status == DMA_COMPLETE) {
-               if (lch == dma->rx_channel)
-                       dspi->rcount = 0;
-               if (lch == dma->tx_channel)
-                       dspi->wcount = 0;
-       }
+       if (!dspi->wcount && !dspi->rcount)
+               complete(&dspi->done);
+ }
  
-       if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE))
+ static void davinci_spi_dma_tx_callback(void *data)
+ {
+       struct davinci_spi *dspi = (struct davinci_spi *)data;
+       dspi->wcount = 0;
+       if (!dspi->wcount && !dspi->rcount)
                complete(&dspi->done);
  }
  
  static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
  {
        struct davinci_spi *dspi;
-       int data_type, ret;
+       int data_type, ret = -ENOMEM;
        u32 tx_data, spidat1;
        u32 errors = 0;
        struct davinci_spi_config *spicfg;
        struct davinci_spi_platform_data *pdata;
        unsigned uninitialized_var(rx_buf_count);
-       struct device *sdev;
+       void *dummy_buf = NULL;
+       struct scatterlist sg_rx, sg_tx;
  
        dspi = spi_master_get_devdata(spi->master);
        pdata = dspi->pdata;
        spicfg = (struct davinci_spi_config *)spi->controller_data;
        if (!spicfg)
                spicfg = &davinci_spi_default_cfg;
-       sdev = dspi->bitbang.master->dev.parent;
  
        /* convert len to words based on bits_per_word */
        data_type = dspi->bytes_per_word[spi->chip_select];
                spidat1 |= tx_data & 0xFFFF;
                iowrite32(spidat1, dspi->base + SPIDAT1);
        } else {
-               struct davinci_spi_dma *dma;
-               unsigned long tx_reg, rx_reg;
-               struct edmacc_param param;
-               void *rx_buf;
-               int b, c;
-               dma = &dspi->dma;
-               tx_reg = (unsigned long)dspi->pbase + SPIDAT1;
-               rx_reg = (unsigned long)dspi->pbase + SPIBUF;
-               /*
-                * Transmit DMA setup
-                *
-                * If there is transmit data, map the transmit buffer, set it
-                * as the source of data and set the source B index to data
-                * size. If there is no transmit data, set the transmit register
-                * as the source of data, and set the source B index to zero.
-                *
-                * The destination is always the transmit register itself. And
-                * the destination never increments.
-                */
-               if (t->tx_buf) {
-                       t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
-                                               t->len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(&spi->dev, t->tx_dma)) {
-                               dev_dbg(sdev, "Unable to DMA map %d bytes"
-                                               "TX buffer\n", t->len);
-                               return -ENOMEM;
-                       }
-               }
-               /*
-                * If number of words is greater than 65535, then we need
-                * to configure a 3 dimension transfer.  Use the BCNTRLD
-                * feature to allow for transfers that aren't even multiples
-                * of 65535 (or any other possible b size) by first transferring
-                * the remainder amount then grabbing the next N blocks of
-                * 65535 words.
-                */
-               c = dspi->wcount / (SZ_64K - 1);        /* N 65535 Blocks */
-               b = dspi->wcount - c * (SZ_64K - 1);    /* Remainder */
-               if (b)
-                       c++;
+               struct dma_slave_config dma_rx_conf = {
+                       .direction = DMA_DEV_TO_MEM,
+                       .src_addr = (unsigned long)dspi->pbase + SPIBUF,
+                       .src_addr_width = data_type,
+                       .src_maxburst = 1,
+               };
+               struct dma_slave_config dma_tx_conf = {
+                       .direction = DMA_MEM_TO_DEV,
+                       .dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
+                       .dst_addr_width = data_type,
+                       .dst_maxburst = 1,
+               };
+               struct dma_async_tx_descriptor *rxdesc;
+               struct dma_async_tx_descriptor *txdesc;
+               void *buf;
+               dummy_buf = kzalloc(t->len, GFP_KERNEL);
+               if (!dummy_buf)
+                       goto err_alloc_dummy_buf;
+               dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
+               dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);
+               sg_init_table(&sg_rx, 1);
+               if (!t->rx_buf)
+                       buf = dummy_buf;
                else
-                       b = SZ_64K - 1;
-               param.opt = TCINTEN | EDMA_TCC(dma->tx_channel);
-               param.src = t->tx_buf ? t->tx_dma : tx_reg;
-               param.a_b_cnt = b << 16 | data_type;
-               param.dst = tx_reg;
-               param.src_dst_bidx = t->tx_buf ? data_type : 0;
-               param.link_bcntrld = 0xffffffff;
-               param.src_dst_cidx = t->tx_buf ? data_type : 0;
-               param.ccnt = c;
-               edma_write_slot(dma->tx_channel, &param);
-               edma_link(dma->tx_channel, dma->dummy_param_slot);
-               /*
-                * Receive DMA setup
-                *
-                * If there is receive buffer, use it to receive data. If there
-                * is none provided, use a temporary receive buffer. Set the
-                * destination B index to 0 so effectively only one byte is used
-                * in the temporary buffer (address does not increment).
-                *
-                * The source of receive data is the receive data register. The
-                * source address never increments.
-                */
-               if (t->rx_buf) {
-                       rx_buf = t->rx_buf;
-                       rx_buf_count = t->len;
-               } else {
-                       rx_buf = dspi->rx_tmp_buf;
-                       rx_buf_count = sizeof(dspi->rx_tmp_buf);
+                       buf = t->rx_buf;
+               t->rx_dma = dma_map_single(&spi->dev, buf,
+                               t->len, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&spi->dev, t->rx_dma)) {
+                       ret = -EFAULT;
+                       goto err_rx_map;
                }
+               sg_dma_address(&sg_rx) = t->rx_dma;
+               sg_dma_len(&sg_rx) = t->len;
  
-               t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count,
-                                                       DMA_FROM_DEVICE);
-               if (dma_mapping_error(&spi->dev, t->rx_dma)) {
-                       dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
-                                                               rx_buf_count);
-                       if (t->tx_buf)
-                               dma_unmap_single(&spi->dev, t->tx_dma, t->len,
-                                                               DMA_TO_DEVICE);
-                       return -ENOMEM;
+               sg_init_table(&sg_tx, 1);
+               if (!t->tx_buf)
+                       buf = dummy_buf;
+               else
+                       buf = (void *)t->tx_buf;
+               t->tx_dma = dma_map_single(&spi->dev, buf,
+                               t->len, DMA_TO_DEVICE);
+               if (dma_mapping_error(&spi->dev, t->tx_dma)) {
+                       ret = -EFAULT;
+                       goto err_tx_map;
                }
-               param.opt = TCINTEN | EDMA_TCC(dma->rx_channel);
-               param.src = rx_reg;
-               param.a_b_cnt = b << 16 | data_type;
-               param.dst = t->rx_dma;
-               param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
-               param.link_bcntrld = 0xffffffff;
-               param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16;
-               param.ccnt = c;
-               edma_write_slot(dma->rx_channel, &param);
+               sg_dma_address(&sg_tx) = t->tx_dma;
+               sg_dma_len(&sg_tx) = t->len;
+               rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
+                               &sg_rx, 1, DMA_DEV_TO_MEM,
+                               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!rxdesc)
+                       goto err_desc;
+               txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
+                               &sg_tx, 1, DMA_MEM_TO_DEV,
+                               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!txdesc)
+                       goto err_desc;
+               rxdesc->callback = davinci_spi_dma_rx_callback;
+               rxdesc->callback_param = (void *)dspi;
+               txdesc->callback = davinci_spi_dma_tx_callback;
+               txdesc->callback_param = (void *)dspi;
  
                if (pdata->cshold_bug)
                        iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
  
-               edma_start(dma->rx_channel);
-               edma_start(dma->tx_channel);
+               dmaengine_submit(rxdesc);
+               dmaengine_submit(txdesc);
+               dma_async_issue_pending(dspi->dma_rx);
+               dma_async_issue_pending(dspi->dma_tx);
                set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
        }
  
  
        clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
        if (spicfg->io_type == SPI_IO_TYPE_DMA) {
-               if (t->tx_buf)
-                       dma_unmap_single(&spi->dev, t->tx_dma, t->len,
-                                                               DMA_TO_DEVICE);
-               dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count,
-                                                       DMA_FROM_DEVICE);
                clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+               dma_unmap_single(&spi->dev, t->rx_dma,
+                               t->len, DMA_FROM_DEVICE);
+               dma_unmap_single(&spi->dev, t->tx_dma,
+                               t->len, DMA_TO_DEVICE);
+               kfree(dummy_buf);
        }
  
        clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
        }
  
        if (dspi->rcount != 0 || dspi->wcount != 0) {
-               dev_err(sdev, "SPI data transfer error\n");
+               dev_err(&spi->dev, "SPI data transfer error\n");
                return -EIO;
        }
  
        return t->len;
+ err_desc:
+       dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
+ err_tx_map:
+       dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
+ err_rx_map:
+       kfree(dummy_buf);
+ err_alloc_dummy_buf:
+       return ret;
  }
  
  /**
@@@ -751,39 -727,33 +727,33 @@@ static irqreturn_t davinci_spi_irq(s32 
  
  static int davinci_spi_request_dma(struct davinci_spi *dspi)
  {
+       dma_cap_mask_t mask;
+       struct device *sdev = dspi->bitbang.master->dev.parent;
        int r;
-       struct davinci_spi_dma *dma = &dspi->dma;
  
-       r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi,
-                                                               dma->eventq);
-       if (r < 0) {
-               pr_err("Unable to request DMA channel for SPI RX\n");
-               r = -EAGAIN;
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
+                                          &dspi->dma_rx_chnum);
+       if (!dspi->dma_rx) {
+               dev_err(sdev, "request RX DMA channel failed\n");
+               r = -ENODEV;
                goto rx_dma_failed;
        }
  
-       r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi,
-                                                               dma->eventq);
-       if (r < 0) {
-               pr_err("Unable to request DMA channel for SPI TX\n");
-               r = -EAGAIN;
+       dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
+                                          &dspi->dma_tx_chnum);
+       if (!dspi->dma_tx) {
+               dev_err(sdev, "request TX DMA channel failed\n");
+               r = -ENODEV;
                goto tx_dma_failed;
        }
  
-       r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY);
-       if (r < 0) {
-               pr_err("Unable to request SPI TX DMA param slot\n");
-               r = -EAGAIN;
-               goto param_failed;
-       }
-       dma->dummy_param_slot = r;
-       edma_link(dma->dummy_param_slot, dma->dummy_param_slot);
        return 0;
- param_failed:
-       edma_free_channel(dma->tx_channel);
  tx_dma_failed:
-       edma_free_channel(dma->rx_channel);
+       dma_release_channel(dspi->dma_rx);
  rx_dma_failed:
        return r;
  }
@@@ -898,9 -868,8 +868,8 @@@ static int __devinit davinci_spi_probe(
        dspi->bitbang.txrx_bufs = davinci_spi_bufs;
        if (dma_rx_chan != SPI_NO_RESOURCE &&
            dma_tx_chan != SPI_NO_RESOURCE) {
-               dspi->dma.rx_channel = dma_rx_chan;
-               dspi->dma.tx_channel = dma_tx_chan;
-               dspi->dma.eventq = pdata->dma_event_q;
+               dspi->dma_rx_chnum = dma_rx_chan;
+               dspi->dma_tx_chnum = dma_tx_chan;
  
                ret = davinci_spi_request_dma(dspi);
                if (ret)
        return ret;
  
  free_dma:
-       edma_free_channel(dspi->dma.tx_channel);
-       edma_free_channel(dspi->dma.rx_channel);
-       edma_free_slot(dspi->dma.dummy_param_slot);
+       dma_release_channel(dspi->dma_rx);
+       dma_release_channel(dspi->dma_tx);
  free_clk:
        clk_disable(dspi->clk);
        clk_put(dspi->clk);