/*
 * drivers/dma/imx-sdma.c
 *
 * This file contains a driver for the Freescale Smart DMA engine
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *
 * Based on code from Freescale:
 *
 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of_device.h>

#include <mach/sdma.h>
#include <mach/hardware.h>
#define SDMA_H_C0PTR 0x000
#define SDMA_H_INTR 0x004
#define SDMA_H_STATSTOP 0x008
#define SDMA_H_START 0x00c
#define SDMA_H_EVTOVR 0x010
#define SDMA_H_DSPOVR 0x014
#define SDMA_H_HOSTOVR 0x018
#define SDMA_H_EVTPEND 0x01c
#define SDMA_H_DSPENBL 0x020
#define SDMA_H_RESET 0x024
#define SDMA_H_EVTERR 0x028
#define SDMA_H_INTRMSK 0x02c
#define SDMA_H_PSW 0x030
#define SDMA_H_EVTERRDBG 0x034
#define SDMA_H_CONFIG 0x038
#define SDMA_ONCE_ENB 0x040
#define SDMA_ONCE_DATA 0x044
#define SDMA_ONCE_INSTR 0x048
#define SDMA_ONCE_STAT 0x04c
#define SDMA_ONCE_CMD 0x050
#define SDMA_EVT_MIRROR 0x054
#define SDMA_ILLINSTADDR 0x058
#define SDMA_CHN0ADDR 0x05c
#define SDMA_ONCE_RTB 0x060
#define SDMA_XTRIG_CONF1 0x070
#define SDMA_XTRIG_CONF2 0x074
#define SDMA_CHNENBL0_IMX35 0x200
#define SDMA_CHNENBL0_IMX31 0x080
#define SDMA_CHNPRI_0 0x100
/*
 * Buffer descriptor status values.
 */

/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME 0x80
#define DND_END_OF_XFER 0x40
#define DND_UNUSED 0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME 0x40

#define IPCV2_MAX_NODES 50
/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR 0x10000000
/*
 * Buffer descriptor commands.
 */
#define C0_SETCTX 0x07
#define C0_GETCTX 0x03
#define C0_SETDM 0x01
#define C0_SETPM 0x04
#define C0_GETDM 0x02
#define C0_GETPM 0x08

/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS 0x80
/*
 * Mode/Count of data node descriptors - IPCv2
 */
struct sdma_mode_count {
	u32 count : 16; /* size of the buffer pointed by this BD */
	u32 status : 8; /* E,R,I,C,W,D status bits stored here */
	u32 command : 8; /* command mostly used for channel 0 */
};
struct sdma_buffer_descriptor {
	struct sdma_mode_count mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));
/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr	current buffer descriptor processed
 * @base_bd_ptr		first element of buffer descriptor array
 * @unused		padding. The SDMA engine expects an array of 128 byte
 *			sized structures
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));
/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc: program counter
 * @t: test bit: status of arithmetic & test instruction
 * @rpc: return program counter
 * @sf: source fault while loading data
 * @spc: loop start program counter
 * @df: destination fault while storing data
 * @epc: loop end program counter
 */
struct sdma_state_registers {
} __attribute__ ((packed));
/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state: channel state bits
 * @gReg: general registers
 * @mda: burst dma destination address register
 * @msa: burst dma source address register
 * @ms: burst dma status register
 * @md: burst dma data register
 * @pda: peripheral dma destination address register
 * @psa: peripheral dma source address register
 * @ps: peripheral dma status register
 * @pd: peripheral dma data register
 * @ca: CRC polynomial register
 * @cs: CRC accumulator register
 * @dda: dedicated core destination address register
 * @dsa: dedicated core source address register
 * @ds: dedicated core status register
 * @dd: dedicated core data register
 */
struct sdma_context_data {
	struct sdma_state_registers channel_state;
} __attribute__ ((packed));
#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
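/*
 * Illustrative sizing (assuming the usual 4 KiB PAGE_SIZE): a packed
 * sdma_buffer_descriptor is 12 bytes (one 32-bit mode/count word plus two
 * u32 addresses), so NUM_BD works out to 4096 / 12 = 341 descriptors per
 * channel ring.
 */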
/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @sdma		pointer to the SDMA engine for this channel
 * @channel		the channel number, matches dmaengine chan_id + 1
 * @direction		transfer type. Needed for setting SDMA script
 * @peripheral_type	Peripheral type. Needed for setting SDMA script
 * @event_id0		aka dma request line
 * @event_id1		for channels that use 2 events
 * @word_size		peripheral access size
 * @buf_tail		ID of the buffer that was processed
 * @done		channel completion
 * @num_bd		max NUM_BD. Number of descriptors currently being handled
 */
struct sdma_channel {
	struct sdma_engine *sdma;
	unsigned int channel;
	enum dma_transfer_direction direction;
	enum sdma_peripheral_type peripheral_type;
	unsigned int event_id0;
	unsigned int event_id1;
	enum dma_slave_buswidth word_size;
	unsigned int buf_tail;
	struct completion done;
	unsigned int num_bd;
	struct sdma_buffer_descriptor *bd;
	dma_addr_t bd_phys;
	unsigned int pc_from_device, pc_to_device;
	unsigned long flags;
	dma_addr_t per_address;
	u32 event_mask0, event_mask1;
	u32 watermark_level;
	u32 shp_addr, per_addr;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	dma_cookie_t last_completed;
	enum dma_status status;
	unsigned int chn_count;
	unsigned int chn_real_count;
};
#define IMX_DMA_SG_LOOP (1 << 0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453
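/*
 * Illustrative note: 0x414d4453 is the little-endian u32 encoding of the
 * ASCII string "SDMA" ('S' = 0x53, 'D' = 0x44, 'M' = 0x4d, 'A' = 0x41),
 * which is what a valid firmware image carries in its header magic field.
 */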
/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @version_major	increased whenever layout of struct sdma_script_start_addrs changes
 * @version_minor	firmware minor version (for binary compatible changes)
 * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs	Number of script addresses in this image
 * @ram_code_start	offset of SDMA ram image in this firmware image
 * @ram_code_size	size of SDMA ram image
 * @script_addrs	Stores the start address of the SDMA scripts
 *			(in SDMA memory space)
 */
struct sdma_firmware_header {
	u32 magic;
	u32 version_major;
	u32 version_minor;
	u32 script_addrs_start;
	u32 num_script_addrs;
	u32 ram_code_start;
	u32 ram_code_size;
};
enum sdma_devtype {
	IMX31_SDMA,	/* runs on i.mx31 */
	IMX35_SDMA,	/* runs on i.mx35 and later */
};
struct sdma_engine {
	struct device *dev;
	struct device_dma_parameters dma_parms;
	struct sdma_channel channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control *channel_control;
	void __iomem *regs;
	enum sdma_devtype devtype;
	unsigned int num_events;
	struct sdma_context_data *context;
	dma_addr_t context_phys;
	struct dma_device dma_device;
	struct clk *clk;
	struct mutex channel_0_lock;
	struct sdma_script_start_addrs *script_addrs;
};
static struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx31-sdma",
		.driver_data = IMX31_SDMA,
	}, {
		.name = "imx35-sdma",
		.driver_data = IMX35_SDMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
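/*
 * Illustrative device tree node (a sketch, not taken from a real dts; the
 * register address, interrupt number and firmware file name are made up):
 *
 *	sdma@53fd4000 {
 *		compatible = "fsl,imx35-sdma";
 *		reg = <0x53fd4000 0x4000>;
 *		interrupts = <34>;
 *		fsl,sdma-ram-script-name = "sdma-imx35.bin";
 *	};
 *
 * Only the compatible string and the "fsl,sdma-ram-script-name" property
 * are interpreted by this driver; see sdma_probe() below.
 */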
#define SDMA_H_CONFIG_DSPDMA	(1 << 12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS	(1 << 11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	(1 << 4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected */
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
	u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
						      SDMA_CHNENBL0_IMX35);
	return chnenbl0 + event * 4;
}
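/*
 * Illustrative example: on an i.MX35 (SDMA_CHNENBL0_IMX35 = 0x200), DMA
 * event 10 is controlled by the channel-enable register at offset
 * 0x200 + 10 * 4 = 0x228; each of these registers holds one enable bit per
 * channel, as used by sdma_event_enable()/sdma_event_disable() below.
 */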
static int sdma_config_ownership(struct sdma_channel *sdmac,
		bool event_override, bool mcu_override, bool dsp_override)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long evt, mcu, dsp;

	if (event_override && mcu_override && dsp_override)
		return -EINVAL;

	evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
	mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
	dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);

	if (dsp_override)
		dsp &= ~(1 << channel);
	else
		dsp |= (1 << channel);

	if (event_override)
		evt &= ~(1 << channel);
	else
		evt |= (1 << channel);

	if (mcu_override)
		mcu &= ~(1 << channel);
	else
		mcu |= (1 << channel);

	__raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
	__raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
	__raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);

	return 0;
}
/*
 * sdma_run_channel - run a channel and wait till it's done
 */
static int sdma_run_channel(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int ret;

	init_completion(&sdmac->done);
	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
	ret = wait_for_completion_timeout(&sdmac->done, HZ);

	return ret ? 0 : -ETIMEDOUT;
}
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
		u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;

	mutex_lock(&sdma->channel_0_lock);

	buf_virt = dma_alloc_coherent(NULL,
			size,
			&buf_phys, GFP_KERNEL);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel(&sdma->channel[0]);

	dma_free_coherent(NULL, size, buf_virt, buf_phys);

	mutex_unlock(&sdma->channel_0_lock);

	return ret;
}
static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	u32 val;

	val = __raw_readl(sdma->regs + chnenbl);
	val |= (1 << channel);
	__raw_writel(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	u32 val;

	val = __raw_readl(sdma->regs + chnenbl);
	val &= ~(1 << channel);
	__raw_writel(val, sdma->regs + chnenbl);
}
static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (1) {
		bd = &sdmac->bd[sdmac->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR)
			sdmac->status = DMA_ERROR;
		else
			sdmac->status = DMA_IN_PROGRESS;

		bd->mode.status |= BD_DONE;
		sdmac->buf_tail++;
		sdmac->buf_tail %= sdmac->num_bd;

		if (sdmac->desc.callback)
			sdmac->desc.callback(sdmac->desc.callback_param);
	}
}
static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->chn_real_count = 0;
	/*
	 * non loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->num_bd; i++) {
		bd = &sdmac->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_SUCCESS;

	sdmac->last_completed = sdmac->desc.cookie;
	if (sdmac->desc.callback)
		sdmac->desc.callback(sdmac->desc.callback_param);
}
static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
{
	complete(&sdmac->done);

	/* not interested in channel 0 interrupts */
	if (sdmac->channel == 0)
		return;

	if (sdmac->flags & IMX_DMA_SG_LOOP)
		sdma_handle_channel_loop(sdmac);
	else
		mxc_sdma_handle_channel_normal(sdmac);
}
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = __raw_readl(sdma->regs + SDMA_H_INTR);
	__raw_writel(stat, sdma->regs + SDMA_H_INTR);

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];

		mxc_sdma_handle_channel(sdmac);
		stat &= ~(1 << channel);
	}

	return IRQ_HANDLED;
}
/*
 * sets the pc of SDMA script according to the peripheral type
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0, emi_2_emi = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
}
static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
	int ret;

	if (sdmac->direction == DMA_DEV_TO_MEM) {
		load_address = sdmac->pc_from_device;
	} else {
		load_address = sdmac->pc_to_device;
	}

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);

	mutex_lock(&sdma->channel_0_lock);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/* Send by context the event mask, base address for peripheral
	 * and watermark level
	 */
	context->gReg[0] = sdmac->event_mask1;
	context->gReg[1] = sdmac->event_mask0;
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;

	ret = sdma_run_channel(&sdma->channel[0]);

	mutex_unlock(&sdma->channel_0_lock);

	return ret;
}
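/*
 * Illustrative note on the channel-0 command built in sdma_load_context()
 * above (the 128-byte figure is an assumption about the full context layout,
 * not stated in this file): C0_SETDM copies sizeof(*context)/4 words from ARM
 * memory at sdma->context_phys into SDMA data RAM. Channel contexts are laid
 * out back to back starting at word address 2048, so with a 32-word (128-byte)
 * context, channel 3 for example lands at 2048 + 32 * 3 = 2144.
 */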
static void sdma_disable_channel(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	__raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
	sdmac->status = DMA_ERROR;
}
static int sdma_config_channel(struct sdma_channel *sdmac)
{
	int ret;

	sdma_disable_channel(sdmac);

	sdmac->event_mask0 = 0;
	sdmac->event_mask1 = 0;

	if (sdmac->event_id0) {
		if (sdmac->event_id0 > 32)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id0);
	}

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
			if (sdmac->event_id1 > 31)
				sdmac->watermark_level |= 1 << 31;
			sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
			if (sdmac->event_id0 > 31)
				sdmac->watermark_level |= 1 << 30;
		} else {
			sdmac->event_mask0 = 1 << sdmac->event_id0;
			sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
		}
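		/*
		 * Explanatory note based on the assignments above: for a
		 * channel fed by two DMA requests, each event_mask holds the
		 * request number modulo 32, while bits 31 and 30 of
		 * watermark_level flag whether event_id1/event_id0 live in
		 * the upper event bank (events 32..47 on i.MX35) or the
		 * lower one, so the script knows which event register to
		 * watch.
		 */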
		/* Watermark Level */
		sdmac->watermark_level |= sdmac->watermark_level;

		sdmac->shp_addr = sdmac->per_address;
	} else {
		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
	}

	ret = sdma_load_context(sdmac);
static int sdma_set_channel_priority(struct sdma_channel *sdmac,
		unsigned int priority)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (priority < MXC_SDMA_MIN_PRIORITY
	    || priority > MXC_SDMA_MAX_PRIORITY) {
		return -EINVAL;
	}

	__raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);

	return 0;
}
static int sdma_request_channel(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);

	memset(sdmac->bd, 0, PAGE_SIZE);

	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	clk_enable(sdma->clk);

	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);

	init_completion(&sdmac->done);

	return 0;
}
static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
}
static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
{
	dma_cookie_t cookie = sdmac->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	sdmac->chan.cookie = cookie;
	sdmac->desc.cookie = cookie;

	return cookie;
}

static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sdma_channel, chan);
}
static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
	struct sdma_engine *sdma = sdmac->sdma;
	dma_cookie_t cookie;

	spin_lock_irqsave(&sdmac->lock, flags);
	cookie = sdma_assign_cookie(sdmac);
	sdma_enable_channel(sdma, sdmac->channel);
	spin_unlock_irqrestore(&sdmac->lock, flags);

	return cookie;
}
static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = chan->private;
	int prio, ret;

	switch (data->priority) {
	case DMA_PRIO_HIGH:
		prio = 3;
		break;
	case DMA_PRIO_MEDIUM:
		prio = 2;
		break;
	case DMA_PRIO_LOW:
	default:
		prio = 1;
		break;
	}

	sdmac->peripheral_type = data->peripheral_type;
	sdmac->event_id0 = data->dma_request;
	ret = sdma_set_channel_priority(sdmac, prio);
	if (ret)
		return ret;

	ret = sdma_request_channel(sdmac);
	if (ret)
		return ret;

	dma_async_tx_descriptor_init(&sdmac->desc, chan);
	sdmac->desc.tx_submit = sdma_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	sdmac->desc.flags = DMA_CTRL_ACK;

	return 0;
}
static void sdma_free_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	sdma_disable_channel(sdmac);

	if (sdmac->event_id0)
		sdma_event_disable(sdmac, sdmac->event_id0);
	if (sdmac->event_id1)
		sdma_event_disable(sdmac, sdmac->event_id1);

	sdmac->event_id0 = 0;
	sdmac->event_id1 = 0;

	sdma_set_channel_priority(sdmac, 0);

	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);

	clk_disable(sdma->clk);
}
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int ret, i, count;
	int channel = sdmac->channel;
	struct scatterlist *sg;

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;
	sdmac->status = DMA_IN_PROGRESS;

	sdmac->flags = 0;

	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
			sg_len, channel);

	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_out;

	if (sg_len > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, sg_len, NUM_BD);
		ret = -EINVAL;
		goto err_out;
	}

	sdmac->chn_count = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
		int param;

		bd->buffer_addr = sg->dma_address;

		count = sg->length;

		if (count > 0xffff) {
			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
					channel, count, 0xffff);
			ret = -EINVAL;
			goto err_out;
		}

		bd->mode.count = count;
		sdmac->chn_count += count;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
			ret = -EINVAL;
			goto err_out;
		}

		switch (sdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			bd->mode.command = 0;
			if (count & 3 || sg->dma_address & 3)
				return NULL;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			bd->mode.command = 2;
			if (count & 1 || sg->dma_address & 1)
				return NULL;
			break;
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			bd->mode.command = 1;
			break;
		}

		param = BD_DONE | BD_EXTD | BD_CONT;

		if (i + 1 == sg_len) {
			param |= BD_INTR;
			param &= ~BD_CONT;
		}

		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
				i, count, sg->dma_address,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;
	}

	sdmac->num_bd = sg_len;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	return &sdmac->desc;
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}
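/*
 * Note on the bd->mode.command encoding used by the prep routine above (and
 * by sdma_prep_dma_cyclic() below), as implemented in this driver: for 1- and
 * 2-byte peripheral accesses the command equals the bus width (1 or 2), while
 * 32-bit accesses use 0. For example, a slave config with
 * DMA_SLAVE_BUSWIDTH_2_BYTES makes every descriptor of the transfer carry
 * command = 2.
 */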
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int num_periods = buf_len / period_len;
	int channel = sdmac->channel;
	int ret, i = 0, buf = 0;

	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;

	sdmac->status = DMA_IN_PROGRESS;

	sdmac->flags |= IMX_DMA_SG_LOOP;
	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_out;

	if (num_periods > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, num_periods, NUM_BD);
		goto err_out;
	}

	if (period_len > 0xffff) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
				channel, period_len, 0xffff);
		goto err_out;
	}

	while (buf < buf_len) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
		int param;

		bd->buffer_addr = dma_addr;

		bd->mode.count = period_len;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
			bd->mode.command = 0;
		else
			bd->mode.command = sdmac->word_size;

		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
				i, period_len, dma_addr,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	sdmac->num_bd = num_periods;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	return &sdmac->desc;
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}
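/*
 * Illustrative sizing example for the cyclic case above (the numbers are made
 * up): an audio client asking for buf_len = 4096 and period_len = 1024 gets
 * 4096 / 1024 = 4 buffer descriptors, each raising an interrupt (BD_INTR)
 * when its period completes; the last one also carries BD_WRAP so the engine
 * loops back to descriptor 0.
 */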
static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		sdma_disable_channel(sdmac);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			sdmac->per_address = dmaengine_cfg->src_addr;
			sdmac->watermark_level = dmaengine_cfg->src_maxburst *
						dmaengine_cfg->src_addr_width;
			sdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			sdmac->per_address = dmaengine_cfg->dst_addr;
			sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
						dmaengine_cfg->dst_addr_width;
			sdmac->word_size = dmaengine_cfg->dst_addr_width;
		}
		sdmac->direction = dmaengine_cfg->direction;
		return sdma_config_channel(sdmac);
static enum dma_status sdma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie,
			struct dma_tx_state *txstate)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	dma_cookie_t last_used;

	last_used = chan->cookie;

	dma_set_tx_state(txstate, sdmac->last_completed, last_used,
			sdmac->chn_count - sdmac->chn_real_count);

	return sdmac->status;
}
static void sdma_issue_pending(struct dma_chan *chan)
{
	/*
	 * Nothing to do. We only have a single descriptor
	 */
}
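/*
 * Sketch of how a client driver typically drives the dmaengine callbacks
 * implemented above (illustrative only; the channel filter, platform data,
 * buffer handling and the numeric values are placeholders, and the helper
 * names assume the generic dmaengine API of this kernel generation):
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst   = 6,
 *	};
 *
 *	chan = dma_request_channel(mask, filter_fn, &imx_dma_data);
 *	dmaengine_slave_config(chan, &cfg);          // -> sdma_control()
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						   DMA_DEV_TO_MEM, 0);
 *	desc->callback = done_callback;
 *	dmaengine_submit(desc);                      // -> sdma_tx_submit()
 *	dma_async_issue_pending(chan);               // -> sdma_issue_pending()
 */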
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34

static void sdma_add_scripts(struct sdma_engine *sdma,
		const struct sdma_script_start_addrs *addr)
{
	s32 *addr_arr = (s32 *)addr;
	s32 *saddr_arr = (s32 *)sdma->script_addrs;
	int i;

	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];
}
static void sdma_load_firmware(const struct firmware *fw, void *context)
{
	struct sdma_engine *sdma = context;
	const struct sdma_firmware_header *header;
	const struct sdma_script_start_addrs *addr;
	unsigned short *ram_code;

	if (!fw) {
		dev_err(sdma->dev, "firmware not found\n");
		return;
	}

	if (fw->size < sizeof(*header))
		goto err_firmware;

	header = (struct sdma_firmware_header *)fw->data;

	if (header->magic != SDMA_FIRMWARE_MAGIC)
		goto err_firmware;
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;

	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;

	clk_enable(sdma->clk);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
			header->ram_code_size,
			addr->ram_code_start_addr);
	clk_disable(sdma->clk);

	sdma_add_scripts(sdma, addr);

	dev_info(sdma->dev, "loaded firmware %d.%d\n",
			header->version_major,
			header->version_minor);

err_firmware:
	release_firmware(fw);
}
static int __init sdma_get_firmware(struct sdma_engine *sdma,
		const char *fw_name)
{
	int ret;

	ret = request_firmware_nowait(THIS_MODULE,
			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
			GFP_KERNEL, sdma, sdma_load_firmware);

	return ret;
}
static int __init sdma_init(struct sdma_engine *sdma)
{
	int i, ret;
	dma_addr_t ccb_phys;

	switch (sdma->devtype) {
	case IMX31_SDMA:
		sdma->num_events = 32;
		break;
	case IMX35_SDMA:
		sdma->num_events = 48;
		break;
	default:
		dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
			sdma->devtype);
		return -ENODEV;
	}

	clk_enable(sdma->clk);

	/* Be sure SDMA has not started yet */
	__raw_writel(0, sdma->regs + SDMA_H_C0PTR);

	sdma->channel_control = dma_alloc_coherent(NULL,
			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
			sizeof(struct sdma_context_data),
			&ccb_phys, GFP_KERNEL);

	if (!sdma->channel_control) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	sdma->context = (void *)sdma->channel_control +
		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);

	/* Zero-out the CCB structures array just allocated */
	memset(sdma->channel_control, 0,
			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));

	/* disable all channels */
	for (i = 0; i < sdma->num_events; i++)
		__raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));

	/* All channels have priority 0 */
	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		__raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

	ret = sdma_request_channel(&sdma->channel[0]);
	if (ret)
		goto err_dma_alloc;

	sdma_config_ownership(&sdma->channel[0], false, true, false);

	/* Set Command Channel (Channel Zero) */
	__raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);

	/* Set bits of CONFIG register but with static context switching */
	/* FIXME: Check whether to set ACR bit depending on clock ratios */
	__raw_writel(0, sdma->regs + SDMA_H_CONFIG);

	__raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);

	/* Set bits of CONFIG register with given context switching mode */
	__raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	/* Initializes channel's priorities */
	sdma_set_channel_priority(&sdma->channel[0], 7);

	clk_disable(sdma->clk);

	return 0;

err_dma_alloc:
	clk_disable(sdma->clk);
	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
	return ret;
}
static int __init sdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(sdma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	const char *fw_name;
	int ret;
	int irq;
	struct resource *iores;
	struct sdma_platform_data *pdata = pdev->dev.platform_data;
	int i;
	struct sdma_engine *sdma;
	s32 *saddr_arr;

	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	mutex_init(&sdma->channel_0_lock);

	sdma->dev = &pdev->dev;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!iores || irq < 0) {
		ret = -EINVAL;
		goto err_irq;
	}

	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
		ret = -EBUSY;
		goto err_request_region;
	}

	sdma->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdma->clk)) {
		ret = PTR_ERR(sdma->clk);
		goto err_clk;
	}

	sdma->regs = ioremap(iores->start, resource_size(iores));
	if (!sdma->regs) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
	if (ret)
		goto err_request_irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* initially no scripts available */
	saddr_arr = (s32 *)sdma->script_addrs;
	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
		saddr_arr[i] = -EINVAL;
	if (of_id)
		pdev->id_entry = of_id->data;
	sdma->devtype = pdev->id_entry->driver_data;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;
		spin_lock_init(&sdmac->lock);

		sdmac->chan.device = &sdma->dma_device;
		sdmac->channel = i;

		/*
		 * Add the channel to the DMAC list. Do not add channel 0 though
		 * because we need it internally in the SDMA driver. This also means
		 * that channel 0 in dmaengine counting matches sdma channel 1.
		 */
		if (i)
			list_add_tail(&sdmac->chan.device_node,
					&sdma->dma_device.channels);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	if (pdata && pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	if (pdata)
		sdma_get_firmware(sdma, pdata->fw_name);
	else {
		/*
		 * Because the device tree does not encode the ROM script
		 * addresses, the RAM script in the firmware is mandatory for
		 * device tree probing; without it the probe fails.
		 */
		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
					      &fw_name);
		if (ret) {
			dev_err(&pdev->dev, "failed to get firmware name\n");
			goto err_init;
		}

		ret = sdma_get_firmware(sdma, fw_name);
		if (ret) {
			dev_err(&pdev->dev, "failed to get firmware\n");
			goto err_init;
		}
	}
	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_control = sdma_control;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
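	/*
	 * Illustrative note: 65535 matches the 16-bit "count" field of a
	 * buffer descriptor (struct sdma_mode_count above), which is also why
	 * the prep routines reject scatterlist entries and periods larger
	 * than 0xffff bytes.
	 */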
	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	dev_info(sdma->dev, "initialized\n");

	return 0;

err_init:
	kfree(sdma->script_addrs);
err_alloc:
	free_irq(irq, sdma);
err_request_irq:
	iounmap(sdma->regs);
err_ioremap:
	clk_put(sdma->clk);
err_clk:
	release_mem_region(iores->start, resource_size(iores));
err_request_region:
err_irq:
	kfree(sdma);
	return ret;
}
static int __exit sdma_remove(struct platform_device *pdev)
{
	return -EBUSY;
}

static struct platform_driver sdma_driver = {
	.driver		= {
		.name	= "imx-sdma",
		.of_match_table = sdma_dt_ids,
	},
	.id_table	= sdma_devtypes,
	.remove		= __exit_p(sdma_remove),
};

static int __init sdma_module_init(void)
{
	return platform_driver_probe(&sdma_driver, sdma_probe);
}
module_init(sdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
MODULE_LICENSE("GPL");