mmc: sunxi: Document host init sequence
[cascardo/linux.git] drivers/mmc/host/sunxi-mmc.c
1 /*
2  * Driver for sunxi SD/MMC host controllers
3  * (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd.
4  * (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com>
5  * (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
6  * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
7  * (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License as
11  * published by the Free Software Foundation; either version 2 of
12  * the License, or (at your option) any later version.
13  */
14
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/io.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/delay.h>
21 #include <linux/err.h>
22
23 #include <linux/clk.h>
24 #include <linux/gpio.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock.h>
27 #include <linux/scatterlist.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/slab.h>
30 #include <linux/reset.h>
31
32 #include <linux/of_address.h>
33 #include <linux/of_gpio.h>
34 #include <linux/of_platform.h>
35
36 #include <linux/mmc/host.h>
37 #include <linux/mmc/sd.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/mmc.h>
40 #include <linux/mmc/core.h>
41 #include <linux/mmc/card.h>
42 #include <linux/mmc/slot-gpio.h>
43
44 /* register offset definitions */
45 #define SDXC_REG_GCTRL  (0x00) /* SMC Global Control Register */
46 #define SDXC_REG_CLKCR  (0x04) /* SMC Clock Control Register */
47 #define SDXC_REG_TMOUT  (0x08) /* SMC Time Out Register */
48 #define SDXC_REG_WIDTH  (0x0C) /* SMC Bus Width Register */
49 #define SDXC_REG_BLKSZ  (0x10) /* SMC Block Size Register */
50 #define SDXC_REG_BCNTR  (0x14) /* SMC Byte Count Register */
51 #define SDXC_REG_CMDR   (0x18) /* SMC Command Register */
52 #define SDXC_REG_CARG   (0x1C) /* SMC Argument Register */
53 #define SDXC_REG_RESP0  (0x20) /* SMC Response Register 0 */
54 #define SDXC_REG_RESP1  (0x24) /* SMC Response Register 1 */
55 #define SDXC_REG_RESP2  (0x28) /* SMC Response Register 2 */
56 #define SDXC_REG_RESP3  (0x2C) /* SMC Response Register 3 */
57 #define SDXC_REG_IMASK  (0x30) /* SMC Interrupt Mask Register */
58 #define SDXC_REG_MISTA  (0x34) /* SMC Masked Interrupt Status Register */
59 #define SDXC_REG_RINTR  (0x38) /* SMC Raw Interrupt Status Register */
60 #define SDXC_REG_STAS   (0x3C) /* SMC Status Register */
61 #define SDXC_REG_FTRGL  (0x40) /* SMC FIFO Threshold Watermark Register */
62 #define SDXC_REG_FUNS   (0x44) /* SMC Function Select Register */
63 #define SDXC_REG_CBCR   (0x48) /* SMC CIU Byte Count Register */
64 #define SDXC_REG_BBCR   (0x4C) /* SMC BIU Byte Count Register */
65 #define SDXC_REG_DBGC   (0x50) /* SMC Debug Enable Register */
66 #define SDXC_REG_HWRST  (0x78) /* SMC Card Hardware Reset Register */
67 #define SDXC_REG_DMAC   (0x80) /* SMC IDMAC Control Register */
68 #define SDXC_REG_DLBA   (0x84) /* SMC IDMAC Descriptor List Base Address */
69 #define SDXC_REG_IDST   (0x88) /* SMC IDMAC Status Register */
70 #define SDXC_REG_IDIE   (0x8C) /* SMC IDMAC Interrupt Enable Register */
71 #define SDXC_REG_CHDA   (0x90)
72 #define SDXC_REG_CBDA   (0x94)
73
74 #define mmc_readl(host, reg) \
75         readl((host)->reg_base + SDXC_##reg)
76 #define mmc_writel(host, reg, value) \
77         writel((value), (host)->reg_base + SDXC_##reg)
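/*
 * The ## pasting turns the short register tag into its SDXC_REG_* offset,
 * e.g. mmc_readl(host, REG_GCTRL) expands to
 * readl(host->reg_base + SDXC_REG_GCTRL).
 */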
78
79 /* global control register bits */
80 #define SDXC_SOFT_RESET                 BIT(0)
81 #define SDXC_FIFO_RESET                 BIT(1)
82 #define SDXC_DMA_RESET                  BIT(2)
83 #define SDXC_INTERRUPT_ENABLE_BIT       BIT(4)
84 #define SDXC_DMA_ENABLE_BIT             BIT(5)
85 #define SDXC_DEBOUNCE_ENABLE_BIT        BIT(8)
86 #define SDXC_POSEDGE_LATCH_DATA         BIT(9)
87 #define SDXC_DDR_MODE                   BIT(10)
88 #define SDXC_MEMORY_ACCESS_DONE         BIT(29)
89 #define SDXC_ACCESS_DONE_DIRECT         BIT(30)
90 #define SDXC_ACCESS_BY_AHB              BIT(31)
91 #define SDXC_ACCESS_BY_DMA              (0 << 31)
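/* Bit 31 of GCTRL picks who feeds the FIFO: 1 = CPU over AHB (PIO), 0 = the internal IDMAC */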
92 #define SDXC_HARDWARE_RESET \
93         (SDXC_SOFT_RESET | SDXC_FIFO_RESET | SDXC_DMA_RESET)
94
95 /* clock control bits */
96 #define SDXC_CARD_CLOCK_ON              BIT(16)
97 #define SDXC_LOW_POWER_ON               BIT(17)
98
99 /* bus width */
100 #define SDXC_WIDTH1                     0
101 #define SDXC_WIDTH4                     1
102 #define SDXC_WIDTH8                     2
103
104 /* smc command bits */
105 #define SDXC_RESP_EXPIRE                BIT(6)
106 #define SDXC_LONG_RESPONSE              BIT(7)
107 #define SDXC_CHECK_RESPONSE_CRC         BIT(8)
108 #define SDXC_DATA_EXPIRE                BIT(9)
109 #define SDXC_WRITE                      BIT(10)
110 #define SDXC_SEQUENCE_MODE              BIT(11)
111 #define SDXC_SEND_AUTO_STOP             BIT(12)
112 #define SDXC_WAIT_PRE_OVER              BIT(13)
113 #define SDXC_STOP_ABORT_CMD             BIT(14)
114 #define SDXC_SEND_INIT_SEQUENCE         BIT(15)
115 #define SDXC_UPCLK_ONLY                 BIT(21)
116 #define SDXC_READ_CEATA_DEV             BIT(22)
117 #define SDXC_CCS_EXPIRE                 BIT(23)
118 #define SDXC_ENABLE_BIT_BOOT            BIT(24)
119 #define SDXC_ALT_BOOT_OPTIONS           BIT(25)
120 #define SDXC_BOOT_ACK_EXPIRE            BIT(26)
121 #define SDXC_BOOT_ABORT                 BIT(27)
122 #define SDXC_VOLTAGE_SWITCH             BIT(28)
123 #define SDXC_USE_HOLD_REGISTER          BIT(29)
124 #define SDXC_START                      BIT(31)
125
126 /* interrupt bits */
127 #define SDXC_RESP_ERROR                 BIT(1)
128 #define SDXC_COMMAND_DONE               BIT(2)
129 #define SDXC_DATA_OVER                  BIT(3)
130 #define SDXC_TX_DATA_REQUEST            BIT(4)
131 #define SDXC_RX_DATA_REQUEST            BIT(5)
132 #define SDXC_RESP_CRC_ERROR             BIT(6)
133 #define SDXC_DATA_CRC_ERROR             BIT(7)
134 #define SDXC_RESP_TIMEOUT               BIT(8)
135 #define SDXC_DATA_TIMEOUT               BIT(9)
136 #define SDXC_VOLTAGE_CHANGE_DONE        BIT(10)
137 #define SDXC_FIFO_RUN_ERROR             BIT(11)
138 #define SDXC_HARD_WARE_LOCKED           BIT(12)
139 #define SDXC_START_BIT_ERROR            BIT(13)
140 #define SDXC_AUTO_COMMAND_DONE          BIT(14)
141 #define SDXC_END_BIT_ERROR              BIT(15)
142 #define SDXC_SDIO_INTERRUPT             BIT(16)
143 #define SDXC_CARD_INSERT                BIT(30)
144 #define SDXC_CARD_REMOVE                BIT(31)
145 #define SDXC_INTERRUPT_ERROR_BIT \
146         (SDXC_RESP_ERROR | SDXC_RESP_CRC_ERROR | SDXC_DATA_CRC_ERROR | \
147          SDXC_RESP_TIMEOUT | SDXC_DATA_TIMEOUT | SDXC_FIFO_RUN_ERROR | \
148          SDXC_HARD_WARE_LOCKED | SDXC_START_BIT_ERROR | SDXC_END_BIT_ERROR)
149 #define SDXC_INTERRUPT_DONE_BIT \
150         (SDXC_AUTO_COMMAND_DONE | SDXC_DATA_OVER | \
151          SDXC_COMMAND_DONE | SDXC_VOLTAGE_CHANGE_DONE)
152
153 /* status */
154 #define SDXC_RXWL_FLAG                  BIT(0)
155 #define SDXC_TXWL_FLAG                  BIT(1)
156 #define SDXC_FIFO_EMPTY                 BIT(2)
157 #define SDXC_FIFO_FULL                  BIT(3)
158 #define SDXC_CARD_PRESENT               BIT(8)
159 #define SDXC_CARD_DATA_BUSY             BIT(9)
160 #define SDXC_DATA_FSM_BUSY              BIT(10)
161 #define SDXC_DMA_REQUEST                BIT(31)
162 #define SDXC_FIFO_SIZE                  16
163
164 /* Function select */
165 #define SDXC_CEATA_ON                   (0xceaa << 16)
166 #define SDXC_SEND_IRQ_RESPONSE          BIT(0)
167 #define SDXC_SDIO_READ_WAIT             BIT(1)
168 #define SDXC_ABORT_READ_DATA            BIT(2)
169 #define SDXC_SEND_CCSD                  BIT(8)
170 #define SDXC_SEND_AUTO_STOPCCSD         BIT(9)
171 #define SDXC_CEATA_DEV_IRQ_ENABLE       BIT(10)
172
173 /* IDMA controller bus mode bit field */
174 #define SDXC_IDMAC_SOFT_RESET           BIT(0)
175 #define SDXC_IDMAC_FIX_BURST            BIT(1)
176 #define SDXC_IDMAC_IDMA_ON              BIT(7)
177 #define SDXC_IDMAC_REFETCH_DES          BIT(31)
178
179 /* IDMA status bit field */
180 #define SDXC_IDMAC_TRANSMIT_INTERRUPT           BIT(0)
181 #define SDXC_IDMAC_RECEIVE_INTERRUPT            BIT(1)
182 #define SDXC_IDMAC_FATAL_BUS_ERROR              BIT(2)
183 #define SDXC_IDMAC_DESTINATION_INVALID          BIT(4)
184 #define SDXC_IDMAC_CARD_ERROR_SUM               BIT(5)
185 #define SDXC_IDMAC_NORMAL_INTERRUPT_SUM         BIT(8)
186 #define SDXC_IDMAC_ABNORMAL_INTERRUPT_SUM       BIT(9)
187 #define SDXC_IDMAC_HOST_ABORT_INTERRUPT         BIT(10)
188 #define SDXC_IDMAC_IDLE                         (0 << 13)
189 #define SDXC_IDMAC_SUSPEND                      (1 << 13)
190 #define SDXC_IDMAC_DESC_READ                    (2 << 13)
191 #define SDXC_IDMAC_DESC_CHECK                   (3 << 13)
192 #define SDXC_IDMAC_READ_REQUEST_WAIT            (4 << 13)
193 #define SDXC_IDMAC_WRITE_REQUEST_WAIT           (5 << 13)
194 #define SDXC_IDMAC_READ                         (6 << 13)
195 #define SDXC_IDMAC_WRITE                        (7 << 13)
196 #define SDXC_IDMAC_DESC_CLOSE                   (8 << 13)
197
198 /*
199  * If the idma-des-size-bits property is e.g. 13, the bufsize bits are:
200  *  Bits  0-12: buf1 size
201  *  Bits 13-25: buf2 size
202  *  Bits 26-31: not used
203  * Since we only ever set buf1 size, we can simply store it directly.
204  */
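/* For example, with idma-des-size-bits = 13 (sun4i) one descriptor covers at most 8 KiB. */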
205 #define SDXC_IDMAC_DES0_DIC     BIT(1)  /* disable interrupt on completion */
206 #define SDXC_IDMAC_DES0_LD      BIT(2)  /* last descriptor */
207 #define SDXC_IDMAC_DES0_FD      BIT(3)  /* first descriptor */
208 #define SDXC_IDMAC_DES0_CH      BIT(4)  /* chain mode */
209 #define SDXC_IDMAC_DES0_ER      BIT(5)  /* end of ring */
210 #define SDXC_IDMAC_DES0_CES     BIT(30) /* card error summary */
211 #define SDXC_IDMAC_DES0_OWN     BIT(31) /* 1-idma owns it, 0-host owns it */
212
213 #define SDXC_CLK_400K           0
214 #define SDXC_CLK_25M            1
215 #define SDXC_CLK_50M            2
216 #define SDXC_CLK_50M_DDR        3
217
218 struct sunxi_mmc_clk_delay {
219         u32 output;
220         u32 sample;
221 };
222
223 struct sunxi_idma_des {
224         u32     config;
225         u32     buf_size;
226         u32     buf_addr_ptr1;
227         u32     buf_addr_ptr2;
228 };
229
230 struct sunxi_mmc_host {
231         struct mmc_host *mmc;
232         struct reset_control *reset;
233
234         /* IO mapping base */
235         void __iomem    *reg_base;
236
237         /* clock management */
238         struct clk      *clk_ahb;
239         struct clk      *clk_mmc;
240         struct clk      *clk_sample;
241         struct clk      *clk_output;
242         const struct sunxi_mmc_clk_delay *clk_delays;
243
244         /* irq */
245         spinlock_t      lock;
246         int             irq;
247         u32             int_sum;
248         u32             sdio_imask;
249
250         /* dma */
251         u32             idma_des_size_bits;
252         dma_addr_t      sg_dma;
253         void            *sg_cpu;
254         bool            wait_dma;
255
256         struct mmc_request *mrq;
257         struct mmc_request *manual_stop_mrq;
258         int             ferror;
259 };
260
261 static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host)
262 {
263         unsigned long expire = jiffies + msecs_to_jiffies(250);
264         u32 rval;
265
266         mmc_writel(host, REG_GCTRL, SDXC_HARDWARE_RESET);
267         do {
268                 rval = mmc_readl(host, REG_GCTRL);
269         } while (time_before(jiffies, expire) && (rval & SDXC_HARDWARE_RESET));
270
271         if (rval & SDXC_HARDWARE_RESET) {
272                 dev_err(mmc_dev(host->mmc), "fatal err reset timeout\n");
273                 return -EIO;
274         }
275
276         return 0;
277 }
278
279 static int sunxi_mmc_init_host(struct mmc_host *mmc)
280 {
281         u32 rval;
282         struct sunxi_mmc_host *host = mmc_priv(mmc);
283
284         if (sunxi_mmc_reset_host(host))
285                 return -EIO;
286
287         /*
288          * Burst 8 transfers, RX trigger level: 7, TX trigger level: 8
289          *
290          * TODO: sun9i has a larger FIFO and supports higher trigger values
291          */
292         mmc_writel(host, REG_FTRGL, 0x20070008);
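        /*
         * 0x20070008 presumably follows a dw_mmc-style FIFOTH layout:
         * burst-size code 2 in bits 30:28 (8 transfers), RX watermark 7 in
         * bits 27:16 and TX watermark 8 in bits 11:0, matching the summary
         * in the comment above.
         */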
293         /* Maximum timeout value */
294         mmc_writel(host, REG_TMOUT, 0xffffffff);
295         /* Unmask SDIO interrupt if needed */
296         mmc_writel(host, REG_IMASK, host->sdio_imask);
297         /* Clear all pending interrupts */
298         mmc_writel(host, REG_RINTR, 0xffffffff);
299         /* Debug register? undocumented */
300         mmc_writel(host, REG_DBGC, 0xdeb);
301         /* Enable CEATA support */
302         mmc_writel(host, REG_FUNS, SDXC_CEATA_ON);
303         /* Set DMA descriptor list base address */
304         mmc_writel(host, REG_DLBA, host->sg_dma);
305
306         rval = mmc_readl(host, REG_GCTRL);
307         rval |= SDXC_INTERRUPT_ENABLE_BIT;
308         /* Undocumented, but found in Allwinner code */
309         rval &= ~SDXC_ACCESS_DONE_DIRECT;
310         mmc_writel(host, REG_GCTRL, rval);
311
312         return 0;
313 }
314
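/*
 * For a three-entry scatterlist the function below builds a chained
 * descriptor list roughly like this (buf_addr_ptr2 holds the bus address
 * of the next descriptor in chain mode):
 *
 *   pdes[0]: FD | CH | OWN | DIC   buf1 = sg[0]   next = &pdes[1]
 *   pdes[1]:      CH | OWN | DIC   buf1 = sg[1]   next = &pdes[2]
 *   pdes[2]: LD | ER | CH | OWN    buf1 = sg[2]   next = 0
 */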
315 static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
316                                     struct mmc_data *data)
317 {
318         struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu;
319         dma_addr_t next_desc = host->sg_dma;
320         int i, max_len = (1 << host->idma_des_size_bits);
321
322         for (i = 0; i < data->sg_len; i++) {
323                 pdes[i].config = SDXC_IDMAC_DES0_CH | SDXC_IDMAC_DES0_OWN |
324                                  SDXC_IDMAC_DES0_DIC;
325
326                 if (data->sg[i].length == max_len)
327                         pdes[i].buf_size = 0; /* 0 == max_len */
328                 else
329                         pdes[i].buf_size = data->sg[i].length;
330
331                 next_desc += sizeof(struct sunxi_idma_des);
332                 pdes[i].buf_addr_ptr1 = sg_dma_address(&data->sg[i]);
333                 pdes[i].buf_addr_ptr2 = (u32)next_desc;
334         }
335
336         pdes[0].config |= SDXC_IDMAC_DES0_FD;
337         pdes[i - 1].config |= SDXC_IDMAC_DES0_LD | SDXC_IDMAC_DES0_ER;
338         pdes[i - 1].config &= ~SDXC_IDMAC_DES0_DIC;
339         pdes[i - 1].buf_addr_ptr2 = 0;
340
341         /*
342          * Make sure the descriptor writes have reached main memory before
343          * the MMIO write that starts the IDMAC (in sunxi_mmc_start_dma).
344          */
345         wmb();
346 }
347
348 static enum dma_data_direction sunxi_mmc_get_dma_dir(struct mmc_data *data)
349 {
350         if (data->flags & MMC_DATA_WRITE)
351                 return DMA_TO_DEVICE;
352         else
353                 return DMA_FROM_DEVICE;
354 }
355
356 static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host,
357                              struct mmc_data *data)
358 {
359         u32 i, dma_len;
360         struct scatterlist *sg;
361
362         dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
363                              sunxi_mmc_get_dma_dir(data));
364         if (dma_len == 0) {
365                 dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
366                 return -ENOMEM;
367         }
368
369         for_each_sg(data->sg, sg, data->sg_len, i) {
370                 if (sg->offset & 3 || sg->length & 3) {
371                         dev_err(mmc_dev(host->mmc),
372                                 "unaligned scatterlist: offset %x length %d\n",
373                                 sg->offset, sg->length);
374                         return -EINVAL;
375                 }
376         }
377
378         return 0;
379 }
380
381 static void sunxi_mmc_start_dma(struct sunxi_mmc_host *host,
382                                 struct mmc_data *data)
383 {
384         u32 rval;
385
386         sunxi_mmc_init_idma_des(host, data);
387
388         rval = mmc_readl(host, REG_GCTRL);
389         rval |= SDXC_DMA_ENABLE_BIT;
390         mmc_writel(host, REG_GCTRL, rval);
391         rval |= SDXC_DMA_RESET;
392         mmc_writel(host, REG_GCTRL, rval);
393
394         mmc_writel(host, REG_DMAC, SDXC_IDMAC_SOFT_RESET);
395
396         if (!(data->flags & MMC_DATA_WRITE))
397                 mmc_writel(host, REG_IDIE, SDXC_IDMAC_RECEIVE_INTERRUPT);
398
399         mmc_writel(host, REG_DMAC,
400                    SDXC_IDMAC_FIX_BURST | SDXC_IDMAC_IDMA_ON);
401 }
402
403 static void sunxi_mmc_send_manual_stop(struct sunxi_mmc_host *host,
404                                        struct mmc_request *req)
405 {
406         u32 arg, cmd_val, ri;
407         unsigned long expire = jiffies + msecs_to_jiffies(1000);
408
409         cmd_val = SDXC_START | SDXC_RESP_EXPIRE |
410                   SDXC_STOP_ABORT_CMD | SDXC_CHECK_RESPONSE_CRC;
411
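        /*
         * An interrupted CMD53 has to be stopped with a CMD52 write to the
         * CCCR ABORT register of function 0, carrying the aborted function
         * number (bits 30:28 of the original CMD53 argument) as data; any
         * other transfer is stopped with a plain CMD12.
         */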
412         if (req->cmd->opcode == SD_IO_RW_EXTENDED) {
413                 cmd_val |= SD_IO_RW_DIRECT;
414                 arg = (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
415                       ((req->cmd->arg >> 28) & 0x7);
416         } else {
417                 cmd_val |= MMC_STOP_TRANSMISSION;
418                 arg = 0;
419         }
420
421         mmc_writel(host, REG_CARG, arg);
422         mmc_writel(host, REG_CMDR, cmd_val);
423
424         do {
425                 ri = mmc_readl(host, REG_RINTR);
426         } while (!(ri & (SDXC_COMMAND_DONE | SDXC_INTERRUPT_ERROR_BIT)) &&
427                  time_before(jiffies, expire));
428
429         if (!(ri & SDXC_COMMAND_DONE) || (ri & SDXC_INTERRUPT_ERROR_BIT)) {
430                 dev_err(mmc_dev(host->mmc), "send stop command failed\n");
431                 if (req->stop)
432                         req->stop->resp[0] = -ETIMEDOUT;
433         } else {
434                 if (req->stop)
435                         req->stop->resp[0] = mmc_readl(host, REG_RESP0);
436         }
437
438         mmc_writel(host, REG_RINTR, 0xffff);
439 }
440
441 static void sunxi_mmc_dump_errinfo(struct sunxi_mmc_host *host)
442 {
443         struct mmc_command *cmd = host->mrq->cmd;
444         struct mmc_data *data = host->mrq->data;
445
446         /* For some commands a response timeout is normal with SD/MMC cards */
447         if ((host->int_sum & SDXC_INTERRUPT_ERROR_BIT) ==
448                 SDXC_RESP_TIMEOUT && (cmd->opcode == SD_IO_SEND_OP_COND ||
449                                       cmd->opcode == SD_IO_RW_DIRECT))
450                 return;
451
452         dev_err(mmc_dev(host->mmc),
453                 "smc %d err, cmd %d,%s%s%s%s%s%s%s%s%s%s !!\n",
454                 host->mmc->index, cmd->opcode,
455                 data ? (data->flags & MMC_DATA_WRITE ? " WR" : " RD") : "",
456                 host->int_sum & SDXC_RESP_ERROR     ? " RE"     : "",
457                 host->int_sum & SDXC_RESP_CRC_ERROR  ? " RCE"    : "",
458                 host->int_sum & SDXC_DATA_CRC_ERROR  ? " DCE"    : "",
459                 host->int_sum & SDXC_RESP_TIMEOUT ? " RTO"    : "",
460                 host->int_sum & SDXC_DATA_TIMEOUT ? " DTO"    : "",
461                 host->int_sum & SDXC_FIFO_RUN_ERROR  ? " FE"     : "",
462                 host->int_sum & SDXC_HARD_WARE_LOCKED ? " HL"     : "",
463                 host->int_sum & SDXC_START_BIT_ERROR ? " SBE"    : "",
464                 host->int_sum & SDXC_END_BIT_ERROR   ? " EBE"    : ""
465                 );
466 }
467
468 /* Called in interrupt context! */
469 static irqreturn_t sunxi_mmc_finalize_request(struct sunxi_mmc_host *host)
470 {
471         struct mmc_request *mrq = host->mrq;
472         struct mmc_data *data = mrq->data;
473         u32 rval;
474
475         mmc_writel(host, REG_IMASK, host->sdio_imask);
476         mmc_writel(host, REG_IDIE, 0);
477
478         if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT) {
479                 sunxi_mmc_dump_errinfo(host);
480                 mrq->cmd->error = -ETIMEDOUT;
481
482                 if (data) {
483                         data->error = -ETIMEDOUT;
484                         host->manual_stop_mrq = mrq;
485                 }
486
487                 if (mrq->stop)
488                         mrq->stop->error = -ETIMEDOUT;
489         } else {
490                 if (mrq->cmd->flags & MMC_RSP_136) {
491                         mrq->cmd->resp[0] = mmc_readl(host, REG_RESP3);
492                         mrq->cmd->resp[1] = mmc_readl(host, REG_RESP2);
493                         mrq->cmd->resp[2] = mmc_readl(host, REG_RESP1);
494                         mrq->cmd->resp[3] = mmc_readl(host, REG_RESP0);
495                 } else {
496                         mrq->cmd->resp[0] = mmc_readl(host, REG_RESP0);
497                 }
498
499                 if (data)
500                         data->bytes_xfered = data->blocks * data->blksz;
501         }
502
503         if (data) {
504                 mmc_writel(host, REG_IDST, 0x337);
505                 mmc_writel(host, REG_DMAC, 0);
506                 rval = mmc_readl(host, REG_GCTRL);
507                 rval |= SDXC_DMA_RESET;
508                 mmc_writel(host, REG_GCTRL, rval);
509                 rval &= ~SDXC_DMA_ENABLE_BIT;
510                 mmc_writel(host, REG_GCTRL, rval);
511                 rval |= SDXC_FIFO_RESET;
512                 mmc_writel(host, REG_GCTRL, rval);
513                 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
514                                      sunxi_mmc_get_dma_dir(data));
515         }
516
517         mmc_writel(host, REG_RINTR, 0xffff);
518
519         host->mrq = NULL;
520         host->int_sum = 0;
521         host->wait_dma = false;
522
523         return host->manual_stop_mrq ? IRQ_WAKE_THREAD : IRQ_HANDLED;
524 }
525
526 static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id)
527 {
528         struct sunxi_mmc_host *host = dev_id;
529         struct mmc_request *mrq;
530         u32 msk_int, idma_int;
531         bool finalize = false;
532         bool sdio_int = false;
533         irqreturn_t ret = IRQ_HANDLED;
534
535         spin_lock(&host->lock);
536
537         idma_int  = mmc_readl(host, REG_IDST);
538         msk_int   = mmc_readl(host, REG_MISTA);
539
540         dev_dbg(mmc_dev(host->mmc), "irq: rq %p mi %08x idi %08x\n",
541                 host->mrq, msk_int, idma_int);
542
543         mrq = host->mrq;
544         if (mrq) {
545                 if (idma_int & SDXC_IDMAC_RECEIVE_INTERRUPT)
546                         host->wait_dma = false;
547
548                 host->int_sum |= msk_int;
549
550                 /* Wait for COMMAND_DONE on RESPONSE_TIMEOUT before finalize */
551                 if ((host->int_sum & SDXC_RESP_TIMEOUT) &&
552                                 !(host->int_sum & SDXC_COMMAND_DONE))
553                         mmc_writel(host, REG_IMASK,
554                                    host->sdio_imask | SDXC_COMMAND_DONE);
555                 /* Don't wait for dma on error */
556                 else if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT)
557                         finalize = true;
558                 else if ((host->int_sum & SDXC_INTERRUPT_DONE_BIT) &&
559                                 !host->wait_dma)
560                         finalize = true;
561         }
562
563         if (msk_int & SDXC_SDIO_INTERRUPT)
564                 sdio_int = true;
565
566         mmc_writel(host, REG_RINTR, msk_int);
567         mmc_writel(host, REG_IDST, idma_int);
568
569         if (finalize)
570                 ret = sunxi_mmc_finalize_request(host);
571
572         spin_unlock(&host->lock);
573
574         if (finalize && ret == IRQ_HANDLED)
575                 mmc_request_done(host->mmc, mrq);
576
577         if (sdio_int)
578                 mmc_signal_sdio_irq(host->mmc);
579
580         return ret;
581 }
582
583 static irqreturn_t sunxi_mmc_handle_manual_stop(int irq, void *dev_id)
584 {
585         struct sunxi_mmc_host *host = dev_id;
586         struct mmc_request *mrq;
587         unsigned long iflags;
588
589         spin_lock_irqsave(&host->lock, iflags);
590         mrq = host->manual_stop_mrq;
591         spin_unlock_irqrestore(&host->lock, iflags);
592
593         if (!mrq) {
594                 dev_err(mmc_dev(host->mmc), "no request for manual stop\n");
595                 return IRQ_HANDLED;
596         }
597
598         dev_err(mmc_dev(host->mmc), "data error, sending stop command\n");
599
600         /*
601          * We will never have more than one outstanding request,
602          * and we do not complete the request until after
603          * we've cleared host->manual_stop_mrq so we do not need to
604          * spin lock this function.
605          * Additionally we have wait states within this function
606          * so having it in a lock is a very bad idea.
607          */
608         sunxi_mmc_send_manual_stop(host, mrq);
609
610         spin_lock_irqsave(&host->lock, iflags);
611         host->manual_stop_mrq = NULL;
612         spin_unlock_irqrestore(&host->lock, iflags);
613
614         mmc_request_done(host->mmc, mrq);
615
616         return IRQ_HANDLED;
617 }
618
619 static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
620 {
621         unsigned long expire = jiffies + msecs_to_jiffies(750);
622         u32 rval;
623
624         rval = mmc_readl(host, REG_CLKCR);
625         rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON);
626
627         if (oclk_en)
628                 rval |= SDXC_CARD_CLOCK_ON;
629
630         mmc_writel(host, REG_CLKCR, rval);
631
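        /*
         * A command with SDXC_UPCLK_ONLY set is handled internally by the
         * controller (nothing goes out on the bus); it merely latches the
         * new CLKCR settings, so we only poll for the START bit to clear.
         */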
632         rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER;
633         mmc_writel(host, REG_CMDR, rval);
634
635         do {
636                 rval = mmc_readl(host, REG_CMDR);
637         } while (time_before(jiffies, expire) && (rval & SDXC_START));
638
639         /* clear irq status bits set by the command */
640         mmc_writel(host, REG_RINTR,
641                    mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT);
642
643         if (rval & SDXC_START) {
644                 dev_err(mmc_dev(host->mmc), "fatal err update clk timeout\n");
645                 return -EIO;
646         }
647
648         return 0;
649 }
650
651 static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
652                                   struct mmc_ios *ios)
653 {
654         u32 rate, oclk_dly, rval, sclk_dly;
655         int ret;
656
657         rate = clk_round_rate(host->clk_mmc, ios->clock);
658         dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %d\n",
659                 ios->clock, rate);
660
661         /* setting clock rate */
662         ret = clk_set_rate(host->clk_mmc, rate);
663         if (ret) {
664                 dev_err(mmc_dev(host->mmc), "error setting clk to %d: %d\n",
665                         rate, ret);
666                 return ret;
667         }
668
669         ret = sunxi_mmc_oclk_onoff(host, 0);
670         if (ret)
671                 return ret;
672
673         /* clear internal divider */
674         rval = mmc_readl(host, REG_CLKCR);
675         rval &= ~0xff;
676         mmc_writel(host, REG_CLKCR, rval);
677
678         /* determine delays */
679         if (rate <= 400000) {
680                 oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
681                 sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
682         } else if (rate <= 25000000) {
683                 oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
684                 sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
685         } else if (rate <= 50000000) {
686                 if (ios->timing == MMC_TIMING_UHS_DDR50) {
687                         oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
688                         sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
689                 } else {
690                         oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
691                         sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
692                 }
693         } else {
694                 return -EINVAL;
695         }
696
697         clk_set_phase(host->clk_sample, sclk_dly);
698         clk_set_phase(host->clk_output, oclk_dly);
699
700         return sunxi_mmc_oclk_onoff(host, 1);
701 }
702
703 static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
704 {
705         struct sunxi_mmc_host *host = mmc_priv(mmc);
706         u32 rval;
707
708         /* Set the power state */
709         switch (ios->power_mode) {
710         case MMC_POWER_ON:
711                 break;
712
713         case MMC_POWER_UP:
714                 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
715
716                 host->ferror = sunxi_mmc_init_host(mmc);
717                 if (host->ferror)
718                         return;
719
720                 dev_dbg(mmc_dev(mmc), "power on!\n");
721                 break;
722
723         case MMC_POWER_OFF:
724                 dev_dbg(mmc_dev(mmc), "power off!\n");
725                 sunxi_mmc_reset_host(host);
726                 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
727                 break;
728         }
729
730         /* set bus width */
731         switch (ios->bus_width) {
732         case MMC_BUS_WIDTH_1:
733                 mmc_writel(host, REG_WIDTH, SDXC_WIDTH1);
734                 break;
735         case MMC_BUS_WIDTH_4:
736                 mmc_writel(host, REG_WIDTH, SDXC_WIDTH4);
737                 break;
738         case MMC_BUS_WIDTH_8:
739                 mmc_writel(host, REG_WIDTH, SDXC_WIDTH8);
740                 break;
741         }
742
743         /* set ddr mode */
744         rval = mmc_readl(host, REG_GCTRL);
745         if (ios->timing == MMC_TIMING_UHS_DDR50)
746                 rval |= SDXC_DDR_MODE;
747         else
748                 rval &= ~SDXC_DDR_MODE;
749         mmc_writel(host, REG_GCTRL, rval);
750
751         /* set up clock */
752         if (ios->clock && ios->power_mode) {
753                 host->ferror = sunxi_mmc_clk_set_rate(host, ios);
754                 /* Android code had a usleep_range(50000, 55000); here */
755         }
756 }
757
758 static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
759 {
760         struct sunxi_mmc_host *host = mmc_priv(mmc);
761         unsigned long flags;
762         u32 imask;
763
764         spin_lock_irqsave(&host->lock, flags);
765
766         imask = mmc_readl(host, REG_IMASK);
767         if (enable) {
768                 host->sdio_imask = SDXC_SDIO_INTERRUPT;
769                 imask |= SDXC_SDIO_INTERRUPT;
770         } else {
771                 host->sdio_imask = 0;
772                 imask &= ~SDXC_SDIO_INTERRUPT;
773         }
774         mmc_writel(host, REG_IMASK, imask);
775         spin_unlock_irqrestore(&host->lock, flags);
776 }
777
778 static void sunxi_mmc_hw_reset(struct mmc_host *mmc)
779 {
780         struct sunxi_mmc_host *host = mmc_priv(mmc);
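        /*
         * Toggle the card's hardware reset line: writing 0 is assumed to
         * assert RST_n and 1 to release it.  The 10 us pulse and 300 us
         * recovery delay stay above the eMMC minimums of roughly 1 us and
         * 200 us respectively.
         */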
781         mmc_writel(host, REG_HWRST, 0);
782         udelay(10);
783         mmc_writel(host, REG_HWRST, 1);
784         udelay(300);
785 }
786
787 static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
788 {
789         struct sunxi_mmc_host *host = mmc_priv(mmc);
790         struct mmc_command *cmd = mrq->cmd;
791         struct mmc_data *data = mrq->data;
792         unsigned long iflags;
793         u32 imask = SDXC_INTERRUPT_ERROR_BIT;
794         u32 cmd_val = SDXC_START | (cmd->opcode & 0x3f);
795         bool wait_dma = host->wait_dma;
796         int ret;
797
798         /* Check for set_ios errors (should never happen) */
799         if (host->ferror) {
800                 mrq->cmd->error = host->ferror;
801                 mmc_request_done(mmc, mrq);
802                 return;
803         }
804
805         if (data) {
806                 ret = sunxi_mmc_map_dma(host, data);
807                 if (ret < 0) {
808                         dev_err(mmc_dev(mmc), "map DMA failed\n");
809                         cmd->error = ret;
810                         data->error = ret;
811                         mmc_request_done(mmc, mrq);
812                         return;
813                 }
814         }
815
816         if (cmd->opcode == MMC_GO_IDLE_STATE) {
817                 cmd_val |= SDXC_SEND_INIT_SEQUENCE;
818                 imask |= SDXC_COMMAND_DONE;
819         }
820
821         if (cmd->flags & MMC_RSP_PRESENT) {
822                 cmd_val |= SDXC_RESP_EXPIRE;
823                 if (cmd->flags & MMC_RSP_136)
824                         cmd_val |= SDXC_LONG_RESPONSE;
825                 if (cmd->flags & MMC_RSP_CRC)
826                         cmd_val |= SDXC_CHECK_RESPONSE_CRC;
827
828                 if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) {
829                         cmd_val |= SDXC_DATA_EXPIRE | SDXC_WAIT_PRE_OVER;
830                         if (cmd->data->flags & MMC_DATA_STREAM) {
831                                 imask |= SDXC_AUTO_COMMAND_DONE;
832                                 cmd_val |= SDXC_SEQUENCE_MODE |
833                                            SDXC_SEND_AUTO_STOP;
834                         }
835
836                         if (cmd->data->stop) {
837                                 imask |= SDXC_AUTO_COMMAND_DONE;
838                                 cmd_val |= SDXC_SEND_AUTO_STOP;
839                         } else {
840                                 imask |= SDXC_DATA_OVER;
841                         }
842
843                         if (cmd->data->flags & MMC_DATA_WRITE)
844                                 cmd_val |= SDXC_WRITE;
845                         else
846                                 wait_dma = true;
847                 } else {
848                         imask |= SDXC_COMMAND_DONE;
849                 }
850         } else {
851                 imask |= SDXC_COMMAND_DONE;
852         }
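        /*
         * Example: a single-block read (CMD17, R1 response, no stop command)
         * ends up with cmd_val = SDXC_START | 17 | SDXC_RESP_EXPIRE |
         * SDXC_CHECK_RESPONSE_CRC | SDXC_DATA_EXPIRE | SDXC_WAIT_PRE_OVER,
         * an imask covering the error bits plus SDXC_DATA_OVER, and wait_dma
         * set so completion also waits for the IDMAC receive interrupt.
         */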
853
854         dev_dbg(mmc_dev(mmc), "cmd %d(%08x) arg %x ie 0x%08x len %d\n",
855                 cmd_val & 0x3f, cmd_val, cmd->arg, imask,
856                 mrq->data ? mrq->data->blksz * mrq->data->blocks : 0);
857
858         spin_lock_irqsave(&host->lock, iflags);
859
860         if (host->mrq || host->manual_stop_mrq) {
861                 spin_unlock_irqrestore(&host->lock, iflags);
862
863                 if (data)
864                         dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
865                                      sunxi_mmc_get_dma_dir(data));
866
867                 dev_err(mmc_dev(mmc), "request already pending\n");
868                 mrq->cmd->error = -EBUSY;
869                 mmc_request_done(mmc, mrq);
870                 return;
871         }
872
873         if (data) {
874                 mmc_writel(host, REG_BLKSZ, data->blksz);
875                 mmc_writel(host, REG_BCNTR, data->blksz * data->blocks);
876                 sunxi_mmc_start_dma(host, data);
877         }
878
879         host->mrq = mrq;
880         host->wait_dma = wait_dma;
881         mmc_writel(host, REG_IMASK, host->sdio_imask | imask);
882         mmc_writel(host, REG_CARG, cmd->arg);
883         mmc_writel(host, REG_CMDR, cmd_val);
884
885         spin_unlock_irqrestore(&host->lock, iflags);
886 }
887
888 static int sunxi_mmc_card_busy(struct mmc_host *mmc)
889 {
890         struct sunxi_mmc_host *host = mmc_priv(mmc);
891
892         return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY);
893 }
894
895 static const struct of_device_id sunxi_mmc_of_match[] = {
896         { .compatible = "allwinner,sun4i-a10-mmc", },
897         { .compatible = "allwinner,sun5i-a13-mmc", },
898         { .compatible = "allwinner,sun9i-a80-mmc", },
899         { /* sentinel */ }
900 };
901 MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
902
903 static struct mmc_host_ops sunxi_mmc_ops = {
904         .request         = sunxi_mmc_request,
905         .set_ios         = sunxi_mmc_set_ios,
906         .get_ro          = mmc_gpio_get_ro,
907         .get_cd          = mmc_gpio_get_cd,
908         .enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
909         .hw_reset        = sunxi_mmc_hw_reset,
910         .card_busy       = sunxi_mmc_card_busy,
911 };
912
913 static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
914         [SDXC_CLK_400K]         = { .output = 180, .sample = 180 },
915         [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
916         [SDXC_CLK_50M]          = { .output =  90, .sample = 120 },
917         [SDXC_CLK_50M_DDR]      = { .output =  60, .sample = 120 },
918 };
919
920 static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
921         [SDXC_CLK_400K]         = { .output = 180, .sample = 180 },
922         [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
923         [SDXC_CLK_50M]          = { .output = 150, .sample = 120 },
924         [SDXC_CLK_50M_DDR]      = { .output =  90, .sample = 120 },
925 };
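/*
 * The delay values above are phase shifts in degrees; sunxi_mmc_clk_set_rate()
 * hands them to clk_set_phase() on the "sample" and "output" clocks.
 */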
926
927 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
928                                       struct platform_device *pdev)
929 {
930         struct device_node *np = pdev->dev.of_node;
931         int ret;
932
933         if (of_device_is_compatible(np, "allwinner,sun4i-a10-mmc"))
934                 host->idma_des_size_bits = 13;
935         else
936                 host->idma_des_size_bits = 16;
937
938         if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
939                 host->clk_delays = sun9i_mmc_clk_delays;
940         else
941                 host->clk_delays = sunxi_mmc_clk_delays;
942
943         ret = mmc_regulator_get_supply(host->mmc);
944         if (ret) {
945                 if (ret != -EPROBE_DEFER)
946                         dev_err(&pdev->dev, "Could not get vmmc supply\n");
947                 return ret;
948         }
949
950         host->reg_base = devm_ioremap_resource(&pdev->dev,
951                               platform_get_resource(pdev, IORESOURCE_MEM, 0));
952         if (IS_ERR(host->reg_base))
953                 return PTR_ERR(host->reg_base);
954
955         host->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
956         if (IS_ERR(host->clk_ahb)) {
957                 dev_err(&pdev->dev, "Could not get ahb clock\n");
958                 return PTR_ERR(host->clk_ahb);
959         }
960
961         host->clk_mmc = devm_clk_get(&pdev->dev, "mmc");
962         if (IS_ERR(host->clk_mmc)) {
963                 dev_err(&pdev->dev, "Could not get mmc clock\n");
964                 return PTR_ERR(host->clk_mmc);
965         }
966
967         host->clk_output = devm_clk_get(&pdev->dev, "output");
968         if (IS_ERR(host->clk_output)) {
969                 dev_err(&pdev->dev, "Could not get output clock\n");
970                 return PTR_ERR(host->clk_output);
971         }
972
973         host->clk_sample = devm_clk_get(&pdev->dev, "sample");
974         if (IS_ERR(host->clk_sample)) {
975                 dev_err(&pdev->dev, "Could not get sample clock\n");
976                 return PTR_ERR(host->clk_sample);
977         }
978
979         host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
980         if (PTR_ERR(host->reset) == -EPROBE_DEFER)
981                 return PTR_ERR(host->reset);
982
983         ret = clk_prepare_enable(host->clk_ahb);
984         if (ret) {
985                 dev_err(&pdev->dev, "Enable ahb clk err %d\n", ret);
986                 return ret;
987         }
988
989         ret = clk_prepare_enable(host->clk_mmc);
990         if (ret) {
991                 dev_err(&pdev->dev, "Enable mmc clk err %d\n", ret);
992                 goto error_disable_clk_ahb;
993         }
994
995         ret = clk_prepare_enable(host->clk_output);
996         if (ret) {
997                 dev_err(&pdev->dev, "Enable output clk err %d\n", ret);
998                 goto error_disable_clk_mmc;
999         }
1000
1001         ret = clk_prepare_enable(host->clk_sample);
1002         if (ret) {
1003                 dev_err(&pdev->dev, "Enable sample clk err %d\n", ret);
1004                 goto error_disable_clk_output;
1005         }
1006
1007         if (!IS_ERR(host->reset)) {
1008                 ret = reset_control_deassert(host->reset);
1009                 if (ret) {
1010                         dev_err(&pdev->dev, "reset err %d\n", ret);
1011                         goto error_disable_clk_sample;
1012                 }
1013         }
1014
1015         /*
1016          * Sometimes the controller asserts the irq on boot for some reason,
1017          * so make sure the controller is in a sane state before enabling irqs.
1018          */
1019         ret = sunxi_mmc_reset_host(host);
1020         if (ret)
1021                 goto error_assert_reset;
1022
1023         host->irq = platform_get_irq(pdev, 0);
1024         return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq,
1025                         sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host);
1026
1027 error_assert_reset:
1028         if (!IS_ERR(host->reset))
1029                 reset_control_assert(host->reset);
1030 error_disable_clk_sample:
1031         clk_disable_unprepare(host->clk_sample);
1032 error_disable_clk_output:
1033         clk_disable_unprepare(host->clk_output);
1034 error_disable_clk_mmc:
1035         clk_disable_unprepare(host->clk_mmc);
1036 error_disable_clk_ahb:
1037         clk_disable_unprepare(host->clk_ahb);
1038         return ret;
1039 }
1040
1041 static int sunxi_mmc_probe(struct platform_device *pdev)
1042 {
1043         struct sunxi_mmc_host *host;
1044         struct mmc_host *mmc;
1045         int ret;
1046
1047         mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev);
1048         if (!mmc) {
1049                 dev_err(&pdev->dev, "mmc alloc host failed\n");
1050                 return -ENOMEM;
1051         }
1052
1053         host = mmc_priv(mmc);
1054         host->mmc = mmc;
1055         spin_lock_init(&host->lock);
1056
1057         ret = sunxi_mmc_resource_request(host, pdev);
1058         if (ret)
1059                 goto error_free_host;
1060
1061         host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
1062                                           &host->sg_dma, GFP_KERNEL);
1063         if (!host->sg_cpu) {
1064                 dev_err(&pdev->dev, "Failed to allocate DMA descriptor mem\n");
1065                 ret = -ENOMEM;
1066                 goto error_free_host;
1067         }
1068
1069         mmc->ops                = &sunxi_mmc_ops;
1070         mmc->max_blk_count      = 8192;
1071         mmc->max_blk_size       = 4096;
1072         mmc->max_segs           = PAGE_SIZE / sizeof(struct sunxi_idma_des);
1073         mmc->max_seg_size       = (1 << host->idma_des_size_bits);
1074         mmc->max_req_size       = mmc->max_seg_size * mmc->max_segs;
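        /*
         * With 4 KiB pages this allows 4096 / 16 = 256 descriptors, so the
         * largest request is 256 * 8 KiB = 2 MiB on sun4i
         * (idma_des_size_bits = 13) and 256 * 64 KiB = 16 MiB elsewhere.
         */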
1075         /* 400kHz ~ 50MHz */
1076         mmc->f_min              =   400000;
1077         mmc->f_max              = 50000000;
1078         mmc->caps              |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1079                                   MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
1080
1081         ret = mmc_of_parse(mmc);
1082         if (ret)
1083                 goto error_free_dma;
1084
1085         ret = mmc_add_host(mmc);
1086         if (ret)
1087                 goto error_free_dma;
1088
1089         dev_info(&pdev->dev, "base:0x%p irq:%u\n", host->reg_base, host->irq);
1090         platform_set_drvdata(pdev, mmc);
1091         return 0;
1092
1093 error_free_dma:
1094         dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
1095 error_free_host:
1096         mmc_free_host(mmc);
1097         return ret;
1098 }
1099
1100 static int sunxi_mmc_remove(struct platform_device *pdev)
1101 {
1102         struct mmc_host *mmc = platform_get_drvdata(pdev);
1103         struct sunxi_mmc_host *host = mmc_priv(mmc);
1104
1105         mmc_remove_host(mmc);
1106         disable_irq(host->irq);
1107         sunxi_mmc_reset_host(host);
1108
1109         if (!IS_ERR(host->reset))
1110                 reset_control_assert(host->reset);
1111
1112         clk_disable_unprepare(host->clk_mmc);
1113         clk_disable_unprepare(host->clk_ahb);
1114
1115         dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
1116         mmc_free_host(mmc);
1117
1118         return 0;
1119 }
1120
1121 static struct platform_driver sunxi_mmc_driver = {
1122         .driver = {
1123                 .name   = "sunxi-mmc",
1124                 .of_match_table = of_match_ptr(sunxi_mmc_of_match),
1125         },
1126         .probe          = sunxi_mmc_probe,
1127         .remove         = sunxi_mmc_remove,
1128 };
1129 module_platform_driver(sunxi_mmc_driver);
1130
1131 MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver");
1132 MODULE_LICENSE("GPL v2");
1133 MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch>");
1134 MODULE_ALIAS("platform:sunxi-mmc");