/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
                                 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
                                 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS  (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
                                 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS      (DW_MCI_DATA_ERROR_FLAGS | \
                                 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS      1
#define DW_MCI_RECV_STATUS      2
#define DW_MCI_DMA_THRESHOLD    16

#define DW_MCI_FREQ_MAX 200000000       /* unit: Hz */
#define DW_MCI_FREQ_MIN 400000          /* unit: Hz */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR           (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
                                 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
                                 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
                                 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
        u32             des0;   /* Control Descriptor */

        u32             des1;   /* Reserved */

        u32             des2;   /* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
        ((d)->des2 = ((d)->des2 & 0x03ffe000) | ((s) & 0x1fff))

        u32             des3;   /* Reserved */

        u32             des4;   /* Lower 32-bits of Buffer Address Pointer 1 */
        u32             des5;   /* Upper 32-bits of Buffer Address Pointer 1 */

        u32             des6;   /* Lower 32-bits of Next Descriptor Address */
        u32             des7;   /* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
        u32             des0;   /* Control Descriptor */
#define IDMAC_DES0_DIC  BIT(1)
#define IDMAC_DES0_LD   BIT(2)
#define IDMAC_DES0_FD   BIT(3)
#define IDMAC_DES0_CH   BIT(4)
#define IDMAC_DES0_ER   BIT(5)
#define IDMAC_DES0_CES  BIT(30)
#define IDMAC_DES0_OWN  BIT(31)

        u32             des1;   /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
        ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

        u32             des2;   /* buffer 1 physical address */

        u32             des3;   /* buffer 2 physical address */
};
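
/*
 * Illustrative example (values assumed, not part of the original driver):
 * the buffer-1 size occupies the low 13 bits of des1, so a 512-byte buffer
 * is recorded as
 *
 *      IDMAC_SET_BUFFER1_SIZE(desc, 512);  // des1 = (des1 & 0x03ffe000) | 0x200
 *
 * leaving the buffer-2 size field in bits [25:13] untouched.
 */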
#endif /* CONFIG_MMC_DW_IDMAC */

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
        struct dw_mci_slot *slot = s->private;
        struct mmc_request *mrq;
        struct mmc_command *cmd;
        struct mmc_command *stop;
        struct mmc_data *data;

        /* Make sure we get a consistent snapshot */
        spin_lock_bh(&slot->host->lock);
        mrq = slot->mrq;

        if (mrq) {
                cmd = mrq->cmd;
                data = mrq->data;
                stop = mrq->stop;

                if (cmd)
                        seq_printf(s,
                                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
                                   cmd->opcode, cmd->arg, cmd->flags,
                                   cmd->resp[0], cmd->resp[1], cmd->resp[2],
                                   cmd->resp[3], cmd->error);
                if (data)
                        seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
                                   data->bytes_xfered, data->blocks,
                                   data->blksz, data->flags, data->error);
                if (stop)
                        seq_printf(s,
                                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
                                   stop->opcode, stop->arg, stop->flags,
                                   stop->resp[0], stop->resp[1], stop->resp[2],
                                   stop->resp[3], stop->error);
        }

        spin_unlock_bh(&slot->host->lock);

        return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
        return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
        .owner          = THIS_MODULE,
        .open           = dw_mci_req_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
        struct dw_mci *host = s->private;

        /* Dump the register contents, not the register offsets */
        seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
        seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
        seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
        seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
        seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
        seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

        return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
        return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
        .owner          = THIS_MODULE,
        .open           = dw_mci_regs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
        struct mmc_host *mmc = slot->mmc;
        struct dw_mci *host = slot->host;
        struct dentry *root;
        struct dentry *node;

        root = mmc->debugfs_root;
        if (!root)
                return;

        node = debugfs_create_file("regs", S_IRUSR, root, host,
                                   &dw_mci_regs_fops);
        if (!node)
                goto err;

        node = debugfs_create_file("req", S_IRUSR, root, slot,
                                   &dw_mci_req_fops);
        if (!node)
                goto err;

        node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
        if (!node)
                goto err;

        node = debugfs_create_x32("pending_events", S_IRUSR, root,
                                  (u32 *)&host->pending_events);
        if (!node)
                goto err;

        node = debugfs_create_x32("completed_events", S_IRUSR, root,
                                  (u32 *)&host->completed_events);
        if (!node)
                goto err;

        return;

err:
        dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
        struct mmc_data *data;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 cmdr;

        cmd->error = -EINPROGRESS;

        cmdr = cmd->opcode;

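        /*
         * Stop/abort commands need SDMMC_CMD_STOP set.  The CMD52
         * (SD_IO_RW_DIRECT) argument carries the target register address
         * in bits [25:9], so the SDIO check below matches accesses to the
         * CCCR abort register (SDIO_CCCR_ABORT).
         */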
        if (cmd->opcode == MMC_STOP_TRANSMISSION ||
            cmd->opcode == MMC_GO_IDLE_STATE ||
            cmd->opcode == MMC_GO_INACTIVE_STATE ||
            (cmd->opcode == SD_IO_RW_DIRECT &&
             ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
                cmdr |= SDMMC_CMD_STOP;
        else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
                cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

        if (cmd->opcode == SD_SWITCH_VOLTAGE) {
                u32 clk_en_a;

                /* Special bit makes CMD11 not die */
                cmdr |= SDMMC_CMD_VOLT_SWITCH;

                /* Change state to continue to handle CMD11 weirdness */
                WARN_ON(slot->host->state != STATE_SENDING_CMD);
                slot->host->state = STATE_SENDING_CMD11;

                /*
                 * We need to disable low power mode (automatic clock stop)
                 * while doing voltage switch so we don't confuse the card,
                 * since stopping the clock is a specific part of the UHS
                 * voltage change dance.
                 *
                 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
                 * unconditionally turned back on in dw_mci_setup_bus() if it's
                 * ever called with a non-zero clock.  That shouldn't happen
                 * until the voltage change is all done.
                 */
                clk_en_a = mci_readl(host, CLKENA);
                clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
                mci_writel(host, CLKENA, clk_en_a);
                mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
                             SDMMC_CMD_PRV_DAT_WAIT, 0);
        }

        if (cmd->flags & MMC_RSP_PRESENT) {
                /* We expect a response, so set this bit */
                cmdr |= SDMMC_CMD_RESP_EXP;
                if (cmd->flags & MMC_RSP_136)
                        cmdr |= SDMMC_CMD_RESP_LONG;
        }

        if (cmd->flags & MMC_RSP_CRC)
                cmdr |= SDMMC_CMD_RESP_CRC;

        data = cmd->data;
        if (data) {
                cmdr |= SDMMC_CMD_DAT_EXP;
                if (data->flags & MMC_DATA_STREAM)
                        cmdr |= SDMMC_CMD_STRM_MODE;
                if (data->flags & MMC_DATA_WRITE)
                        cmdr |= SDMMC_CMD_DAT_WR;
        }

        if (drv_data && drv_data->prepare_command)
                drv_data->prepare_command(slot->host, &cmdr);

        return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
        struct mmc_command *stop;
        u32 cmdr;

        if (!cmd->data)
                return 0;

        stop = &host->stop_abort;
        cmdr = cmd->opcode;
        memset(stop, 0, sizeof(struct mmc_command));

        if (cmdr == MMC_READ_SINGLE_BLOCK ||
            cmdr == MMC_READ_MULTIPLE_BLOCK ||
            cmdr == MMC_WRITE_BLOCK ||
            cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
            cmdr == MMC_SEND_TUNING_BLOCK ||
            cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
                stop->opcode = MMC_STOP_TRANSMISSION;
                stop->arg = 0;
                stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
        } else if (cmdr == SD_IO_RW_EXTENDED) {
                stop->opcode = SD_IO_RW_DIRECT;
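                /*
                 * Build a CMD52 write (bit 31) through function 0 (bits
                 * [30:28]) to the CCCR abort register (address in bits
                 * [25:9]).  The data byte is the number of the function to
                 * abort, taken from bits [30:28] of the original CMD53
                 * argument.
                 */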
                stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
                             ((cmd->arg >> 28) & 0x7);
                stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
        } else {
                return 0;
        }

        cmdr = stop->opcode | SDMMC_CMD_STOP |
                SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

        return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
                                 struct mmc_command *cmd, u32 cmd_flags)
{
        host->cmd = cmd;
        dev_vdbg(host->dev,
                 "start command: ARGR=0x%08x CMDR=0x%08x\n",
                 cmd->arg, cmd_flags);

        mci_writel(host, CMDARG, cmd->arg);
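        /* ensure the command argument is written before the start bit is set */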
        wmb();

        mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
        struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

        dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
        if (host->using_dma) {
                host->dma_ops->stop(host);
                host->dma_ops->cleanup(host);
        }

        /* Data transfer was stopped by the interrupt handler */
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
        if (data->flags & MMC_DATA_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
        struct mmc_data *data = host->data;

        if (data)
                if (!data->host_cookie)
                        dma_unmap_sg(host->dev,
                                     data->sg,
                                     data->sg_len,
                                     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
        u32 bmod = mci_readl(host, BMOD);

        /* Software reset of DMA */
        bmod |= SDMMC_IDMAC_SWRESET;
        mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
        u32 temp;

        /* Disable and reset the IDMAC interface */
        temp = mci_readl(host, CTRL);
        temp &= ~SDMMC_CTRL_USE_IDMAC;
        temp |= SDMMC_CTRL_DMA_RESET;
        mci_writel(host, CTRL, temp);

        /* Stop the IDMAC running */
        temp = mci_readl(host, BMOD);
        temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
        temp |= SDMMC_IDMAC_SWRESET;
        mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
        struct mmc_data *data = host->data;

        dev_vdbg(host->dev, "DMA complete\n");

        host->dma_ops->cleanup(host);

        /*
         * If the card was removed, data will be NULL. No point in trying to
         * send the stop command or waiting for NBUSY in this case.
         */
        if (data) {
                set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
                tasklet_schedule(&host->tasklet);
        }
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
                                    unsigned int sg_len)
{
        int i;

        if (host->dma_64bit_address == 1) {
                struct idmac_desc_64addr *desc = host->sg_cpu;

                for (i = 0; i < sg_len; i++, desc++) {
                        unsigned int length = sg_dma_len(&data->sg[i]);
                        u64 mem_addr = sg_dma_address(&data->sg[i]);

                        /*
                         * Set the OWN bit and disable interrupts for this
                         * descriptor
                         */
                        desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
                                                IDMAC_DES0_CH;
                        /* Buffer length */
                        IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);

                        /* Physical address to DMA to/from */
                        desc->des4 = mem_addr & 0xffffffff;
                        desc->des5 = mem_addr >> 32;
                }

                /* Set first descriptor */
                desc = host->sg_cpu;
                desc->des0 |= IDMAC_DES0_FD;

                /* Set last descriptor */
                desc = host->sg_cpu + (i - 1) *
                                sizeof(struct idmac_desc_64addr);
                desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
                desc->des0 |= IDMAC_DES0_LD;

        } else {
                struct idmac_desc *desc = host->sg_cpu;

                for (i = 0; i < sg_len; i++, desc++) {
                        unsigned int length = sg_dma_len(&data->sg[i]);
                        u32 mem_addr = sg_dma_address(&data->sg[i]);

                        /*
                         * Set the OWN bit and disable interrupts for this
                         * descriptor
                         */
                        desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
                                                IDMAC_DES0_CH;
                        /* Buffer length */
                        IDMAC_SET_BUFFER1_SIZE(desc, length);

                        /* Physical address to DMA to/from */
                        desc->des2 = mem_addr;
                }

                /* Set first descriptor */
                desc = host->sg_cpu;
                desc->des0 |= IDMAC_DES0_FD;

                /* Set last descriptor */
                desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
                desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
                desc->des0 |= IDMAC_DES0_LD;
        }

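        /* ensure the descriptor updates hit memory before DMA (re)starts */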
        wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
        u32 temp;

        dw_mci_translate_sglist(host, host->data, sg_len);

        /* Make sure to reset DMA in case we did PIO before this */
        dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
        dw_mci_idmac_reset(host);

        /* Select IDMAC interface */
        temp = mci_readl(host, CTRL);
        temp |= SDMMC_CTRL_USE_IDMAC;
        mci_writel(host, CTRL, temp);

        wmb();

        /* Enable the IDMAC */
        temp = mci_readl(host, BMOD);
        temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
        mci_writel(host, BMOD, temp);

        /* Start it running */
        mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
        int i;

        if (host->dma_64bit_address == 1) {
                struct idmac_desc_64addr *p;

                /* Number of descriptors in the ring buffer */
                host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

                /* Forward link the descriptor list */
                for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
                                                                i++, p++) {
                        p->des6 = (host->sg_dma +
                                        (sizeof(struct idmac_desc_64addr) *
                                                        (i + 1))) & 0xffffffff;

                        p->des7 = (u64)(host->sg_dma +
                                        (sizeof(struct idmac_desc_64addr) *
                                                        (i + 1))) >> 32;
                        /* Initialize reserved and buffer size fields to "0" */
                        p->des1 = 0;
                        p->des2 = 0;
                        p->des3 = 0;
                }

                /* Set the last descriptor as the end-of-ring descriptor */
                p->des6 = host->sg_dma & 0xffffffff;
                p->des7 = (u64)host->sg_dma >> 32;
                p->des0 = IDMAC_DES0_ER;

        } else {
                struct idmac_desc *p;

                /* Number of descriptors in the ring buffer */
                host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

                /* Forward link the descriptor list */
                for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
                        p->des3 = host->sg_dma + (sizeof(struct idmac_desc) *
                                                                (i + 1));

                /* Set the last descriptor as the end-of-ring descriptor */
                p->des3 = host->sg_dma;
                p->des0 = IDMAC_DES0_ER;
        }

        dw_mci_idmac_reset(host);

        if (host->dma_64bit_address == 1) {
                /* Mask out interrupts - get Tx & Rx complete only */
                mci_writel(host, IDSTS64, IDMAC_INT_CLR);
                mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
                                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

                /* Set the descriptor base address */
                mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
                mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

        } else {
                /* Mask out interrupts - get Tx & Rx complete only */
                mci_writel(host, IDSTS, IDMAC_INT_CLR);
                mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
                                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

                /* Set the descriptor base address */
                mci_writel(host, DBADDR, host->sg_dma);
        }

        return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
        .init = dw_mci_idmac_init,
        .start = dw_mci_idmac_start_dma,
        .stop = dw_mci_idmac_stop_dma,
        .complete = dw_mci_idmac_complete_dma,
        .cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
                                   struct mmc_data *data,
                                   bool next)
{
        struct scatterlist *sg;
        unsigned int i, sg_len;

        if (!next && data->host_cookie)
                return data->host_cookie;

        /*
         * We don't do DMA on "complex" transfers, i.e. with
         * non-word-aligned buffers or lengths. Also, we don't bother
         * with all the DMA setup overhead for short transfers.
         */
        if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
                return -EINVAL;

        if (data->blksz & 3)
                return -EINVAL;

        for_each_sg(data->sg, sg, data->sg_len, i) {
                if (sg->offset & 3 || sg->length & 3)
                        return -EINVAL;
        }

        sg_len = dma_map_sg(host->dev,
                            data->sg,
                            data->sg_len,
                            dw_mci_get_dma_dir(data));
        if (sg_len == 0)
                return -EINVAL;

        if (next)
                data->host_cookie = sg_len;

        return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
                           struct mmc_request *mrq,
                           bool is_first_req)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!slot->host->use_dma || !data)
                return;

        if (data->host_cookie) {
                data->host_cookie = 0;
                return;
        }

        if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
                data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
                            struct mmc_request *mrq,
                            int err)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!slot->host->use_dma || !data)
                return;

        if (data->host_cookie)
                dma_unmap_sg(slot->host->dev,
                             data->sg,
                             data->sg_len,
                             dw_mci_get_dma_dir(data));
        data->host_cookie = 0;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
        unsigned int blksz = data->blksz;
        const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
        u32 fifo_width = 1 << host->data_shift;
        u32 blksz_depth = blksz / fifo_width, fifoth_val;
        u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
        int idx = ARRAY_SIZE(mszs) - 1;

        tx_wmark = (host->fifo_depth) / 2;
        tx_wmark_invers = host->fifo_depth - tx_wmark;

        /*
         * MSIZE is '1' if blksz is not a multiple of the FIFO width
         */
        if (blksz % fifo_width) {
                msize = 0;
                rx_wmark = 1;
                goto done;
        }

        do {
                if (!((blksz_depth % mszs[idx]) ||
                     (tx_wmark_invers % mszs[idx]))) {
                        msize = idx;
                        rx_wmark = mszs[idx] - 1;
                        break;
                }
        } while (--idx > 0);
        /*
         * If idx reaches '0' the loop exits without trying it, so the
         * initial values (msize = 0, rx_wmark = 1) are used.
         */
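        /*
         * Illustrative example (values assumed, not from the original code):
         * with a 32-bit FIFO (fifo_width = 4), fifo_depth = 64 and
         * blksz = 512, blksz_depth = 128 and tx_wmark = tx_wmark_invers = 32.
         * The loop settles on mszs[4] = 32 (128 % 32 == 0 and 32 % 32 == 0),
         * giving msize = 4 (burst of 32 transfers) and rx_wmark = 31.
         */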
done:
        fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
        mci_writel(host, FIFOTH, fifoth_val);
#endif
}

static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
        unsigned int blksz = data->blksz;
        u32 blksz_depth, fifo_depth;
        u16 thld_size;

        WARN_ON(!(data->flags & MMC_DATA_READ));

        /*
         * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
         * in the FIFO region, so we really shouldn't access it).
         */
        if (host->verid < DW_MMC_240A)
                return;

        if (host->timing != MMC_TIMING_MMC_HS200 &&
            host->timing != MMC_TIMING_UHS_SDR104)
                goto disable;

        blksz_depth = blksz / (1 << host->data_shift);
        fifo_depth = host->fifo_depth;

        if (blksz_depth > fifo_depth)
                goto disable;

        /*
         * If blksz_depth >= (fifo_depth >> 1), any thld_size <= blksz works;
         * if blksz_depth < (fifo_depth >> 1), thld_size must equal blksz.
         * Choosing blksz satisfies both cases.
         */
        thld_size = blksz;
        mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
        return;

disable:
        mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
        unsigned long irqflags;
        int sg_len;
        u32 temp;

        host->using_dma = 0;

        /* If we don't have a channel, we can't do DMA */
        if (!host->use_dma)
                return -ENODEV;

        sg_len = dw_mci_pre_dma_transfer(host, data, 0);
        if (sg_len < 0) {
                host->dma_ops->stop(host);
                return sg_len;
        }

        host->using_dma = 1;

        dev_vdbg(host->dev,
                 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
                 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
                 sg_len);

        /*
         * Decide the MSIZE and RX/TX watermark.
         * If the current block size is the same as the previous one,
         * there is no need to update FIFOTH.
         */
        if (host->prev_blksz != data->blksz)
                dw_mci_adjust_fifoth(host, data);

        /* Enable the DMA interface */
        temp = mci_readl(host, CTRL);
        temp |= SDMMC_CTRL_DMA_ENABLE;
        mci_writel(host, CTRL, temp);

        /* Disable RX/TX IRQs, let DMA handle it */
        spin_lock_irqsave(&host->irq_lock, irqflags);
        temp = mci_readl(host, INTMASK);
        temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
        mci_writel(host, INTMASK, temp);
        spin_unlock_irqrestore(&host->irq_lock, irqflags);

        host->dma_ops->start(host, sg_len);

        return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
        unsigned long irqflags;
        u32 temp;

        data->error = -EINPROGRESS;

        WARN_ON(host->data);
        host->sg = NULL;
        host->data = data;

        if (data->flags & MMC_DATA_READ) {
                host->dir_status = DW_MCI_RECV_STATUS;
                dw_mci_ctrl_rd_thld(host, data);
        } else {
                host->dir_status = DW_MCI_SEND_STATUS;
        }

        if (dw_mci_submit_data_dma(host, data)) {
                int flags = SG_MITER_ATOMIC;

                if (host->data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;

                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->sg = data->sg;
                host->part_buf_start = 0;
                host->part_buf_count = 0;

                mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

                spin_lock_irqsave(&host->irq_lock, irqflags);
                temp = mci_readl(host, INTMASK);
                temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
                mci_writel(host, INTMASK, temp);
                spin_unlock_irqrestore(&host->irq_lock, irqflags);

                temp = mci_readl(host, CTRL);
                temp &= ~SDMMC_CTRL_DMA_ENABLE;
                mci_writel(host, CTRL, temp);

                /*
                 * Use the initial fifoth_val for PIO mode.  Invalidate
                 * prev_blksz in case the next transfer is done by DMA and
                 * FIFOTH needs to be recalculated.
                 */
                mci_writel(host, FIFOTH, host->fifoth_val);
                host->prev_blksz = 0;
        } else {
                /*
                 * Keep the current block size.
                 * It will be used to decide whether to update
                 * fifoth register next time.
                 */
                host->prev_blksz = data->blksz;
        }
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
        struct dw_mci *host = slot->host;
        unsigned long timeout = jiffies + msecs_to_jiffies(500);
        unsigned int cmd_status = 0;

        mci_writel(host, CMDARG, arg);
        wmb();
        mci_writel(host, CMD, SDMMC_CMD_START | cmd);

        while (time_before(jiffies, timeout)) {
                cmd_status = mci_readl(host, CMD);
                if (!(cmd_status & SDMMC_CMD_START))
                        return;
        }

        dev_err(&slot->mmc->class_dev,
                "Timeout sending command (cmd %#x arg %#x status %#x)\n",
                cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
        struct dw_mci *host = slot->host;
        unsigned int clock = slot->clock;
        u32 div;
        u32 clk_en_a;
        u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

        /* We must continue to set bit 28 in CMD until the change is complete */
        if (host->state == STATE_WAITING_CMD11_DONE)
                sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

        if (!clock) {
                mci_writel(host, CLKENA, 0);
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
        } else if (clock != host->current_speed || force_clkinit) {
                div = host->bus_hz / clock;
                if (host->bus_hz % clock && host->bus_hz > clock)
                        /*
                         * move the + 1 after the divide to prevent
                         * over-clocking the card.
                         */
                        div += 1;

                div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
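                /*
                 * Worked example (illustrative values): with bus_hz = 50 MHz
                 * and a requested clock of 400 kHz, div = 125 before rounding
                 * and DIV_ROUND_UP(125, 2) = 63 afterwards.  CLKDIV divides by
                 * 2 * div, so the card sees 50 MHz / 126 ~= 396.8 kHz, never
                 * more than requested.
                 */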

                if ((clock << div) != slot->__clk_old || force_clkinit)
                        dev_info(&slot->mmc->class_dev,
                                 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
                                 slot->id, host->bus_hz, clock,
                                 div ? ((host->bus_hz / div) >> 1) :
                                 host->bus_hz, div);

                /* disable clock */
                mci_writel(host, CLKENA, 0);
                mci_writel(host, CLKSRC, 0);

                /* inform CIU */
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);

                /* set clock to desired speed */
                mci_writel(host, CLKDIV, div);

                /* inform CIU */
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);

                /* enable clock; only low power if no SDIO */
                clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
                if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
                        clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
                mci_writel(host, CLKENA, clk_en_a);

                /* inform CIU */
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);

                /* remember this clock/divider combination to detect changes */
                slot->__clk_old = clock << div;
        }

        host->current_speed = clock;

        /* Set the current slot bus width */
        mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
                                   struct dw_mci_slot *slot,
                                   struct mmc_command *cmd)
{
        struct mmc_request *mrq;
        struct mmc_data *data;
        u32 cmdflags;

        mrq = slot->mrq;

        host->cur_slot = slot;
        host->mrq = mrq;

        host->pending_events = 0;
        host->completed_events = 0;
        host->cmd_status = 0;
        host->data_status = 0;
        host->dir_status = 0;

        data = cmd->data;
        if (data) {
                mci_writel(host, TMOUT, 0xFFFFFFFF);
                mci_writel(host, BYTCNT, data->blksz * data->blocks);
                mci_writel(host, BLKSIZ, data->blksz);
        }

        cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

        /* this is the first command, send the initialization clock */
        if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
                cmdflags |= SDMMC_CMD_INIT;

        if (data) {
                dw_mci_submit_data(host, data);
                wmb();
        }

        dw_mci_start_command(host, cmd, cmdflags);

        if (mrq->stop)
                host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
        else
                host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
                                 struct dw_mci_slot *slot)
{
        struct mmc_request *mrq = slot->mrq;
        struct mmc_command *cmd;

        cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
        __dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
                                 struct mmc_request *mrq)
{
        dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
                 host->state);

        slot->mrq = mrq;

        if (host->state == STATE_WAITING_CMD11_DONE) {
                dev_warn(&slot->mmc->class_dev,
                         "Voltage change didn't complete\n");
                /*
                 * this case isn't expected to happen, so we can
                 * either crash here or just try to continue on
                 * in the closest possible state
                 */
                host->state = STATE_IDLE;
        }

        if (host->state == STATE_IDLE) {
                host->state = STATE_SENDING_CMD;
                dw_mci_start_request(host, slot);
        } else {
                list_add_tail(&slot->queue_node, &host->queue);
        }
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;

        WARN_ON(slot->mrq);

        /*
         * The check for card presence and queueing of the request must be
         * atomic, otherwise the card could be removed in between and the
         * request wouldn't fail until another card was inserted.
         */
        spin_lock_bh(&host->lock);

        if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
                spin_unlock_bh(&host->lock);
                mrq->cmd->error = -ENOMEDIUM;
                mmc_request_done(mmc, mrq);
                return;
        }

        dw_mci_queue_request(host, slot, mrq);

        spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 regs;
        int ret;

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_4:
                slot->ctype = SDMMC_CTYPE_4BIT;
                break;
        case MMC_BUS_WIDTH_8:
                slot->ctype = SDMMC_CTYPE_8BIT;
                break;
        default:
                /* set default 1 bit mode */
                slot->ctype = SDMMC_CTYPE_1BIT;
        }

        regs = mci_readl(slot->host, UHS_REG);

        /* DDR mode set */
        if (ios->timing == MMC_TIMING_MMC_DDR52 ||
            ios->timing == MMC_TIMING_MMC_HS400)
                regs |= ((0x1 << slot->id) << 16);
        else
                regs &= ~((0x1 << slot->id) << 16);

        mci_writel(slot->host, UHS_REG, regs);
        slot->host->timing = ios->timing;

        /*
         * Use mirror of ios->clock to prevent race with mmc
         * core ios update when finding the minimum.
         */
        slot->clock = ios->clock;

        if (drv_data && drv_data->set_ios)
                drv_data->set_ios(slot->host, ios);

        /* Slot specific timing and width adjustment */
        dw_mci_setup_bus(slot, false);

        if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
                slot->host->state = STATE_IDLE;

        switch (ios->power_mode) {
        case MMC_POWER_UP:
                if (!IS_ERR(mmc->supply.vmmc)) {
                        ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
                                        ios->vdd);
                        if (ret) {
                                dev_err(slot->host->dev,
                                        "failed to enable vmmc regulator\n");
                                /* bail out if vmmc could not be enabled */
                                return;
                        }
                }
                set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
                regs = mci_readl(slot->host, PWREN);
                regs |= (1 << slot->id);
                mci_writel(slot->host, PWREN, regs);
                break;
        case MMC_POWER_ON:
                if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
                        ret = regulator_enable(mmc->supply.vqmmc);
                        if (ret < 0)
                                dev_err(slot->host->dev,
                                        "failed to enable vqmmc regulator\n");
                        else
                                slot->host->vqmmc_enabled = true;
                }
                break;
        case MMC_POWER_OFF:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

                if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
                        regulator_disable(mmc->supply.vqmmc);
                        slot->host->vqmmc_enabled = false;
                }

                regs = mci_readl(slot->host, PWREN);
                regs &= ~(1 << slot->id);
                mci_writel(slot->host, PWREN, regs);
                break;
        default:
                break;
        }
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        u32 status;

        /*
         * Check the busy bit which is low when DAT[3:0]
         * (the data lines) are 0000
         */
        status = mci_readl(slot->host, STATUS);

        return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        u32 uhs;
        u32 v18 = SDMMC_UHS_18V << slot->id;
        int min_uv, max_uv;
        int ret;

        /*
         * Program the voltage.  Note that some instances of dw_mmc may use
         * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
         * does no harm but you need to set the regulator directly.  Try both.
         */
        uhs = mci_readl(host, UHS_REG);
        if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
                min_uv = 2700000;
                max_uv = 3600000;
                uhs &= ~v18;
        } else {
                min_uv = 1700000;
                max_uv = 1950000;
                uhs |= v18;
        }

        if (!IS_ERR(mmc->supply.vqmmc)) {
                ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
                if (ret) {
                        dev_dbg(&mmc->class_dev,
                                         "Regulator set error %d: %d - %d\n",
                                         ret, min_uv, max_uv);
                        return ret;
                }
        }
        mci_writel(host, UHS_REG, uhs);

        return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
        int read_only;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        int gpio_ro = mmc_gpio_get_ro(mmc);

        /* Use platform get_ro function, else try on board write protect */
        if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
                        (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
                read_only = 0;
        else if (!IS_ERR_VALUE(gpio_ro))
                read_only = gpio_ro;
        else
                read_only =
                        mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

        dev_dbg(&mmc->class_dev, "card is %s\n",
                read_only ? "read-only" : "read-write");

        return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
        int present;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci_board *brd = slot->host->pdata;
        struct dw_mci *host = slot->host;
        int gpio_cd = mmc_gpio_get_cd(mmc);

        /* Use platform get_cd function, else try onboard card detect */
        if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
                present = 1;
        else if (!IS_ERR_VALUE(gpio_cd))
                present = gpio_cd;
        else
                present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
                        == 0 ? 1 : 0;

        spin_lock_bh(&host->lock);
        if (present) {
                set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
                dev_dbg(&mmc->class_dev, "card is present\n");
        } else {
                clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
                dev_dbg(&mmc->class_dev, "card is not present\n");
        }
        spin_unlock_bh(&host->lock);

        return present;
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;

        /*
         * Low power mode will stop the card clock when idle.  According to the
         * description of the CLKENA register we should disable low power mode
         * for SDIO cards if we need SDIO interrupts to work.
         */
        if (mmc->caps & MMC_CAP_SDIO_IRQ) {
                const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
                u32 clk_en_a_old;
                u32 clk_en_a;

                clk_en_a_old = mci_readl(host, CLKENA);

                if (card->type == MMC_TYPE_SDIO ||
                    card->type == MMC_TYPE_SD_COMBO) {
                        set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
                        clk_en_a = clk_en_a_old & ~clken_low_pwr;
                } else {
                        clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
                        clk_en_a = clk_en_a_old | clken_low_pwr;
                }

                if (clk_en_a != clk_en_a_old) {
                        mci_writel(host, CLKENA, clk_en_a);
                        mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
                                     SDMMC_CMD_PRV_DAT_WAIT, 0);
                }
        }
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        unsigned long irqflags;
        u32 int_mask;

        spin_lock_irqsave(&host->irq_lock, irqflags);

        /* Enable/disable Slot Specific SDIO interrupt */
        int_mask = mci_readl(host, INTMASK);
        if (enb)
                int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
        else
                int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
        mci_writel(host, INTMASK, int_mask);

        spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        const struct dw_mci_drv_data *drv_data = host->drv_data;
        int err = -ENOSYS;

        if (drv_data && drv_data->execute_tuning)
                err = drv_data->execute_tuning(slot);

        return err;
}

int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        const struct dw_mci_drv_data *drv_data = host->drv_data;

        if (drv_data && drv_data->prepare_hs400_tuning)
                return drv_data->prepare_hs400_tuning(host, ios);

        return 0;
}

static const struct mmc_host_ops dw_mci_ops = {
        .request                = dw_mci_request,
        .pre_req                = dw_mci_pre_req,
        .post_req               = dw_mci_post_req,
        .set_ios                = dw_mci_set_ios,
        .get_ro                 = dw_mci_get_ro,
        .get_cd                 = dw_mci_get_cd,
        .enable_sdio_irq        = dw_mci_enable_sdio_irq,
        .execute_tuning         = dw_mci_execute_tuning,
        .card_busy              = dw_mci_card_busy,
        .start_signal_voltage_switch = dw_mci_switch_voltage,
        .init_card              = dw_mci_init_card,
        .prepare_hs400_tuning   = dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
        __releases(&host->lock)
        __acquires(&host->lock)
{
        struct dw_mci_slot *slot;
        struct mmc_host *prev_mmc = host->cur_slot->mmc;

        WARN_ON(host->cmd || host->data);

        host->cur_slot->mrq = NULL;
        host->mrq = NULL;
        if (!list_empty(&host->queue)) {
                slot = list_entry(host->queue.next,
                                  struct dw_mci_slot, queue_node);
                list_del(&slot->queue_node);
                dev_vdbg(host->dev, "list not empty: %s is next\n",
                         mmc_hostname(slot->mmc));
                host->state = STATE_SENDING_CMD;
                dw_mci_start_request(host, slot);
        } else {
                dev_vdbg(host->dev, "list empty\n");

                if (host->state == STATE_SENDING_CMD11)
                        host->state = STATE_WAITING_CMD11_DONE;
                else
                        host->state = STATE_IDLE;
        }

        spin_unlock(&host->lock);
        mmc_request_done(prev_mmc, mrq);
        spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
        u32 status = host->cmd_status;

        host->cmd_status = 0;

        /* Read the response from the card (up to 16 bytes) */
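        /*
         * RESP0 holds the least significant word, so a long (136-bit) R2
         * response is stored into resp[3..0] in reverse register order,
         * matching the mmc core convention of resp[0] being most significant.
         */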
1394         if (cmd->flags & MMC_RSP_PRESENT) {
1395                 if (cmd->flags & MMC_RSP_136) {
1396                         cmd->resp[3] = mci_readl(host, RESP0);
1397                         cmd->resp[2] = mci_readl(host, RESP1);
1398                         cmd->resp[1] = mci_readl(host, RESP2);
1399                         cmd->resp[0] = mci_readl(host, RESP3);
1400                 } else {
1401                         cmd->resp[0] = mci_readl(host, RESP0);
1402                         cmd->resp[1] = 0;
1403                         cmd->resp[2] = 0;
1404                         cmd->resp[3] = 0;
1405                 }
1406         }
1407
1408         if (status & SDMMC_INT_RTO)
1409                 cmd->error = -ETIMEDOUT;
1410         else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1411                 cmd->error = -EILSEQ;
1412         else if (status & SDMMC_INT_RESP_ERR)
1413                 cmd->error = -EIO;
1414         else
1415                 cmd->error = 0;
1416
1417         if (cmd->error) {
1418                 /* newer ip versions need a delay between retries */
1419                 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1420                         mdelay(20);
1421         }
1422
1423         return cmd->error;
1424 }
1425
1426 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1427 {
1428         u32 status = host->data_status;
1429
1430         if (status & DW_MCI_DATA_ERROR_FLAGS) {
1431                 if (status & SDMMC_INT_DRTO) {
1432                         data->error = -ETIMEDOUT;
1433                 } else if (status & SDMMC_INT_DCRC) {
1434                         data->error = -EILSEQ;
1435                 } else if (status & SDMMC_INT_EBE) {
1436                         if (host->dir_status ==
1437                                 DW_MCI_SEND_STATUS) {
1438                                 /*
1439                                  * No data CRC status was returned.
1440                                  * The number of bytes transferred
1441                                  * will be exaggerated in PIO mode.
1442                                  */
1443                                 data->bytes_xfered = 0;
1444                                 data->error = -ETIMEDOUT;
1445                         } else if (host->dir_status ==
1446                                         DW_MCI_RECV_STATUS) {
1447                                 data->error = -EIO;
1448                         }
1449                 } else {
1450                         /* SDMMC_INT_SBE is included */
1451                         data->error = -EIO;
1452                 }
1453
1454                 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1455
1456                 /*
1457                  * After an error, there may be data lingering
1458                  * in the FIFO
1459                  */
1460                 dw_mci_reset(host);
1461         } else {
1462                 data->bytes_xfered = data->blocks * data->blksz;
1463                 data->error = 0;
1464         }
1465
1466         return data->error;
1467 }
1468
1469 static void dw_mci_tasklet_func(unsigned long priv)
1470 {
1471         struct dw_mci *host = (struct dw_mci *)priv;
1472         struct mmc_data *data;
1473         struct mmc_command *cmd;
1474         struct mmc_request *mrq;
1475         enum dw_mci_state state;
1476         enum dw_mci_state prev_state;
1477         unsigned int err;
1478
1479         spin_lock(&host->lock);
1480
1481         state = host->state;
1482         data = host->data;
1483         mrq = host->mrq;
1484
1485         do {
1486                 prev_state = state;
1487
1488                 switch (state) {
1489                 case STATE_IDLE:
1490                 case STATE_WAITING_CMD11_DONE:
1491                         break;
1492
1493                 case STATE_SENDING_CMD11:
1494                 case STATE_SENDING_CMD:
1495                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1496                                                 &host->pending_events))
1497                                 break;
1498
1499                         cmd = host->cmd;
1500                         host->cmd = NULL;
1501                         set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1502                         err = dw_mci_command_complete(host, cmd);
1503                         if (cmd == mrq->sbc && !err) {
1504                                 prev_state = state = STATE_SENDING_CMD;
1505                                 __dw_mci_start_request(host, host->cur_slot,
1506                                                        mrq->cmd);
1507                                 goto unlock;
1508                         }
1509
1510                         if (cmd->data && err) {
1511                                 dw_mci_stop_dma(host);
1512                                 send_stop_abort(host, data);
1513                                 state = STATE_SENDING_STOP;
1514                                 break;
1515                         }
1516
1517                         if (!cmd->data || err) {
1518                                 dw_mci_request_end(host, mrq);
1519                                 goto unlock;
1520                         }
1521
1522                         prev_state = state = STATE_SENDING_DATA;
1523                         /* fall through */
1524
1525                 case STATE_SENDING_DATA:
1526                         /*
1527                          * We could get a data error and never a transfer
1528                          * complete, so we'd better check for it here.
1529                          *
1530                          * Note that we don't really care if we also got a
1531                          * transfer complete; stopping the DMA and sending an
1532                          * abort won't hurt.
1533                          */
1534                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1535                                                &host->pending_events)) {
1536                                 dw_mci_stop_dma(host);
1537                                 if (data->stop ||
1538                                     !(host->data_status & (SDMMC_INT_DRTO |
1539                                                            SDMMC_INT_EBE)))
1540                                         send_stop_abort(host, data);
1541                                 state = STATE_DATA_ERROR;
1542                                 break;
1543                         }
1544
1545                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1546                                                 &host->pending_events))
1547                                 break;
1548
1549                         set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1550
1551                         /*
1552                          * Handle an EVENT_DATA_ERROR that might have shown up
1553                          * before the transfer completed.  This might not have
1554                          * been caught by the check above because the interrupt
1555                          * could have gone off between the previous check and
1556                          * the check for transfer complete.
1557                          *
1558                          * Technically this ought not be needed assuming we
1559                          * get a DATA_COMPLETE eventually (we'll notice the
1560                          * error and end the request), but it shouldn't hurt.
1561                          *
1562                          * This has the advantage of sending the stop command.
1563                          */
1564                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1565                                                &host->pending_events)) {
1566                                 dw_mci_stop_dma(host);
1567                                 if (data->stop ||
1568                                     !(host->data_status & (SDMMC_INT_DRTO |
1569                                                            SDMMC_INT_EBE)))
1570                                         send_stop_abort(host, data);
1571                                 state = STATE_DATA_ERROR;
1572                                 break;
1573                         }
1574                         prev_state = state = STATE_DATA_BUSY;
1575
1576                         /* fall through */
1577
1578                 case STATE_DATA_BUSY:
1579                         if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1580                                                 &host->pending_events))
1581                                 break;
1582
1583                         host->data = NULL;
1584                         set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1585                         err = dw_mci_data_complete(host, data);
1586
1587                         if (!err) {
1588                                 if (!data->stop || mrq->sbc) {
1589                                         if (mrq->sbc && data->stop)
1590                                                 data->stop->error = 0;
1591                                         dw_mci_request_end(host, mrq);
1592                                         goto unlock;
1593                                 }
1594
1595                                 /* stop command for open-ended transfer */
1596                                 if (data->stop)
1597                                         send_stop_abort(host, data);
1598                         } else {
1599                                 /*
1600                                  * If we don't have a command complete now we'll
1601                                  * never get one since we just reset everything;
1602                                  * better end the request.
1603                                  *
1604                                  * If we do have a command complete we'll fall
1605                                  * through to the SENDING_STOP command and
1606                                  * everything will be peachy keen.
1607                                  */
1608                                 if (!test_bit(EVENT_CMD_COMPLETE,
1609                                               &host->pending_events)) {
1610                                         host->cmd = NULL;
1611                                         dw_mci_request_end(host, mrq);
1612                                         goto unlock;
1613                                 }
1614                         }
1615
1616                         /*
1617                          * If err is non-zero, the stop/abort command has
1618                          * already been issued.
1619                          */
1620                         prev_state = state = STATE_SENDING_STOP;
1621
1622                         /* fall through */
1623
1624                 case STATE_SENDING_STOP:
1625                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1626                                                 &host->pending_events))
1627                                 break;
1628
1629                         /* CMD error in data command */
1630                         if (mrq->cmd->error && mrq->data)
1631                                 dw_mci_reset(host);
1632
1633                         host->cmd = NULL;
1634                         host->data = NULL;
1635
1636                         if (mrq->stop)
1637                                 dw_mci_command_complete(host, mrq->stop);
1638                         else
1639                                 host->cmd_status = 0;
1640
1641                         dw_mci_request_end(host, mrq);
1642                         goto unlock;
1643
1644                 case STATE_DATA_ERROR:
1645                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1646                                                 &host->pending_events))
1647                                 break;
1648
1649                         state = STATE_DATA_BUSY;
1650                         break;
1651                 }
1652         } while (state != prev_state);
1653
1654         host->state = state;
1655 unlock:
1656         spin_unlock(&host->lock);
1657
1658 }
1659
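     /*
      * PIO helpers.  FIFO accesses must always use the full host data width
      * (16, 32 or 64 bits, per host->data_shift), so buffers that are not a
      * multiple of that width stage their leftover bytes in host->part_buf
      * between calls.  The helpers below manage that staging area.
      */
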
1660 /* push final bytes to part_buf, only use during push */
1661 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1662 {
1663         memcpy((void *)&host->part_buf, buf, cnt);
1664         host->part_buf_count = cnt;
1665 }
1666
1667 /* append bytes to part_buf, only use during push */
1668 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1669 {
1670         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1671         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1672         host->part_buf_count += cnt;
1673         return cnt;
1674 }
1675
1676 /* pull first bytes from part_buf, only use during pull */
1677 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1678 {
1679         cnt = min(cnt, (int)host->part_buf_count);
1680         if (cnt) {
1681                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1682                        cnt);
1683                 host->part_buf_count -= cnt;
1684                 host->part_buf_start += cnt;
1685         }
1686         return cnt;
1687 }
1688
1689 /* pull final bytes from the part_buf, assuming it's just been filled */
1690 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1691 {
1692         memcpy(buf, &host->part_buf, cnt);
1693         host->part_buf_start = cnt;
1694         host->part_buf_count = (1 << host->data_shift) - cnt;
1695 }
1696
1697 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1698 {
1699         struct mmc_data *data = host->data;
1700         int init_cnt = cnt;
1701
1702         /* try and push anything in the part_buf */
1703         if (unlikely(host->part_buf_count)) {
1704                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1705                 buf += len;
1706                 cnt -= len;
1707                 if (host->part_buf_count == 2) {
1708                         mci_writew(host, DATA(host->data_offset),
1709                                         host->part_buf16);
1710                         host->part_buf_count = 0;
1711                 }
1712         }
1713 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1714         if (unlikely((unsigned long)buf & 0x1)) {
1715                 while (cnt >= 2) {
1716                         u16 aligned_buf[64];
1717                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1718                         int items = len >> 1;
1719                         int i;
1720                         /* memcpy from input buffer into aligned buffer */
1721                         memcpy(aligned_buf, buf, len);
1722                         buf += len;
1723                         cnt -= len;
1724                         /* push data from aligned buffer into fifo */
1725                         for (i = 0; i < items; ++i)
1726                                 mci_writew(host, DATA(host->data_offset),
1727                                                 aligned_buf[i]);
1728                 }
1729         } else
1730 #endif
1731         {
1732                 u16 *pdata = buf;
1733                 for (; cnt >= 2; cnt -= 2)
1734                         mci_writew(host, DATA(host->data_offset), *pdata++);
1735                 buf = pdata;
1736         }
1737         /* put anything remaining in the part_buf */
1738         if (cnt) {
1739                 dw_mci_set_part_bytes(host, buf, cnt);
1740                 /* Push data if we have reached the expected data length */
1741                 if ((data->bytes_xfered + init_cnt) ==
1742                     (data->blksz * data->blocks))
1743                         mci_writew(host, DATA(host->data_offset),
1744                                    host->part_buf16);
1745         }
1746 }
1747
1748 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1749 {
1750 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1751         if (unlikely((unsigned long)buf & 0x1)) {
1752                 while (cnt >= 2) {
1753                         /* pull data from fifo into aligned buffer */
1754                         u16 aligned_buf[64];
1755                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1756                         int items = len >> 1;
1757                         int i;
1758                         for (i = 0; i < items; ++i)
1759                                 aligned_buf[i] = mci_readw(host,
1760                                                 DATA(host->data_offset));
1761                         /* memcpy from aligned buffer into output buffer */
1762                         memcpy(buf, aligned_buf, len);
1763                         buf += len;
1764                         cnt -= len;
1765                 }
1766         } else
1767 #endif
1768         {
1769                 u16 *pdata = buf;
1770                 for (; cnt >= 2; cnt -= 2)
1771                         *pdata++ = mci_readw(host, DATA(host->data_offset));
1772                 buf = pdata;
1773         }
1774         if (cnt) {
1775                 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1776                 dw_mci_pull_final_bytes(host, buf, cnt);
1777         }
1778 }
1779
1780 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1781 {
1782         struct mmc_data *data = host->data;
1783         int init_cnt = cnt;
1784
1785         /* try and push anything in the part_buf */
1786         if (unlikely(host->part_buf_count)) {
1787                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1788                 buf += len;
1789                 cnt -= len;
1790                 if (host->part_buf_count == 4) {
1791                         mci_writel(host, DATA(host->data_offset),
1792                                         host->part_buf32);
1793                         host->part_buf_count = 0;
1794                 }
1795         }
1796 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1797         if (unlikely((unsigned long)buf & 0x3)) {
1798                 while (cnt >= 4) {
1799                         u32 aligned_buf[32];
1800                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1801                         int items = len >> 2;
1802                         int i;
1803                         /* memcpy from input buffer into aligned buffer */
1804                         memcpy(aligned_buf, buf, len);
1805                         buf += len;
1806                         cnt -= len;
1807                         /* push data from aligned buffer into fifo */
1808                         for (i = 0; i < items; ++i)
1809                                 mci_writel(host, DATA(host->data_offset),
1810                                                 aligned_buf[i]);
1811                 }
1812         } else
1813 #endif
1814         {
1815                 u32 *pdata = buf;
1816                 for (; cnt >= 4; cnt -= 4)
1817                         mci_writel(host, DATA(host->data_offset), *pdata++);
1818                 buf = pdata;
1819         }
1820         /* put anything remaining in the part_buf */
1821         if (cnt) {
1822                 dw_mci_set_part_bytes(host, buf, cnt);
1823                 /* Push data if we have reached the expected data length */
1824                 if ((data->bytes_xfered + init_cnt) ==
1825                     (data->blksz * data->blocks))
1826                         mci_writel(host, DATA(host->data_offset),
1827                                    host->part_buf32);
1828         }
1829 }
1830
1831 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1832 {
1833 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1834         if (unlikely((unsigned long)buf & 0x3)) {
1835                 while (cnt >= 4) {
1836                         /* pull data from fifo into aligned buffer */
1837                         u32 aligned_buf[32];
1838                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1839                         int items = len >> 2;
1840                         int i;
1841                         for (i = 0; i < items; ++i)
1842                                 aligned_buf[i] = mci_readl(host,
1843                                                 DATA(host->data_offset));
1844                         /* memcpy from aligned buffer into output buffer */
1845                         memcpy(buf, aligned_buf, len);
1846                         buf += len;
1847                         cnt -= len;
1848                 }
1849         } else
1850 #endif
1851         {
1852                 u32 *pdata = buf;
1853                 for (; cnt >= 4; cnt -= 4)
1854                         *pdata++ = mci_readl(host, DATA(host->data_offset));
1855                 buf = pdata;
1856         }
1857         if (cnt) {
1858                 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1859                 dw_mci_pull_final_bytes(host, buf, cnt);
1860         }
1861 }
1862
1863 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1864 {
1865         struct mmc_data *data = host->data;
1866         int init_cnt = cnt;
1867
1868         /* try and push anything in the part_buf */
1869         if (unlikely(host->part_buf_count)) {
1870                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1871                 buf += len;
1872                 cnt -= len;
1873
1874                 if (host->part_buf_count == 8) {
1875                         mci_writeq(host, DATA(host->data_offset),
1876                                         host->part_buf);
1877                         host->part_buf_count = 0;
1878                 }
1879         }
1880 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1881         if (unlikely((unsigned long)buf & 0x7)) {
1882                 while (cnt >= 8) {
1883                         u64 aligned_buf[16];
1884                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1885                         int items = len >> 3;
1886                         int i;
1887                         /* memcpy from input buffer into aligned buffer */
1888                         memcpy(aligned_buf, buf, len);
1889                         buf += len;
1890                         cnt -= len;
1891                         /* push data from aligned buffer into fifo */
1892                         for (i = 0; i < items; ++i)
1893                                 mci_writeq(host, DATA(host->data_offset),
1894                                                 aligned_buf[i]);
1895                 }
1896         } else
1897 #endif
1898         {
1899                 u64 *pdata = buf;
1900                 for (; cnt >= 8; cnt -= 8)
1901                         mci_writeq(host, DATA(host->data_offset), *pdata++);
1902                 buf = pdata;
1903         }
1904         /* put anything remaining in the part_buf */
1905         if (cnt) {
1906                 dw_mci_set_part_bytes(host, buf, cnt);
1907                 /* Push data if we have reached the expected data length */
1908                 if ((data->bytes_xfered + init_cnt) ==
1909                     (data->blksz * data->blocks))
1910                         mci_writeq(host, DATA(host->data_offset),
1911                                    host->part_buf);
1912         }
1913 }
1914
1915 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1916 {
1917 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1918         if (unlikely((unsigned long)buf & 0x7)) {
1919                 while (cnt >= 8) {
1920                         /* pull data from fifo into aligned buffer */
1921                         u64 aligned_buf[16];
1922                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1923                         int items = len >> 3;
1924                         int i;
1925                         for (i = 0; i < items; ++i)
1926                                 aligned_buf[i] = mci_readq(host,
1927                                                 DATA(host->data_offset));
1928                         /* memcpy from aligned buffer into output buffer */
1929                         memcpy(buf, aligned_buf, len);
1930                         buf += len;
1931                         cnt -= len;
1932                 }
1933         } else
1934 #endif
1935         {
1936                 u64 *pdata = buf;
1937                 for (; cnt >= 8; cnt -= 8)
1938                         *pdata++ = mci_readq(host, DATA(host->data_offset));
1939                 buf = pdata;
1940         }
1941         if (cnt) {
1942                 host->part_buf = mci_readq(host, DATA(host->data_offset));
1943                 dw_mci_pull_final_bytes(host, buf, cnt);
1944         }
1945 }
1946
1947 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1948 {
1949         int len;
1950
1951         /* get remaining partial bytes */
1952         len = dw_mci_pull_part_bytes(host, buf, cnt);
1953         if (unlikely(len == cnt))
1954                 return;
1955         buf += len;
1956         cnt -= len;
1957
1958         /* get the rest of the data */
1959         host->pull_data(host, buf, cnt);
1960 }
1961
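     /*
      * Drain the receive FIFO into the scatterlist under PIO.  Each pass is
      * bounded by the FIFO count from the STATUS register, scaled by the
      * host data width; when dto is set (data-over), keep reading while the
      * FIFO still holds data.  dw_mci_write_data_pio() below mirrors this
      * using the free FIFO space instead.
      */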
1962 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1963 {
1964         struct sg_mapping_iter *sg_miter = &host->sg_miter;
1965         void *buf;
1966         unsigned int offset;
1967         struct mmc_data *data = host->data;
1968         int shift = host->data_shift;
1969         u32 status;
1970         unsigned int len;
1971         unsigned int remain, fcnt;
1972
1973         do {
1974                 if (!sg_miter_next(sg_miter))
1975                         goto done;
1976
1977                 host->sg = sg_miter->piter.sg;
1978                 buf = sg_miter->addr;
1979                 remain = sg_miter->length;
1980                 offset = 0;
1981
1982                 do {
1983                         fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1984                                         << shift) + host->part_buf_count;
1985                         len = min(remain, fcnt);
1986                         if (!len)
1987                                 break;
1988                         dw_mci_pull_data(host, (void *)(buf + offset), len);
1989                         data->bytes_xfered += len;
1990                         offset += len;
1991                         remain -= len;
1992                 } while (remain);
1993
1994                 sg_miter->consumed = offset;
1995                 status = mci_readl(host, MINTSTS);
1996                 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1997         /* if the RXDR is ready, read again */
1998         } while ((status & SDMMC_INT_RXDR) ||
1999                  (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2000
2001         if (!remain) {
2002                 if (!sg_miter_next(sg_miter))
2003                         goto done;
2004                 sg_miter->consumed = 0;
2005         }
2006         sg_miter_stop(sg_miter);
2007         return;
2008
2009 done:
2010         sg_miter_stop(sg_miter);
2011         host->sg = NULL;
2012         smp_wmb();
2013         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2014 }
2015
2016 static void dw_mci_write_data_pio(struct dw_mci *host)
2017 {
2018         struct sg_mapping_iter *sg_miter = &host->sg_miter;
2019         void *buf;
2020         unsigned int offset;
2021         struct mmc_data *data = host->data;
2022         int shift = host->data_shift;
2023         u32 status;
2024         unsigned int len;
2025         unsigned int fifo_depth = host->fifo_depth;
2026         unsigned int remain, fcnt;
2027
2028         do {
2029                 if (!sg_miter_next(sg_miter))
2030                         goto done;
2031
2032                 host->sg = sg_miter->piter.sg;
2033                 buf = sg_miter->addr;
2034                 remain = sg_miter->length;
2035                 offset = 0;
2036
2037                 do {
2038                         fcnt = ((fifo_depth -
2039                                  SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2040                                         << shift) - host->part_buf_count;
2041                         len = min(remain, fcnt);
2042                         if (!len)
2043                                 break;
2044                         host->push_data(host, (void *)(buf + offset), len);
2045                         data->bytes_xfered += len;
2046                         offset += len;
2047                         remain -= len;
2048                 } while (remain);
2049
2050                 sg_miter->consumed = offset;
2051                 status = mci_readl(host, MINTSTS);
2052                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2053         } while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
2054
2055         if (!remain) {
2056                 if (!sg_miter_next(sg_miter))
2057                         goto done;
2058                 sg_miter->consumed = 0;
2059         }
2060         sg_miter_stop(sg_miter);
2061         return;
2062
2063 done:
2064         sg_miter_stop(sg_miter);
2065         host->sg = NULL;
2066         smp_wmb();
2067         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2068 }
2069
2070 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2071 {
2072         if (!host->cmd_status)
2073                 host->cmd_status = status;
2074
2075         smp_wmb();
2076
2077         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2078         tasklet_schedule(&host->tasklet);
2079 }
2080
2081 static void dw_mci_handle_cd(struct dw_mci *host)
2082 {
2083         int i;
2084
2085         for (i = 0; i < host->num_slots; i++) {
2086                 struct dw_mci_slot *slot = host->slot[i];
2087
2088                 if (!slot)
2089                         continue;
2090
2091                 if (slot->mmc->ops->card_event)
2092                         slot->mmc->ops->card_event(slot->mmc);
2093                 mmc_detect_change(slot->mmc,
2094                         msecs_to_jiffies(host->pdata->detect_delay_ms));
2095         }
2096 }
2097
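     /*
      * Top half: each pending source is acknowledged by writing RINTSTS,
      * the raw status is stashed for the tasklet, PIO data is moved inline,
      * and everything else (command/data completion, errors, card detect,
      * SDIO) is handed off to the tasklet or the mmc core.
      */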
2098 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2099 {
2100         struct dw_mci *host = dev_id;
2101         u32 pending;
2102         int i;
2103
2104         pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2105
2106         /*
2107          * DTO fix for version 2.10a and below, applied only when the
2108          * internal DMA controller is configured.
2109          */
2110         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2111                 if (!pending &&
2112                     ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2113                         pending |= SDMMC_INT_DATA_OVER;
2114         }
2115
2116         if (pending) {
2117                 /* Check volt switch first, since it can look like an error */
2118                 if ((host->state == STATE_SENDING_CMD11) &&
2119                     (pending & SDMMC_INT_VOLT_SWITCH)) {
2120                         mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2121                         pending &= ~SDMMC_INT_VOLT_SWITCH;
2122                         dw_mci_cmd_interrupt(host, pending);
2123                 }
2124
2125                 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2126                         mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2127                         host->cmd_status = pending;
2128                         smp_wmb();
2129                         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2130                 }
2131
2132                 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2133                         /* if there is an error, report DATA_ERROR */
2134                         mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2135                         host->data_status = pending;
2136                         smp_wmb();
2137                         set_bit(EVENT_DATA_ERROR, &host->pending_events);
2138                         tasklet_schedule(&host->tasklet);
2139                 }
2140
2141                 if (pending & SDMMC_INT_DATA_OVER) {
2142                         mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2143                         if (!host->data_status)
2144                                 host->data_status = pending;
2145                         smp_wmb();
2146                         if (host->dir_status == DW_MCI_RECV_STATUS) {
2147                                 if (host->sg != NULL)
2148                                         dw_mci_read_data_pio(host, true);
2149                         }
2150                         set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2151                         tasklet_schedule(&host->tasklet);
2152                 }
2153
2154                 if (pending & SDMMC_INT_RXDR) {
2155                         mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2156                         if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2157                                 dw_mci_read_data_pio(host, false);
2158                 }
2159
2160                 if (pending & SDMMC_INT_TXDR) {
2161                         mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2162                         if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2163                                 dw_mci_write_data_pio(host);
2164                 }
2165
2166                 if (pending & SDMMC_INT_CMD_DONE) {
2167                         mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2168                         dw_mci_cmd_interrupt(host, pending);
2169                 }
2170
2171                 if (pending & SDMMC_INT_CD) {
2172                         mci_writel(host, RINTSTS, SDMMC_INT_CD);
2173                         dw_mci_handle_cd(host);
2174                 }
2175
2176                 /* Handle SDIO Interrupts */
2177                 for (i = 0; i < host->num_slots; i++) {
2178                         struct dw_mci_slot *slot = host->slot[i];

                        /* skip slots whose init failed */
                        if (!slot)
                                continue;

2179                         if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2180                                 mci_writel(host, RINTSTS,
2181                                            SDMMC_INT_SDIO(slot->sdio_id));
2182                                 mmc_signal_sdio_irq(slot->mmc);
2183                         }
2184                 }
2185
2186         }
2187
2188 #ifdef CONFIG_MMC_DW_IDMAC
2189         /* Handle DMA interrupts */
2190         if (host->dma_64bit_address == 1) {
2191                 pending = mci_readl(host, IDSTS64);
2192                 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2193                         mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2194                                                         SDMMC_IDMAC_INT_RI);
2195                         mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2196                         host->dma_ops->complete(host);
2197                 }
2198         } else {
2199                 pending = mci_readl(host, IDSTS);
2200                 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2201                         mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2202                                                         SDMMC_IDMAC_INT_RI);
2203                         mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2204                         host->dma_ops->complete(host);
2205                 }
2206         }
2207 #endif
2208
2209         return IRQ_HANDLED;
2210 }
2211
2212 #ifdef CONFIG_OF
2213 /* given a slot id, find out the device node representing that slot */
2214 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2215 {
2216         struct device_node *np;
2217         const __be32 *addr;
2218         int len;
2219
2220         if (!dev || !dev->of_node)
2221                 return NULL;
2222
2223         for_each_child_of_node(dev->of_node, np) {
2224                 addr = of_get_property(np, "reg", &len);
2225                 if (!addr || (len < sizeof(int)))
2226                         continue;
2227                 if (be32_to_cpup(addr) == slot)
2228                         return np;
2229         }
2230         return NULL;
2231 }
2232
2233 static struct dw_mci_of_slot_quirks {
2234         char *quirk;
2235         int id;
2236 } of_slot_quirks[] = {
2237         {
2238                 .quirk  = "disable-wp",
2239                 .id     = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2240         },
2241 };
2242
2243 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2244 {
2245         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2246         int quirks = 0;
2247         int idx;
2248
2249         /* get quirks */
2250         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2251                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2252                         dev_warn(dev, "Slot quirk %s is deprecated\n",
2253                                         of_slot_quirks[idx].quirk);
2254                         quirks |= of_slot_quirks[idx].id;
2255                 }
2256
2257         return quirks;
2258 }
2259 #else /* CONFIG_OF */
2260 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2261 {
2262         return 0;
2263 }
2264 #endif /* CONFIG_OF */
2265
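     /*
      * Allocate and register one mmc_host for slot @id: pick up frequency
      * limits, regulators, capabilities and block-layer limits from the
      * device tree and platform data, then add the host to the mmc core.
      */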
2266 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2267 {
2268         struct mmc_host *mmc;
2269         struct dw_mci_slot *slot;
2270         const struct dw_mci_drv_data *drv_data = host->drv_data;
2271         int ctrl_id, ret;
2272         u32 freq[2];
2273
2274         mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2275         if (!mmc)
2276                 return -ENOMEM;
2277
2278         slot = mmc_priv(mmc);
2279         slot->id = id;
2280         slot->sdio_id = host->sdio_id0 + id;
2281         slot->mmc = mmc;
2282         slot->host = host;
2283         host->slot[id] = slot;
2284
2285         slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2286
2287         mmc->ops = &dw_mci_ops;
2288         if (of_property_read_u32_array(host->dev->of_node,
2289                                        "clock-freq-min-max", freq, 2)) {
2290                 mmc->f_min = DW_MCI_FREQ_MIN;
2291                 mmc->f_max = DW_MCI_FREQ_MAX;
2292         } else {
2293                 mmc->f_min = freq[0];
2294                 mmc->f_max = freq[1];
2295         }
2296
2297         /* if there are external regulators, get them */
2298         ret = mmc_regulator_get_supply(mmc);
2299         if (ret == -EPROBE_DEFER)
2300                 goto err_host_allocated;
2301
2302         if (!mmc->ocr_avail)
2303                 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2304
2305         if (host->pdata->caps)
2306                 mmc->caps = host->pdata->caps;
2307
2308         if (host->pdata->pm_caps)
2309                 mmc->pm_caps = host->pdata->pm_caps;
2310
2311         if (host->dev->of_node) {
2312                 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2313                 if (ctrl_id < 0)
2314                         ctrl_id = 0;
2315         } else {
2316                 ctrl_id = to_platform_device(host->dev)->id;
2317         }
2318         if (drv_data && drv_data->caps)
2319                 mmc->caps |= drv_data->caps[ctrl_id];
2320
2321         if (host->pdata->caps2)
2322                 mmc->caps2 = host->pdata->caps2;
2323
2324         ret = mmc_of_parse(mmc);
2325         if (ret)
2326                 goto err_host_allocated;
2327
2328         if (host->pdata->blk_settings) {
2329                 mmc->max_segs = host->pdata->blk_settings->max_segs;
2330                 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2331                 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2332                 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2333                 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2334         } else {
2335                 /* Useful defaults if platform data is unset. */
2336 #ifdef CONFIG_MMC_DW_IDMAC
2337                 mmc->max_segs = host->ring_size;
2338                 mmc->max_blk_size = 65536;
2339                 mmc->max_seg_size = 0x1000;
2340                 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2341                 mmc->max_blk_count = mmc->max_req_size / 512;
2342 #else
2343                 mmc->max_segs = 64;
2344                 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2345                 mmc->max_blk_count = 512;
2346                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2347                 mmc->max_seg_size = mmc->max_req_size;
2348 #endif /* CONFIG_MMC_DW_IDMAC */
2349         }
2350
2351         if (dw_mci_get_cd(mmc))
2352                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2353         else
2354                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2355
2356         ret = mmc_add_host(mmc);
2357         if (ret)
2358                 goto err_host_allocated;
2359
2360 #if defined(CONFIG_DEBUG_FS)
2361         dw_mci_init_debugfs(slot);
2362 #endif
2363
2364         return 0;
2365
2366 err_host_allocated:
2367         mmc_free_host(mmc);
2368         return ret;
2369 }
2370
2371 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2372 {
2373         /* Debugfs stuff is cleaned up by mmc core */
2374         mmc_remove_host(slot->mmc);
2375         slot->host->slot[id] = NULL;
2376         mmc_free_host(slot->mmc);
2377 }
2378
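     /*
      * Probe for DMA support: the ADDR_CONFIG bit in HCON selects between
      * 32-bit and 64-bit IDMAC descriptors, and a page of coherent memory
      * is set aside for the descriptor list.  If no usable DMA interface is
      * found, the host falls back to PIO.
      */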
2379 static void dw_mci_init_dma(struct dw_mci *host)
2380 {
2381         int addr_config;
2382         /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
2383         addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
2384
2385         if (addr_config == 1) {
2386                 /* host supports IDMAC in 64-bit address mode */
2387                 host->dma_64bit_address = 1;
2388                 dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
2389                 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2390                         dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
2391         } else {
2392                 /* host supports IDMAC in 32-bit address mode */
2393                 host->dma_64bit_address = 0;
2394                 dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
2395         }
2396
2397         /* Alloc memory for sg translation */
2398         host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2399                                           &host->sg_dma, GFP_KERNEL);
2400         if (!host->sg_cpu) {
2401                 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2402                         __func__);
2403                 goto no_dma;
2404         }
2405
2406         /* Determine which DMA interface to use */
2407 #ifdef CONFIG_MMC_DW_IDMAC
2408         host->dma_ops = &dw_mci_idmac_ops;
2409         dev_info(host->dev, "Using internal DMA controller.\n");
2410 #endif
2411
2412         if (!host->dma_ops)
2413                 goto no_dma;
2414
2415         if (host->dma_ops->init && host->dma_ops->start &&
2416             host->dma_ops->stop && host->dma_ops->cleanup) {
2417                 if (host->dma_ops->init(host)) {
2418                         dev_err(host->dev,
2419                                 "%s: Unable to initialize DMA Controller.\n",
                                __func__);
2420                         goto no_dma;
2421                 }
2422         } else {
2423                 dev_err(host->dev, "DMA initialization not found.\n");
2424                 goto no_dma;
2425         }
2426
2427         host->use_dma = 1;
2428         return;
2429
2430 no_dma:
2431         dev_info(host->dev, "Using PIO mode.\n");
2432         host->use_dma = 0;
2433         return;
2434 }
2435
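     /*
      * Set the requested reset bits in CTRL and poll for up to 500ms for
      * the hardware to clear them again; returns false on timeout.
      */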
2436 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2437 {
2438         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2439         u32 ctrl;
2440
2441         ctrl = mci_readl(host, CTRL);
2442         ctrl |= reset;
2443         mci_writel(host, CTRL, ctrl);
2444
2445         /* wait till resets clear */
2446         do {
2447                 ctrl = mci_readl(host, CTRL);
2448                 if (!(ctrl & reset))
2449                         return true;
2450         } while (time_before(jiffies, timeout));
2451
2452         dev_err(host->dev,
2453                 "Timeout resetting block (ctrl reset %#x)\n",
2454                 ctrl & reset);
2455
2456         return false;
2457 }
2458
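     /*
      * Full reset of the controller and FIFO (plus the DMA interface when
      * DMA is in use), followed by the clock update command that the CIU
      * requires after any CTRL reset.
      */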
2459 static bool dw_mci_reset(struct dw_mci *host)
2460 {
2461         u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
2462         bool ret = false;
2463
2464         /*
2465          * Resetting generates a block interrupt, so clear the
2466          * scatter-gather pointer first.
2467          */
2468         if (host->sg) {
2469                 sg_miter_stop(&host->sg_miter);
2470                 host->sg = NULL;
2471         }
2472
2473         if (host->use_dma)
2474                 flags |= SDMMC_CTRL_DMA_RESET;
2475
2476         if (dw_mci_ctrl_reset(host, flags)) {
2477                 /*
2478                  * In all cases we clear the RAWINTS register to clear any
2479                  * interrupts.
2480                  */
2481                 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2482
2483                 /* if using dma we wait for dma_req to clear */
2484                 if (host->use_dma) {
2485                         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2486                         u32 status;
2487                         do {
2488                                 status = mci_readl(host, STATUS);
2489                                 if (!(status & SDMMC_STATUS_DMA_REQ))
2490                                         break;
2491                                 cpu_relax();
2492                         } while (time_before(jiffies, timeout));
2493
2494                         if (status & SDMMC_STATUS_DMA_REQ) {
2495                                 dev_err(host->dev,
2496                                         "%s: Timeout waiting for dma_req to clear during reset\n",
2497                                         __func__);
2498                                 goto ciu_out;
2499                         }
2500
2501                         /* when using DMA, next reset the FIFO again */
2502                         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
2503                                 goto ciu_out;
2504                 }
2505         } else {
2506                 /* if the controller reset bit did clear, then set clock regs */
2507                 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
2508                         dev_err(host->dev,
2509                                 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
2510                                 __func__);
2511                         goto ciu_out;
2512                 }
2513         }
2514
2515 #if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
2516         /* It is also recommended that we reset and reprogram idmac */
2517         dw_mci_idmac_reset(host);
2518 #endif
2519
2520         ret = true;
2521
2522 ciu_out:
2523         /* After a CTRL reset, the CIU needs its clock registers updated */
2524         mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
2525
2526         return ret;
2527 }
2528
2529 #ifdef CONFIG_OF
2530 static struct dw_mci_of_quirks {
2531         char *quirk;
2532         int id;
2533 } of_quirks[] = {
2534         {
2535                 .quirk  = "broken-cd",
2536                 .id     = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2537         }, {
2538                 .quirk  = "disable-wp",
2539                 .id     = DW_MCI_QUIRK_NO_WRITE_PROTECT,
2540         },
2541 };
2542
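     /* Build a dw_mci_board from the device tree properties of host->dev. */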
2543 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2544 {
2545         struct dw_mci_board *pdata;
2546         struct device *dev = host->dev;
2547         struct device_node *np = dev->of_node;
2548         const struct dw_mci_drv_data *drv_data = host->drv_data;
2549         int idx, ret;
2550         u32 clock_frequency;
2551
2552         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2553         if (!pdata)
2554                 return ERR_PTR(-ENOMEM);
2555
2556         /* find out number of slots supported */
2557         if (of_property_read_u32(dev->of_node, "num-slots",
2558                                 &pdata->num_slots)) {
2559                 dev_info(dev,
2560                          "num-slots property not found, assuming 1 slot is available\n");
2561                 pdata->num_slots = 1;
2562         }
2563
2564         /* get quirks */
2565         for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2566                 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2567                         pdata->quirks |= of_quirks[idx].id;
2568
2569         if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2570                 dev_info(dev,
2571                          "fifo-depth property not found, using value of FIFOTH register as default\n");
2572
2573         of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2574
2575         if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2576                 pdata->bus_hz = clock_frequency;
2577
2578         if (drv_data && drv_data->parse_dt) {
2579                 ret = drv_data->parse_dt(host);
2580                 if (ret)
2581                         return ERR_PTR(ret);
2582         }
2583
2584         if (of_find_property(np, "supports-highspeed", NULL))
2585                 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2586
2587         return pdata;
2588 }
2589
2590 #else /* CONFIG_OF */
2591 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2592 {
2593         return ERR_PTR(-EINVAL);
2594 }
2595 #endif /* CONFIG_OF */
2596
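     /*
      * Common probe path shared by the platform/PCI glue drivers: parse the
      * device tree, enable the biu/ciu clocks, determine the host data
      * width from HCON, reset the controller, set up DMA and interrupts,
      * and finally register each slot.
      */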
2597 int dw_mci_probe(struct dw_mci *host)
2598 {
2599         const struct dw_mci_drv_data *drv_data = host->drv_data;
2600         int width, i, ret = 0;
2601         u32 fifo_size;
2602         int init_slots = 0;
2603
2604         if (!host->pdata) {
2605                 host->pdata = dw_mci_parse_dt(host);
2606                 if (IS_ERR(host->pdata)) {
2607                         dev_err(host->dev, "platform data not available\n");
2608                         return -EINVAL;
2609                 }
2610         }
2611
2612         if (host->pdata->num_slots > 1) {
2613                 dev_err(host->dev,
2614                         "num_slots > 1 is not supported\n");
2615                 return -ENODEV;
2616         }
2617
2618         host->biu_clk = devm_clk_get(host->dev, "biu");
2619         if (IS_ERR(host->biu_clk)) {
2620                 dev_dbg(host->dev, "biu clock not available\n");
2621         } else {
2622                 ret = clk_prepare_enable(host->biu_clk);
2623                 if (ret) {
2624                         dev_err(host->dev, "failed to enable biu clock\n");
2625                         return ret;
2626                 }
2627         }
2628
2629         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2630         if (IS_ERR(host->ciu_clk)) {
2631                 dev_dbg(host->dev, "ciu clock not available\n");
2632                 host->bus_hz = host->pdata->bus_hz;
2633         } else {
2634                 ret = clk_prepare_enable(host->ciu_clk);
2635                 if (ret) {
2636                         dev_err(host->dev, "failed to enable ciu clock\n");
2637                         goto err_clk_biu;
2638                 }
2639
2640                 if (host->pdata->bus_hz) {
2641                         ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2642                         if (ret)
2643                                 dev_warn(host->dev,
2644                                          "Unable to set bus rate to %uHz\n",
2645                                          host->pdata->bus_hz);
2646                 }
2647                 host->bus_hz = clk_get_rate(host->ciu_clk);
2648         }
2649
2650         if (!host->bus_hz) {
2651                 dev_err(host->dev,
2652                         "Platform data must supply bus speed\n");
2653                 ret = -ENODEV;
2654                 goto err_clk_ciu;
2655         }
2656
2657         if (drv_data && drv_data->init) {
2658                 ret = drv_data->init(host);
2659                 if (ret) {
2660                         dev_err(host->dev,
2661                                 "implementation specific init failed\n");
2662                         goto err_clk_ciu;
2663                 }
2664         }
2665
2666         if (drv_data && drv_data->setup_clock) {
2667                 ret = drv_data->setup_clock(host);
2668                 if (ret) {
2669                         dev_err(host->dev,
2670                                 "implementation specific clock setup failed\n");
2671                         goto err_clk_ciu;
2672                 }
2673         }
2674
2675         host->quirks = host->pdata->quirks;
2676
2677         spin_lock_init(&host->lock);
2678         spin_lock_init(&host->irq_lock);
2679         INIT_LIST_HEAD(&host->queue);
2680
2681         /*
2682          * Get the host data width - this assumes that HCON has been set with
2683          * the correct values.
2684          */
2685         i = (mci_readl(host, HCON) >> 7) & 0x7;
2686         if (!i) {
2687                 host->push_data = dw_mci_push_data16;
2688                 host->pull_data = dw_mci_pull_data16;
2689                 width = 16;
2690                 host->data_shift = 1;
2691         } else if (i == 2) {
2692                 host->push_data = dw_mci_push_data64;
2693                 host->pull_data = dw_mci_pull_data64;
2694                 width = 64;
2695                 host->data_shift = 3;
2696         } else {
2697                 /* Check for a reserved value, and warn if one is found */
2698                 WARN((i != 1),
2699                      "HCON reports a reserved host data width!\n"
2700                      "Defaulting to 32-bit access.\n");
2701                 host->push_data = dw_mci_push_data32;
2702                 host->pull_data = dw_mci_pull_data32;
2703                 width = 32;
2704                 host->data_shift = 2;
2705         }
2706
2707         /* Reset all blocks */
2708         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
2709                 ret = -ENODEV;
                goto err_clk_ciu;
        }
2710
2711         host->dma_ops = host->pdata->dma_ops;
2712         dw_mci_init_dma(host);
2713
2714         /* Clear the interrupts for the host controller */
2715         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2716         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2717
2718         /* Put in max timeout */
2719         mci_writel(host, TMOUT, 0xFFFFFFFF);
2720
2721         /*
2722          * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
2723          * TX mark = fifo_size / 2, DMA size = 8.
2724          */
2725         if (!host->pdata->fifo_depth) {
2726                 /*
2727                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2728                  * have been overwritten by the bootloader, just like we're
2729                  * about to do, so if you know the value for your hardware, you
2730                  * should put it in the platform data.
2731                  */
2732                 fifo_size = mci_readl(host, FIFOTH);
2733                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2734         } else {
2735                 fifo_size = host->pdata->fifo_depth;
2736         }
2737         host->fifo_depth = fifo_size;
2738         host->fifoth_val =
2739                 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2740         mci_writel(host, FIFOTH, host->fifoth_val);
2741
2742         /* disable clock to CIU */
2743         mci_writel(host, CLKENA, 0);
2744         mci_writel(host, CLKSRC, 0);
2745
2746         /*
2747          * The 2.40a spec changed the data offset, so check the version
2748          * ID and set the DATA register offset accordingly.
2749          */
2750         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2751         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2752
2753         if (host->verid < DW_MMC_240A)
2754                 host->data_offset = DATA_OFFSET;
2755         else
2756                 host->data_offset = DATA_240A_OFFSET;
2757
2758         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2759         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2760                                host->irq_flags, "dw-mci", host);
2761         if (ret)
2762                 goto err_dmaunmap;
2763
2764         if (host->pdata->num_slots)
2765                 host->num_slots = host->pdata->num_slots;
2766         else
2767                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2768
2769         /*
2770          * Enable interrupts for command done, data over, data empty, card
2771          * detect, receive ready, and errors (timeouts, CRC errors, etc.)
2772          */
2773         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2774         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2775                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2776                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2777         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2778
2779         dev_info(host->dev,
2780                  "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
2781                  host->irq, width, fifo_size);
2783
2784         /* We need at least one slot to succeed */
2785         for (i = 0; i < host->num_slots; i++) {
2786                 ret = dw_mci_init_slot(host, i);
2787                 if (ret)
2788                         dev_dbg(host->dev, "slot %d init failed\n", i);
2789                 else
2790                         init_slots++;
2791         }
2792
2793         if (init_slots) {
2794                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2795         } else {
2796                 dev_dbg(host->dev,
2797                         "attempted to initialize %d slots, but failed on all\n",
                        host->num_slots);
2798                 goto err_dmaunmap;
2799         }
2800
2801         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2802                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2803
2804         return 0;
2805
2806 err_dmaunmap:
2807         if (host->use_dma && host->dma_ops->exit)
2808                 host->dma_ops->exit(host);
2809
2810 err_clk_ciu:
2811         if (!IS_ERR(host->ciu_clk))
2812                 clk_disable_unprepare(host->ciu_clk);
2813
2814 err_clk_biu:
2815         if (!IS_ERR(host->biu_clk))
2816                 clk_disable_unprepare(host->biu_clk);
2817
2818         return ret;
2819 }
2820 EXPORT_SYMBOL(dw_mci_probe);
2821
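     /* Tear down in the reverse order of dw_mci_probe(). */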
2822 void dw_mci_remove(struct dw_mci *host)
2823 {
2824         int i;
2825
2826         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2827         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2828
2829         for (i = 0; i < host->num_slots; i++) {
2830                 dev_dbg(host->dev, "remove slot %d\n", i);
2831                 if (host->slot[i])
2832                         dw_mci_cleanup_slot(host->slot[i], i);
2833         }
2834
2835         /* disable clock to CIU */
2836         mci_writel(host, CLKENA, 0);
2837         mci_writel(host, CLKSRC, 0);
2838
2839         if (host->use_dma && host->dma_ops->exit)
2840                 host->dma_ops->exit(host);
2841
2842         if (!IS_ERR(host->ciu_clk))
2843                 clk_disable_unprepare(host->ciu_clk);
2844
2845         if (!IS_ERR(host->biu_clk))
2846                 clk_disable_unprepare(host->biu_clk);
2847 }
2848 EXPORT_SYMBOL(dw_mci_remove);
2849
2852 #ifdef CONFIG_PM_SLEEP
2853 /*
2854  * TODO: we should probably disable the clock to the card in the suspend path.
2855  */
2856 int dw_mci_suspend(struct dw_mci *host)
2857 {
2858         return 0;
2859 }
2860 EXPORT_SYMBOL(dw_mci_suspend);
2861
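     /*
      * Bring the controller back up after a system suspend: reset it,
      * re-initialize DMA, restore FIFOTH and the interrupt mask, and
      * reprogram any slot that kept power across the suspend.
      */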
2862 int dw_mci_resume(struct dw_mci *host)
2863 {
2864         int i;
2865
2866         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
2867                 return -ENODEV;
2870
2871         if (host->use_dma && host->dma_ops->init)
2872                 host->dma_ops->init(host);
2873
2874         /*
2875          * Restore the initial value of the FIFOTH register
2876          * and invalidate prev_blksz by zeroing it.
2877          */
2878         mci_writel(host, FIFOTH, host->fifoth_val);
2879         host->prev_blksz = 0;
2880
2881         /* Put in max timeout */
2882         mci_writel(host, TMOUT, 0xFFFFFFFF);
2883
2884         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2885         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2886                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2887                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2888         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2889
2890         for (i = 0; i < host->num_slots; i++) {
2891                 struct dw_mci_slot *slot = host->slot[i];
2892                 if (!slot)
2893                         continue;
2894                 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2895                         dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2896                         dw_mci_setup_bus(slot, true);
2897                 }
2898         }
2899         return 0;
2900 }
2901 EXPORT_SYMBOL(dw_mci_resume);
2902 #endif /* CONFIG_PM_SLEEP */
2903
2904 static int __init dw_mci_init(void)
2905 {
2906         pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
2907         return 0;
2908 }
2909
2910 static void __exit dw_mci_exit(void)
2911 {
2912 }
2913
2914 module_init(dw_mci_init);
2915 module_exit(dw_mci_exit);
2916
2917 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2918 MODULE_AUTHOR("NXP Semiconductor VietNam");
2919 MODULE_AUTHOR("Imagination Technologies Ltd");
2920 MODULE_LICENSE("GPL v2");