2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * Thanks to the following companies for their support:
13 * - JMicron (hardware and technical support)
16 #include <linux/delay.h>
17 #include <linux/highmem.h>
19 #include <linux/module.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/slab.h>
22 #include <linux/scatterlist.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
26 #include <linux/leds.h>
28 #include <linux/mmc/mmc.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/sdio.h>
32 #include <linux/mmc/slot-gpio.h>
36 #define DRIVER_NAME "sdhci"
38 #define DBG(f, x...) \
39 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
41 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
42 defined(CONFIG_MMC_SDHCI_MODULE))
43 #define SDHCI_USE_LEDS_CLASS
46 #define MAX_TUNING_LOOP 40
48 static unsigned int debug_quirks = 0;
49 static unsigned int debug_quirks2;
51 static void sdhci_finish_data(struct sdhci_host *);
53 static void sdhci_finish_command(struct sdhci_host *);
54 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
55 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
56 static int sdhci_do_get_cd(struct sdhci_host *host);
59 static int sdhci_runtime_pm_get(struct sdhci_host *host);
60 static int sdhci_runtime_pm_put(struct sdhci_host *host);
61 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
62 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
64 static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
68 static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
72 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
75 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
80 static void sdhci_dumpregs(struct sdhci_host *host)
82 pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
83 mmc_hostname(host->mmc));
85 pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
86 sdhci_readl(host, SDHCI_DMA_ADDRESS),
87 sdhci_readw(host, SDHCI_HOST_VERSION));
88 pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
89 sdhci_readw(host, SDHCI_BLOCK_SIZE),
90 sdhci_readw(host, SDHCI_BLOCK_COUNT));
91 pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
92 sdhci_readl(host, SDHCI_ARGUMENT),
93 sdhci_readw(host, SDHCI_TRANSFER_MODE));
94 pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
95 sdhci_readl(host, SDHCI_PRESENT_STATE),
96 sdhci_readb(host, SDHCI_HOST_CONTROL));
97 pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
98 sdhci_readb(host, SDHCI_POWER_CONTROL),
99 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
100 pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
101 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
102 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
103 pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
104 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
105 sdhci_readl(host, SDHCI_INT_STATUS));
106 pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
107 sdhci_readl(host, SDHCI_INT_ENABLE),
108 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
109 pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
110 sdhci_readw(host, SDHCI_ACMD12_ERR),
111 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
112 pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
113 sdhci_readl(host, SDHCI_CAPABILITIES),
114 sdhci_readl(host, SDHCI_CAPABILITIES_1));
115 pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
116 sdhci_readw(host, SDHCI_COMMAND),
117 sdhci_readl(host, SDHCI_MAX_CURRENT));
118 pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
119 sdhci_readw(host, SDHCI_HOST_CONTROL2));
121 if (host->flags & SDHCI_USE_ADMA) {
122 if (host->flags & SDHCI_USE_64_BIT_DMA)
123 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
124 readl(host->ioaddr + SDHCI_ADMA_ERROR),
125 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
126 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
128 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
129 readl(host->ioaddr + SDHCI_ADMA_ERROR),
130 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
133 pr_debug(DRIVER_NAME ": ===========================================\n");
136 /*****************************************************************************\
138 * Low level functions *
140 \*****************************************************************************/
142 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
146 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
147 (host->mmc->caps & MMC_CAP_NONREMOVABLE))
151 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
154 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
155 SDHCI_INT_CARD_INSERT;
157 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
160 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
161 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
164 static void sdhci_enable_card_detection(struct sdhci_host *host)
166 sdhci_set_card_detection(host, true);
169 static void sdhci_disable_card_detection(struct sdhci_host *host)
171 sdhci_set_card_detection(host, false);
174 void sdhci_reset(struct sdhci_host *host, u8 mask)
176 unsigned long timeout;
178 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
180 if (mask & SDHCI_RESET_ALL) {
182 /* Reset-all turns off SD Bus Power */
183 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
184 sdhci_runtime_pm_bus_off(host);
187 /* Wait max 100 ms */
190 /* hw clears the bit when it's done */
191 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
193 pr_err("%s: Reset 0x%x never completed.\n",
194 mmc_hostname(host->mmc), (int)mask);
195 sdhci_dumpregs(host);
202 EXPORT_SYMBOL_GPL(sdhci_reset);
204 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
206 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
207 if (!sdhci_do_get_cd(host))
211 host->ops->reset(host, mask);
213 if (mask & SDHCI_RESET_ALL) {
214 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
215 if (host->ops->enable_dma)
216 host->ops->enable_dma(host);
219 /* Resetting the controller clears many settings */
220 host->preset_enabled = false;
224 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
226 static void sdhci_init(struct sdhci_host *host, int soft)
229 sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
231 sdhci_do_reset(host, SDHCI_RESET_ALL);
233 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
234 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
235 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
236 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
239 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
240 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
243 /* force clock reconfiguration */
245 sdhci_set_ios(host->mmc, &host->mmc->ios);
249 static void sdhci_reinit(struct sdhci_host *host)
252 sdhci_enable_card_detection(host);
255 static void sdhci_activate_led(struct sdhci_host *host)
259 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
260 ctrl |= SDHCI_CTRL_LED;
261 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
264 static void sdhci_deactivate_led(struct sdhci_host *host)
268 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
269 ctrl &= ~SDHCI_CTRL_LED;
270 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
273 #ifdef SDHCI_USE_LEDS_CLASS
274 static void sdhci_led_control(struct led_classdev *led,
275 enum led_brightness brightness)
277 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
280 spin_lock_irqsave(&host->lock, flags);
282 if (host->runtime_suspended)
285 if (brightness == LED_OFF)
286 sdhci_deactivate_led(host);
288 sdhci_activate_led(host);
290 spin_unlock_irqrestore(&host->lock, flags);
294 /*****************************************************************************\
298 \*****************************************************************************/
300 static void sdhci_read_block_pio(struct sdhci_host *host)
303 size_t blksize, len, chunk;
304 u32 uninitialized_var(scratch);
307 DBG("PIO reading\n");
309 blksize = host->data->blksz;
312 local_irq_save(flags);
315 BUG_ON(!sg_miter_next(&host->sg_miter));
317 len = min(host->sg_miter.length, blksize);
320 host->sg_miter.consumed = len;
322 buf = host->sg_miter.addr;
326 scratch = sdhci_readl(host, SDHCI_BUFFER);
330 *buf = scratch & 0xFF;
339 sg_miter_stop(&host->sg_miter);
341 local_irq_restore(flags);
344 static void sdhci_write_block_pio(struct sdhci_host *host)
347 size_t blksize, len, chunk;
351 DBG("PIO writing\n");
353 blksize = host->data->blksz;
357 local_irq_save(flags);
360 BUG_ON(!sg_miter_next(&host->sg_miter));
362 len = min(host->sg_miter.length, blksize);
365 host->sg_miter.consumed = len;
367 buf = host->sg_miter.addr;
370 scratch |= (u32)*buf << (chunk * 8);
376 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
377 sdhci_writel(host, scratch, SDHCI_BUFFER);
384 sg_miter_stop(&host->sg_miter);
386 local_irq_restore(flags);
389 static void sdhci_transfer_pio(struct sdhci_host *host)
395 if (host->blocks == 0)
398 if (host->data->flags & MMC_DATA_READ)
399 mask = SDHCI_DATA_AVAILABLE;
401 mask = SDHCI_SPACE_AVAILABLE;
404 * Some controllers (JMicron JMB38x) mess up the buffer bits
405 * for transfers < 4 bytes. As long as it is just one block,
406 * we can ignore the bits.
408 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
409 (host->data->blocks == 1))
412 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
413 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
416 if (host->data->flags & MMC_DATA_READ)
417 sdhci_read_block_pio(host);
419 sdhci_write_block_pio(host);
422 if (host->blocks == 0)
426 DBG("PIO transfer complete.\n");
429 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
430 struct mmc_data *data, int cookie)
434 if (data->host_cookie == COOKIE_MAPPED) {
435 data->host_cookie = COOKIE_GIVEN;
436 return data->sg_count;
439 WARN_ON(data->host_cookie == COOKIE_GIVEN);
441 sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
442 data->flags & MMC_DATA_WRITE ?
443 DMA_TO_DEVICE : DMA_FROM_DEVICE);
448 data->sg_count = sg_count;
449 data->host_cookie = cookie;
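/*
 * Note on the cookie handling above: host_cookie tracks the mapping state
 * across ->pre_req(), sdhci_prepare_data() and ->post_req(). COOKIE_UNMAPPED
 * means no DMA mapping exists, COOKIE_MAPPED means this function created one,
 * and COOKIE_GIVEN means a pre-existing mapping has been handed over to the
 * current request.
 */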
454 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
456 local_irq_save(*flags);
457 return kmap_atomic(sg_page(sg)) + sg->offset;
460 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
462 kunmap_atomic(buffer);
463 local_irq_restore(*flags);
466 static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
467 dma_addr_t addr, int len, unsigned cmd)
469 struct sdhci_adma2_64_desc *dma_desc = desc;
471 /* 32-bit and 64-bit descriptors have these members in same position */
472 dma_desc->cmd = cpu_to_le16(cmd);
473 dma_desc->len = cpu_to_le16(len);
474 dma_desc->addr_lo = cpu_to_le32((u32)addr);
476 if (host->flags & SDHCI_USE_64_BIT_DMA)
477 dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
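/*
 * Illustrative example (assuming the usual ADMA2 attribute encoding where a
 * valid "transfer data" descriptor is 0x21): describing 512 bytes at DMA
 * address 0x12345678 produces cmd = 0x0021, len = 0x0200 and
 * addr_lo = 0x12345678, all stored little-endian.
 */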
480 static void sdhci_adma_mark_end(void *desc)
482 struct sdhci_adma2_64_desc *dma_desc = desc;
484 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
485 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
488 static void sdhci_adma_table_pre(struct sdhci_host *host,
489 struct mmc_data *data, int sg_count)
491 struct scatterlist *sg;
493 dma_addr_t addr, align_addr;
499 * The spec does not specify endianness of descriptor table.
500 * We currently guess that it is LE.
503 host->sg_count = sg_count;
505 desc = host->adma_table;
506 align = host->align_buffer;
508 align_addr = host->align_addr;
510 for_each_sg(data->sg, sg, host->sg_count, i) {
511 addr = sg_dma_address(sg);
512 len = sg_dma_len(sg);
515 * The SDHCI specification states that ADMA addresses must
516 * be 32-bit aligned. If they aren't, then we use a bounce
517 * buffer for the (up to three) bytes that screw up the alignment.
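/*
 * For example, a buffer starting at DMA address 0x1003 gives offset = 1: that
 * single leading byte goes through the bounce buffer and the main descriptor
 * then starts at the aligned address 0x1004.
 */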
520 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
523 if (data->flags & MMC_DATA_WRITE) {
524 buffer = sdhci_kmap_atomic(sg, &flags);
525 memcpy(align, buffer, offset);
526 sdhci_kunmap_atomic(buffer, &flags);
530 sdhci_adma_write_desc(host, desc, align_addr, offset,
533 BUG_ON(offset > 65536);
535 align += SDHCI_ADMA2_ALIGN;
536 align_addr += SDHCI_ADMA2_ALIGN;
538 desc += host->desc_sz;
548 sdhci_adma_write_desc(host, desc, addr, len,
550 desc += host->desc_sz;
554 * If this triggers then we have a calculation bug in this code.
557 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
560 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
561 /* Mark the last descriptor as the terminating descriptor */
562 if (desc != host->adma_table) {
563 desc -= host->desc_sz;
564 sdhci_adma_mark_end(desc);
567 /* Add a terminating entry - nop, end, valid */
568 sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
572 static void sdhci_adma_table_post(struct sdhci_host *host,
573 struct mmc_data *data)
575 struct scatterlist *sg;
581 if (data->flags & MMC_DATA_READ) {
582 bool has_unaligned = false;
584 /* Do a quick scan of the SG list for any unaligned mappings */
585 for_each_sg(data->sg, sg, host->sg_count, i)
586 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
587 has_unaligned = true;
592 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
593 data->sg_len, DMA_FROM_DEVICE);
595 align = host->align_buffer;
597 for_each_sg(data->sg, sg, host->sg_count, i) {
598 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
599 size = SDHCI_ADMA2_ALIGN -
600 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
602 buffer = sdhci_kmap_atomic(sg, &flags);
603 memcpy(buffer, align, size);
604 sdhci_kunmap_atomic(buffer, &flags);
606 align += SDHCI_ADMA2_ALIGN;
613 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
616 struct mmc_data *data = cmd->data;
617 unsigned target_timeout, current_timeout;
620 * If the host controller provides us with an incorrect timeout
621 * value, just skip the check and use 0xE. The hardware may take
622 * longer to time out, but that's much better than having a too-short timeout value.
625 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
628 /* Unspecified timeout, assume max */
629 if (!data && !cmd->busy_timeout)
634 target_timeout = cmd->busy_timeout * 1000;
636 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
637 if (host->clock && data->timeout_clks) {
638 unsigned long long val;
641 * data->timeout_clks is in units of clock cycles.
642 * host->clock is in Hz. target_timeout is in us.
643 * Hence, us = 1000000 * cycles / Hz. Round up.
645 val = 1000000 * data->timeout_clks;
646 if (do_div(val, host->clock))
648 target_timeout += val;
653 * Figure out needed cycles.
654 * We do this in steps in order to fit inside a 32 bit int.
655 * The first step is the minimum timeout, which will have a
656 * minimum resolution of 6 bits:
657 * (1) 2^13*1000 > 2^22,
658 * (2) host->timeout_clk < 2^16, hence (1) / (2) > 2^6.
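/*
 * Worked example (illustrative values): with host->timeout_clk = 50000 kHz
 * and a 100 ms target, current_timeout starts at 8192 * 1000 / 50000,
 * roughly 163 us, and doubles once per increment of count, so count reaches
 * 10 before current_timeout exceeds 100000 us.
 */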
663 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
664 while (current_timeout < target_timeout) {
666 current_timeout <<= 1;
672 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
673 mmc_hostname(host->mmc), count, cmd->opcode);
680 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
682 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
683 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
685 if (host->flags & SDHCI_REQ_USE_DMA)
686 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
688 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
690 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
691 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
694 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
698 if (host->ops->set_timeout) {
699 host->ops->set_timeout(host, cmd);
701 count = sdhci_calc_timeout(host, cmd);
702 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
706 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
709 struct mmc_data *data = cmd->data;
713 if (data || (cmd->flags & MMC_RSP_BUSY))
714 sdhci_set_timeout(host, cmd);
720 BUG_ON(data->blksz * data->blocks > 524288);
721 BUG_ON(data->blksz > host->mmc->max_blk_size);
722 BUG_ON(data->blocks > 65535);
725 host->data_early = 0;
726 host->data->bytes_xfered = 0;
728 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
729 host->flags |= SDHCI_REQ_USE_DMA;
732 * FIXME: This doesn't account for merging when mapping the scatterlist.
735 if (host->flags & SDHCI_REQ_USE_DMA) {
737 struct scatterlist *sg;
740 if (host->flags & SDHCI_USE_ADMA) {
741 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
744 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
748 if (unlikely(broken)) {
749 for_each_sg(data->sg, sg, data->sg_len, i) {
750 if (sg->length & 0x3) {
751 DBG("Reverting to PIO because of transfer size (%d)\n",
753 host->flags &= ~SDHCI_REQ_USE_DMA;
761 * The assumption here being that alignment is the same after
762 * translation to device address space.
764 if (host->flags & SDHCI_REQ_USE_DMA) {
766 struct scatterlist *sg;
769 if (host->flags & SDHCI_USE_ADMA) {
771 * As we use 3 byte chunks to work around
772 * alignment problems, we need to check this quirk.
775 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
778 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
782 if (unlikely(broken)) {
783 for_each_sg(data->sg, sg, data->sg_len, i) {
784 if (sg->offset & 0x3) {
785 DBG("Reverting to PIO because of bad alignment\n");
786 host->flags &= ~SDHCI_REQ_USE_DMA;
793 if (host->flags & SDHCI_REQ_USE_DMA) {
794 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
798 * This only happens when someone fed
799 * us an invalid request.
802 host->flags &= ~SDHCI_REQ_USE_DMA;
803 } else if (host->flags & SDHCI_USE_ADMA) {
804 sdhci_adma_table_pre(host, data, sg_cnt);
806 sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
807 if (host->flags & SDHCI_USE_64_BIT_DMA)
809 (u64)host->adma_addr >> 32,
810 SDHCI_ADMA_ADDRESS_HI);
812 WARN_ON(sg_cnt != 1);
813 sdhci_writel(host, sg_dma_address(data->sg),
819 * Always adjust the DMA selection as some controllers
820 * (e.g. JMicron) can't do PIO properly when the selection is ADMA.
823 if (host->version >= SDHCI_SPEC_200) {
824 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
825 ctrl &= ~SDHCI_CTRL_DMA_MASK;
826 if ((host->flags & SDHCI_REQ_USE_DMA) &&
827 (host->flags & SDHCI_USE_ADMA)) {
828 if (host->flags & SDHCI_USE_64_BIT_DMA)
829 ctrl |= SDHCI_CTRL_ADMA64;
831 ctrl |= SDHCI_CTRL_ADMA32;
833 ctrl |= SDHCI_CTRL_SDMA;
835 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
838 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
841 flags = SG_MITER_ATOMIC;
842 if (host->data->flags & MMC_DATA_READ)
843 flags |= SG_MITER_TO_SG;
845 flags |= SG_MITER_FROM_SG;
846 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
847 host->blocks = data->blocks;
850 sdhci_set_transfer_irqs(host);
852 /* Set the DMA boundary value and block size */
853 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
854 data->blksz), SDHCI_BLOCK_SIZE);
855 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
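/*
 * For instance, with the default SDMA boundary argument of 7 (512 KiB) and
 * 512-byte blocks, the value written to SDHCI_BLOCK_SIZE above is 0x7200:
 * the boundary code lands in bits 14:12 and the block size in bits 11:0.
 */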
858 static void sdhci_set_transfer_mode(struct sdhci_host *host,
859 struct mmc_command *cmd)
862 struct mmc_data *data = cmd->data;
866 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
867 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
869 /* clear Auto CMD settings for no data CMDs */
870 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
871 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
872 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
877 WARN_ON(!host->data);
879 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
880 mode = SDHCI_TRNS_BLK_CNT_EN;
882 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
883 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
885 * If we are sending CMD23, CMD12 never gets sent
886 * on successful completion (so no Auto-CMD12).
888 if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
889 (cmd->opcode != SD_IO_RW_EXTENDED))
890 mode |= SDHCI_TRNS_AUTO_CMD12;
891 else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
892 mode |= SDHCI_TRNS_AUTO_CMD23;
893 sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
897 if (data->flags & MMC_DATA_READ)
898 mode |= SDHCI_TRNS_READ;
899 if (host->flags & SDHCI_REQ_USE_DMA)
900 mode |= SDHCI_TRNS_DMA;
902 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
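/*
 * Example (assuming the standard transfer mode bit layout): a multi-block
 * DMA read using Auto-CMD12 programs BLK_CNT_EN | MULTI | AUTO_CMD12 |
 * READ | DMA, i.e. mode = 0x37.
 */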
905 static void sdhci_finish_data(struct sdhci_host *host)
907 struct mmc_data *data;
914 if (host->flags & SDHCI_REQ_USE_DMA) {
915 if (host->flags & SDHCI_USE_ADMA)
916 sdhci_adma_table_post(host, data);
918 if (data->host_cookie == COOKIE_MAPPED) {
919 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
920 (data->flags & MMC_DATA_READ) ?
921 DMA_FROM_DEVICE : DMA_TO_DEVICE);
922 data->host_cookie = COOKIE_UNMAPPED;
927 * The specification states that the block count register must
928 * be updated, but it does not specify at what point in the
929 * data flow. That makes the register entirely useless to read
930 * back so we have to assume that nothing made it to the card
931 * in the event of an error.
934 data->bytes_xfered = 0;
936 data->bytes_xfered = data->blksz * data->blocks;
939 * Need to send CMD12 if -
940 * a) open-ended multiblock transfer (no CMD23)
941 * b) error in multiblock transfer
948 * The controller needs a reset of internal state machines
949 * upon error conditions.
952 sdhci_do_reset(host, SDHCI_RESET_CMD);
953 sdhci_do_reset(host, SDHCI_RESET_DATA);
956 sdhci_send_command(host, data->stop);
958 tasklet_schedule(&host->finish_tasklet);
961 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
965 unsigned long timeout;
969 /* Initially, a command has no error */
975 mask = SDHCI_CMD_INHIBIT;
976 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
977 mask |= SDHCI_DATA_INHIBIT;
979 /* We shouldn't wait for data inhibit for stop commands, even
980 though they might use busy signaling */
981 if (host->mrq->data && (cmd == host->mrq->data->stop))
982 mask &= ~SDHCI_DATA_INHIBIT;
984 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
986 pr_err("%s: Controller never released inhibit bit(s).\n",
987 mmc_hostname(host->mmc));
988 sdhci_dumpregs(host);
990 tasklet_schedule(&host->finish_tasklet);
998 if (!cmd->data && cmd->busy_timeout > 9000)
999 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1002 mod_timer(&host->timer, timeout);
1005 host->busy_handle = 0;
1007 sdhci_prepare_data(host, cmd);
1009 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1011 sdhci_set_transfer_mode(host, cmd);
1013 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1014 pr_err("%s: Unsupported response type!\n",
1015 mmc_hostname(host->mmc));
1016 cmd->error = -EINVAL;
1017 tasklet_schedule(&host->finish_tasklet);
1021 if (!(cmd->flags & MMC_RSP_PRESENT))
1022 flags = SDHCI_CMD_RESP_NONE;
1023 else if (cmd->flags & MMC_RSP_136)
1024 flags = SDHCI_CMD_RESP_LONG;
1025 else if (cmd->flags & MMC_RSP_BUSY)
1026 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1028 flags = SDHCI_CMD_RESP_SHORT;
1030 if (cmd->flags & MMC_RSP_CRC)
1031 flags |= SDHCI_CMD_CRC;
1032 if (cmd->flags & MMC_RSP_OPCODE)
1033 flags |= SDHCI_CMD_INDEX;
1035 /* CMD19 is special in that the Data Present Select should be set */
1036 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1037 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1038 flags |= SDHCI_CMD_DATA;
1040 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
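/*
 * Example (assuming the standard command register layout): CMD17, a
 * single-block read with an R1 response, is data-present with CRC and index
 * checking, so SDHCI_MAKE_CMD() yields 0x113a.
 */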
1042 EXPORT_SYMBOL_GPL(sdhci_send_command);
1044 static void sdhci_finish_command(struct sdhci_host *host)
1048 BUG_ON(host->cmd == NULL);
1050 if (host->cmd->flags & MMC_RSP_PRESENT) {
1051 if (host->cmd->flags & MMC_RSP_136) {
1052 /* CRC is stripped so we need to do some shifting. */
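/*
 * The controller stores response bits 127:8 (the CRC7 and end bit are
 * dropped), so each 32-bit word is shifted up by one byte and the missing
 * low byte is taken from the top of the next lower response register.
 */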
1053 for (i = 0;i < 4;i++) {
1054 host->cmd->resp[i] = sdhci_readl(host,
1055 SDHCI_RESPONSE + (3-i)*4) << 8;
1057 host->cmd->resp[i] |=
1059 SDHCI_RESPONSE + (3-i)*4-1);
1062 host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1066 /* Finished CMD23, now send actual command. */
1067 if (host->cmd == host->mrq->sbc) {
1069 sdhci_send_command(host, host->mrq->cmd);
1072 /* Processed actual command. */
1073 if (host->data && host->data_early)
1074 sdhci_finish_data(host);
1076 if (!host->cmd->data)
1077 tasklet_schedule(&host->finish_tasklet);
1083 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1087 switch (host->timing) {
1088 case MMC_TIMING_UHS_SDR12:
1089 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1091 case MMC_TIMING_UHS_SDR25:
1092 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1094 case MMC_TIMING_UHS_SDR50:
1095 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1097 case MMC_TIMING_UHS_SDR104:
1098 case MMC_TIMING_MMC_HS200:
1099 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1101 case MMC_TIMING_UHS_DDR50:
1102 case MMC_TIMING_MMC_DDR52:
1103 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1105 case MMC_TIMING_MMC_HS400:
1106 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1109 pr_warn("%s: Invalid UHS-I mode selected\n",
1110 mmc_hostname(host->mmc));
1111 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1117 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1119 int div = 0; /* Initialized for compiler warning */
1120 int real_div = div, clk_mul = 1;
1122 unsigned long timeout;
1123 bool switch_base_clk = false;
1125 host->mmc->actual_clock = 0;
1127 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1128 if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
1134 if (host->version >= SDHCI_SPEC_300) {
1135 if (host->preset_enabled) {
1138 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1139 pre_val = sdhci_get_preset_value(host);
1140 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1141 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1142 if (host->clk_mul &&
1143 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1144 clk = SDHCI_PROG_CLOCK_MODE;
1146 clk_mul = host->clk_mul;
1148 real_div = max_t(int, 1, div << 1);
1154 * Check if the Host Controller supports Programmable Clock Mode.
1157 if (host->clk_mul) {
1158 for (div = 1; div <= 1024; div++) {
1159 if ((host->max_clk * host->clk_mul / div)
1163 if ((host->max_clk * host->clk_mul / div) <= clock) {
1165 * Set Programmable Clock Mode in the Clock Control register.
1168 clk = SDHCI_PROG_CLOCK_MODE;
1170 clk_mul = host->clk_mul;
1174 * Divisor can be too small to reach clock
1175 * speed requirement. Then use the base clock.
1177 switch_base_clk = true;
1181 if (!host->clk_mul || switch_base_clk) {
1182 /* Version 3.00 divisors must be a multiple of 2. */
1183 if (host->max_clk <= clock)
1186 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1188 if ((host->max_clk / div) <= clock)
1194 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1195 && !div && host->max_clk <= 25000000)
1199 /* Version 2.00 divisors must be a power of 2. */
1200 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1201 if ((host->max_clk / div) <= clock)
1210 host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
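/*
 * For example, in plain divided-clock mode with a 200 MHz base clock and a
 * 50 MHz request, the SDHCI 3.0 search above stops at a divisor of 4, so
 * actual_clock becomes 50 MHz and half that divisor value is what gets
 * encoded into the register fields below.
 */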
1211 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1212 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1213 << SDHCI_DIVIDER_HI_SHIFT;
1214 clk |= SDHCI_CLOCK_INT_EN;
1215 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1217 /* Wait max 20 ms */
1219 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1220 & SDHCI_CLOCK_INT_STABLE)) {
1222 pr_err("%s: Internal clock never stabilised.\n",
1223 mmc_hostname(host->mmc));
1224 sdhci_dumpregs(host);
1231 clk |= SDHCI_CLOCK_CARD_EN;
1232 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1234 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1236 static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1239 struct mmc_host *mmc = host->mmc;
1242 if (mode != MMC_POWER_OFF) {
1244 case MMC_VDD_165_195:
1245 pwr = SDHCI_POWER_180;
1249 pwr = SDHCI_POWER_300;
1253 pwr = SDHCI_POWER_330;
1256 WARN(1, "%s: Invalid vdd %#x\n",
1257 mmc_hostname(host->mmc), vdd);
1262 if (host->pwr == pwr)
1268 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1269 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1270 sdhci_runtime_pm_bus_off(host);
1274 * Spec says that we should clear the power reg before setting
1275 * a new value. Some controllers don't seem to like this though.
1277 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1278 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1281 * At least the Marvell CaFe chip gets confused if we set the
1282 * voltage and turn on power at the same time, so set the voltage first.
1285 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1286 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1288 pwr |= SDHCI_POWER_ON;
1290 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1292 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1293 sdhci_runtime_pm_bus_on(host);
1296 * Some controllers need an extra 10 ms delay before they can
1297 * apply clock after applying power.
1299 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1303 if (!IS_ERR(mmc->supply.vmmc)) {
1304 spin_unlock_irq(&host->lock);
1305 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1306 spin_lock_irq(&host->lock);
1310 /*****************************************************************************\
1314 \*****************************************************************************/
1316 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1318 struct sdhci_host *host;
1320 unsigned long flags;
1322 host = mmc_priv(mmc);
1324 sdhci_runtime_pm_get(host);
1326 /* Firstly check card presence */
1327 present = mmc->ops->get_cd(mmc);
1329 spin_lock_irqsave(&host->lock, flags);
1331 WARN_ON(host->mrq != NULL);
1333 #ifndef SDHCI_USE_LEDS_CLASS
1334 sdhci_activate_led(host);
1338 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1339 * requests if Auto-CMD12 is enabled.
1341 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1343 mrq->data->stop = NULL;
1350 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1351 host->mrq->cmd->error = -ENOMEDIUM;
1352 tasklet_schedule(&host->finish_tasklet);
1354 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1355 sdhci_send_command(host, mrq->sbc);
1357 sdhci_send_command(host, mrq->cmd);
1361 spin_unlock_irqrestore(&host->lock, flags);
1364 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1368 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1369 if (width == MMC_BUS_WIDTH_8) {
1370 ctrl &= ~SDHCI_CTRL_4BITBUS;
1371 if (host->version >= SDHCI_SPEC_300)
1372 ctrl |= SDHCI_CTRL_8BITBUS;
1374 if (host->version >= SDHCI_SPEC_300)
1375 ctrl &= ~SDHCI_CTRL_8BITBUS;
1376 if (width == MMC_BUS_WIDTH_4)
1377 ctrl |= SDHCI_CTRL_4BITBUS;
1379 ctrl &= ~SDHCI_CTRL_4BITBUS;
1381 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1383 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1385 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1389 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1390 /* Select Bus Speed Mode for host */
1391 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1392 if ((timing == MMC_TIMING_MMC_HS200) ||
1393 (timing == MMC_TIMING_UHS_SDR104))
1394 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1395 else if (timing == MMC_TIMING_UHS_SDR12)
1396 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1397 else if (timing == MMC_TIMING_UHS_SDR25)
1398 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1399 else if (timing == MMC_TIMING_UHS_SDR50)
1400 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1401 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1402 (timing == MMC_TIMING_MMC_DDR52))
1403 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1404 else if (timing == MMC_TIMING_MMC_HS400)
1405 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1406 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1408 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1410 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1412 unsigned long flags;
1414 struct mmc_host *mmc = host->mmc;
1416 spin_lock_irqsave(&host->lock, flags);
1418 if (host->flags & SDHCI_DEVICE_DEAD) {
1419 spin_unlock_irqrestore(&host->lock, flags);
1420 if (!IS_ERR(mmc->supply.vmmc) &&
1421 ios->power_mode == MMC_POWER_OFF)
1422 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1427 * Reset the chip on each power off.
1428 * Should clear out any weird states.
1430 if (ios->power_mode == MMC_POWER_OFF) {
1431 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1435 if (host->version >= SDHCI_SPEC_300 &&
1436 (ios->power_mode == MMC_POWER_UP) &&
1437 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1438 sdhci_enable_preset_value(host, false);
1440 if (!ios->clock || ios->clock != host->clock) {
1441 host->ops->set_clock(host, ios->clock);
1442 host->clock = ios->clock;
1444 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1446 host->timeout_clk = host->mmc->actual_clock ?
1447 host->mmc->actual_clock / 1000 :
1449 host->mmc->max_busy_timeout =
1450 host->ops->get_max_timeout_count ?
1451 host->ops->get_max_timeout_count(host) :
1453 host->mmc->max_busy_timeout /= host->timeout_clk;
1457 sdhci_set_power(host, ios->power_mode, ios->vdd);
1459 if (host->ops->platform_send_init_74_clocks)
1460 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1462 host->ops->set_bus_width(host, ios->bus_width);
1464 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1466 if ((ios->timing == MMC_TIMING_SD_HS ||
1467 ios->timing == MMC_TIMING_MMC_HS)
1468 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1469 ctrl |= SDHCI_CTRL_HISPD;
1471 ctrl &= ~SDHCI_CTRL_HISPD;
1473 if (host->version >= SDHCI_SPEC_300) {
1476 /* In case of UHS-I modes, set High Speed Enable */
1477 if ((ios->timing == MMC_TIMING_MMC_HS400) ||
1478 (ios->timing == MMC_TIMING_MMC_HS200) ||
1479 (ios->timing == MMC_TIMING_MMC_DDR52) ||
1480 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1481 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1482 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1483 (ios->timing == MMC_TIMING_UHS_SDR25))
1484 ctrl |= SDHCI_CTRL_HISPD;
1486 if (!host->preset_enabled) {
1487 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1489 * We only need to set Driver Strength if the
1490 * preset value enable is not set.
1492 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1493 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1494 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1495 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1496 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1497 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1498 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1499 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1500 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1501 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1503 pr_warn("%s: invalid driver type, default to driver type B\n",
1505 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1508 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1511 * According to SDHC Spec v3.00, if the Preset Value
1512 * Enable in the Host Control 2 register is set, we
1513 * need to reset SD Clock Enable before changing High
1514 * Speed Enable to avoid generating clock glitches.
1517 /* Reset SD Clock Enable */
1518 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1519 clk &= ~SDHCI_CLOCK_CARD_EN;
1520 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1522 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1524 /* Re-enable SD Clock */
1525 host->ops->set_clock(host, host->clock);
1528 /* Reset SD Clock Enable */
1529 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1530 clk &= ~SDHCI_CLOCK_CARD_EN;
1531 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1533 host->ops->set_uhs_signaling(host, ios->timing);
1534 host->timing = ios->timing;
1536 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1537 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
1538 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1539 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1540 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1541 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1542 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1545 sdhci_enable_preset_value(host, true);
1546 preset = sdhci_get_preset_value(host);
1547 ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1548 >> SDHCI_PRESET_DRV_SHIFT;
1551 /* Re-enable SD Clock */
1552 host->ops->set_clock(host, host->clock);
1554 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1557 * Some (ENE) controllers misbehave on certain ios operations,
1558 * signalling timeout and CRC errors even on CMD0. Resetting
1559 * it on each ios seems to solve the problem.
1561 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1562 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1565 spin_unlock_irqrestore(&host->lock, flags);
1568 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1570 struct sdhci_host *host = mmc_priv(mmc);
1572 sdhci_runtime_pm_get(host);
1573 sdhci_do_set_ios(host, ios);
1574 sdhci_runtime_pm_put(host);
1577 static int sdhci_do_get_cd(struct sdhci_host *host)
1579 int gpio_cd = mmc_gpio_get_cd(host->mmc);
1581 if (host->flags & SDHCI_DEVICE_DEAD)
1584 /* If nonremovable, assume that the card is always present. */
1585 if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
1589 * Try slot GPIO detect; if defined, it takes precedence
1590 * over built-in controller functionality.
1592 if (!IS_ERR_VALUE(gpio_cd))
1595 /* If polling, assume that the card is always present. */
1596 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1599 /* Host native card detect */
1600 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1603 static int sdhci_get_cd(struct mmc_host *mmc)
1605 struct sdhci_host *host = mmc_priv(mmc);
1608 sdhci_runtime_pm_get(host);
1609 ret = sdhci_do_get_cd(host);
1610 sdhci_runtime_pm_put(host);
1614 static int sdhci_check_ro(struct sdhci_host *host)
1616 unsigned long flags;
1619 spin_lock_irqsave(&host->lock, flags);
1621 if (host->flags & SDHCI_DEVICE_DEAD)
1623 else if (host->ops->get_ro)
1624 is_readonly = host->ops->get_ro(host);
1626 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1627 & SDHCI_WRITE_PROTECT);
1629 spin_unlock_irqrestore(&host->lock, flags);
1631 /* This quirk needs to be replaced by a callback-function later */
1632 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1633 !is_readonly : is_readonly;
1636 #define SAMPLE_COUNT 5
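/*
 * For controllers with SDHCI_QUIRK_UNSTABLE_RO_DETECT the write-protect
 * switch is sampled SAMPLE_COUNT times and the majority result wins; see
 * sdhci_do_get_ro() below.
 */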
1638 static int sdhci_do_get_ro(struct sdhci_host *host)
1642 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1643 return sdhci_check_ro(host);
1646 for (i = 0; i < SAMPLE_COUNT; i++) {
1647 if (sdhci_check_ro(host)) {
1648 if (++ro_count > SAMPLE_COUNT / 2)
1656 static void sdhci_hw_reset(struct mmc_host *mmc)
1658 struct sdhci_host *host = mmc_priv(mmc);
1660 if (host->ops && host->ops->hw_reset)
1661 host->ops->hw_reset(host);
1664 static int sdhci_get_ro(struct mmc_host *mmc)
1666 struct sdhci_host *host = mmc_priv(mmc);
1669 sdhci_runtime_pm_get(host);
1670 ret = sdhci_do_get_ro(host);
1671 sdhci_runtime_pm_put(host);
1675 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1677 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1679 host->ier |= SDHCI_INT_CARD_INT;
1681 host->ier &= ~SDHCI_INT_CARD_INT;
1683 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1684 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1689 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1691 struct sdhci_host *host = mmc_priv(mmc);
1692 unsigned long flags;
1694 sdhci_runtime_pm_get(host);
1696 spin_lock_irqsave(&host->lock, flags);
1698 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1700 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1702 sdhci_enable_sdio_irq_nolock(host, enable);
1703 spin_unlock_irqrestore(&host->lock, flags);
1705 sdhci_runtime_pm_put(host);
1708 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1709 struct mmc_ios *ios)
1711 struct mmc_host *mmc = host->mmc;
1716 * Signal Voltage Switching is only applicable for Host Controllers v3.00 and above.
1719 if (host->version < SDHCI_SPEC_300)
1722 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1724 switch (ios->signal_voltage) {
1725 case MMC_SIGNAL_VOLTAGE_330:
1726 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1727 ctrl &= ~SDHCI_CTRL_VDD_180;
1728 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1730 if (!IS_ERR(mmc->supply.vqmmc)) {
1731 ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
1734 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1740 usleep_range(5000, 5500);
1742 /* 3.3V regulator output should be stable within 5 ms */
1743 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1744 if (!(ctrl & SDHCI_CTRL_VDD_180))
1747 pr_warn("%s: 3.3V regulator output did not became stable\n",
1751 case MMC_SIGNAL_VOLTAGE_180:
1752 if (!IS_ERR(mmc->supply.vqmmc)) {
1753 ret = regulator_set_voltage(mmc->supply.vqmmc,
1756 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1763 * Enable 1.8V Signal Enable in the Host Control2 register.
1766 ctrl |= SDHCI_CTRL_VDD_180;
1767 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1769 /* Some controllers need to do more when switching */
1770 if (host->ops->voltage_switch)
1771 host->ops->voltage_switch(host);
1773 /* 1.8V regulator output should be stable within 5 ms */
1774 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1775 if (ctrl & SDHCI_CTRL_VDD_180)
1778 pr_warn("%s: 1.8V regulator output did not became stable\n",
1782 case MMC_SIGNAL_VOLTAGE_120:
1783 if (!IS_ERR(mmc->supply.vqmmc)) {
1784 ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
1787 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1794 /* No signal voltage switch required */
1799 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1800 struct mmc_ios *ios)
1802 struct sdhci_host *host = mmc_priv(mmc);
1805 if (host->version < SDHCI_SPEC_300)
1807 sdhci_runtime_pm_get(host);
1808 err = sdhci_do_start_signal_voltage_switch(host, ios);
1809 sdhci_runtime_pm_put(host);
1813 static int sdhci_card_busy(struct mmc_host *mmc)
1815 struct sdhci_host *host = mmc_priv(mmc);
1818 sdhci_runtime_pm_get(host);
1819 /* Check whether DAT[3:0] is 0000 */
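/*
 * A card signals busy by driving DAT0 low, so an all-zero data level field
 * means the card has not yet released the bus.
 */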
1820 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1821 sdhci_runtime_pm_put(host);
1823 return !(present_state & SDHCI_DATA_LVL_MASK);
1826 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1828 struct sdhci_host *host = mmc_priv(mmc);
1829 unsigned long flags;
1831 spin_lock_irqsave(&host->lock, flags);
1832 host->flags |= SDHCI_HS400_TUNING;
1833 spin_unlock_irqrestore(&host->lock, flags);
1838 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1840 struct sdhci_host *host = mmc_priv(mmc);
1842 int tuning_loop_counter = MAX_TUNING_LOOP;
1844 unsigned long flags;
1845 unsigned int tuning_count = 0;
1848 sdhci_runtime_pm_get(host);
1849 spin_lock_irqsave(&host->lock, flags);
1851 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1852 host->flags &= ~SDHCI_HS400_TUNING;
1854 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
1855 tuning_count = host->tuning_count;
1858 * The Host Controller needs tuning in case of SDR104 and DDR50
1859 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
1860 * the Capabilities register.
1861 * If the Host Controller supports the HS200 mode then the
1862 * tuning function has to be executed.
1864 switch (host->timing) {
1865 /* HS400 tuning is done in HS200 mode */
1866 case MMC_TIMING_MMC_HS400:
1870 case MMC_TIMING_MMC_HS200:
1872 * Periodic re-tuning for HS400 is not expected to be needed, so disable it here.
1879 case MMC_TIMING_UHS_SDR104:
1880 case MMC_TIMING_UHS_DDR50:
1883 case MMC_TIMING_UHS_SDR50:
1884 if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
1885 host->flags & SDHCI_SDR104_NEEDS_TUNING)
1893 if (host->ops->platform_execute_tuning) {
1894 spin_unlock_irqrestore(&host->lock, flags);
1895 err = host->ops->platform_execute_tuning(host, opcode);
1896 sdhci_runtime_pm_put(host);
1900 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1901 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1902 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
1903 ctrl |= SDHCI_CTRL_TUNED_CLK;
1904 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1907 * As per the Host Controller spec v3.00, tuning command
1908 * generates Buffer Read Ready interrupt, so enable that.
1910 * Note: The spec clearly says that when tuning sequence
1911 * is being performed, the controller does not generate
1912 * interrupts other than Buffer Read Ready interrupt. But
1913 * to make sure we don't hit a controller bug, we _only_
1914 * enable Buffer Read Ready interrupt here.
1916 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
1917 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
1920 * Issue CMD19 repeatedly until Execute Tuning is cleared to 0, the number
1921 * of loops reaches 40, or a timeout of 150 ms occurs.
1924 struct mmc_command cmd = {0};
1925 struct mmc_request mrq = {NULL};
1927 cmd.opcode = opcode;
1929 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1934 if (tuning_loop_counter-- == 0)
1941 * In response to CMD19, the card sends 64 bytes of tuning
1942 * block to the Host Controller. So we set the block size accordingly below.
1945 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1946 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
1947 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
1949 else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
1950 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1953 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1958 * The tuning block is sent by the card to the host controller.
1959 * So we set the TRNS_READ bit in the Transfer Mode register.
1960 * This also takes care of setting DMA Enable and Multi Block
1961 * Select in the same register to 0.
1963 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1965 sdhci_send_command(host, &cmd);
1970 spin_unlock_irqrestore(&host->lock, flags);
1971 /* Wait for Buffer Read Ready interrupt */
1972 wait_event_interruptible_timeout(host->buf_ready_int,
1973 (host->tuning_done == 1),
1974 msecs_to_jiffies(50));
1975 spin_lock_irqsave(&host->lock, flags);
1977 if (!host->tuning_done) {
1978 pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
1979 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1980 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1981 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
1982 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1988 host->tuning_done = 0;
1990 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1992 /* eMMC spec does not require a delay between tuning cycles */
1993 if (opcode == MMC_SEND_TUNING_BLOCK)
1995 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
1998 * The Host Driver has exhausted the maximum number of loops allowed,
1999 * so use fixed sampling frequency.
2001 if (tuning_loop_counter < 0) {
2002 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2003 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2005 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
2006 pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
2013 * In case tuning fails, host controllers which support
2014 * re-tuning can try tuning again at a later time, when the
2015 * re-tuning timer expires. So for these controllers, we
2016 * return 0. Since there might be other controllers who do not
2017 * have this capability, we return error for them.
2022 host->mmc->retune_period = err ? 0 : tuning_count;
2024 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2025 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2027 spin_unlock_irqrestore(&host->lock, flags);
2028 sdhci_runtime_pm_put(host);
2033 static int sdhci_select_drive_strength(struct mmc_card *card,
2034 unsigned int max_dtr, int host_drv,
2035 int card_drv, int *drv_type)
2037 struct sdhci_host *host = mmc_priv(card->host);
2039 if (!host->ops->select_drive_strength)
2042 return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
2043 card_drv, drv_type);
2046 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2048 /* Host Controller v3.00 defines preset value registers */
2049 if (host->version < SDHCI_SPEC_300)
2053 * We only enable or disable Preset Value if they are not already
2054 * enabled or disabled respectively. Otherwise, we bail out.
2056 if (host->preset_enabled != enable) {
2057 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2060 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2062 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2064 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2067 host->flags |= SDHCI_PV_ENABLED;
2069 host->flags &= ~SDHCI_PV_ENABLED;
2071 host->preset_enabled = enable;
2075 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2078 struct sdhci_host *host = mmc_priv(mmc);
2079 struct mmc_data *data = mrq->data;
2081 if (data->host_cookie != COOKIE_UNMAPPED)
2082 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2083 data->flags & MMC_DATA_WRITE ?
2084 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2086 data->host_cookie = COOKIE_UNMAPPED;
2089 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
2092 struct sdhci_host *host = mmc_priv(mmc);
2094 mrq->data->host_cookie = COOKIE_UNMAPPED;
2096 if (host->flags & SDHCI_REQ_USE_DMA)
2097 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_MAPPED);
2100 static void sdhci_card_event(struct mmc_host *mmc)
2102 struct sdhci_host *host = mmc_priv(mmc);
2103 unsigned long flags;
2106 /* First check if client has provided their own card event */
2107 if (host->ops->card_event)
2108 host->ops->card_event(host);
2110 present = sdhci_do_get_cd(host);
2112 spin_lock_irqsave(&host->lock, flags);
2114 /* Check host->mrq first in case we are runtime suspended */
2115 if (host->mrq && !present) {
2116 pr_err("%s: Card removed during transfer!\n",
2117 mmc_hostname(host->mmc));
2118 pr_err("%s: Resetting controller.\n",
2119 mmc_hostname(host->mmc));
2121 sdhci_do_reset(host, SDHCI_RESET_CMD);
2122 sdhci_do_reset(host, SDHCI_RESET_DATA);
2124 host->mrq->cmd->error = -ENOMEDIUM;
2125 tasklet_schedule(&host->finish_tasklet);
2128 spin_unlock_irqrestore(&host->lock, flags);
2131 static const struct mmc_host_ops sdhci_ops = {
2132 .request = sdhci_request,
2133 .post_req = sdhci_post_req,
2134 .pre_req = sdhci_pre_req,
2135 .set_ios = sdhci_set_ios,
2136 .get_cd = sdhci_get_cd,
2137 .get_ro = sdhci_get_ro,
2138 .hw_reset = sdhci_hw_reset,
2139 .enable_sdio_irq = sdhci_enable_sdio_irq,
2140 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2141 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2142 .execute_tuning = sdhci_execute_tuning,
2143 .select_drive_strength = sdhci_select_drive_strength,
2144 .card_event = sdhci_card_event,
2145 .card_busy = sdhci_card_busy,
2148 /*****************************************************************************\
2152 \*****************************************************************************/
2154 static void sdhci_tasklet_finish(unsigned long param)
2156 struct sdhci_host *host;
2157 unsigned long flags;
2158 struct mmc_request *mrq;
2160 host = (struct sdhci_host*)param;
2162 spin_lock_irqsave(&host->lock, flags);
2165 * If this tasklet gets rescheduled while running, it will
2166 * be run again afterwards but without any active request.
2169 spin_unlock_irqrestore(&host->lock, flags);
2173 del_timer(&host->timer);
2178 * Always unmap the data buffers if they were mapped by
2179 * sdhci_prepare_data() whenever we finish with a request.
2180 * This avoids leaking DMA mappings on error.
2182 if (host->flags & SDHCI_REQ_USE_DMA) {
2183 struct mmc_data *data = mrq->data;
2185 if (data && data->host_cookie == COOKIE_MAPPED) {
2186 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2187 (data->flags & MMC_DATA_READ) ?
2188 DMA_FROM_DEVICE : DMA_TO_DEVICE);
2189 data->host_cookie = COOKIE_UNMAPPED;
2194 * The controller needs a reset of internal state machines
2195 * upon error conditions.
2197 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
2198 ((mrq->cmd && mrq->cmd->error) ||
2199 (mrq->sbc && mrq->sbc->error) ||
2200 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
2201 (mrq->data->stop && mrq->data->stop->error))) ||
2202 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2204 /* Some controllers need this kick or reset won't work here */
2205 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2206 /* This is to force an update */
2207 host->ops->set_clock(host, host->clock);
2209 /* Spec says we should do both at the same time, but Ricoh
2210 controllers do not like that. */
2211 sdhci_do_reset(host, SDHCI_RESET_CMD);
2212 sdhci_do_reset(host, SDHCI_RESET_DATA);
2219 #ifndef SDHCI_USE_LEDS_CLASS
2220 sdhci_deactivate_led(host);
2224 spin_unlock_irqrestore(&host->lock, flags);
2226 mmc_request_done(host->mmc, mrq);
2227 sdhci_runtime_pm_put(host);
2230 static void sdhci_timeout_timer(unsigned long data)
2232 struct sdhci_host *host;
2233 unsigned long flags;
2235 host = (struct sdhci_host*)data;
2237 spin_lock_irqsave(&host->lock, flags);
2240 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2241 mmc_hostname(host->mmc));
2242 sdhci_dumpregs(host);
2245 host->data->error = -ETIMEDOUT;
2246 sdhci_finish_data(host);
2249 host->cmd->error = -ETIMEDOUT;
2251 host->mrq->cmd->error = -ETIMEDOUT;
2253 tasklet_schedule(&host->finish_tasklet);
2258 spin_unlock_irqrestore(&host->lock, flags);
2261 /*****************************************************************************\
2263 * Interrupt handling *
2265 \*****************************************************************************/
2267 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
2269 BUG_ON(intmask == 0);
2272 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2273 mmc_hostname(host->mmc), (unsigned)intmask);
2274 sdhci_dumpregs(host);
2278 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2279 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2280 if (intmask & SDHCI_INT_TIMEOUT)
2281 host->cmd->error = -ETIMEDOUT;
2283 host->cmd->error = -EILSEQ;
2286 * If this command initiates a data phase and a response
2287 * CRC error is signalled, the card can start transferring
2288 * data - the card may have received the command without
2289 * error. We must not terminate the mmc_request early.
2291 * If the card did not receive the command or returned an
2292 * error which prevented it from sending data, the data phase will time out.
2295 if (host->cmd->data &&
2296 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2302 tasklet_schedule(&host->finish_tasklet);
2307 * The host can send an interrupt when the busy state has
2308 * ended, allowing us to wait without wasting CPU cycles.
2309 * Unfortunately this is overloaded on the "data complete"
2310 * interrupt, so we need to take some care when handling it.
2313 * Note: The 1.0 specification is a bit ambiguous about this
2314 * feature so there might be some problems with older controllers.
2317 if (host->cmd->flags & MMC_RSP_BUSY) {
2318 if (host->cmd->data)
2319 DBG("Cannot wait for busy signal when also doing a data transfer");
2320 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
2321 && !host->busy_handle) {
2322 /* Mark that command complete before busy is ended */
2323 host->busy_handle = 1;
2327 /* The controller does not support the end-of-busy IRQ,
2328 * fall through and take the SDHCI_INT_RESPONSE */
2329 } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
2330 host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
2331 *mask &= ~SDHCI_INT_DATA_END;
2334 if (intmask & SDHCI_INT_RESPONSE)
2335 sdhci_finish_command(host);
2338 #ifdef CONFIG_MMC_DEBUG
2339 static void sdhci_adma_show_error(struct sdhci_host *host)
2341 const char *name = mmc_hostname(host->mmc);
2342 void *desc = host->adma_table;
2344 sdhci_dumpregs(host);
2347 struct sdhci_adma2_64_desc *dma_desc = desc;
2349 if (host->flags & SDHCI_USE_64_BIT_DMA)
2350 DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2351 name, desc, le32_to_cpu(dma_desc->addr_hi),
2352 le32_to_cpu(dma_desc->addr_lo),
2353 le16_to_cpu(dma_desc->len),
2354 le16_to_cpu(dma_desc->cmd));
2356 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2357 name, desc, le32_to_cpu(dma_desc->addr_lo),
2358 le16_to_cpu(dma_desc->len),
2359 le16_to_cpu(dma_desc->cmd));
2361 desc += host->desc_sz;
2363 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2368 static void sdhci_adma_show_error(struct sdhci_host *host) { }
2371 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2374 BUG_ON(intmask == 0);
2376 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2377 if (intmask & SDHCI_INT_DATA_AVAIL) {
2378 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2379 if (command == MMC_SEND_TUNING_BLOCK ||
2380 command == MMC_SEND_TUNING_BLOCK_HS200) {
2381 host->tuning_done = 1;
2382 wake_up(&host->buf_ready_int);
2389 * The "data complete" interrupt is also used to
2390 * indicate that a busy state has ended. See comment
2391 * above in sdhci_cmd_irq().
2393 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
2394 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2395 host->cmd->error = -ETIMEDOUT;
2396 tasklet_schedule(&host->finish_tasklet);
2399 if (intmask & SDHCI_INT_DATA_END) {
2401 * Some cards handle busy-end interrupt
2402 * before the command completed, so make
2403 * sure we do things in the proper order.
2405 if (host->busy_handle)
2406 sdhci_finish_command(host);
2408 host->busy_handle = 1;
2413 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2414 mmc_hostname(host->mmc), (unsigned)intmask);
2415 sdhci_dumpregs(host);
2420 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2421 host->data->error = -ETIMEDOUT;
2422 else if (intmask & SDHCI_INT_DATA_END_BIT)
2423 host->data->error = -EILSEQ;
2424 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2425 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2427 host->data->error = -EILSEQ;
2428 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2429 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2430 sdhci_adma_show_error(host);
2431 host->data->error = -EIO;
2432 if (host->ops->adma_workaround)
2433 host->ops->adma_workaround(host, intmask);
2436 if (host->data->error)
2437 sdhci_finish_data(host);
2439 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2440 sdhci_transfer_pio(host);
2443 * We currently don't do anything fancy with DMA
2444 * boundaries, but as we can't disable the feature
2445 * we need to at least restart the transfer.
2447 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2448 * should return a valid address to continue from, but as
2449 * some controllers are faulty, don't trust them.
2451 if (intmask & SDHCI_INT_DMA_END) {
2452 u32 dmastart, dmanow;
2453 dmastart = sg_dma_address(host->data->sg);
2454 dmanow = dmastart + host->data->bytes_xfered;
2456 * Force update to the next DMA block boundary.
2459 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2460 SDHCI_DEFAULT_BOUNDARY_SIZE;
2461 host->data->bytes_xfered = dmanow - dmastart;
2462 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2464 mmc_hostname(host->mmc), dmastart,
2465 host->data->bytes_xfered, dmanow);
2466 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
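/*
 * Illustrative sketch (not part of the driver): the boundary arithmetic used
 * above, assuming the usual SDHCI_DEFAULT_BOUNDARY_SIZE of 512 KiB. For
 * example, with dmastart = 0x10000000 and 0x6000 bytes already transferred,
 * dmanow = 0x10006000 rounds up to 0x10080000 and bytes_xfered becomes
 * 0x80000.
 */
static inline u32 sdhci_example_next_boundary(u32 dmanow)
{
	const u32 boundary = 512 * 1024;	/* assumed default boundary size */

	return (dmanow & ~(boundary - 1)) + boundary;
}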
2469 if (intmask & SDHCI_INT_DATA_END) {
2472 * Data managed to finish before the
2473 * command completed. Make sure we do
2474 * things in the proper order.
2476 host->data_early = 1;
2478 sdhci_finish_data(host);
2484 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2486 irqreturn_t result = IRQ_NONE;
2487 struct sdhci_host *host = dev_id;
2488 u32 intmask, mask, unexpected = 0;
2489 int max_loops = 16;
2491 spin_lock(&host->lock);
2493 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2494 spin_unlock(&host->lock);
2498 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2499 if (!intmask || intmask == 0xffffffff) {
2505 /* Clear selected interrupts. */
2506 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2507 SDHCI_INT_BUS_POWER);
2508 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2510 DBG("*** %s got interrupt: 0x%08x\n",
2511 mmc_hostname(host->mmc), intmask);
2513 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2514 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2518 * There is an observation on i.mx esdhc. INSERT
2519 * bit will be immediately set again when it gets
2520 * cleared, if a card is inserted. We have to mask
2521 * the irq to prevent interrupt storm which will
2522 * freeze the system. The REMOVE bit behaves the
2523 * same way.
2525 * More testing is needed here to ensure it works
2526 * for other platforms though.
2528 host->ier &= ~(SDHCI_INT_CARD_INSERT |
2529 SDHCI_INT_CARD_REMOVE);
2530 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2531 SDHCI_INT_CARD_INSERT;
2532 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2533 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2535 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2536 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2538 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2539 SDHCI_INT_CARD_REMOVE);
2540 result = IRQ_WAKE_THREAD;
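/*
 * Illustrative sketch (not part of the driver): the card-detect interrupt
 * selection performed above. With a card present we only need to hear about
 * removal, and with no card only about insertion, which is what keeps the
 * i.MX-style interrupt storm from recurring.
 */
static inline u32 sdhci_example_cd_irq_mask(bool card_present)
{
	return card_present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
}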
2543 if (intmask & SDHCI_INT_CMD_MASK)
2544 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
2547 if (intmask & SDHCI_INT_DATA_MASK)
2548 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2550 if (intmask & SDHCI_INT_BUS_POWER)
2551 pr_err("%s: Card is consuming too much power!\n",
2552 mmc_hostname(host->mmc));
2554 if (intmask & SDHCI_INT_CARD_INT) {
2555 sdhci_enable_sdio_irq_nolock(host, false);
2556 host->thread_isr |= SDHCI_INT_CARD_INT;
2557 result = IRQ_WAKE_THREAD;
2560 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2561 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2562 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2563 SDHCI_INT_CARD_INT);
2566 unexpected |= intmask;
2567 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2570 if (result == IRQ_NONE)
2571 result = IRQ_HANDLED;
2573 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2574 } while (intmask && --max_loops);
2576 spin_unlock(&host->lock);
2579 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2580 mmc_hostname(host->mmc), unexpected);
2581 sdhci_dumpregs(host);
2587 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2589 struct sdhci_host *host = dev_id;
2590 unsigned long flags;
2593 spin_lock_irqsave(&host->lock, flags);
2594 isr = host->thread_isr;
2595 host->thread_isr = 0;
2596 spin_unlock_irqrestore(&host->lock, flags);
2598 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2599 sdhci_card_event(host->mmc);
2600 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
2603 if (isr & SDHCI_INT_CARD_INT) {
2604 sdio_run_irqs(host->mmc);
2606 spin_lock_irqsave(&host->lock, flags);
2607 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2608 sdhci_enable_sdio_irq_nolock(host, true);
2609 spin_unlock_irqrestore(&host->lock, flags);
2612 return isr ? IRQ_HANDLED : IRQ_NONE;
2615 /*****************************************************************************\
2617 * Suspend/resume *
2619 \*****************************************************************************/
2622 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2625 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2626 | SDHCI_WAKE_ON_INT;
2628 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2629 val |= mask;
2630 /* Avoid spurious wake-ups */
2631 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2632 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2633 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2635 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2637 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2640 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2641 | SDHCI_WAKE_ON_INT;
2643 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2644 val &= ~mask;
2645 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2648 int sdhci_suspend_host(struct sdhci_host *host)
2650 sdhci_disable_card_detection(host);
2652 mmc_retune_timer_stop(host->mmc);
2653 mmc_retune_needed(host->mmc);
2655 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2657 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2658 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2659 free_irq(host->irq, host);
2661 sdhci_enable_irq_wakeups(host);
2662 enable_irq_wake(host->irq);
2667 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2669 int sdhci_resume_host(struct sdhci_host *host)
2673 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2674 if (host->ops->enable_dma)
2675 host->ops->enable_dma(host);
2678 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2679 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2680 /* Card keeps power but host controller does not */
2681 sdhci_init(host, 0);
2684 sdhci_do_set_ios(host, &host->mmc->ios);
2686 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2690 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2691 ret = request_threaded_irq(host->irq, sdhci_irq,
2692 sdhci_thread_irq, IRQF_SHARED,
2693 mmc_hostname(host->mmc), host);
2697 sdhci_disable_irq_wakeups(host);
2698 disable_irq_wake(host->irq);
2701 sdhci_enable_card_detection(host);
2706 EXPORT_SYMBOL_GPL(sdhci_resume_host);
2708 static int sdhci_runtime_pm_get(struct sdhci_host *host)
2710 return pm_runtime_get_sync(host->mmc->parent);
2713 static int sdhci_runtime_pm_put(struct sdhci_host *host)
2715 pm_runtime_mark_last_busy(host->mmc->parent);
2716 return pm_runtime_put_autosuspend(host->mmc->parent);
2719 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
2723 host->bus_on = true;
2724 pm_runtime_get_noresume(host->mmc->parent);
2727 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
2731 host->bus_on = false;
2732 pm_runtime_put_noidle(host->mmc->parent);
2735 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2737 unsigned long flags;
2739 mmc_retune_timer_stop(host->mmc);
2740 mmc_retune_needed(host->mmc);
2742 spin_lock_irqsave(&host->lock, flags);
2743 host->ier &= SDHCI_INT_CARD_INT;
2744 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2745 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2746 spin_unlock_irqrestore(&host->lock, flags);
2748 synchronize_hardirq(host->irq);
2750 spin_lock_irqsave(&host->lock, flags);
2751 host->runtime_suspended = true;
2752 spin_unlock_irqrestore(&host->lock, flags);
2756 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2758 int sdhci_runtime_resume_host(struct sdhci_host *host)
2760 unsigned long flags;
2761 int host_flags = host->flags;
2763 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2764 if (host->ops->enable_dma)
2765 host->ops->enable_dma(host);
2768 sdhci_init(host, 0);
2770 /* Force clock and power re-program */
2773 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
2774 sdhci_do_set_ios(host, &host->mmc->ios);
2776 if ((host_flags & SDHCI_PV_ENABLED) &&
2777 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2778 spin_lock_irqsave(&host->lock, flags);
2779 sdhci_enable_preset_value(host, true);
2780 spin_unlock_irqrestore(&host->lock, flags);
2783 spin_lock_irqsave(&host->lock, flags);
2785 host->runtime_suspended = false;
2787 /* Enable SDIO IRQ */
2788 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2789 sdhci_enable_sdio_irq_nolock(host, true);
2791 /* Enable Card Detection */
2792 sdhci_enable_card_detection(host);
2794 spin_unlock_irqrestore(&host->lock, flags);
2798 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2800 #endif /* CONFIG_PM */
2802 /*****************************************************************************\
2804 * Device allocation/registration *
2806 \*****************************************************************************/
2808 struct sdhci_host *sdhci_alloc_host(struct device *dev,
2811 struct mmc_host *mmc;
2812 struct sdhci_host *host;
2814 WARN_ON(dev == NULL);
2816 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
2818 return ERR_PTR(-ENOMEM);
2820 host = mmc_priv(mmc);
2822 host->mmc_host_ops = sdhci_ops;
2823 mmc->ops = &host->mmc_host_ops;
2828 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
2830 int sdhci_add_host(struct sdhci_host *host)
2832 struct mmc_host *mmc;
2833 u32 caps[2] = {0, 0};
2834 u32 max_current_caps;
2835 unsigned int ocr_avail;
2836 unsigned int override_timeout_clk;
2840 WARN_ON(host == NULL);
2847 host->quirks = debug_quirks;
2849 host->quirks2 = debug_quirks2;
2851 override_timeout_clk = host->timeout_clk;
2853 sdhci_do_reset(host, SDHCI_RESET_ALL);
2855 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
2856 host->version = (host->version & SDHCI_SPEC_VER_MASK)
2857 >> SDHCI_SPEC_VER_SHIFT;
2858 if (host->version > SDHCI_SPEC_300) {
2859 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
2860 mmc_hostname(mmc), host->version);
2863 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
2864 sdhci_readl(host, SDHCI_CAPABILITIES);
2866 if (host->version >= SDHCI_SPEC_300)
2867 caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
2869 sdhci_readl(host, SDHCI_CAPABILITIES_1);
2871 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
2872 host->flags |= SDHCI_USE_SDMA;
2873 else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
2874 DBG("Controller doesn't have SDMA capability\n");
2875 else
2876 host->flags |= SDHCI_USE_SDMA;
2878 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
2879 (host->flags & SDHCI_USE_SDMA)) {
2880 DBG("Disabling DMA as it is marked broken\n");
2881 host->flags &= ~SDHCI_USE_SDMA;
2884 if ((host->version >= SDHCI_SPEC_200) &&
2885 (caps[0] & SDHCI_CAN_DO_ADMA2))
2886 host->flags |= SDHCI_USE_ADMA;
2888 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
2889 (host->flags & SDHCI_USE_ADMA)) {
2890 DBG("Disabling ADMA as it is marked broken\n");
2891 host->flags &= ~SDHCI_USE_ADMA;
2895 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
2896 * and *must* do 64-bit DMA. A driver has the opportunity to change
2897 * that during the first call to ->enable_dma(). Similarly
2898 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
2899 * implement.
2901 if (caps[0] & SDHCI_CAN_64BIT)
2902 host->flags |= SDHCI_USE_64_BIT_DMA;
2904 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2905 if (host->ops->enable_dma) {
2906 if (host->ops->enable_dma(host)) {
2907 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
2910 ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
2915 /* SDMA does not support 64-bit DMA */
2916 if (host->flags & SDHCI_USE_64_BIT_DMA)
2917 host->flags &= ~SDHCI_USE_SDMA;
2919 if (host->flags & SDHCI_USE_ADMA) {
2924 * The DMA descriptor table size is calculated as the maximum
2925 * number of segments times 2, to allow for an alignment
2926 * descriptor for each segment, plus 1 for a nop end descriptor,
2927 * all multiplied by the descriptor size.
2929 if (host->flags & SDHCI_USE_64_BIT_DMA) {
2930 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2931 SDHCI_ADMA2_64_DESC_SZ;
2932 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
2934 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2935 SDHCI_ADMA2_32_DESC_SZ;
2936 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
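/*
 * Illustrative sketch (not part of the driver): a worked instance of the
 * table sizing above, assuming the conventional sdhci.h values
 * SDHCI_MAX_SEGS = 128, SDHCI_ADMA2_64_DESC_SZ = 12 and
 * SDHCI_ADMA2_32_DESC_SZ = 8. The 64-bit table then needs
 * (128 * 2 + 1) * 12 = 3084 bytes and the 32-bit table 2056 bytes.
 */
static inline unsigned int sdhci_example_adma_table_sz(bool use_64bit)
{
	unsigned int nsegs = 128;		   /* assumed SDHCI_MAX_SEGS */
	unsigned int desc_sz = use_64bit ? 12 : 8; /* assumed descriptor sizes */

	/* two descriptors per segment (data + alignment) plus one nop end */
	return (nsegs * 2 + 1) * desc_sz;
}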
2939 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
2940 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
2941 host->adma_table_sz, &dma, GFP_KERNEL);
2943 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2945 host->flags &= ~SDHCI_USE_ADMA;
2946 } else if ((dma + host->align_buffer_sz) &
2947 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
2948 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
2950 host->flags &= ~SDHCI_USE_ADMA;
2951 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
2952 host->adma_table_sz, buf, dma);
2954 host->align_buffer = buf;
2955 host->align_addr = dma;
2957 host->adma_table = buf + host->align_buffer_sz;
2958 host->adma_addr = dma + host->align_buffer_sz;
2963 * If we use DMA, then it's up to the caller to set the DMA
2964 * mask, but PIO does not need the hw shim so we set a new
2965 * mask here in that case.
2967 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
2968 host->dma_mask = DMA_BIT_MASK(64);
2969 mmc_dev(mmc)->dma_mask = &host->dma_mask;
2972 if (host->version >= SDHCI_SPEC_300)
2973 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
2974 >> SDHCI_CLOCK_BASE_SHIFT;
2976 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
2977 >> SDHCI_CLOCK_BASE_SHIFT;
2979 host->max_clk *= 1000000;
2980 if (host->max_clk == 0 || host->quirks &
2981 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
2982 if (!host->ops->get_max_clock) {
2983 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
2987 host->max_clk = host->ops->get_max_clock(host);
2991 * In case of Host Controller v3.00, find out whether clock
2992 * multiplier is supported.
2994 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
2995 SDHCI_CLOCK_MUL_SHIFT;
2998 * In case the value in Clock Multiplier is 0, then programmable
2999 * clock mode is not supported, otherwise the actual clock
3000 * multiplier is one more than the value of Clock Multiplier
3001 * in the Capabilities Register.
3007 * Set host parameters.
3009 max_clk = host->max_clk;
3011 if (host->ops->get_min_clock)
3012 mmc->f_min = host->ops->get_min_clock(host);
3013 else if (host->version >= SDHCI_SPEC_300) {
3014 if (host->clk_mul) {
3015 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3016 max_clk = host->max_clk * host->clk_mul;
3018 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3020 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
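/*
 * Illustrative sketch (not part of the driver): the minimum-clock selection
 * above. clk_mul here is the effective multiplier (capabilities field plus
 * one); the divisor limits 1024 (programmable clock mode) and 2046
 * (SDHCI_MAX_DIV_SPEC_300) are the spec values assumed. With a 100 MHz base
 * clock and a multiplier of 10, f_min is roughly 976 kHz; without a
 * multiplier a v3.00 host can divide down to roughly 48.9 kHz.
 */
static inline unsigned int sdhci_example_min_clock(unsigned int max_clk,
						   unsigned int clk_mul)
{
	return clk_mul ? (max_clk * clk_mul) / 1024 : max_clk / 2046;
}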
3022 if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
3023 mmc->f_max = max_clk;
3025 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3026 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
3027 SDHCI_TIMEOUT_CLK_SHIFT;
3028 if (host->timeout_clk == 0) {
3029 if (host->ops->get_timeout_clock) {
3031 host->ops->get_timeout_clock(host);
3033 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3039 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
3040 host->timeout_clk *= 1000;
3042 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3043 host->ops->get_max_timeout_count(host) : 1 << 27;
3044 mmc->max_busy_timeout /= host->timeout_clk;
3047 if (override_timeout_clk)
3048 host->timeout_clk = override_timeout_clk;
3050 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3051 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3053 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3054 host->flags |= SDHCI_AUTO_CMD12;
3056 /* Auto-CMD23 stuff only works in ADMA or PIO. */
3057 if ((host->version >= SDHCI_SPEC_300) &&
3058 ((host->flags & SDHCI_USE_ADMA) ||
3059 !(host->flags & SDHCI_USE_SDMA)) &&
3060 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3061 host->flags |= SDHCI_AUTO_CMD23;
3062 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
3064 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
3068 * A controller may support 8-bit width, but the board itself
3069 * might not have the pins brought out. Boards that support
3070 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3071 * their platform code before calling sdhci_add_host(), and we
3072 * won't assume 8-bit width for hosts without that CAP.
3074 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3075 mmc->caps |= MMC_CAP_4_BIT_DATA;
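/*
 * Illustrative sketch (not part of the driver): a platform driver that has
 * all eight data lines wired up would advertise that before registering the
 * host, roughly as below (hypothetical probe excerpt).
 */
static inline void sdhci_example_board_enable_8bit(struct sdhci_host *host)
{
	host->mmc->caps |= MMC_CAP_8_BIT_DATA;	/* board has 8 data pins wired */
}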
3077 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3078 mmc->caps &= ~MMC_CAP_CMD23;
3080 if (caps[0] & SDHCI_CAN_DO_HISPD)
3081 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3083 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3084 !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
3085 IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
3086 mmc->caps |= MMC_CAP_NEEDS_POLL;
3088 /* If there are external regulators, get them */
3089 if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
3090 return -EPROBE_DEFER;
3092 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3093 if (!IS_ERR(mmc->supply.vqmmc)) {
3094 ret = regulator_enable(mmc->supply.vqmmc);
3095 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3097 caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
3098 SDHCI_SUPPORT_SDR50 |
3099 SDHCI_SUPPORT_DDR50);
3101 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3102 mmc_hostname(mmc), ret);
3103 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3107 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
3108 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3109 SDHCI_SUPPORT_DDR50);
3111 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3112 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3113 SDHCI_SUPPORT_DDR50))
3114 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3116 /* SDR104 support also implies SDR50 support */
3117 if (caps[1] & SDHCI_SUPPORT_SDR104) {
3118 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3119 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3120 * field can be promoted to support HS200.
3122 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3123 mmc->caps2 |= MMC_CAP2_HS200;
3124 } else if (caps[1] & SDHCI_SUPPORT_SDR50)
3125 mmc->caps |= MMC_CAP_UHS_SDR50;
3127 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3128 (caps[1] & SDHCI_SUPPORT_HS400))
3129 mmc->caps2 |= MMC_CAP2_HS400;
3131 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3132 (IS_ERR(mmc->supply.vqmmc) ||
3133 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3135 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3137 if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
3138 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3139 mmc->caps |= MMC_CAP_UHS_DDR50;
3141 /* Does the host need tuning for SDR50? */
3142 if (caps[1] & SDHCI_USE_SDR50_TUNING)
3143 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3145 /* Does the host need tuning for SDR104 / HS200? */
3146 if (mmc->caps2 & MMC_CAP2_HS200)
3147 host->flags |= SDHCI_SDR104_NEEDS_TUNING;
3149 /* Driver Type(s) (A, C, D) supported by the host */
3150 if (caps[1] & SDHCI_DRIVER_TYPE_A)
3151 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3152 if (caps[1] & SDHCI_DRIVER_TYPE_C)
3153 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3154 if (caps[1] & SDHCI_DRIVER_TYPE_D)
3155 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3157 /* Initial value for re-tuning timer count */
3158 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3159 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3162 * In case Re-tuning Timer is not disabled, the actual value of
3163 * re-tuning timer will be 2 ^ (n - 1).
3165 if (host->tuning_count)
3166 host->tuning_count = 1 << (host->tuning_count - 1);
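/*
 * Illustrative sketch (not part of the driver): decoding of the re-tuning
 * timer count as above. A raw field value of 4 yields 1 << 3 = 8, which the
 * core treats as an 8 second re-tuning interval; a raw value of 0 means the
 * timer is disabled.
 */
static inline unsigned int sdhci_example_tuning_interval(unsigned int raw)
{
	return raw ? 1u << (raw - 1) : 0;
}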
3168 /* Re-tuning mode supported by the Host Controller */
3169 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
3170 SDHCI_RETUNING_MODE_SHIFT;
3175 * According to SD Host Controller spec v3.00, if the Host System
3176 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3177 * the value is meaningful only if Voltage Support in the Capabilities
3178 * register is set. The actual current value is 4 times the register
3179 * value.
3181 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3182 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3183 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3186 /* convert to SDHCI_MAX_CURRENT format */
3187 curr = curr/1000; /* convert to mA */
3188 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3190 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3192 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3193 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3194 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
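/*
 * Illustrative sketch (not part of the driver): the regulator-to-register
 * conversion above, assuming SDHCI_MAX_CURRENT_MULTIPLIER = 4 and an 8-bit
 * field limit. A regulator limit of 800000 uA becomes 800 mA, encodes as
 * 200, and decodes back to max_current_330 = 800 mA.
 */
static inline unsigned int sdhci_example_current_field(int limit_uA)
{
	unsigned int curr = limit_uA / 1000;	/* uA -> mA */

	curr /= 4;				/* assumed register multiplier */
	if (curr > 255)				/* assumed 8-bit field limit */
		curr = 255;
	return curr;
}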
3198 if (caps[0] & SDHCI_CAN_VDD_330) {
3199 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3201 mmc->max_current_330 = ((max_current_caps &
3202 SDHCI_MAX_CURRENT_330_MASK) >>
3203 SDHCI_MAX_CURRENT_330_SHIFT) *
3204 SDHCI_MAX_CURRENT_MULTIPLIER;
3206 if (caps[0] & SDHCI_CAN_VDD_300) {
3207 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3209 mmc->max_current_300 = ((max_current_caps &
3210 SDHCI_MAX_CURRENT_300_MASK) >>
3211 SDHCI_MAX_CURRENT_300_SHIFT) *
3212 SDHCI_MAX_CURRENT_MULTIPLIER;
3214 if (caps[0] & SDHCI_CAN_VDD_180) {
3215 ocr_avail |= MMC_VDD_165_195;
3217 mmc->max_current_180 = ((max_current_caps &
3218 SDHCI_MAX_CURRENT_180_MASK) >>
3219 SDHCI_MAX_CURRENT_180_SHIFT) *
3220 SDHCI_MAX_CURRENT_MULTIPLIER;
3223 /* If OCR set by host, use it instead. */
3225 ocr_avail = host->ocr_mask;
3227 /* If OCR set by external regulators, give it highest prio. */
3229 ocr_avail = mmc->ocr_avail;
3231 mmc->ocr_avail = ocr_avail;
3232 mmc->ocr_avail_sdio = ocr_avail;
3233 if (host->ocr_avail_sdio)
3234 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3235 mmc->ocr_avail_sd = ocr_avail;
3236 if (host->ocr_avail_sd)
3237 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3238 else /* normal SD controllers don't support 1.8V */
3239 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3240 mmc->ocr_avail_mmc = ocr_avail;
3241 if (host->ocr_avail_mmc)
3242 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3244 if (mmc->ocr_avail == 0) {
3245 pr_err("%s: Hardware doesn't report any support voltages.\n",
3250 spin_lock_init(&host->lock);
3253 * Maximum number of segments. Depends on whether the hardware
3254 * can do scatter/gather or not.
3256 if (host->flags & SDHCI_USE_ADMA)
3257 mmc->max_segs = SDHCI_MAX_SEGS;
3258 else if (host->flags & SDHCI_USE_SDMA)
3261 mmc->max_segs = SDHCI_MAX_SEGS;
3264 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3265 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
3266 * is less anyway.
3268 mmc->max_req_size = 524288;
3271 * Maximum segment size. Could be one segment with the maximum number
3272 * of bytes. When doing hardware scatter/gather, each entry cannot
3273 * be larger than 64 KiB though.
3275 if (host->flags & SDHCI_USE_ADMA) {
3276 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3277 mmc->max_seg_size = 65535;
3279 mmc->max_seg_size = 65536;
3281 mmc->max_seg_size = mmc->max_req_size;
3285 * Maximum block size. This varies from controller to controller and
3286 * is specified in the capabilities register.
3288 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3289 mmc->max_blk_size = 2;
3291 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
3292 SDHCI_MAX_BLOCK_SHIFT;
3293 if (mmc->max_blk_size >= 3) {
3294 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3296 mmc->max_blk_size = 0;
3300 mmc->max_blk_size = 512 << mmc->max_blk_size;
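/*
 * Illustrative sketch (not part of the driver): decoding the capabilities
 * block-size field as above. The two-bit field selects 512 << n bytes, so
 * 0, 1 and 2 give 512, 1024 and 2048 bytes; 3 is reserved, which is why the
 * code above falls back to 512 bytes when it sees it.
 */
static inline unsigned int sdhci_example_max_blk_size(unsigned int field)
{
	return 512u << (field >= 3 ? 0 : field);
}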
3303 * Maximum block count.
3305 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3310 tasklet_init(&host->finish_tasklet,
3311 sdhci_tasklet_finish, (unsigned long)host);
3313 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3315 init_waitqueue_head(&host->buf_ready_int);
3317 sdhci_init(host, 0);
3319 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3320 IRQF_SHARED, mmc_hostname(mmc), host);
3322 pr_err("%s: Failed to request IRQ %d: %d\n",
3323 mmc_hostname(mmc), host->irq, ret);
3327 #ifdef CONFIG_MMC_DEBUG
3328 sdhci_dumpregs(host);
3331 #ifdef SDHCI_USE_LEDS_CLASS
3332 snprintf(host->led_name, sizeof(host->led_name),
3333 "%s::", mmc_hostname(mmc));
3334 host->led.name = host->led_name;
3335 host->led.brightness = LED_OFF;
3336 host->led.default_trigger = mmc_hostname(mmc);
3337 host->led.brightness_set = sdhci_led_control;
3339 ret = led_classdev_register(mmc_dev(mmc), &host->led);
3341 pr_err("%s: Failed to register LED device: %d\n",
3342 mmc_hostname(mmc), ret);
3351 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3352 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3353 (host->flags & SDHCI_USE_ADMA) ?
3354 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3355 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3357 sdhci_enable_card_detection(host);
3361 #ifdef SDHCI_USE_LEDS_CLASS
3363 sdhci_do_reset(host, SDHCI_RESET_ALL);
3364 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3365 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3366 free_irq(host->irq, host);
3369 tasklet_kill(&host->finish_tasklet);
3374 EXPORT_SYMBOL_GPL(sdhci_add_host);
3376 void sdhci_remove_host(struct sdhci_host *host, int dead)
3378 struct mmc_host *mmc = host->mmc;
3379 unsigned long flags;
3382 spin_lock_irqsave(&host->lock, flags);
3384 host->flags |= SDHCI_DEVICE_DEAD;
3387 pr_err("%s: Controller removed during "
3388 " transfer!\n", mmc_hostname(mmc));
3390 host->mrq->cmd->error = -ENOMEDIUM;
3391 tasklet_schedule(&host->finish_tasklet);
3394 spin_unlock_irqrestore(&host->lock, flags);
3397 sdhci_disable_card_detection(host);
3399 mmc_remove_host(mmc);
3401 #ifdef SDHCI_USE_LEDS_CLASS
3402 led_classdev_unregister(&host->led);
3406 sdhci_do_reset(host, SDHCI_RESET_ALL);
3408 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3409 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3410 free_irq(host->irq, host);
3412 del_timer_sync(&host->timer);
3414 tasklet_kill(&host->finish_tasklet);
3416 if (!IS_ERR(mmc->supply.vqmmc))
3417 regulator_disable(mmc->supply.vqmmc);
3419 if (host->align_buffer)
3420 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3421 host->adma_table_sz, host->align_buffer,
3424 host->adma_table = NULL;
3425 host->align_buffer = NULL;
3428 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3430 void sdhci_free_host(struct sdhci_host *host)
3432 mmc_free_host(host->mmc);
3435 EXPORT_SYMBOL_GPL(sdhci_free_host);
3437 /*****************************************************************************\
3439 * Driver init/exit *
3441 \*****************************************************************************/
3443 static int __init sdhci_drv_init(void)
3446 ": Secure Digital Host Controller Interface driver\n");
3447 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3452 static void __exit sdhci_drv_exit(void)
3456 module_init(sdhci_drv_init);
3457 module_exit(sdhci_drv_exit);
3459 module_param(debug_quirks, uint, 0444);
3460 module_param(debug_quirks2, uint, 0444);
3462 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3463 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3464 MODULE_LICENSE("GPL");
3466 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3467 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");