2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * Thanks to the following companies for their support:
13 * - JMicron (hardware and technical support)
16 #include <linux/delay.h>
17 #include <linux/highmem.h>
19 #include <linux/module.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/slab.h>
22 #include <linux/scatterlist.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
26 #include <linux/leds.h>
28 #include <linux/mmc/mmc.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/sdio.h>
32 #include <linux/mmc/slot-gpio.h>
36 #define DRIVER_NAME "sdhci"
38 #define DBG(f, x...) \
39 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
41 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
42 defined(CONFIG_MMC_SDHCI_MODULE))
43 #define SDHCI_USE_LEDS_CLASS
46 #define MAX_TUNING_LOOP 40
48 static unsigned int debug_quirks = 0;
49 static unsigned int debug_quirks2;
51 static void sdhci_finish_data(struct sdhci_host *);
53 static void sdhci_finish_command(struct sdhci_host *);
54 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
55 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
56 static int sdhci_do_get_cd(struct sdhci_host *host);
59 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
60 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
62 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
65 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
70 static void sdhci_dumpregs(struct sdhci_host *host)
72 pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
73 mmc_hostname(host->mmc));
75 pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
76 sdhci_readl(host, SDHCI_DMA_ADDRESS),
77 sdhci_readw(host, SDHCI_HOST_VERSION));
78 pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
79 sdhci_readw(host, SDHCI_BLOCK_SIZE),
80 sdhci_readw(host, SDHCI_BLOCK_COUNT));
81 pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
82 sdhci_readl(host, SDHCI_ARGUMENT),
83 sdhci_readw(host, SDHCI_TRANSFER_MODE));
84 pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
85 sdhci_readl(host, SDHCI_PRESENT_STATE),
86 sdhci_readb(host, SDHCI_HOST_CONTROL));
87 pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
88 sdhci_readb(host, SDHCI_POWER_CONTROL),
89 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
90 pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
91 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
92 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
93 pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
94 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
95 sdhci_readl(host, SDHCI_INT_STATUS));
96 pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
97 sdhci_readl(host, SDHCI_INT_ENABLE),
98 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
99 pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
100 sdhci_readw(host, SDHCI_ACMD12_ERR),
101 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
102 pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
103 sdhci_readl(host, SDHCI_CAPABILITIES),
104 sdhci_readl(host, SDHCI_CAPABILITIES_1));
105 pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
106 sdhci_readw(host, SDHCI_COMMAND),
107 sdhci_readl(host, SDHCI_MAX_CURRENT));
108 pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
109 sdhci_readw(host, SDHCI_HOST_CONTROL2));
111 if (host->flags & SDHCI_USE_ADMA) {
112 if (host->flags & SDHCI_USE_64_BIT_DMA)
113 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
114 readl(host->ioaddr + SDHCI_ADMA_ERROR),
115 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
116 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
118 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
119 readl(host->ioaddr + SDHCI_ADMA_ERROR),
120 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
123 pr_debug(DRIVER_NAME ": ===========================================\n");
126 /*****************************************************************************\
128 * Low level functions *
130 \*****************************************************************************/
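/*
 * sdhci_set_card_detection() - arm or disarm card insert/remove interrupts.
 *
 * The controller raises separate INSERT and REMOVE interrupts, so only the
 * one matching the next possible transition is enabled: REMOVE when a card
 * is currently present, INSERT when the slot is empty. Nothing is armed for
 * non-removable cards or when card detection is marked broken (polling is
 * used instead).
 */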
132 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
136 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
137 (host->mmc->caps & MMC_CAP_NONREMOVABLE))
141 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
144 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
145 SDHCI_INT_CARD_INSERT;
147 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
150 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
151 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
154 static void sdhci_enable_card_detection(struct sdhci_host *host)
156 sdhci_set_card_detection(host, true);
159 static void sdhci_disable_card_detection(struct sdhci_host *host)
161 sdhci_set_card_detection(host, false);
164 void sdhci_reset(struct sdhci_host *host, u8 mask)
166 unsigned long timeout;
168 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
170 if (mask & SDHCI_RESET_ALL) {
172 /* Reset-all turns off SD Bus Power */
173 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
174 sdhci_runtime_pm_bus_off(host);
177 /* Wait max 100 ms */
180 /* hw clears the bit when it's done */
181 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
183 pr_err("%s: Reset 0x%x never completed.\n",
184 mmc_hostname(host->mmc), (int)mask);
185 sdhci_dumpregs(host);
192 EXPORT_SYMBOL_GPL(sdhci_reset);
194 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
196 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
197 if (!sdhci_do_get_cd(host))
201 host->ops->reset(host, mask);
203 if (mask & SDHCI_RESET_ALL) {
204 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
205 if (host->ops->enable_dma)
206 host->ops->enable_dma(host);
/* Resetting the controller clears many of its settings */
210 host->preset_enabled = false;
214 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
216 static void sdhci_init(struct sdhci_host *host, int soft)
219 sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
221 sdhci_do_reset(host, SDHCI_RESET_ALL);
223 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
224 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
225 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
226 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
229 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
230 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
233 /* force clock reconfiguration */
235 sdhci_set_ios(host->mmc, &host->mmc->ios);
239 static void sdhci_reinit(struct sdhci_host *host)
242 sdhci_enable_card_detection(host);
245 static void sdhci_activate_led(struct sdhci_host *host)
249 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
250 ctrl |= SDHCI_CTRL_LED;
251 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
254 static void sdhci_deactivate_led(struct sdhci_host *host)
258 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
259 ctrl &= ~SDHCI_CTRL_LED;
260 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
263 #ifdef SDHCI_USE_LEDS_CLASS
264 static void sdhci_led_control(struct led_classdev *led,
265 enum led_brightness brightness)
267 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
270 spin_lock_irqsave(&host->lock, flags);
272 if (host->runtime_suspended)
275 if (brightness == LED_OFF)
276 sdhci_deactivate_led(host);
278 sdhci_activate_led(host);
280 spin_unlock_irqrestore(&host->lock, flags);
284 /*****************************************************************************\
288 \*****************************************************************************/
290 static void sdhci_read_block_pio(struct sdhci_host *host)
293 size_t blksize, len, chunk;
294 u32 uninitialized_var(scratch);
297 DBG("PIO reading\n");
299 blksize = host->data->blksz;
302 local_irq_save(flags);
305 BUG_ON(!sg_miter_next(&host->sg_miter));
307 len = min(host->sg_miter.length, blksize);
310 host->sg_miter.consumed = len;
312 buf = host->sg_miter.addr;
316 scratch = sdhci_readl(host, SDHCI_BUFFER);
320 *buf = scratch & 0xFF;
329 sg_miter_stop(&host->sg_miter);
331 local_irq_restore(flags);
334 static void sdhci_write_block_pio(struct sdhci_host *host)
337 size_t blksize, len, chunk;
341 DBG("PIO writing\n");
343 blksize = host->data->blksz;
347 local_irq_save(flags);
350 BUG_ON(!sg_miter_next(&host->sg_miter));
352 len = min(host->sg_miter.length, blksize);
355 host->sg_miter.consumed = len;
357 buf = host->sg_miter.addr;
360 scratch |= (u32)*buf << (chunk * 8);
366 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
367 sdhci_writel(host, scratch, SDHCI_BUFFER);
374 sg_miter_stop(&host->sg_miter);
376 local_irq_restore(flags);
379 static void sdhci_transfer_pio(struct sdhci_host *host)
385 if (host->blocks == 0)
388 if (host->data->flags & MMC_DATA_READ)
389 mask = SDHCI_DATA_AVAILABLE;
391 mask = SDHCI_SPACE_AVAILABLE;
394 * Some controllers (JMicron JMB38x) mess up the buffer bits
395 * for transfers < 4 bytes. As long as it is just one block,
396 * we can ignore the bits.
398 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
399 (host->data->blocks == 1))
402 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
403 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
406 if (host->data->flags & MMC_DATA_READ)
407 sdhci_read_block_pio(host);
409 sdhci_write_block_pio(host);
412 if (host->blocks == 0)
416 DBG("PIO transfer complete.\n");
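/*
 * sdhci_pre_dma_transfer() - map the request's scatterlist for DMA.
 *
 * If the upper layer already mapped the buffers through the ->pre_req()
 * hook, data->host_cookie is COOKIE_PRE_MAPPED and the cached sg_count is
 * reused; otherwise dma_map_sg() is called here and the cookie records
 * who is responsible for unmapping the buffers later.
 */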
419 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
420 struct mmc_data *data, int cookie)
425 * If the data buffers are already mapped, return the previous
426 * dma_map_sg() result.
428 if (data->host_cookie == COOKIE_PRE_MAPPED)
429 return data->sg_count;
431 sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
432 data->flags & MMC_DATA_WRITE ?
433 DMA_TO_DEVICE : DMA_FROM_DEVICE);
438 data->sg_count = sg_count;
439 data->host_cookie = cookie;
444 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
446 local_irq_save(*flags);
447 return kmap_atomic(sg_page(sg)) + sg->offset;
450 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
452 kunmap_atomic(buffer);
453 local_irq_restore(*flags);
456 static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
457 dma_addr_t addr, int len, unsigned cmd)
459 struct sdhci_adma2_64_desc *dma_desc = desc;
461 /* 32-bit and 64-bit descriptors have these members in same position */
462 dma_desc->cmd = cpu_to_le16(cmd);
463 dma_desc->len = cpu_to_le16(len);
464 dma_desc->addr_lo = cpu_to_le32((u32)addr);
466 if (host->flags & SDHCI_USE_64_BIT_DMA)
467 dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
470 static void sdhci_adma_mark_end(void *desc)
472 struct sdhci_adma2_64_desc *dma_desc = desc;
474 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
475 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
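/*
 * sdhci_adma_table_pre() builds the ADMA2 descriptor table for a request.
 * Each DMA segment gets a TRAN descriptor (address, length, attributes);
 * segments that do not start on a 4-byte boundary have their first few
 * bytes bounced through the pre-allocated align_buffer via an extra
 * descriptor. The chain is closed either by marking the last descriptor
 * with the END attribute (when the NO_ENDATTR_IN_NOPDESC quirk is set) or
 * by appending a NOP/END/VALID terminating entry.
 */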
478 static void sdhci_adma_table_pre(struct sdhci_host *host,
479 struct mmc_data *data, int sg_count)
481 struct scatterlist *sg;
483 dma_addr_t addr, align_addr;
489 * The spec does not specify endianness of descriptor table.
490 * We currently guess that it is LE.
493 host->sg_count = sg_count;
495 desc = host->adma_table;
496 align = host->align_buffer;
498 align_addr = host->align_addr;
500 for_each_sg(data->sg, sg, host->sg_count, i) {
501 addr = sg_dma_address(sg);
502 len = sg_dma_len(sg);
505 * The SDHCI specification states that ADMA addresses must
506 * be 32-bit aligned. If they aren't, then we use a bounce
* buffer for the (up to three) bytes that screw up the
* alignment.
*/
510 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
513 if (data->flags & MMC_DATA_WRITE) {
514 buffer = sdhci_kmap_atomic(sg, &flags);
515 memcpy(align, buffer, offset);
516 sdhci_kunmap_atomic(buffer, &flags);
520 sdhci_adma_write_desc(host, desc, align_addr, offset,
523 BUG_ON(offset > 65536);
525 align += SDHCI_ADMA2_ALIGN;
526 align_addr += SDHCI_ADMA2_ALIGN;
528 desc += host->desc_sz;
538 sdhci_adma_write_desc(host, desc, addr, len,
540 desc += host->desc_sz;
* If this triggers then we have a calculation bug
* in this function.
*/
547 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
550 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
551 /* Mark the last descriptor as the terminating descriptor */
552 if (desc != host->adma_table) {
553 desc -= host->desc_sz;
554 sdhci_adma_mark_end(desc);
557 /* Add a terminating entry - nop, end, valid */
558 sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
562 static void sdhci_adma_table_post(struct sdhci_host *host,
563 struct mmc_data *data)
565 struct scatterlist *sg;
571 if (data->flags & MMC_DATA_READ) {
572 bool has_unaligned = false;
574 /* Do a quick scan of the SG list for any unaligned mappings */
575 for_each_sg(data->sg, sg, host->sg_count, i)
576 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
577 has_unaligned = true;
582 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
583 data->sg_len, DMA_FROM_DEVICE);
585 align = host->align_buffer;
587 for_each_sg(data->sg, sg, host->sg_count, i) {
588 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
589 size = SDHCI_ADMA2_ALIGN -
590 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
592 buffer = sdhci_kmap_atomic(sg, &flags);
593 memcpy(buffer, align, size);
594 sdhci_kunmap_atomic(buffer, &flags);
596 align += SDHCI_ADMA2_ALIGN;
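/*
 * Per the SDHCI specification, the Data Timeout Counter Value N written to
 * SDHCI_TIMEOUT_CONTROL selects a timeout of 2^(13+N) timeout-clock cycles,
 * so N = 0 gives 2^13 cycles and the maximum N = 0xE gives 2^27 cycles.
 * sdhci_calc_timeout() works in microseconds (host->timeout_clk is in kHz)
 * and keeps doubling the candidate timeout, incrementing N, until it covers
 * the requested target; if even 0xE is too small it warns and clamps.
 */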
603 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
606 struct mmc_data *data = cmd->data;
607 unsigned target_timeout, current_timeout;
610 * If the host controller provides us with an incorrect timeout
611 * value, just skip the check and use 0xE. The hardware may take
* longer to time out, but that's much better than having a too-short
* timeout value.
*/
615 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
618 /* Unspecified timeout, assume max */
619 if (!data && !cmd->busy_timeout)
624 target_timeout = cmd->busy_timeout * 1000;
626 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
627 if (host->clock && data->timeout_clks) {
628 unsigned long long val;
631 * data->timeout_clks is in units of clock cycles.
632 * host->clock is in Hz. target_timeout is in us.
633 * Hence, us = 1000000 * cycles / Hz. Round up.
635 val = 1000000 * data->timeout_clks;
636 if (do_div(val, host->clock))
638 target_timeout += val;
643 * Figure out needed cycles.
644 * We do this in steps in order to fit inside a 32 bit int.
645 * The first step is the minimum timeout, which will have a
646 * minimum resolution of 6 bits:
647 * (1) 2^13*1000 > 2^22,
* (2) host->timeout_clk < 2^16
*     =>
*     (1) / (2) > 2^6
*/
653 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
654 while (current_timeout < target_timeout) {
656 current_timeout <<= 1;
662 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
663 mmc_hostname(host->mmc), count, cmd->opcode);
670 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
672 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
673 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
675 if (host->flags & SDHCI_REQ_USE_DMA)
676 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
678 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
680 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
681 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
684 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
688 if (host->ops->set_timeout) {
689 host->ops->set_timeout(host, cmd);
691 count = sdhci_calc_timeout(host, cmd);
692 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
696 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
699 struct mmc_data *data = cmd->data;
703 if (data || (cmd->flags & MMC_RSP_BUSY))
704 sdhci_set_timeout(host, cmd);
710 BUG_ON(data->blksz * data->blocks > 524288);
711 BUG_ON(data->blksz > host->mmc->max_blk_size);
712 BUG_ON(data->blocks > 65535);
715 host->data_early = 0;
716 host->data->bytes_xfered = 0;
718 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
719 struct scatterlist *sg;
720 unsigned int length_mask, offset_mask;
723 host->flags |= SDHCI_REQ_USE_DMA;
* FIXME: This doesn't account for merging when mapping the
* scatterlist.
*
* The assumption here being that alignment and lengths are
* the same after DMA mapping to device address space.
*/
734 if (host->flags & SDHCI_USE_ADMA) {
735 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
738 * As we use up to 3 byte chunks to work
739 * around alignment problems, we need to
740 * check the offset as well.
745 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
747 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
751 if (unlikely(length_mask | offset_mask)) {
752 for_each_sg(data->sg, sg, data->sg_len, i) {
753 if (sg->length & length_mask) {
754 DBG("Reverting to PIO because of transfer size (%d)\n",
756 host->flags &= ~SDHCI_REQ_USE_DMA;
759 if (sg->offset & offset_mask) {
760 DBG("Reverting to PIO because of bad alignment\n");
761 host->flags &= ~SDHCI_REQ_USE_DMA;
768 if (host->flags & SDHCI_REQ_USE_DMA) {
769 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
773 * This only happens when someone fed
774 * us an invalid request.
777 host->flags &= ~SDHCI_REQ_USE_DMA;
778 } else if (host->flags & SDHCI_USE_ADMA) {
779 sdhci_adma_table_pre(host, data, sg_cnt);
781 sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
782 if (host->flags & SDHCI_USE_64_BIT_DMA)
784 (u64)host->adma_addr >> 32,
785 SDHCI_ADMA_ADDRESS_HI);
787 WARN_ON(sg_cnt != 1);
788 sdhci_writel(host, sg_dma_address(data->sg),
794 * Always adjust the DMA selection as some controllers
* (e.g. JMicron) can't do PIO properly when the selection
* is ADMA.
*/
798 if (host->version >= SDHCI_SPEC_200) {
799 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
800 ctrl &= ~SDHCI_CTRL_DMA_MASK;
801 if ((host->flags & SDHCI_REQ_USE_DMA) &&
802 (host->flags & SDHCI_USE_ADMA)) {
803 if (host->flags & SDHCI_USE_64_BIT_DMA)
804 ctrl |= SDHCI_CTRL_ADMA64;
806 ctrl |= SDHCI_CTRL_ADMA32;
808 ctrl |= SDHCI_CTRL_SDMA;
810 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
813 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
816 flags = SG_MITER_ATOMIC;
817 if (host->data->flags & MMC_DATA_READ)
818 flags |= SG_MITER_TO_SG;
820 flags |= SG_MITER_FROM_SG;
821 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
822 host->blocks = data->blocks;
825 sdhci_set_transfer_irqs(host);
827 /* Set the DMA boundary value and block size */
828 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
829 data->blksz), SDHCI_BLOCK_SIZE);
830 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
833 static void sdhci_set_transfer_mode(struct sdhci_host *host,
834 struct mmc_command *cmd)
837 struct mmc_data *data = cmd->data;
841 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
842 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
844 /* clear Auto CMD settings for no data CMDs */
845 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
846 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
847 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
852 WARN_ON(!host->data);
854 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
855 mode = SDHCI_TRNS_BLK_CNT_EN;
857 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
858 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
860 * If we are sending CMD23, CMD12 never gets sent
861 * on successful completion (so no Auto-CMD12).
863 if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
864 (cmd->opcode != SD_IO_RW_EXTENDED))
865 mode |= SDHCI_TRNS_AUTO_CMD12;
866 else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
867 mode |= SDHCI_TRNS_AUTO_CMD23;
868 sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
872 if (data->flags & MMC_DATA_READ)
873 mode |= SDHCI_TRNS_READ;
874 if (host->flags & SDHCI_REQ_USE_DMA)
875 mode |= SDHCI_TRNS_DMA;
877 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
880 static void sdhci_finish_data(struct sdhci_host *host)
882 struct mmc_data *data;
889 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
890 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
891 sdhci_adma_table_post(host, data);
894 * The specification states that the block count register must
895 * be updated, but it does not specify at what point in the
896 * data flow. That makes the register entirely useless to read
897 * back so we have to assume that nothing made it to the card
898 * in the event of an error.
901 data->bytes_xfered = 0;
903 data->bytes_xfered = data->blksz * data->blocks;
906 * Need to send CMD12 if -
907 * a) open-ended multiblock transfer (no CMD23)
908 * b) error in multiblock transfer
915 * The controller needs a reset of internal state machines
916 * upon error conditions.
919 sdhci_do_reset(host, SDHCI_RESET_CMD);
920 sdhci_do_reset(host, SDHCI_RESET_DATA);
923 sdhci_send_command(host, data->stop);
925 tasklet_schedule(&host->finish_tasklet);
928 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
932 unsigned long timeout;
936 /* Initially, a command has no error */
942 mask = SDHCI_CMD_INHIBIT;
943 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
944 mask |= SDHCI_DATA_INHIBIT;
/* We shouldn't wait for data inhibit for stop commands, even
   though they might use busy signaling */
948 if (host->mrq->data && (cmd == host->mrq->data->stop))
949 mask &= ~SDHCI_DATA_INHIBIT;
951 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
953 pr_err("%s: Controller never released inhibit bit(s).\n",
954 mmc_hostname(host->mmc));
955 sdhci_dumpregs(host);
957 tasklet_schedule(&host->finish_tasklet);
965 if (!cmd->data && cmd->busy_timeout > 9000)
966 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
969 mod_timer(&host->timer, timeout);
972 host->busy_handle = 0;
974 sdhci_prepare_data(host, cmd);
976 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
978 sdhci_set_transfer_mode(host, cmd);
980 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
981 pr_err("%s: Unsupported response type!\n",
982 mmc_hostname(host->mmc));
983 cmd->error = -EINVAL;
984 tasklet_schedule(&host->finish_tasklet);
988 if (!(cmd->flags & MMC_RSP_PRESENT))
989 flags = SDHCI_CMD_RESP_NONE;
990 else if (cmd->flags & MMC_RSP_136)
991 flags = SDHCI_CMD_RESP_LONG;
992 else if (cmd->flags & MMC_RSP_BUSY)
993 flags = SDHCI_CMD_RESP_SHORT_BUSY;
995 flags = SDHCI_CMD_RESP_SHORT;
997 if (cmd->flags & MMC_RSP_CRC)
998 flags |= SDHCI_CMD_CRC;
999 if (cmd->flags & MMC_RSP_OPCODE)
1000 flags |= SDHCI_CMD_INDEX;
1002 /* CMD19 is special in that the Data Present Select should be set */
1003 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1004 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1005 flags |= SDHCI_CMD_DATA;
1007 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1009 EXPORT_SYMBOL_GPL(sdhci_send_command);
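/*
 * For 136-bit (R2) responses the controller strips the CRC byte, so the
 * 120 response bits sit in the response registers shifted down by 8 bits
 * relative to the card's reply. sdhci_finish_command() rebuilds resp[0..3]
 * by shifting each 32-bit register left by 8 and OR-ing in the top byte of
 * the adjacent, lower register.
 */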
1011 static void sdhci_finish_command(struct sdhci_host *host)
1015 BUG_ON(host->cmd == NULL);
1017 if (host->cmd->flags & MMC_RSP_PRESENT) {
1018 if (host->cmd->flags & MMC_RSP_136) {
1019 /* CRC is stripped so we need to do some shifting. */
for (i = 0; i < 4; i++) {
1021 host->cmd->resp[i] = sdhci_readl(host,
1022 SDHCI_RESPONSE + (3-i)*4) << 8;
1024 host->cmd->resp[i] |=
1026 SDHCI_RESPONSE + (3-i)*4-1);
1029 host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1033 /* Finished CMD23, now send actual command. */
1034 if (host->cmd == host->mrq->sbc) {
1036 sdhci_send_command(host, host->mrq->cmd);
1039 /* Processed actual command. */
1040 if (host->data && host->data_early)
1041 sdhci_finish_data(host);
1043 if (!host->cmd->data)
1044 tasklet_schedule(&host->finish_tasklet);
1050 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1054 switch (host->timing) {
1055 case MMC_TIMING_UHS_SDR12:
1056 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1058 case MMC_TIMING_UHS_SDR25:
1059 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1061 case MMC_TIMING_UHS_SDR50:
1062 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1064 case MMC_TIMING_UHS_SDR104:
1065 case MMC_TIMING_MMC_HS200:
1066 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1068 case MMC_TIMING_UHS_DDR50:
1069 case MMC_TIMING_MMC_DDR52:
1070 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1072 case MMC_TIMING_MMC_HS400:
1073 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1076 pr_warn("%s: Invalid UHS-I mode selected\n",
1077 mmc_hostname(host->mmc));
1078 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
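/*
 * Clock setup: on SDHCI 3.00 hosts the divided-clock mode uses a 10-bit
 * divisor where SDCLK = base clock / (2 * div) (even divisors only), while
 * programmable clock mode gives SDCLK = (base clock * clk_mul) / div.
 * Older 2.00 hosts only support power-of-two divisors up to 256. After
 * writing the divisor the internal clock is enabled and polled (20 ms max)
 * for INT_STABLE before the card clock is switched on.
 */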
1084 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1086 int div = 0; /* Initialized for compiler warning */
1087 int real_div = div, clk_mul = 1;
1089 unsigned long timeout;
1090 bool switch_base_clk = false;
1092 host->mmc->actual_clock = 0;
1094 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1095 if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
1101 if (host->version >= SDHCI_SPEC_300) {
1102 if (host->preset_enabled) {
1105 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1106 pre_val = sdhci_get_preset_value(host);
1107 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1108 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1109 if (host->clk_mul &&
1110 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1111 clk = SDHCI_PROG_CLOCK_MODE;
1113 clk_mul = host->clk_mul;
1115 real_div = max_t(int, 1, div << 1);
* Check if the Host Controller supports Programmable Clock
* Mode.
*/
1124 if (host->clk_mul) {
1125 for (div = 1; div <= 1024; div++) {
1126 if ((host->max_clk * host->clk_mul / div)
1130 if ((host->max_clk * host->clk_mul / div) <= clock) {
* Set Programmable Clock Mode in the Clock
* Control register.
*/
1135 clk = SDHCI_PROG_CLOCK_MODE;
1137 clk_mul = host->clk_mul;
1141 * Divisor can be too small to reach clock
1142 * speed requirement. Then use the base clock.
1144 switch_base_clk = true;
1148 if (!host->clk_mul || switch_base_clk) {
1149 /* Version 3.00 divisors must be a multiple of 2. */
1150 if (host->max_clk <= clock)
1153 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1155 if ((host->max_clk / div) <= clock)
1161 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1162 && !div && host->max_clk <= 25000000)
1166 /* Version 2.00 divisors must be a power of 2. */
1167 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1168 if ((host->max_clk / div) <= clock)
1177 host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
1178 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1179 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1180 << SDHCI_DIVIDER_HI_SHIFT;
1181 clk |= SDHCI_CLOCK_INT_EN;
1182 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1184 /* Wait max 20 ms */
1186 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1187 & SDHCI_CLOCK_INT_STABLE)) {
1189 pr_err("%s: Internal clock never stabilised.\n",
1190 mmc_hostname(host->mmc));
1191 sdhci_dumpregs(host);
1198 clk |= SDHCI_CLOCK_CARD_EN;
1199 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1201 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1203 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1206 struct mmc_host *mmc = host->mmc;
1208 spin_unlock_irq(&host->lock);
1209 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1210 spin_lock_irq(&host->lock);
1212 if (mode != MMC_POWER_OFF)
1213 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1215 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1218 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1223 if (mode != MMC_POWER_OFF) {
1225 case MMC_VDD_165_195:
1226 pwr = SDHCI_POWER_180;
1230 pwr = SDHCI_POWER_300;
1234 pwr = SDHCI_POWER_330;
1237 WARN(1, "%s: Invalid vdd %#x\n",
1238 mmc_hostname(host->mmc), vdd);
1243 if (host->pwr == pwr)
1249 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1250 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1251 sdhci_runtime_pm_bus_off(host);
1254 * Spec says that we should clear the power reg before setting
1255 * a new value. Some controllers don't seem to like this though.
1257 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1258 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
* At least the Marvell CaFe chip gets confused if we set the
* voltage and turn on power at the same time, so set the
* voltage first.
*/
1265 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1266 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1268 pwr |= SDHCI_POWER_ON;
1270 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1272 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1273 sdhci_runtime_pm_bus_on(host);
* Some controllers need an extra 10 ms delay before they can
* apply clock after applying power
*/
1279 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1283 EXPORT_SYMBOL_GPL(sdhci_set_power);
1285 static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1288 struct mmc_host *mmc = host->mmc;
1290 if (host->ops->set_power)
1291 host->ops->set_power(host, mode, vdd);
1292 else if (!IS_ERR(mmc->supply.vmmc))
1293 sdhci_set_power_reg(host, mode, vdd);
1295 sdhci_set_power(host, mode, vdd);
1298 /*****************************************************************************\
1302 \*****************************************************************************/
1304 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1306 struct sdhci_host *host;
1308 unsigned long flags;
1310 host = mmc_priv(mmc);
1312 /* Firstly check card presence */
1313 present = mmc->ops->get_cd(mmc);
1315 spin_lock_irqsave(&host->lock, flags);
1317 WARN_ON(host->mrq != NULL);
1319 #ifndef SDHCI_USE_LEDS_CLASS
1320 sdhci_activate_led(host);
1324 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1325 * requests if Auto-CMD12 is enabled.
1327 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1329 mrq->data->stop = NULL;
1336 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1337 host->mrq->cmd->error = -ENOMEDIUM;
1338 tasklet_schedule(&host->finish_tasklet);
1340 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1341 sdhci_send_command(host, mrq->sbc);
1343 sdhci_send_command(host, mrq->cmd);
1347 spin_unlock_irqrestore(&host->lock, flags);
1350 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1354 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1355 if (width == MMC_BUS_WIDTH_8) {
1356 ctrl &= ~SDHCI_CTRL_4BITBUS;
1357 if (host->version >= SDHCI_SPEC_300)
1358 ctrl |= SDHCI_CTRL_8BITBUS;
1360 if (host->version >= SDHCI_SPEC_300)
1361 ctrl &= ~SDHCI_CTRL_8BITBUS;
1362 if (width == MMC_BUS_WIDTH_4)
1363 ctrl |= SDHCI_CTRL_4BITBUS;
1365 ctrl &= ~SDHCI_CTRL_4BITBUS;
1367 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1369 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1371 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1375 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1376 /* Select Bus Speed Mode for host */
1377 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1378 if ((timing == MMC_TIMING_MMC_HS200) ||
1379 (timing == MMC_TIMING_UHS_SDR104))
1380 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1381 else if (timing == MMC_TIMING_UHS_SDR12)
1382 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1383 else if (timing == MMC_TIMING_UHS_SDR25)
1384 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1385 else if (timing == MMC_TIMING_UHS_SDR50)
1386 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1387 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1388 (timing == MMC_TIMING_MMC_DDR52))
1389 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1390 else if (timing == MMC_TIMING_MMC_HS400)
1391 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1392 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1394 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
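/*
 * ios updates are applied in a fixed order: clock, power, bus width, the
 * High Speed Enable bit, and finally (on 3.00+ hosts) driver strength and
 * UHS signaling. The SD clock is gated while HISPD or the UHS mode is
 * changed to avoid glitching the card clock, and re-enabled afterwards.
 */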
1396 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1398 unsigned long flags;
1400 struct mmc_host *mmc = host->mmc;
1402 spin_lock_irqsave(&host->lock, flags);
1404 if (host->flags & SDHCI_DEVICE_DEAD) {
1405 spin_unlock_irqrestore(&host->lock, flags);
1406 if (!IS_ERR(mmc->supply.vmmc) &&
1407 ios->power_mode == MMC_POWER_OFF)
1408 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1413 * Reset the chip on each power off.
1414 * Should clear out any weird states.
1416 if (ios->power_mode == MMC_POWER_OFF) {
1417 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1421 if (host->version >= SDHCI_SPEC_300 &&
1422 (ios->power_mode == MMC_POWER_UP) &&
1423 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1424 sdhci_enable_preset_value(host, false);
1426 if (!ios->clock || ios->clock != host->clock) {
1427 host->ops->set_clock(host, ios->clock);
1428 host->clock = ios->clock;
1430 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1432 host->timeout_clk = host->mmc->actual_clock ?
1433 host->mmc->actual_clock / 1000 :
1435 host->mmc->max_busy_timeout =
1436 host->ops->get_max_timeout_count ?
1437 host->ops->get_max_timeout_count(host) :
1439 host->mmc->max_busy_timeout /= host->timeout_clk;
1443 __sdhci_set_power(host, ios->power_mode, ios->vdd);
1445 if (host->ops->platform_send_init_74_clocks)
1446 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1448 host->ops->set_bus_width(host, ios->bus_width);
1450 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1452 if ((ios->timing == MMC_TIMING_SD_HS ||
1453 ios->timing == MMC_TIMING_MMC_HS)
1454 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1455 ctrl |= SDHCI_CTRL_HISPD;
1457 ctrl &= ~SDHCI_CTRL_HISPD;
1459 if (host->version >= SDHCI_SPEC_300) {
1462 /* In case of UHS-I modes, set High Speed Enable */
1463 if ((ios->timing == MMC_TIMING_MMC_HS400) ||
1464 (ios->timing == MMC_TIMING_MMC_HS200) ||
1465 (ios->timing == MMC_TIMING_MMC_DDR52) ||
1466 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1467 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1468 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1469 (ios->timing == MMC_TIMING_UHS_SDR25))
1470 ctrl |= SDHCI_CTRL_HISPD;
1472 if (!host->preset_enabled) {
1473 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1475 * We only need to set Driver Strength if the
1476 * preset value enable is not set.
1478 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1479 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1480 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1481 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1482 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1483 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1484 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1485 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1486 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1487 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1489 pr_warn("%s: invalid driver type, default to driver type B\n",
1491 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1494 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
* According to the SDHCI Spec v3.00, if the Preset Value
* Enable in the Host Control 2 register is set, we
* need to reset SD Clock Enable before changing High
* Speed Enable to avoid generating clock glitches.
*/
1503 /* Reset SD Clock Enable */
1504 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1505 clk &= ~SDHCI_CLOCK_CARD_EN;
1506 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1508 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1510 /* Re-enable SD Clock */
1511 host->ops->set_clock(host, host->clock);
1514 /* Reset SD Clock Enable */
1515 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1516 clk &= ~SDHCI_CLOCK_CARD_EN;
1517 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1519 host->ops->set_uhs_signaling(host, ios->timing);
1520 host->timing = ios->timing;
1522 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1523 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
1524 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1525 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1526 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1527 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1528 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1531 sdhci_enable_preset_value(host, true);
1532 preset = sdhci_get_preset_value(host);
1533 ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1534 >> SDHCI_PRESET_DRV_SHIFT;
1537 /* Re-enable SD Clock */
1538 host->ops->set_clock(host, host->clock);
1540 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1543 * Some (ENE) controllers go apeshit on some ios operation,
1544 * signalling timeout and CRC errors even on CMD0. Resetting
1545 * it on each ios seems to solve the problem.
1547 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1548 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1551 spin_unlock_irqrestore(&host->lock, flags);
1554 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1556 struct sdhci_host *host = mmc_priv(mmc);
1558 sdhci_do_set_ios(host, ios);
1561 static int sdhci_do_get_cd(struct sdhci_host *host)
1563 int gpio_cd = mmc_gpio_get_cd(host->mmc);
1565 if (host->flags & SDHCI_DEVICE_DEAD)
1568 /* If nonremovable, assume that the card is always present. */
1569 if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
* Try slot gpio detect: if defined, it takes precedence
* over the built-in controller functionality
*/
1576 if (!IS_ERR_VALUE(gpio_cd))
1579 /* If polling, assume that the card is always present. */
1580 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1583 /* Host native card detect */
1584 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1587 static int sdhci_get_cd(struct mmc_host *mmc)
1589 struct sdhci_host *host = mmc_priv(mmc);
1591 return sdhci_do_get_cd(host);
1594 static int sdhci_check_ro(struct sdhci_host *host)
1596 unsigned long flags;
1599 spin_lock_irqsave(&host->lock, flags);
1601 if (host->flags & SDHCI_DEVICE_DEAD)
1603 else if (host->ops->get_ro)
1604 is_readonly = host->ops->get_ro(host);
1606 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1607 & SDHCI_WRITE_PROTECT);
1609 spin_unlock_irqrestore(&host->lock, flags);
1611 /* This quirk needs to be replaced by a callback-function later */
1612 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1613 !is_readonly : is_readonly;
1616 #define SAMPLE_COUNT 5
1618 static int sdhci_do_get_ro(struct sdhci_host *host)
1622 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1623 return sdhci_check_ro(host);
1626 for (i = 0; i < SAMPLE_COUNT; i++) {
1627 if (sdhci_check_ro(host)) {
1628 if (++ro_count > SAMPLE_COUNT / 2)
1636 static void sdhci_hw_reset(struct mmc_host *mmc)
1638 struct sdhci_host *host = mmc_priv(mmc);
1640 if (host->ops && host->ops->hw_reset)
1641 host->ops->hw_reset(host);
1644 static int sdhci_get_ro(struct mmc_host *mmc)
1646 struct sdhci_host *host = mmc_priv(mmc);
1648 return sdhci_do_get_ro(host);
1651 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1653 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1655 host->ier |= SDHCI_INT_CARD_INT;
1657 host->ier &= ~SDHCI_INT_CARD_INT;
1659 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1660 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1665 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1667 struct sdhci_host *host = mmc_priv(mmc);
1668 unsigned long flags;
1670 spin_lock_irqsave(&host->lock, flags);
1672 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1674 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1676 sdhci_enable_sdio_irq_nolock(host, enable);
1677 spin_unlock_irqrestore(&host->lock, flags);
1680 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1681 struct mmc_ios *ios)
1683 struct mmc_host *mmc = host->mmc;
* Signal Voltage Switching is only applicable for Host Controllers
* v3.00 and above.
*/
1691 if (host->version < SDHCI_SPEC_300)
1694 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1696 switch (ios->signal_voltage) {
1697 case MMC_SIGNAL_VOLTAGE_330:
1698 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1699 ctrl &= ~SDHCI_CTRL_VDD_180;
1700 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1702 if (!IS_ERR(mmc->supply.vqmmc)) {
1703 ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
1706 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1712 usleep_range(5000, 5500);
1714 /* 3.3V regulator output should be stable within 5 ms */
1715 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1716 if (!(ctrl & SDHCI_CTRL_VDD_180))
1719 pr_warn("%s: 3.3V regulator output did not became stable\n",
1723 case MMC_SIGNAL_VOLTAGE_180:
1724 if (!IS_ERR(mmc->supply.vqmmc)) {
1725 ret = regulator_set_voltage(mmc->supply.vqmmc,
1728 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
* Enable 1.8V Signal Enable in the Host Control2
* register.
*/
1738 ctrl |= SDHCI_CTRL_VDD_180;
1739 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
/* Some controllers need to do more when switching */
1742 if (host->ops->voltage_switch)
1743 host->ops->voltage_switch(host);
1745 /* 1.8V regulator output should be stable within 5 ms */
1746 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1747 if (ctrl & SDHCI_CTRL_VDD_180)
1750 pr_warn("%s: 1.8V regulator output did not became stable\n",
1754 case MMC_SIGNAL_VOLTAGE_120:
1755 if (!IS_ERR(mmc->supply.vqmmc)) {
1756 ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
1759 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1766 /* No signal voltage switch required */
1771 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1772 struct mmc_ios *ios)
1774 struct sdhci_host *host = mmc_priv(mmc);
1776 if (host->version < SDHCI_SPEC_300)
1779 return sdhci_do_start_signal_voltage_switch(host, ios);
1782 static int sdhci_card_busy(struct mmc_host *mmc)
1784 struct sdhci_host *host = mmc_priv(mmc);
1787 /* Check whether DAT[3:0] is 0000 */
1788 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1790 return !(present_state & SDHCI_DATA_LVL_MASK);
1793 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1795 struct sdhci_host *host = mmc_priv(mmc);
1796 unsigned long flags;
1798 spin_lock_irqsave(&host->lock, flags);
1799 host->flags |= SDHCI_HS400_TUNING;
1800 spin_unlock_irqrestore(&host->lock, flags);
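/*
 * Tuning: the Execute Tuning bit is set in HOST_CONTROL2 and the tuning
 * command (CMD19 for SD, CMD21 for eMMC HS200) is issued repeatedly with
 * only the Buffer Read Ready interrupt enabled, waiting up to 50 ms for
 * each block. The loop ends when the controller clears Execute Tuning or
 * after MAX_TUNING_LOOP (40) attempts; on failure Tuned Clock is cleared
 * so the host falls back to a fixed sampling clock.
 */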
1805 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1807 struct sdhci_host *host = mmc_priv(mmc);
1809 int tuning_loop_counter = MAX_TUNING_LOOP;
1811 unsigned long flags;
1812 unsigned int tuning_count = 0;
1815 spin_lock_irqsave(&host->lock, flags);
1817 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1818 host->flags &= ~SDHCI_HS400_TUNING;
1820 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
1821 tuning_count = host->tuning_count;
1824 * The Host Controller needs tuning in case of SDR104 and DDR50
1825 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
1826 * the Capabilities register.
1827 * If the Host Controller supports the HS200 mode then the
1828 * tuning function has to be executed.
1830 switch (host->timing) {
1831 /* HS400 tuning is done in HS200 mode */
1832 case MMC_TIMING_MMC_HS400:
1836 case MMC_TIMING_MMC_HS200:
* Periodic re-tuning for HS400 is not expected to be needed, so
* disable it here.
*/
1845 case MMC_TIMING_UHS_SDR104:
1846 case MMC_TIMING_UHS_DDR50:
1849 case MMC_TIMING_UHS_SDR50:
1850 if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
1851 host->flags & SDHCI_SDR104_NEEDS_TUNING)
1859 if (host->ops->platform_execute_tuning) {
1860 spin_unlock_irqrestore(&host->lock, flags);
1861 err = host->ops->platform_execute_tuning(host, opcode);
1865 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1866 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1867 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
1868 ctrl |= SDHCI_CTRL_TUNED_CLK;
1869 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1872 * As per the Host Controller spec v3.00, tuning command
1873 * generates Buffer Read Ready interrupt, so enable that.
1875 * Note: The spec clearly says that when tuning sequence
1876 * is being performed, the controller does not generate
1877 * interrupts other than Buffer Read Ready interrupt. But
1878 * to make sure we don't hit a controller bug, we _only_
1879 * enable Buffer Read Ready interrupt here.
1881 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
1882 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
1885 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
1886 * of loops reaches 40 times or a timeout of 150ms occurs.
1889 struct mmc_command cmd = {0};
1890 struct mmc_request mrq = {NULL};
1892 cmd.opcode = opcode;
1894 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1899 if (tuning_loop_counter-- == 0)
1906 * In response to CMD19, the card sends 64 bytes of tuning
* block to the Host Controller. So we set the block size
* to 64 here.
*/
1910 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1911 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
1912 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
1914 else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
1915 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1918 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1923 * The tuning block is sent by the card to the host controller.
1924 * So we set the TRNS_READ bit in the Transfer Mode register.
1925 * This also takes care of setting DMA Enable and Multi Block
1926 * Select in the same register to 0.
1928 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1930 sdhci_send_command(host, &cmd);
1935 spin_unlock_irqrestore(&host->lock, flags);
1936 /* Wait for Buffer Read Ready interrupt */
1937 wait_event_interruptible_timeout(host->buf_ready_int,
1938 (host->tuning_done == 1),
1939 msecs_to_jiffies(50));
1940 spin_lock_irqsave(&host->lock, flags);
1942 if (!host->tuning_done) {
1943 pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
1944 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1945 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1946 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
1947 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1953 host->tuning_done = 0;
1955 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1957 /* eMMC spec does not require a delay between tuning cycles */
1958 if (opcode == MMC_SEND_TUNING_BLOCK)
1960 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
1963 * The Host Driver has exhausted the maximum number of loops allowed,
1964 * so use fixed sampling frequency.
1966 if (tuning_loop_counter < 0) {
1967 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1968 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1970 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
1971 pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
1978 * In case tuning fails, host controllers which support
1979 * re-tuning can try tuning again at a later time, when the
1980 * re-tuning timer expires. So for these controllers, we
1981 * return 0. Since there might be other controllers who do not
1982 * have this capability, we return error for them.
1987 host->mmc->retune_period = err ? 0 : tuning_count;
1989 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1990 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1992 spin_unlock_irqrestore(&host->lock, flags);
1996 static int sdhci_select_drive_strength(struct mmc_card *card,
1997 unsigned int max_dtr, int host_drv,
1998 int card_drv, int *drv_type)
2000 struct sdhci_host *host = mmc_priv(card->host);
2002 if (!host->ops->select_drive_strength)
2005 return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
2006 card_drv, drv_type);
2009 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2011 /* Host Controller v3.00 defines preset value registers */
2012 if (host->version < SDHCI_SPEC_300)
2016 * We only enable or disable Preset Value if they are not already
2017 * enabled or disabled respectively. Otherwise, we bail out.
2019 if (host->preset_enabled != enable) {
2020 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2023 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2025 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2027 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2030 host->flags |= SDHCI_PV_ENABLED;
2032 host->flags &= ~SDHCI_PV_ENABLED;
2034 host->preset_enabled = enable;
2038 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2041 struct sdhci_host *host = mmc_priv(mmc);
2042 struct mmc_data *data = mrq->data;
2044 if (data->host_cookie != COOKIE_UNMAPPED)
2045 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2046 data->flags & MMC_DATA_WRITE ?
2047 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2049 data->host_cookie = COOKIE_UNMAPPED;
2052 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
2055 struct sdhci_host *host = mmc_priv(mmc);
2057 mrq->data->host_cookie = COOKIE_UNMAPPED;
2059 if (host->flags & SDHCI_REQ_USE_DMA)
2060 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2063 static void sdhci_card_event(struct mmc_host *mmc)
2065 struct sdhci_host *host = mmc_priv(mmc);
2066 unsigned long flags;
2069 /* First check if client has provided their own card event */
2070 if (host->ops->card_event)
2071 host->ops->card_event(host);
2073 present = sdhci_do_get_cd(host);
2075 spin_lock_irqsave(&host->lock, flags);
2077 /* Check host->mrq first in case we are runtime suspended */
2078 if (host->mrq && !present) {
2079 pr_err("%s: Card removed during transfer!\n",
2080 mmc_hostname(host->mmc));
2081 pr_err("%s: Resetting controller.\n",
2082 mmc_hostname(host->mmc));
2084 sdhci_do_reset(host, SDHCI_RESET_CMD);
2085 sdhci_do_reset(host, SDHCI_RESET_DATA);
2087 host->mrq->cmd->error = -ENOMEDIUM;
2088 tasklet_schedule(&host->finish_tasklet);
2091 spin_unlock_irqrestore(&host->lock, flags);
2094 static const struct mmc_host_ops sdhci_ops = {
2095 .request = sdhci_request,
2096 .post_req = sdhci_post_req,
2097 .pre_req = sdhci_pre_req,
2098 .set_ios = sdhci_set_ios,
2099 .get_cd = sdhci_get_cd,
2100 .get_ro = sdhci_get_ro,
2101 .hw_reset = sdhci_hw_reset,
2102 .enable_sdio_irq = sdhci_enable_sdio_irq,
2103 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2104 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2105 .execute_tuning = sdhci_execute_tuning,
2106 .select_drive_strength = sdhci_select_drive_strength,
2107 .card_event = sdhci_card_event,
2108 .card_busy = sdhci_card_busy,
2111 /*****************************************************************************\
2115 \*****************************************************************************/
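/*
 * The finish tasklet runs once a request has completed (or failed): it
 * cancels the software timeout timer, unmaps any DMA buffers mapped by
 * sdhci_prepare_data(), resets the CMD and DATA state machines when the
 * request ended in error (or the RESET_AFTER_REQUEST quirk demands it),
 * turns the activity LED off and hands the request back to the core via
 * mmc_request_done().
 */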
2117 static void sdhci_tasklet_finish(unsigned long param)
2119 struct sdhci_host *host;
2120 unsigned long flags;
2121 struct mmc_request *mrq;
2123 host = (struct sdhci_host*)param;
2125 spin_lock_irqsave(&host->lock, flags);
2128 * If this tasklet gets rescheduled while running, it will
2129 * be run again afterwards but without any active request.
2132 spin_unlock_irqrestore(&host->lock, flags);
2136 del_timer(&host->timer);
2141 * Always unmap the data buffers if they were mapped by
2142 * sdhci_prepare_data() whenever we finish with a request.
2143 * This avoids leaking DMA mappings on error.
2145 if (host->flags & SDHCI_REQ_USE_DMA) {
2146 struct mmc_data *data = mrq->data;
2148 if (data && data->host_cookie == COOKIE_MAPPED) {
2149 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2150 (data->flags & MMC_DATA_READ) ?
2151 DMA_FROM_DEVICE : DMA_TO_DEVICE);
2152 data->host_cookie = COOKIE_UNMAPPED;
2157 * The controller needs a reset of internal state machines
2158 * upon error conditions.
2160 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
2161 ((mrq->cmd && mrq->cmd->error) ||
2162 (mrq->sbc && mrq->sbc->error) ||
2163 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
2164 (mrq->data->stop && mrq->data->stop->error))) ||
2165 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2167 /* Some controllers need this kick or reset won't work here */
2168 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2169 /* This is to force an update */
2170 host->ops->set_clock(host, host->clock);
2172 /* Spec says we should do both at the same time, but Ricoh
2173 controllers do not like that. */
2174 sdhci_do_reset(host, SDHCI_RESET_CMD);
2175 sdhci_do_reset(host, SDHCI_RESET_DATA);
2182 #ifndef SDHCI_USE_LEDS_CLASS
2183 sdhci_deactivate_led(host);
2187 spin_unlock_irqrestore(&host->lock, flags);
2189 mmc_request_done(host->mmc, mrq);
2192 static void sdhci_timeout_timer(unsigned long data)
2194 struct sdhci_host *host;
2195 unsigned long flags;
2197 host = (struct sdhci_host*)data;
2199 spin_lock_irqsave(&host->lock, flags);
2202 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2203 mmc_hostname(host->mmc));
2204 sdhci_dumpregs(host);
2207 host->data->error = -ETIMEDOUT;
2208 sdhci_finish_data(host);
2211 host->cmd->error = -ETIMEDOUT;
2213 host->mrq->cmd->error = -ETIMEDOUT;
2215 tasklet_schedule(&host->finish_tasklet);
2220 spin_unlock_irqrestore(&host->lock, flags);
2223 /*****************************************************************************\
2225 * Interrupt handling *
2227 \*****************************************************************************/
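/*
 * Command interrupt handling runs under host->lock from the hard irq
 * handler. Timeout and CRC/End-Bit/Index error bits are translated into
 * -ETIMEDOUT/-EILSEQ on the current command; a plain Command Complete
 * (SDHCI_INT_RESPONSE) lets sdhci_finish_command() read back the response.
 * Busy-signalling commands are instead completed from the Transfer
 * Complete interrupt, see the MMC_RSP_BUSY handling below.
 */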
2229 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
2231 BUG_ON(intmask == 0);
2234 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2235 mmc_hostname(host->mmc), (unsigned)intmask);
2236 sdhci_dumpregs(host);
2240 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2241 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2242 if (intmask & SDHCI_INT_TIMEOUT)
2243 host->cmd->error = -ETIMEDOUT;
2245 host->cmd->error = -EILSEQ;
2248 * If this command initiates a data phase and a response
2249 * CRC error is signalled, the card can start transferring
2250 * data - the card may have received the command without
2251 * error. We must not terminate the mmc_request early.
2253 * If the card did not receive the command or returned an
* error which prevented it sending data, the data phase
* will time out.
*/
2257 if (host->cmd->data &&
2258 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2264 tasklet_schedule(&host->finish_tasklet);
* The host can send an interrupt when the busy state has
* ended, allowing us to wait without wasting CPU cycles.
* Unfortunately this is overloaded on the "data complete"
* interrupt, so we need to take some care when handling
* it.
*
* Note: The 1.0 specification is a bit ambiguous about this
*       feature so there might be some problems with older
*       controllers.
*/
2279 if (host->cmd->flags & MMC_RSP_BUSY) {
2280 if (host->cmd->data)
2281 DBG("Cannot wait for busy signal when also doing a data transfer");
2282 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
2283 && !host->busy_handle) {
2284 /* Mark that command complete before busy is ended */
2285 host->busy_handle = 1;
2289 /* The controller does not support the end-of-busy IRQ,
2290 * fall through and take the SDHCI_INT_RESPONSE */
2291 } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
2292 host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
2293 *mask &= ~SDHCI_INT_DATA_END;
2296 if (intmask & SDHCI_INT_RESPONSE)
2297 sdhci_finish_command(host);
2300 #ifdef CONFIG_MMC_DEBUG
2301 static void sdhci_adma_show_error(struct sdhci_host *host)
2303 const char *name = mmc_hostname(host->mmc);
2304 void *desc = host->adma_table;
2306 sdhci_dumpregs(host);
2309 struct sdhci_adma2_64_desc *dma_desc = desc;
2311 if (host->flags & SDHCI_USE_64_BIT_DMA)
2312 DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2313 name, desc, le32_to_cpu(dma_desc->addr_hi),
2314 le32_to_cpu(dma_desc->addr_lo),
2315 le16_to_cpu(dma_desc->len),
2316 le16_to_cpu(dma_desc->cmd));
2318 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2319 name, desc, le32_to_cpu(dma_desc->addr_lo),
2320 le16_to_cpu(dma_desc->len),
2321 le16_to_cpu(dma_desc->cmd));
2323 desc += host->desc_sz;
2325 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2330 static void sdhci_adma_show_error(struct sdhci_host *host) { }
2333 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2336 BUG_ON(intmask == 0);
2338 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2339 if (intmask & SDHCI_INT_DATA_AVAIL) {
2340 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2341 if (command == MMC_SEND_TUNING_BLOCK ||
2342 command == MMC_SEND_TUNING_BLOCK_HS200) {
2343 host->tuning_done = 1;
2344 wake_up(&host->buf_ready_int);
2351 * The "data complete" interrupt is also used to
2352 * indicate that a busy state has ended. See comment
2353 * above in sdhci_cmd_irq().
2355 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
2356 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2357 host->cmd->error = -ETIMEDOUT;
2358 tasklet_schedule(&host->finish_tasklet);
2361 if (intmask & SDHCI_INT_DATA_END) {
2363 * Some cards handle busy-end interrupt
2364 * before the command completed, so make
2365 * sure we do things in the proper order.
2367 if (host->busy_handle)
2368 sdhci_finish_command(host);
2370 host->busy_handle = 1;
2375 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2376 mmc_hostname(host->mmc), (unsigned)intmask);
2377 sdhci_dumpregs(host);
2382 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2383 host->data->error = -ETIMEDOUT;
2384 else if (intmask & SDHCI_INT_DATA_END_BIT)
2385 host->data->error = -EILSEQ;
2386 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2387 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2389 host->data->error = -EILSEQ;
2390 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2391 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2392 sdhci_adma_show_error(host);
2393 host->data->error = -EIO;
2394 if (host->ops->adma_workaround)
2395 host->ops->adma_workaround(host, intmask);
2398 if (host->data->error)
2399 sdhci_finish_data(host);
2401 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2402 sdhci_transfer_pio(host);
2405 * We currently don't do anything fancy with DMA
2406 * boundaries, but as we can't disable the feature
2407 * we need to at least restart the transfer.
2409 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2410 * should return a valid address to continue from, but as
2411 * some controllers are faulty, don't trust them.
2413 if (intmask & SDHCI_INT_DMA_END) {
2414 u32 dmastart, dmanow;
2415 dmastart = sg_dma_address(host->data->sg);
2416 dmanow = dmastart + host->data->bytes_xfered;
2418 * Force update to the next DMA block boundary.
dmanow = (dmanow &
2421 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2422 SDHCI_DEFAULT_BOUNDARY_SIZE;
2423 host->data->bytes_xfered = dmanow - dmastart;
2424 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2426 mmc_hostname(host->mmc), dmastart,
2427 host->data->bytes_xfered, dmanow);
2428 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
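/*
 * Worked example (illustrative, assuming the default 512 KiB boundary,
 * i.e. SDHCI_DEFAULT_BOUNDARY_SIZE == 0x80000): with dmastart =
 * 0x10000000 and 0x6100 bytes transferred so far, dmanow starts as
 * 0x10006100 and is rounded up to (0x10006100 & ~0x7ffff) + 0x80000 =
 * 0x10080000, so bytes_xfered becomes 0x80000 and the controller is
 * restarted at the next 512 KiB boundary.
 */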
2431 if (intmask & SDHCI_INT_DATA_END) {
2434 * Data managed to finish before the
2435 * command completed. Make sure we do
2436 * things in the proper order.
2438 host->data_early = 1;
2440 sdhci_finish_data(host);
2446 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2448 irqreturn_t result = IRQ_NONE;
2449 struct sdhci_host *host = dev_id;
2450 u32 intmask, mask, unexpected = 0;
int max_loops = 16;
2453 spin_lock(&host->lock);
2455 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2456 spin_unlock(&host->lock);
2460 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2461 if (!intmask || intmask == 0xffffffff) {
2467 /* Clear selected interrupts. */
2468 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2469 SDHCI_INT_BUS_POWER);
2470 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2472 DBG("*** %s got interrupt: 0x%08x\n",
2473 mmc_hostname(host->mmc), intmask);
2475 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2476 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2480 * There is an observation on i.mx esdhc: the INSERT
2481 * bit will be immediately set again when it gets
2482 * cleared, if a card is inserted. We have to mask
2483 * the irq to prevent an interrupt storm which would
2484 * freeze the system. The REMOVE interrupt gets the
2487 * same treatment. More testing is needed here to ensure it works
2488 * for other platforms though.
2490 host->ier &= ~(SDHCI_INT_CARD_INSERT |
2491 SDHCI_INT_CARD_REMOVE);
2492 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2493 SDHCI_INT_CARD_INSERT;
2494 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2495 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2497 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2498 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2500 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2501 SDHCI_INT_CARD_REMOVE);
2502 result = IRQ_WAKE_THREAD;
2505 if (intmask & SDHCI_INT_CMD_MASK)
2506 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
2509 if (intmask & SDHCI_INT_DATA_MASK)
2510 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2512 if (intmask & SDHCI_INT_BUS_POWER)
2513 pr_err("%s: Card is consuming too much power!\n",
2514 mmc_hostname(host->mmc));
2516 if (intmask & SDHCI_INT_CARD_INT) {
2517 sdhci_enable_sdio_irq_nolock(host, false);
2518 host->thread_isr |= SDHCI_INT_CARD_INT;
2519 result = IRQ_WAKE_THREAD;
2522 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2523 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2524 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2525 SDHCI_INT_CARD_INT);
2528 unexpected |= intmask;
2529 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2532 if (result == IRQ_NONE)
2533 result = IRQ_HANDLED;
2535 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2536 } while (intmask && --max_loops);
2538 spin_unlock(&host->lock);
2541 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2542 mmc_hostname(host->mmc), unexpected);
2543 sdhci_dumpregs(host);
2549 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2551 struct sdhci_host *host = dev_id;
2552 unsigned long flags;
2555 spin_lock_irqsave(&host->lock, flags);
2556 isr = host->thread_isr;
2557 host->thread_isr = 0;
2558 spin_unlock_irqrestore(&host->lock, flags);
2560 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2561 sdhci_card_event(host->mmc);
2562 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
2565 if (isr & SDHCI_INT_CARD_INT) {
2566 sdio_run_irqs(host->mmc);
2568 spin_lock_irqsave(&host->lock, flags);
2569 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2570 sdhci_enable_sdio_irq_nolock(host, true);
2571 spin_unlock_irqrestore(&host->lock, flags);
2574 return isr ? IRQ_HANDLED : IRQ_NONE;
2577 /*****************************************************************************\
2581 \*****************************************************************************/
2584 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2587 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2588 | SDHCI_WAKE_ON_INT;
2590 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val |= mask;
2592 /* Avoid fake wake up */
2593 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2594 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2595 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2597 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2599 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2602 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2603 | SDHCI_WAKE_ON_INT;
2605 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val &= ~mask;
2607 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2610 int sdhci_suspend_host(struct sdhci_host *host)
2612 sdhci_disable_card_detection(host);
2614 mmc_retune_timer_stop(host->mmc);
2615 mmc_retune_needed(host->mmc);
2617 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2619 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2620 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2621 free_irq(host->irq, host);
2623 sdhci_enable_irq_wakeups(host);
2624 enable_irq_wake(host->irq);
2629 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2631 int sdhci_resume_host(struct sdhci_host *host)
2635 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2636 if (host->ops->enable_dma)
2637 host->ops->enable_dma(host);
2640 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2641 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2642 /* Card keeps power but host controller does not */
2643 sdhci_init(host, 0);
2646 sdhci_do_set_ios(host, &host->mmc->ios);
2648 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2652 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2653 ret = request_threaded_irq(host->irq, sdhci_irq,
2654 sdhci_thread_irq, IRQF_SHARED,
2655 mmc_hostname(host->mmc), host);
2659 sdhci_disable_irq_wakeups(host);
2660 disable_irq_wake(host->irq);
2663 sdhci_enable_card_detection(host);
2668 EXPORT_SYMBOL_GPL(sdhci_resume_host);
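/*
 * Illustrative usage sketch (hypothetical "foo" glue driver, not part of
 * this file): platform/PCI glue typically calls sdhci_suspend_host() and
 * sdhci_resume_host() from its dev_pm_ops callbacks, assuming it stored
 * the host pointer with dev_set_drvdata() at probe time:
 */
#if 0
static int foo_sdhci_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	return sdhci_suspend_host(host);
}

static int foo_sdhci_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	return sdhci_resume_host(host);
}

static SIMPLE_DEV_PM_OPS(foo_sdhci_pm_ops, foo_sdhci_suspend,
			 foo_sdhci_resume);
#endif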
2670 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
2674 host->bus_on = true;
2675 pm_runtime_get_noresume(host->mmc->parent);
2678 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
2682 host->bus_on = false;
2683 pm_runtime_put_noidle(host->mmc->parent);
2686 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2688 unsigned long flags;
2690 mmc_retune_timer_stop(host->mmc);
2691 mmc_retune_needed(host->mmc);
2693 spin_lock_irqsave(&host->lock, flags);
2694 host->ier &= SDHCI_INT_CARD_INT;
2695 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2696 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2697 spin_unlock_irqrestore(&host->lock, flags);
2699 synchronize_hardirq(host->irq);
2701 spin_lock_irqsave(&host->lock, flags);
2702 host->runtime_suspended = true;
2703 spin_unlock_irqrestore(&host->lock, flags);
2707 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2709 int sdhci_runtime_resume_host(struct sdhci_host *host)
2711 unsigned long flags;
2712 int host_flags = host->flags;
2714 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2715 if (host->ops->enable_dma)
2716 host->ops->enable_dma(host);
2719 sdhci_init(host, 0);
2721 /* Force clock and power re-program */
2724 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
2725 sdhci_do_set_ios(host, &host->mmc->ios);
2727 if ((host_flags & SDHCI_PV_ENABLED) &&
2728 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2729 spin_lock_irqsave(&host->lock, flags);
2730 sdhci_enable_preset_value(host, true);
2731 spin_unlock_irqrestore(&host->lock, flags);
2734 spin_lock_irqsave(&host->lock, flags);
2736 host->runtime_suspended = false;
2738 /* Enable SDIO IRQ */
2739 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2740 sdhci_enable_sdio_irq_nolock(host, true);
2742 /* Enable Card Detection */
2743 sdhci_enable_card_detection(host);
2745 spin_unlock_irqrestore(&host->lock, flags);
2749 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2751 #endif /* CONFIG_PM */
2753 /*****************************************************************************\
2755 * Device allocation/registration *
2757 \*****************************************************************************/
2759 struct sdhci_host *sdhci_alloc_host(struct device *dev,
size_t priv_size)
2762 struct mmc_host *mmc;
2763 struct sdhci_host *host;
2765 WARN_ON(dev == NULL);
2767 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
if (!mmc)
2769 return ERR_PTR(-ENOMEM);
2771 host = mmc_priv(mmc);
2773 host->mmc_host_ops = sdhci_ops;
2774 mmc->ops = &host->mmc_host_ops;
2779 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
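/*
 * Illustrative usage sketch (hypothetical "foo" platform glue, not part
 * of this file): a typical probe path allocates the host, fills in the
 * resources and quirks, and then registers it with sdhci_add_host():
 */
#if 0
static int foo_sdhci_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	int ret;

	host = sdhci_alloc_host(&pdev->dev, 0);
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->hw_name = "foo-sdhci";		/* shown in the add_host banner */
	host->ioaddr = foo_map_registers(pdev);	/* hypothetical helper */
	host->irq = platform_get_irq(pdev, 0);
	host->quirks = 0;

	ret = sdhci_add_host(host);
	if (ret)
		sdhci_free_host(host);
	else
		platform_set_drvdata(pdev, host);

	return ret;
}
#endif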
2781 static int sdhci_set_dma_mask(struct sdhci_host *host)
2783 struct mmc_host *mmc = host->mmc;
2784 struct device *dev = mmc_dev(mmc);
2787 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
2788 host->flags &= ~SDHCI_USE_64_BIT_DMA;
2790 /* Try 64-bit mask if hardware is capable of it */
2791 if (host->flags & SDHCI_USE_64_BIT_DMA) {
2792 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
2794 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
2796 host->flags &= ~SDHCI_USE_64_BIT_DMA;
2800 /* 32-bit mask as default & fallback */
2802 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2804 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
2811 int sdhci_add_host(struct sdhci_host *host)
2813 struct mmc_host *mmc;
2814 u32 caps[2] = {0, 0};
2815 u32 max_current_caps;
2816 unsigned int ocr_avail;
2817 unsigned int override_timeout_clk;
2821 WARN_ON(host == NULL);
if (debug_quirks)
2828 host->quirks = debug_quirks;
if (debug_quirks2)
2830 host->quirks2 = debug_quirks2;
2832 override_timeout_clk = host->timeout_clk;
2834 sdhci_do_reset(host, SDHCI_RESET_ALL);
2836 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
2837 host->version = (host->version & SDHCI_SPEC_VER_MASK)
2838 >> SDHCI_SPEC_VER_SHIFT;
2839 if (host->version > SDHCI_SPEC_300) {
2840 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
2841 mmc_hostname(mmc), host->version);
2844 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
2845 sdhci_readl(host, SDHCI_CAPABILITIES);
2847 if (host->version >= SDHCI_SPEC_300)
2848 caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
host->caps1 :
2850 sdhci_readl(host, SDHCI_CAPABILITIES_1);
2852 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
2853 host->flags |= SDHCI_USE_SDMA;
2854 else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
2855 DBG("Controller doesn't have SDMA capability\n");
else
2857 host->flags |= SDHCI_USE_SDMA;
2859 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
2860 (host->flags & SDHCI_USE_SDMA)) {
2861 DBG("Disabling DMA as it is marked broken\n");
2862 host->flags &= ~SDHCI_USE_SDMA;
2865 if ((host->version >= SDHCI_SPEC_200) &&
2866 (caps[0] & SDHCI_CAN_DO_ADMA2))
2867 host->flags |= SDHCI_USE_ADMA;
2869 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
2870 (host->flags & SDHCI_USE_ADMA)) {
2871 DBG("Disabling ADMA as it is marked broken\n");
2872 host->flags &= ~SDHCI_USE_ADMA;
2876 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
2877 * and *must* do 64-bit DMA. A driver has the opportunity to change
2878 * that during the first call to ->enable_dma(). Similarly
2879 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to implement.
2882 if (caps[0] & SDHCI_CAN_64BIT)
2883 host->flags |= SDHCI_USE_64_BIT_DMA;
2885 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2886 ret = sdhci_set_dma_mask(host);
2888 if (!ret && host->ops->enable_dma)
2889 ret = host->ops->enable_dma(host);
2892 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
2894 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
2900 /* SDMA does not support 64-bit DMA */
2901 if (host->flags & SDHCI_USE_64_BIT_DMA)
2902 host->flags &= ~SDHCI_USE_SDMA;
2904 if (host->flags & SDHCI_USE_ADMA) {
2909 * The DMA descriptor table size is calculated as the maximum
2910 * number of segments times 2, to allow for an alignment
2911 * descriptor for each segment, plus 1 for a nop end descriptor,
2912 * all multiplied by the descriptor size.
2914 if (host->flags & SDHCI_USE_64_BIT_DMA) {
2915 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2916 SDHCI_ADMA2_64_DESC_SZ;
2917 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
2919 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2920 SDHCI_ADMA2_32_DESC_SZ;
2921 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
2924 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
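/*
 * Worked example (illustrative; assumes SDHCI_MAX_SEGS is 128 and the
 * 64-bit ADMA2 descriptor is 12 bytes): the descriptor table is then
 * (128 * 2 + 1) * 12 = 3084 bytes, and the alignment/bounce buffer is
 * 128 * SDHCI_ADMA2_ALIGN bytes, both carved out of the single coherent
 * allocation made below.
 */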
2925 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
2926 host->adma_table_sz, &dma, GFP_KERNEL);
2928 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2930 host->flags &= ~SDHCI_USE_ADMA;
2931 } else if ((dma + host->align_buffer_sz) &
2932 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
2933 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
2935 host->flags &= ~SDHCI_USE_ADMA;
2936 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
2937 host->adma_table_sz, buf, dma);
2939 host->align_buffer = buf;
2940 host->align_addr = dma;
2942 host->adma_table = buf + host->align_buffer_sz;
2943 host->adma_addr = dma + host->align_buffer_sz;
2948 * If we use DMA, then it's up to the caller to set the DMA
2949 * mask, but PIO does not need the hw shim so we set a new
2950 * mask here in that case.
2952 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
2953 host->dma_mask = DMA_BIT_MASK(64);
2954 mmc_dev(mmc)->dma_mask = &host->dma_mask;
2957 if (host->version >= SDHCI_SPEC_300)
2958 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
2959 >> SDHCI_CLOCK_BASE_SHIFT;
2961 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
2962 >> SDHCI_CLOCK_BASE_SHIFT;
2964 host->max_clk *= 1000000;
2965 if (host->max_clk == 0 || host->quirks &
2966 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
2967 if (!host->ops->get_max_clock) {
2968 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
2972 host->max_clk = host->ops->get_max_clock(host);
2976 * In case of Host Controller v3.00, find out whether clock
2977 * multiplier is supported.
2979 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
2980 SDHCI_CLOCK_MUL_SHIFT;
2983 * In case the value in Clock Multiplier is 0, then programmable
2984 * clock mode is not supported, otherwise the actual clock
2985 * multiplier is one more than the value of Clock Multiplier
2986 * in the Capabilities Register.
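* For example (illustrative): a Clock Multiplier field of 9 means the
* effective multiplier used below is 10, while a field of 0 means
* programmable clock mode is unavailable and only the divided base
* clock can be used.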
2992 * Set host parameters.
2994 max_clk = host->max_clk;
2996 if (host->ops->get_min_clock)
2997 mmc->f_min = host->ops->get_min_clock(host);
2998 else if (host->version >= SDHCI_SPEC_300) {
2999 if (host->clk_mul) {
3000 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3001 max_clk = host->max_clk * host->clk_mul;
3003 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3005 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3007 if (!mmc->f_max || mmc->f_max > max_clk)
3008 mmc->f_max = max_clk;
3010 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3011 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
3012 SDHCI_TIMEOUT_CLK_SHIFT;
3013 if (host->timeout_clk == 0) {
3014 if (host->ops->get_timeout_clock) {
3016 host->ops->get_timeout_clock(host);
3018 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3024 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
3025 host->timeout_clk *= 1000;
3027 if (override_timeout_clk)
3028 host->timeout_clk = override_timeout_clk;
3030 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3031 host->ops->get_max_timeout_count(host) : 1 << 27;
3032 mmc->max_busy_timeout /= host->timeout_clk;
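/*
 * Worked example (illustrative): host->timeout_clk ends up in kHz after
 * the unit conversion above, so with a 48 MHz timeout clock (48000 kHz)
 * and the default maximum count of 1 << 27 cycles, max_busy_timeout
 * works out to 134217728 / 48000, i.e. roughly 2796 (milliseconds,
 * given the kHz clock).
 */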
3035 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3036 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3038 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3039 host->flags |= SDHCI_AUTO_CMD12;
3041 /* Auto-CMD23 stuff only works in ADMA or PIO. */
3042 if ((host->version >= SDHCI_SPEC_300) &&
3043 ((host->flags & SDHCI_USE_ADMA) ||
3044 !(host->flags & SDHCI_USE_SDMA)) &&
3045 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3046 host->flags |= SDHCI_AUTO_CMD23;
3047 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
3049 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
3053 * A controller may support 8-bit width, but the board itself
3054 * might not have the pins brought out. Boards that support
3055 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3056 * their platform code before calling sdhci_add_host(), and we
3057 * won't assume 8-bit width for hosts without that CAP.
3059 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3060 mmc->caps |= MMC_CAP_4_BIT_DATA;
3062 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3063 mmc->caps &= ~MMC_CAP_CMD23;
3065 if (caps[0] & SDHCI_CAN_DO_HISPD)
3066 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3068 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3069 !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
3070 IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
3071 mmc->caps |= MMC_CAP_NEEDS_POLL;
3073 /* If there are external regulators, get them */
3074 if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
3075 return -EPROBE_DEFER;
3077 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3078 if (!IS_ERR(mmc->supply.vqmmc)) {
3079 ret = regulator_enable(mmc->supply.vqmmc);
3080 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3082 caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
3083 SDHCI_SUPPORT_SDR50 |
3084 SDHCI_SUPPORT_DDR50);
3086 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3087 mmc_hostname(mmc), ret);
3088 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3092 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
3093 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3094 SDHCI_SUPPORT_DDR50);
3096 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3097 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3098 SDHCI_SUPPORT_DDR50))
3099 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3101 /* SDR104 support also implies SDR50 support */
3102 if (caps[1] & SDHCI_SUPPORT_SDR104) {
3103 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3104 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3105 * field can be promoted to support HS200.
3107 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3108 mmc->caps2 |= MMC_CAP2_HS200;
3109 } else if (caps[1] & SDHCI_SUPPORT_SDR50)
3110 mmc->caps |= MMC_CAP_UHS_SDR50;
3112 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3113 (caps[1] & SDHCI_SUPPORT_HS400))
3114 mmc->caps2 |= MMC_CAP2_HS400;
3116 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3117 (IS_ERR(mmc->supply.vqmmc) ||
3118 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3120 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3122 if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
3123 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3124 mmc->caps |= MMC_CAP_UHS_DDR50;
3126 /* Does the host need tuning for SDR50? */
3127 if (caps[1] & SDHCI_USE_SDR50_TUNING)
3128 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3130 /* Does the host need tuning for SDR104 / HS200? */
3131 if (mmc->caps2 & MMC_CAP2_HS200)
3132 host->flags |= SDHCI_SDR104_NEEDS_TUNING;
3134 /* Driver Type(s) (A, C, D) supported by the host */
3135 if (caps[1] & SDHCI_DRIVER_TYPE_A)
3136 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3137 if (caps[1] & SDHCI_DRIVER_TYPE_C)
3138 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3139 if (caps[1] & SDHCI_DRIVER_TYPE_D)
3140 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3142 /* Initial value for re-tuning timer count */
3143 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3144 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3147 * In case Re-tuning Timer is not disabled, the actual value of
3148 * re-tuning timer will be 2 ^ (n - 1).
3150 if (host->tuning_count)
3151 host->tuning_count = 1 << (host->tuning_count - 1);
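/* For example (illustrative): a Capabilities field value of 4 gives a
 * re-tuning period of 1 << 3 = 8 (seconds, per the SDHCI 3.00 spec). */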
3153 /* Re-tuning mode supported by the Host Controller */
3154 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
3155 SDHCI_RETUNING_MODE_SHIFT;
3160 * According to SD Host Controller spec v3.00, if the Host System
3161 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3162 * the value is meaningful only if Voltage Support in the Capabilities
3163 * register is set. The actual current value is 4 times the register value.
3166 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3167 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3168 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3171 /* convert to SDHCI_MAX_CURRENT format */
3172 curr = curr/1000; /* convert to mA */
3173 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3175 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3177 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3178 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3179 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
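/*
 * Worked example (illustrative): a regulator limit of 1000000 uA is
 * 1000 mA, which divided by the 4 mA-per-step register granularity
 * gives 250; that value is replicated into the 3.3 V, 3.0 V and 1.8 V
 * fields of the synthesized max_current_caps word, so the per-voltage
 * limits derived below come back out as 250 * 4 = 1000 mA.
 */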
3183 if (caps[0] & SDHCI_CAN_VDD_330) {
3184 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3186 mmc->max_current_330 = ((max_current_caps &
3187 SDHCI_MAX_CURRENT_330_MASK) >>
3188 SDHCI_MAX_CURRENT_330_SHIFT) *
3189 SDHCI_MAX_CURRENT_MULTIPLIER;
3191 if (caps[0] & SDHCI_CAN_VDD_300) {
3192 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3194 mmc->max_current_300 = ((max_current_caps &
3195 SDHCI_MAX_CURRENT_300_MASK) >>
3196 SDHCI_MAX_CURRENT_300_SHIFT) *
3197 SDHCI_MAX_CURRENT_MULTIPLIER;
3199 if (caps[0] & SDHCI_CAN_VDD_180) {
3200 ocr_avail |= MMC_VDD_165_195;
3202 mmc->max_current_180 = ((max_current_caps &
3203 SDHCI_MAX_CURRENT_180_MASK) >>
3204 SDHCI_MAX_CURRENT_180_SHIFT) *
3205 SDHCI_MAX_CURRENT_MULTIPLIER;
3208 /* If OCR set by host, use it instead. */
if (host->ocr_mask)
3210 ocr_avail = host->ocr_mask;
3212 /* If OCR set by external regulators, give it highest prio. */
if (mmc->ocr_avail)
3214 ocr_avail = mmc->ocr_avail;
3216 mmc->ocr_avail = ocr_avail;
3217 mmc->ocr_avail_sdio = ocr_avail;
3218 if (host->ocr_avail_sdio)
3219 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3220 mmc->ocr_avail_sd = ocr_avail;
3221 if (host->ocr_avail_sd)
3222 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3223 else /* normal SD controllers don't support 1.8V */
3224 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3225 mmc->ocr_avail_mmc = ocr_avail;
3226 if (host->ocr_avail_mmc)
3227 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3229 if (mmc->ocr_avail == 0) {
3230 pr_err("%s: Hardware doesn't report any supported voltages.\n",
3235 spin_lock_init(&host->lock);
3238 * Maximum number of segments. Depends on whether the hardware
3239 * can do scatter/gather or not.
3241 if (host->flags & SDHCI_USE_ADMA)
3242 mmc->max_segs = SDHCI_MAX_SEGS;
3243 else if (host->flags & SDHCI_USE_SDMA)
mmc->max_segs = 1;
else
3246 mmc->max_segs = SDHCI_MAX_SEGS;
3249 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3250 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
3253 mmc->max_req_size = 524288;
3256 * Maximum segment size. Could be one segment with the maximum number
3257 * of bytes. When doing hardware scatter/gather, each entry cannot
3258 * be larger than 64 KiB though.
3260 if (host->flags & SDHCI_USE_ADMA) {
3261 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3262 mmc->max_seg_size = 65535;
3264 mmc->max_seg_size = 65536;
3266 mmc->max_seg_size = mmc->max_req_size;
3270 * Maximum block size. This varies from controller to controller and
3271 * is specified in the capabilities register.
3273 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3274 mmc->max_blk_size = 2;
3276 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
3277 SDHCI_MAX_BLOCK_SHIFT;
3278 if (mmc->max_blk_size >= 3) {
3279 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3281 mmc->max_blk_size = 0;
3285 mmc->max_blk_size = 512 << mmc->max_blk_size;
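/*
 * Worked example (illustrative): a Capabilities "Max Block Length" field
 * of 0 yields 512 << 0 = 512 bytes, 1 yields 1024 bytes, and 2 (also what
 * the FORCE_BLK_SZ_2048 quirk selects above) yields 2048 bytes.
 */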
3288 * Maximum block count.
3290 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3295 tasklet_init(&host->finish_tasklet,
3296 sdhci_tasklet_finish, (unsigned long)host);
3298 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3300 init_waitqueue_head(&host->buf_ready_int);
3302 sdhci_init(host, 0);
3304 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3305 IRQF_SHARED, mmc_hostname(mmc), host);
3307 pr_err("%s: Failed to request IRQ %d: %d\n",
3308 mmc_hostname(mmc), host->irq, ret);
3312 #ifdef CONFIG_MMC_DEBUG
3313 sdhci_dumpregs(host);
3316 #ifdef SDHCI_USE_LEDS_CLASS
3317 snprintf(host->led_name, sizeof(host->led_name),
3318 "%s::", mmc_hostname(mmc));
3319 host->led.name = host->led_name;
3320 host->led.brightness = LED_OFF;
3321 host->led.default_trigger = mmc_hostname(mmc);
3322 host->led.brightness_set = sdhci_led_control;
3324 ret = led_classdev_register(mmc_dev(mmc), &host->led);
3326 pr_err("%s: Failed to register LED device: %d\n",
3327 mmc_hostname(mmc), ret);
3336 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3337 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3338 (host->flags & SDHCI_USE_ADMA) ?
3339 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3340 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3342 sdhci_enable_card_detection(host);
3346 #ifdef SDHCI_USE_LEDS_CLASS
3348 sdhci_do_reset(host, SDHCI_RESET_ALL);
3349 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3350 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3351 free_irq(host->irq, host);
3354 tasklet_kill(&host->finish_tasklet);
3359 EXPORT_SYMBOL_GPL(sdhci_add_host);
3361 void sdhci_remove_host(struct sdhci_host *host, int dead)
3363 struct mmc_host *mmc = host->mmc;
3364 unsigned long flags;
3367 spin_lock_irqsave(&host->lock, flags);
3369 host->flags |= SDHCI_DEVICE_DEAD;
3372 pr_err("%s: Controller removed during transfer!\n",
3373 mmc_hostname(mmc));
3375 host->mrq->cmd->error = -ENOMEDIUM;
3376 tasklet_schedule(&host->finish_tasklet);
3379 spin_unlock_irqrestore(&host->lock, flags);
3382 sdhci_disable_card_detection(host);
3384 mmc_remove_host(mmc);
3386 #ifdef SDHCI_USE_LEDS_CLASS
3387 led_classdev_unregister(&host->led);
3391 sdhci_do_reset(host, SDHCI_RESET_ALL);
3393 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3394 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3395 free_irq(host->irq, host);
3397 del_timer_sync(&host->timer);
3399 tasklet_kill(&host->finish_tasklet);
3401 if (!IS_ERR(mmc->supply.vqmmc))
3402 regulator_disable(mmc->supply.vqmmc);
3404 if (host->align_buffer)
3405 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3406 host->adma_table_sz, host->align_buffer,
3409 host->adma_table = NULL;
3410 host->align_buffer = NULL;
3413 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3415 void sdhci_free_host(struct sdhci_host *host)
3417 mmc_free_host(host->mmc);
3420 EXPORT_SYMBOL_GPL(sdhci_free_host);
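/*
 * Illustrative usage sketch (hypothetical glue driver, not part of this
 * file): teardown mirrors the probe path, with the "dead" argument set
 * only when the hardware is known to be gone (e.g. surprise removal):
 */
#if 0
static int foo_sdhci_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);

	sdhci_remove_host(host, 0);
	sdhci_free_host(host);

	return 0;
}
#endif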
3422 /*****************************************************************************\
3424 * Driver init/exit *
3426 \*****************************************************************************/
3428 static int __init sdhci_drv_init(void)
3431 ": Secure Digital Host Controller Interface driver\n");
3432 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3437 static void __exit sdhci_drv_exit(void)
3441 module_init(sdhci_drv_init);
3442 module_exit(sdhci_drv_exit);
3444 module_param(debug_quirks, uint, 0444);
3445 module_param(debug_quirks2, uint, 0444);
3447 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3448 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3449 MODULE_LICENSE("GPL");
3451 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3452 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
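/*
 * Illustrative usage (not part of the source): both parameters are
 * read-only at runtime (mode 0444), so they are normally set at load
 * time, e.g. "modprobe sdhci debug_quirks=<mask>" or
 * "sdhci.debug_quirks2=<mask>" on the kernel command line, where the
 * mask is a bitwise OR of SDHCI_QUIRK_* / SDHCI_QUIRK2_* values.
 */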