/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"
#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
	       mmc_hostname(host->mmc));

	pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
	       sdhci_readl(host, SDHCI_DMA_ADDRESS),
	       sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
	       sdhci_readw(host, SDHCI_BLOCK_SIZE),
	       sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
	       sdhci_readl(host, SDHCI_ARGUMENT),
	       sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_err(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
	       sdhci_readl(host, SDHCI_PRESENT_STATE),
	       sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_err(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
	       sdhci_readb(host, SDHCI_POWER_CONTROL),
	       sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_err(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
	       sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
	       sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_err(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
	       sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
	       sdhci_readl(host, SDHCI_INT_STATUS));
	pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
	       sdhci_readl(host, SDHCI_INT_ENABLE),
	       sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
	       sdhci_readw(host, SDHCI_ACMD12_ERR),
	       sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_err(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
	       sdhci_readl(host, SDHCI_CAPABILITIES),
	       sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_err(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
	       sdhci_readw(host, SDHCI_COMMAND),
	       sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
	       sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_err(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      data->flags & MMC_DATA_WRITE ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
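
/*
 * Illustrative example (editor's note, not part of the original source):
 * assuming the standard ADMA2 attribute encoding where ADMA2_TRAN_VALID is
 * 0x21 (Act = transfer data, Valid bit set), a 512-byte transfer descriptor
 * for a buffer at bus address 0x12340000 would be written as
 *   cmd = 0x0021, len = 0x0200, addr_lo = 0x12340000
 * (plus addr_hi = 0x00000000 on a 64-bit descriptor host), with all fields
 * stored little-endian as guessed above.
 */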
static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * in this function.
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000 * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
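
/*
 * Worked example (editor's note, not part of the original source):
 * with host->timeout_clk = 48000 (kHz, i.e. a 48 MHz timeout clock) the
 * minimum timeout is (1 << 13) * 1000 / 48000 ~= 170 us. A target of
 * 100 ms (100000 us) needs that doubled 10 times (170 << 10 ~= 174000 us),
 * so count = 10 and the controller times out after 2^(13 + 10) = 2^23
 * clocks, roughly 175 ms at 48 MHz.
 */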
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
		    (host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12);
}
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}
static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
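
/*
 * Illustrative example (editor's note, not part of the original source):
 * SDHCI_MAKE_CMD() packs the opcode into bits 15:8 and the flags into the
 * low byte. Assuming the standard SDHCI flag encodings (RESP_SHORT = 0x02,
 * CRC = 0x08, INDEX = 0x10, DATA = 0x20), a single-block read (CMD17, short
 * response with CRC and index checking, data present) yields flags = 0x3a,
 * so the value written to SDHCI_COMMAND is (17 << 8) | 0x3a = 0x113a.
 */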
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	int i;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					cmd->resp[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}
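
	/*
	 * Illustrative note (editor's addition, not from the original
	 * source): the controller strips the CRC byte from the 136-bit R2
	 * response, leaving 120 bits in the register file. Each resp[i]
	 * word is therefore rebuilt as the matching 32-bit register shifted
	 * left by 8, OR'd with the top byte of the register below it (the
	 * last word has no register below it, hence the i != 3 test).
	 */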
	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
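
/*
 * Worked example (editor's note, not part of the original source):
 * for an SDHCI v3.00 host with max_clk = 200 MHz, no clock multiplier, and
 * a requested clock of 50 MHz, the divided-clock loop above stops at
 * div = 4 (200 MHz / 4 = 50 MHz), so real_div = 4, actual_clock = 50 MHz,
 * and div >> 1 = 2 is what gets encoded into the divider field of the
 * Clock Control register.
 */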
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	unsigned long timeout;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	spin_unlock_irq(&host->lock);
	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	spin_lock_irq(&host->lock);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before
		 * they can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power);
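
/*
 * Illustrative note (editor's addition, not from the original source):
 * assuming the standard SDHCI Power Control encodings (SDHCI_POWER_330 =
 * 0x0E, SDHCI_POWER_ON = 0x01), selecting the 3.3V rail and then OR-ing in
 * the power-on bit means 0x0F is ultimately written to the register.
 */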
static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			      unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	if (host->ops->set_power)
		host->ops->set_power(host, mode, vdd);
	else if (!IS_ERR(mmc->supply.vmmc))
		sdhci_set_power_reg(host, mode, vdd);
	else
		sdhci_set_power(host, mode, vdd);
}
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	__sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}
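
/*
 * Illustrative note (editor's addition, not from the original source):
 * SDHCI_DATA_0_LVL_MASK selects the DAT[0] line level bit (bit 20) of the
 * Present State register; a low DAT[0] means the card is signalling busy,
 * hence the inverted test above.
 */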
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.mrq = &mrq;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}
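
		/*
		 * Illustrative note (editor's addition, not from the
		 * original source): SDHCI_MAKE_BLKSZ(7, 64) puts the SDMA
		 * buffer boundary field (bits 14:12) at 7, i.e. a 512K
		 * boundary, and the transfer block size (bits 11:0) at 64,
		 * so 0x7040 is written to the Block Size register.
		 */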
		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		sdhci_del_timer(host, &mrq);

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
						 (host->tuning_done == 1),
						 msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			msleep(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	if (tuning_count) {
		/*
		 * In case tuning fails, host controllers which support
		 * re-tuning can try tuning again at a later time, when the
		 * re-tuning timer expires. So for these controllers, we
		 * return 0. Since there might be other controllers who do not
		 * have this capability, we return error for them.
		 */
		err = 0;
	}

	host->mmc->retune_period = err ? 0 : tuning_count;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);

	return err;
}
static int sdhci_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);

	if (!host->ops->select_drive_strength)
		return 0;

	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
						card_drv, drv_type);
}
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			   int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     data->flags & MMC_DATA_WRITE ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);

	data->host_cookie = COOKIE_UNMAPPED;
}
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}
static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/
static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq) {
			host->mrqs_done[i] = NULL;
			break;
		}
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		if (!host->cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		if (!host->data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);

	return false;
}
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_timeout_data_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error. We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		    SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}
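
	/*
	 * SDHCI_QUIRK2_STOP_WITH_TC marks controllers that signal a
	 * Transfer Complete for CMD12; clearing SDHCI_INT_DATA_END in
	 * the caller's mask (the out-parameter here) keeps that stray
	 * bit from being handled as a data-end event.
	 */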
	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
	    host->cmd->opcode == MMC_STOP_TRANSMISSION)
		*mask &= ~SDHCI_INT_DATA_END;

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif

static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		struct mmc_command *data_cmd = host->data_cmd;

		if (data_cmd)
			host->data_cmd = NULL;

		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				data_cmd->error = -ETIMEDOUT;
				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->cmd == data_cmd)
					return;

				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
		}

		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
		 != MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;

			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				  ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				 SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
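			/*
			 * Example with the default 512 KiB boundary:
			 * dmanow 0x12345678 rounds up to 0x12380000,
			 * the start of the next boundary window.
			 */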
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
			    mmc_hostname(host->mmc), dmastart,
			    host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}

static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;
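
	/*
	 * max_loops caps how many times the status register is re-read
	 * and handled in one pass, so a stuck controller that keeps
	 * asserting interrupts cannot pin us in the handler forever.
	 */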

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
		    mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc. INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system. And the REMOVE gets the
			 * same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
			       mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}

static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM

/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
		| SDHCI_WAKE_ON_INT;
	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
		      SDHCI_INT_CARD_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
	}
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
		| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}

int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);

int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);

int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);
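
	/*
	 * With everything but the SDIO card interrupt masked above,
	 * synchronize_hardirq() makes sure no handler is still running
	 * with the old mask before the host is marked suspended.
	 */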

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);

int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
	mmc->ops->set_ios(mmc, &mmc->ios);

	if ((host_flags & SDHCI_PV_ENABLED) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	return host;
}
EXPORT_SYMBOL_GPL(sdhci_alloc_host);

static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}

void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	host->caps = caps ? *caps : sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version < SDHCI_SPEC_300)
		return;

	host->caps1 = caps1 ? *caps1 : sdhci_readl(host, SDHCI_CAPABILITIES_1);
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);

int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA. A driver has the opportunity to change
	 * that during the first call to ->enable_dma(). Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}
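
		/*
		 * Rough sizing example: assuming SDHCI_MAX_SEGS is 128,
		 * the 32-bit layout above needs (128 * 2 + 1) * 8 bytes
		 * (about 2 KiB), leaving room for one extra alignment
		 * descriptor per segment plus the terminating nop.
		 */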

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
	    SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
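
	/*
	 * Example: a 100 MHz base clock and no clock multiplier give
	 * f_min = 100 MHz / 2046 (about 48.9 kHz) on a v3.00 host,
	 * SDHCI_MAX_DIV_SPEC_300 being 2046.
	 */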

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
				    SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
				       mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}
		}

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
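
		/*
		 * timeout_clk is kept in kHz, so max_busy_timeout ends
		 * up in ms: e.g. a 48 MHz timeout clock with the
		 * default 1 << 27 count gives roughly 2796 ms.
		 */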
	}

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	    !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto undma;

	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);
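
	/*
	 * Example: a raw Re-tuning Timer Count field of 4 means the
	 * timer fires every 2^(4 - 1) = 8 seconds.
	 */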

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			    SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
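
			/*
			 * Example: an 800000 uA regulator limit becomes
			 * 800 mA, i.e. 200 in the register's 4 mA-per-unit
			 * encoding.
			 */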
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_330_MASK) >>
					SDHCI_MAX_CURRENT_330_SHIFT) *
					SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_300_MASK) >>
					SDHCI_MAX_CURRENT_300_SHIFT) *
					SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_180_MASK) >>
					SDHCI_MAX_CURRENT_180_SHIFT) *
					SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				    SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
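
	/*
	 * The capabilities field encodes 512 << n, i.e. 0 -> 512,
	 * 1 -> 1024 and 2 -> 2048 bytes.
	 */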

	/* Maximum block count. */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		     sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
		    (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);
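	/* buf_ready_int is woken from sdhci_data_irq() during tuning. */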

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	return __sdhci_add_host(host);
}
EXPORT_SYMBOL_GPL(sdhci_add_host);

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");