/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
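
/*
 * The INAND_CMD38_* values below are written to EXT_CSD byte 113
 * (INAND_CMD38_ARG_EXT_CSD) ahead of CMD38 on iNAND devices that carry
 * MMC_QUIRK_INAND_CMD38; they select which flavour of erase/trim the
 * following CMD38 performs (see the quirk handling in the discard and
 * secure-discard paths further down).
 */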
#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * parameters.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;

        unsigned int    flags;
#define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    name_idx;
        unsigned int    reset_done;
#define MMC_BLK_READ            BIT(0)
#define MMC_BLK_WRITE           BIT(1)
#define MMC_BLK_DISCARD         BIT(2)
#define MMC_BLK_SECDISCARD      BIT(3)

        /*
         * Only set in main mmc_blk_data associated
         * with mmc_card with mmc_set_drvdata, and keeps
         * track of the currently selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
        struct device_attribute power_ro_lock;
        int             area_type;
};
static DEFINE_MUTEX(open_lock);

enum mmc_blk_status {
        MMC_BLK_SUCCESS = 0,
        MMC_BLK_PARTIAL,
        MMC_BLK_CMD_ERR,
        MMC_BLK_RETRY,
        MMC_BLK_ABORT,
        MMC_BLK_DATA_ERR,
        MMC_BLK_ECC_ERR,
        MMC_BLK_NOMEDIUM,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
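
/*
 * Illustrative usage (not from the original source): with the driver
 * built in, the prefix defined above means the parameter is set on the
 * kernel command line as
 *
 *     mmcblk.perdev_minors=16
 *
 * which reserves 16 minors per card and, given the single major,
 * caps the driver at 256 / 16 = 16 devices.
 */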
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devmaj = MAJOR(disk_devt(disk));
        int devidx = MINOR(disk_devt(disk)) / perdev_minors;

        if (!devmaj)
                devidx = disk->first_minor / perdev_minors;
        return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}
static ssize_t power_ro_lock_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        struct mmc_card *card = md->queue.card;
        int locked = 0;

        if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
                locked = 2;
        else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
                locked = 1;

        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

        mmc_blk_put(md);
        return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        int ret;
        struct mmc_blk_data *md, *part_md;
        struct mmc_card *card;
        unsigned long set;

        if (kstrtoul(buf, 0, &set))
                return -EINVAL;

        if (set != 1)
                return count;

        md = mmc_blk_get(dev_to_disk(dev));
        card = md->queue.card;

        mmc_claim_host(card->host);

        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
                         card->ext_csd.boot_ro_lock |
                         EXT_CSD_BOOT_WP_B_PWR_WP_EN,
                         card->ext_csd.part_time);
        if (ret)
                pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
                       md->disk->disk_name, ret);
        else
                card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

        mmc_release_host(card->host);

        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
                        md->disk->disk_name);
                set_disk_ro(md->disk, 1);

                list_for_each_entry(part_md, &md->part, part)
                        if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
                                pr_info("%s: Locking boot partition ro until next power on\n",
                                        part_md->disk->disk_name);
                                set_disk_ro(part_md->disk, 1);
                        }
        }

        mmc_blk_put(md);
        return count;
}
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        ret = snprintf(buf, PAGE_SIZE, "%d",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);
        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                ret = 0;
                check_disk_change(bdev);
        }

        if ((mode & FMODE_WRITE) && md->read_only) {
                mmc_blk_put(md);
                ret = -EROFS;
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
        return 0;
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}
struct mmc_blk_ioc_data {
        struct mmc_ioc_cmd ic;
        unsigned char *buf;
        u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
        struct mmc_ioc_cmd __user *user)
{
        struct mmc_blk_ioc_data *idata;
        int err;

        idata = kzalloc(sizeof(*idata), GFP_KERNEL);
        if (!idata) {
                err = -ENOMEM;
                goto out;
        }

        if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
                err = -EFAULT;
                goto idata_err;
        }

        idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
        if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
                err = -EOVERFLOW;
                goto idata_err;
        }

        if (!idata->buf_bytes)
                return idata;

        idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
                err = -ENOMEM;
                goto idata_err;
        }

        if (copy_from_user(idata->buf, (void __user *)(unsigned long)
                           idata->ic.data_ptr, idata->buf_bytes)) {
                err = -EFAULT;
                goto copy_err;
        }

        return idata;

copy_err:
        kfree(idata->buf);
idata_err:
        kfree(idata);
out:
        return ERR_PTR(err);
}
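
/*
 * Illustrative user-space sketch (not part of this file) of the
 * MMC_IOC_CMD ioctl serviced by mmc_blk_ioctl_cmd() below; the opcode,
 * flag values and "rca" are assumptions for the example, and error
 * handling is omitted:
 *
 *     struct mmc_ioc_cmd ic = { 0 };
 *     ic.opcode = 13;                      // MMC_SEND_STATUS (CMD13)
 *     ic.arg = rca << 16;                  // hypothetical card address
 *     ic.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *     fd = open("/dev/mmcblk0", O_RDWR);   // whole device, not a partition
 *     ioctl(fd, MMC_IOC_CMD, &ic);         // requires CAP_SYS_RAWIO
 */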
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        struct mmc_ioc_cmd __user *ic_ptr)
{
        struct mmc_blk_ioc_data *idata;
        struct mmc_blk_data *md;
        struct mmc_card *card;
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct mmc_request mrq = {NULL};
        struct scatterlist sg;
        int err = 0;

        /*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
                return -EPERM;

        idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
        if (IS_ERR(idata))
                return PTR_ERR(idata);

        md = mmc_blk_get(bdev->bd_disk);
        if (!md) {
                err = -EINVAL;
                goto cmd_err;
        }

        card = md->queue.card;
        if (IS_ERR(card)) {
                err = PTR_ERR(card);
                goto cmd_done;
        }

        cmd.opcode = idata->ic.opcode;
        cmd.arg = idata->ic.arg;
        cmd.flags = idata->ic.flags;

        if (idata->buf_bytes) {
                data.sg = &sg;
                data.sg_len = 1;
                data.blksz = idata->ic.blksz;
                data.blocks = idata->ic.blocks;

                sg_init_one(data.sg, idata->buf, idata->buf_bytes);

                if (idata->ic.write_flag)
                        data.flags = MMC_DATA_WRITE;
                else
                        data.flags = MMC_DATA_READ;

                /* data.flags must already be set before doing this. */
                mmc_set_data_timeout(&data, card);

                /* Allow overriding the timeout_ns for empirical tuning. */
                if (idata->ic.data_timeout_ns)
                        data.timeout_ns = idata->ic.data_timeout_ns;

                if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
                        /*
                         * Pretend this is a data transfer and rely on the
                         * host driver to compute timeout.  When all host
                         * drivers support cmd.cmd_timeout for R1B, this
                         * can be changed to:
                         *
                         *     mrq.data = NULL;
                         *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
                         */
                        data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
                }

                mrq.data = &data;
        }

        mrq.cmd = &cmd;

        mmc_claim_host(card->host);

        if (idata->ic.is_acmd) {
                err = mmc_app_cmd(card->host, card);
                if (err)
                        goto cmd_rel_host;
        }

        mmc_wait_for_req(card->host, &mrq);

        if (cmd.error) {
                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                        __func__, cmd.error);
                err = cmd.error;
                goto cmd_rel_host;
        }
        if (data.error) {
                dev_err(mmc_dev(card->host), "%s: data error %d\n",
                        __func__, data.error);
                err = data.error;
                goto cmd_rel_host;
        }

        /*
         * According to the SD specs, some commands require a delay after
         * issuing the command.
         */
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

        if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
                err = -EFAULT;
                goto cmd_rel_host;
        }

        if (!idata->ic.write_flag) {
                if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
                                 idata->buf, idata->buf_bytes)) {
                        err = -EFAULT;
                        goto cmd_rel_host;
                }
        }

cmd_rel_host:
        mmc_release_host(card->host);

cmd_done:
        mmc_blk_put(md);
cmd_err:
        kfree(idata->buf);
        kfree(idata);
        return err;
}
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        int ret = -EINVAL;
        if (cmd == MMC_IOC_CMD)
                ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
        return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
        .ioctl                  = mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = mmc_blk_compat_ioctl,
#endif
};
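
/*
 * Partition switching below works by rewriting the partition access
 * bits (EXT_CSD_PART_CONFIG_ACC_MASK) of the EXT_CSD PARTITION_CONFIG
 * field: the old access bits are masked out and the target partition's
 * part_type is OR-ed in, then written back with a SWITCH command.
 */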
static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md)
{
        int ret;
        struct mmc_blk_data *main_md = mmc_get_drvdata(card);

        if (main_md->part_curr == md->part_type)
                return 0;

        if (mmc_card_mmc(card)) {
                u8 part_config = card->ext_csd.part_config;

                part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                part_config |= md->part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, part_config,
                                 card->ext_csd.part_time);
                if (ret)
                        return ret;

                card->ext_csd.part_config = part_config;
        }

        main_md->part_curr = md->part_type;
        return 0;
}
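
/*
 * Ask an SD card how many blocks of the preceding write completed
 * successfully: ACMD22 (SD_APP_SEND_NUM_WR_BLKS) must be prefixed with
 * CMD55 (MMC_APP_CMD), after which the card returns the count as a
 * single big-endian 32-bit word of read data - hence the 4-byte
 * scatterlist and the ntohl() below. (u32)-1 serves as the "unknown"
 * value on any failure.
 */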
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        unsigned int timeout_us;

        struct scatterlist sg;

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.timeout_ns = card->csd.tacc_ns * 100;
        data.timeout_clks = card->csd.tacc_clks * 100;

        timeout_us = data.timeout_ns / 1000;
        timeout_us += data.timeout_clks * 1000 /
                (card->host->ios.clock / 1000);

        if (timeout_us > 100000) {
                data.timeout_ns = 100000000;
                data.timeout_clks = 0;
        }

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}
static int send_stop(struct mmc_card *card, u32 *status)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_STOP_TRANSMISSION;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 5);
        if (err == 0)
                *status = cmd.resp[0];
        return err;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, retries);
        if (err == 0)
                *status = cmd.resp[0];
        return err;
}
#define ERR_NOMEDIUM    3
#define ERR_RETRY       2
#define ERR_ABORT       1
#define ERR_CONTINUE    0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
        bool status_valid, u32 status)
{
        switch (error) {
        case -EILSEQ:
                /* response crc error, retry the r/w cmd */
                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "response CRC error",
                        name, status);
                return ERR_RETRY;

        case -ETIMEDOUT:
                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "timed out", name, status);

                /* If the status cmd initially failed, retry the r/w cmd */
                if (!status_valid)
                        return ERR_RETRY;

                /*
                 * If it was a r/w cmd crc error, or illegal command
                 * (eg, issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
                if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
                        return ERR_RETRY;

                /* Otherwise abort the command */
                return ERR_ABORT;

        default:
                /* We don't understand the error code the driver gave us */
                pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
                       req->rq_disk->disk_name, error, status);
                return ERR_ABORT;
        }
}
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
        struct mmc_blk_request *brq, int *ecc_err)
{
        bool prev_cmd_status_valid = true;
        u32 status, stop_status = 0;
        int err, retry;

        if (mmc_card_removed(card))
                return ERR_NOMEDIUM;

        /*
         * Try to get card status which indicates both the card state
         * and why there was no response.  If the first attempt fails,
         * we can't be sure the returned status is for the r/w command.
         */
        for (retry = 2; retry >= 0; retry--) {
                err = get_card_status(card, &status, 0);
                if (!err)
                        break;

                prev_cmd_status_valid = false;
                pr_err("%s: error %d sending status command, %sing\n",
                       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
        }

        /* We couldn't get a response from the card.  Give up. */
        if (err) {
                /* Check if the card is removed */
                if (mmc_detect_card_removed(card->host))
                        return ERR_NOMEDIUM;
                return ERR_ABORT;
        }

        /* Flag ECC errors */
        if ((status & R1_CARD_ECC_FAILED) ||
            (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
            (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
                *ecc_err = 1;

        /*
         * Check the current card state.  If it is in some data transfer
         * mode, tell it to stop (and hopefully transition back to TRAN.)
         */
        if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
            R1_CURRENT_STATE(status) == R1_STATE_RCV) {
                err = send_stop(card, &stop_status);
                if (err)
                        pr_err("%s: error %d sending stop command\n",
                               req->rq_disk->disk_name, err);

                /*
                 * If the stop cmd also timed out, the card is probably
                 * not present, so abort.  Other errors are bad news too.
                 */
                if (err)
                        return ERR_ABORT;
                if (stop_status & R1_CARD_ECC_FAILED)
                        *ecc_err = 1;
        }

        /* Check for set block count errors */
        if (brq->sbc.error)
                return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
                                prev_cmd_status_valid, status);

        /* Check for r/w command errors */
        if (brq->cmd.error)
                return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
                                prev_cmd_status_valid, status);

        /* Data errors */
        if (!brq->stop.error)
                return ERR_CONTINUE;

        /* Now for stop errors.  These aren't fatal to the transfer. */
        pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
               req->rq_disk->disk_name, brq->stop.error,
               brq->cmd.resp[0], status);

        /*
         * Substitute in our own stop status as this will give the error
         * state which happened during the execution of the r/w command.
         */
        brq->stop.resp[0] = stop_status;
        brq->stop.error = 0;

        return ERR_CONTINUE;
}
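
/*
 * md->reset_done is a bitmask of the MMC_BLK_{READ,WRITE,DISCARD,
 * SECDISCARD} request types for which a reset has already been tried;
 * it keeps one failing request type from resetting the card over and
 * over. The bit is cleared by mmc_blk_reset_success() once a request
 * of that type completes again.
 */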
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
                         int type)
{
        int err;

        if (md->reset_done & type)
                return -EEXIST;

        md->reset_done |= type;
        err = mmc_hw_reset(host);
        /* Ensure we switch back to the correct partition */
        if (err != -EOPNOTSUPP) {
                struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
                int part_err;

                main_md->part_curr = main_md->part_type;
                part_err = mmc_blk_part_switch(host->card, md);
                if (part_err) {
                        /*
                         * We have failed to get back into the correct
                         * partition, so we need to abort the whole request.
                         */
                        return -ENODEV;
                }
        }
        return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
        md->reset_done &= ~type;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_DISCARD;

        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_discard(card))
                arg = MMC_DISCARD_ARG;
        else if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;
retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_TRIM_ARG ?
                                 INAND_CMD38_ARG_TRIM :
                                 INAND_CMD38_ARG_ERASE,
                                 0);
                if (err)
                        goto out;
        }
        err = mmc_erase(card, from, nr, arg);
out:
        if (err == -EIO && !mmc_blk_reset(md, card->host, type))
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err;
}
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg, trim_arg, erase_arg;
        int err = 0, type = MMC_BLK_SECDISCARD;

        if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        /* The sanitize operation is supported only from eMMC v4.5 onward */
        if (mmc_can_sanitize(card)) {
                erase_arg = MMC_ERASE_ARG;
                trim_arg = MMC_TRIM_ARG;
        } else {
                erase_arg = MMC_SECURE_ERASE_ARG;
                trim_arg = MMC_SECURE_TRIM1_ARG;
        }

        if (mmc_erase_group_aligned(card, from, nr))
                arg = erase_arg;
        else if (mmc_can_trim(card))
                arg = trim_arg;
        else {
                err = -EINVAL;
                goto out;
        }
retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_SECURE_TRIM1_ARG ?
                                 INAND_CMD38_ARG_SECTRIM1 :
                                 INAND_CMD38_ARG_SECERASE,
                                 0);
                if (err)
                        goto out_retry;
        }

        err = mmc_erase(card, from, nr, arg);
        if (err == -EIO)
                goto out_retry;
        if (err)
                goto out;

        if (arg == MMC_SECURE_TRIM1_ARG) {
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         INAND_CMD38_ARG_SECTRIM2,
                                         0);
                        if (err)
                                goto out_retry;
                }

                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
                if (err == -EIO)
                        goto out_retry;
                if (err)
                        goto out;
        }

        if (mmc_can_sanitize(card))
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_SANITIZE_START, 1, 0);
out_retry:
        if (err && !mmc_blk_reset(md, card->host, type))
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        int ret = 0;

        ret = mmc_flush_cache(card);
        if (ret)
                ret = -EIO;

        spin_lock_irq(&md->lock);
        __blk_end_request_all(req, ret);
        spin_unlock_irq(&md->lock);

        return ret ? 0 : 1;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                    struct mmc_card *card,
                                    struct request *req)
{
        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }
}
#define CMD_ERRORS                                                      \
        (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
         R1_ADDRESS_ERROR |     /* Misaligned address */                \
         R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
         R1_WP_VIOLATION |      /* Tried to write to protected block */ \
         R1_CC_ERROR |          /* Card controller error */             \
         R1_ERROR)              /* General/unknown error */
static int mmc_blk_err_check(struct mmc_card *card,
                             struct mmc_async_req *areq)
{
        struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
                                                    mmc_active);
        struct mmc_blk_request *brq = &mq_mrq->brq;
        struct request *req = mq_mrq->req;
        int ecc_err = 0;

        /*
         * sbc.error indicates a problem with the set block count
         * command.  No data will have been transferred.
         *
         * cmd.error indicates a problem with the r/w command.  No
         * data will have been transferred.
         *
         * stop.error indicates a problem with the stop command.  Data
         * may have been transferred, or may still be transferring.
         */
        if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
            brq->data.error) {
                switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
                case ERR_RETRY:
                        return MMC_BLK_RETRY;
                case ERR_ABORT:
                        return MMC_BLK_ABORT;
                case ERR_NOMEDIUM:
                        return MMC_BLK_NOMEDIUM;
                case ERR_CONTINUE:
                        break;
                }
        }

        /*
         * Check for errors relating to the execution of the
         * initial command - such as address errors.  No data
         * has been transferred.
         */
        if (brq->cmd.resp[0] & CMD_ERRORS) {
                pr_err("%s: r/w command failed, status = %#x\n",
                       req->rq_disk->disk_name, brq->cmd.resp[0]);
                return MMC_BLK_ABORT;
        }

        /*
         * Everything else is either success, or a data error of some
         * kind.  If it was a write, we may have transitioned to
         * program mode, and we have to wait for the card to complete it.
         */
        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
                u32 status;
                do {
                        int err = get_card_status(card, &status, 5);
                        if (err) {
                                pr_err("%s: error %d requesting status\n",
                                       req->rq_disk->disk_name, err);
                                return MMC_BLK_CMD_ERR;
                        }
                        /*
                         * Some cards mishandle the status bits,
                         * so make sure to check both the busy
                         * indication and the card state.
                         */
                } while (!(status & R1_READY_FOR_DATA) ||
                         (R1_CURRENT_STATE(status) == R1_STATE_PRG));
        }

        if (brq->data.error) {
                pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
                       req->rq_disk->disk_name, brq->data.error,
                       (unsigned)blk_rq_pos(req),
                       (unsigned)blk_rq_sectors(req),
                       brq->cmd.resp[0], brq->stop.resp[0]);

                if (rq_data_dir(req) == READ) {
                        if (ecc_err)
                                return MMC_BLK_ECC_ERR;
                        return MMC_BLK_DATA_ERR;
                } else {
                        return MMC_BLK_CMD_ERR;
                }
        }

        if (!brq->data.bytes_xfered)
                return MMC_BLK_RETRY;

        if (blk_rq_bytes(req) != brq->data.bytes_xfered)
                return MMC_BLK_PARTIAL;

        return MMC_BLK_SUCCESS;
}
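
/*
 * Translate a block layer request into an mmc_blk_request: choose
 * single- vs multi-block opcodes, decide whether CMD23 (set block
 * count) and/or a reliable write apply, wire up the stop command where
 * one is needed, and trim the scatterlist to match the (possibly
 * clamped) block count.
 */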
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                               struct mmc_card *card,
                               int disable_multi,
                               struct mmc_queue *mq)
{
        u32 readcmd, writecmd;
        struct mmc_blk_request *brq = &mqrq->brq;
        struct request *req = mqrq->req;
        struct mmc_blk_data *md = mq->data;
        bool do_data_tag;

        /*
         * Reliable writes are used to implement Forced Unit Access and
         * REQ_META accesses, and are supported only on MMCs.
         *
         * XXX: this really needs a good explanation of why REQ_META
         * is treated special.
         */
        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
                          (req->cmd_flags & REQ_META)) &&
                (rq_data_dir(req) == WRITE) &&
                (md->flags & MMC_BLK_REL_WR);

        memset(brq, 0, sizeof(struct mmc_blk_request));
        brq->mrq.cmd = &brq->cmd;
        brq->mrq.data = &brq->data;

        brq->cmd.arg = blk_rq_pos(req);
        if (!mmc_card_blockaddr(card))
                brq->cmd.arg <<= 9;
        brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
        brq->data.blksz = 512;
        brq->stop.opcode = MMC_STOP_TRANSMISSION;
        brq->stop.arg = 0;
        brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        brq->data.blocks = blk_rq_sectors(req);

        /*
         * The block layer doesn't support all sector count
         * restrictions, so we need to be prepared for too big
         * requests.
         */
        if (brq->data.blocks > card->host->max_blk_count)
                brq->data.blocks = card->host->max_blk_count;

        if (brq->data.blocks > 1) {
                /*
                 * After a read error, we redo the request one sector
                 * at a time in order to accurately determine which
                 * sectors can be read successfully.
                 */
                if (disable_multi)
                        brq->data.blocks = 1;

                /* Some controllers can't do multiblock reads due to hw bugs */
                if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
                    rq_data_dir(req) == READ)
                        brq->data.blocks = 1;
        }

        if (brq->data.blocks > 1 || do_rel_wr) {
                /* SPI multiblock writes terminate using a special
                 * token, not a STOP_TRANSMISSION request.
                 */
                if (!mmc_host_is_spi(card->host) ||
                    rq_data_dir(req) == READ)
                        brq->mrq.stop = &brq->stop;
                readcmd = MMC_READ_MULTIPLE_BLOCK;
                writecmd = MMC_WRITE_MULTIPLE_BLOCK;
        } else {
                brq->mrq.stop = NULL;
                readcmd = MMC_READ_SINGLE_BLOCK;
                writecmd = MMC_WRITE_BLOCK;
        }
        if (rq_data_dir(req) == READ) {
                brq->cmd.opcode = readcmd;
                brq->data.flags |= MMC_DATA_READ;
        } else {
                brq->cmd.opcode = writecmd;
                brq->data.flags |= MMC_DATA_WRITE;
        }

        if (do_rel_wr)
                mmc_apply_rel_rw(brq, card, req);

        /*
         * Data tag is used only during writing meta data to speed
         * up write and any subsequent read of this meta data.
         */
        do_data_tag = (card->ext_csd.data_tag_unit_size) &&
                (req->cmd_flags & REQ_META) &&
                (rq_data_dir(req) == WRITE) &&
                ((brq->data.blocks * brq->data.blksz) >=
                 card->ext_csd.data_tag_unit_size);

        /*
         * Pre-defined multi-block transfers are preferable to
         * open-ended ones (and necessary for reliable writes).
         * However, it is not sufficient to just send CMD23,
         * and avoid the final CMD12, as on an error condition
         * CMD12 (stop) needs to be sent anyway. This, coupled
         * with Auto-CMD23 enhancements provided by some
         * hosts, means that the complexity of dealing
         * with this is best left to the host. If CMD23 is
         * supported by card and host, we'll fill sbc in and let
         * the host deal with handling it correctly. This means
         * that for hosts that don't expose MMC_CAP_CMD23, no
         * change of behavior will be observed.
         *
         * N.B: Some MMC cards experience performance degradation.
         * We'll avoid using CMD23-bounded multiblock writes for
         * these, while retaining features like reliable writes.
         */
        if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
            (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
             do_data_tag)) {
                brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
                brq->sbc.arg = brq->data.blocks |
                        (do_rel_wr ? (1 << 31) : 0) |
                        (do_data_tag ? (1 << 29) : 0);
                brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
                brq->mrq.sbc = &brq->sbc;
        }

        mmc_set_data_timeout(&brq->data, card);

        brq->data.sg = mqrq->sg;
        brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

        /*
         * Adjust the sg list so it is the same size as the
         * request.
         */
        if (brq->data.blocks != blk_rq_sectors(req)) {
                int i, data_size = brq->data.blocks << 9;
                struct scatterlist *sg;

                for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
                        data_size -= sg->length;
                        if (data_size <= 0) {
                                sg->length += data_size;
                                i++;
                                break;
                        }
                }
                brq->data.sg_len = i;
        }

        mqrq->mmc_active.mrq = &brq->mrq;
        mqrq->mmc_active.err_check = mmc_blk_err_check;

        mmc_queue_bounce_pre(mqrq);
}
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
                           struct mmc_blk_request *brq, struct request *req,
                           int ret)
{
        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
         *
         * If the card is not SD, we can still ok written sectors
         * as reported by the controller (which might be less than
         * the real number of written sectors, but never more).
         */
        if (mmc_card_sd(card)) {
                u32 blocks;

                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0, blocks << 9);
                        spin_unlock_irq(&md->lock);
                }
        } else {
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }

        return ret;
}
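
/*
 * Main read/write state machine. Requests are started asynchronously
 * through mmc_start_req(); each completion is classified by
 * mmc_blk_err_check() into an mmc_blk_status, and the switch below
 * picks between completing, retrying (single-block after an ECC
 * error), resetting the card, or aborting. Passing rqc == NULL flushes
 * the previously started request without queueing a new one.
 */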
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
        int ret = 1, disable_multi = 0, retry = 0, type;
        enum mmc_blk_status status;
        struct mmc_queue_req *mq_rq;
        struct request *req;
        struct mmc_async_req *areq;

        if (!rqc && !mq->mqrq_prev->req)
                return 0;

        do {
                if (rqc) {
                        mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
                        areq = &mq->mqrq_cur->mmc_active;
                } else
                        areq = NULL;
                areq = mmc_start_req(card->host, areq, (int *) &status);
                if (!areq)
                        return 0;

                mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
                brq = &mq_rq->brq;
                req = mq_rq->req;
                type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
                mmc_queue_bounce_post(mq_rq);

                switch (status) {
                case MMC_BLK_SUCCESS:
                case MMC_BLK_PARTIAL:
                        /*
                         * A block was successfully transferred.
                         */
                        mmc_blk_reset_success(md, type);
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0,
                                        brq->data.bytes_xfered);
                        spin_unlock_irq(&md->lock);
                        /*
                         * If the blk_end_request function returns non-zero even
                         * though all data has been transferred and no errors
                         * were returned by the host controller, it's a bug.
                         */
                        if (status == MMC_BLK_SUCCESS && ret) {
                                pr_err("%s BUG rq_tot %d d_xfer %d\n",
                                       __func__, blk_rq_bytes(req),
                                       brq->data.bytes_xfered);
                                rqc = NULL;
                                goto cmd_abort;
                        }
                        break;
                case MMC_BLK_CMD_ERR:
                        ret = mmc_blk_cmd_err(md, card, brq, req, ret);
                        if (!mmc_blk_reset(md, card->host, type))
                                break;
                        goto cmd_abort;
                case MMC_BLK_RETRY:
                        if (retry++ < 5)
                                break;
                        /* Fall through */
                case MMC_BLK_ABORT:
                        if (!mmc_blk_reset(md, card->host, type))
                                break;
                        goto cmd_abort;
                case MMC_BLK_DATA_ERR: {
                        int err;

                        err = mmc_blk_reset(md, card->host, type);
                        if (!err)
                                break;
                        if (err == -ENODEV)
                                goto cmd_abort;
                        /* Fall through */
                }
                case MMC_BLK_ECC_ERR:
                        if (brq->data.blocks > 1) {
                                /* Redo read one sector at a time */
                                pr_warning("%s: retrying using single block read\n",
                                           req->rq_disk->disk_name);
                                disable_multi = 1;
                                break;
                        }
                        /*
                         * After an error, we redo I/O one sector at a
                         * time, so we only reach here after trying to
                         * read a single sector.
                         */
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, -EIO,
                                        brq->data.blksz);
                        spin_unlock_irq(&md->lock);
                        if (ret)
                                goto start_new_req;
                        break;
                case MMC_BLK_NOMEDIUM:
                        goto cmd_abort;
                }

                if (ret) {
                        /*
                         * In case of an incomplete request
                         * prepare it again and resend.
                         */
                        mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
                        mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
                }
        } while (ret);

        return 1;

 cmd_abort:
        spin_lock_irq(&md->lock);
        if (mmc_card_removed(card))
                req->cmd_flags |= REQ_QUIET;
        while (ret)
                ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
        spin_unlock_irq(&md->lock);

 start_new_req:
        if (rqc) {
                mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
                mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
        }

        return 0;
}
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
        int ret;
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;

        if (req && !mq->mqrq_prev->req)
                /* claim host only for the first request */
                mmc_claim_host(card->host);

        ret = mmc_blk_part_switch(card, md);
        if (ret) {
                if (req) {
                        spin_lock_irq(&md->lock);
                        __blk_end_request_all(req, -EIO);
                        spin_unlock_irq(&md->lock);
                }
                ret = 0;
                goto out;
        }

        if (req && req->cmd_flags & REQ_DISCARD) {
                /* complete ongoing async transfer before issuing discard */
                if (card->host->areq)
                        mmc_blk_issue_rw_rq(mq, NULL);
                if (req->cmd_flags & REQ_SECURE)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
        } else if (req && req->cmd_flags & REQ_FLUSH) {
                /* complete ongoing async transfer before issuing flush */
                if (card->host->areq)
                        mmc_blk_issue_rw_rq(mq, NULL);
                ret = mmc_blk_issue_flush(mq, req);
        } else {
                ret = mmc_blk_issue_rw_rq(mq, req);
        }

out:
        if (!req)
                /* release host only when there are no more requests */
                mmc_release_host(card->host);
        return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
        return mmc_card_readonly(card) ||
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                                              struct device *parent,
                                              sector_t size,
                                              bool default_ro,
                                              const char *subname,
                                              int area_type)
{
        struct mmc_blk_data *md;
        int devidx, ret;

        devidx = find_first_zero_bit(dev_use, max_devices);
        if (devidx >= max_devices)
                return ERR_PTR(-ENOSPC);
        __set_bit(devidx, dev_use);

        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
        if (!md) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * !subname implies we are creating main mmc_blk_data that will be
         * associated with mmc_card with mmc_set_drvdata. Due to device
         * partitions, devidx will not coincide with a per-physical card
         * index anymore so we keep track of a name index.
         */
        if (!subname) {
                md->name_idx = find_first_zero_bit(name_use, max_devices);
                __set_bit(md->name_idx, name_use);
        } else
                md->name_idx = ((struct mmc_blk_data *)
                                dev_to_disk(parent)->private_data)->name_idx;

        md->area_type = area_type;

        /*
         * Set the read-only status based on the supported commands
         * and the write protect switch.
         */
        md->read_only = mmc_blk_readonly(card);

        md->disk = alloc_disk(perdev_minors);
        if (md->disk == NULL) {
                ret = -ENOMEM;
                goto err_kfree;
        }

        spin_lock_init(&md->lock);
        INIT_LIST_HEAD(&md->part);
        md->usage = 1;

        ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
        if (ret)
                goto err_putdisk;

        md->queue.issue_fn = mmc_blk_issue_rq;
        md->queue.data = md;

        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->first_minor = devidx * perdev_minors;
        md->disk->fops = &mmc_bdops;
        md->disk->private_data = md;
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);

        /*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
         *
         * - be set for removable media with permanent block devices
         * - be unset for removable block devices with permanent media
         *
         * Since MMC block devices clearly fall under the second
         * case, we do not set GENHD_FL_REMOVABLE.  Userspace
         * should use the block device creation/destruction hotplug
         * messages to tell when the card is present.
         */

        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                 "mmcblk%d%s", md->name_idx, subname ? subname : "");

        blk_queue_logical_block_size(md->queue.queue, 512);
        set_capacity(md->disk, size);

        if (mmc_host_cmd23(card->host)) {
                if (mmc_card_mmc(card) ||
                    (mmc_card_sd(card) &&
                     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
                        md->flags |= MMC_BLK_CMD23;
        }

        if (mmc_card_mmc(card) &&
            md->flags & MMC_BLK_CMD23 &&
            ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
             card->ext_csd.rel_sectors)) {
                md->flags |= MMC_BLK_REL_WR;
                blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
        }
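
        /*
         * REQ_FLUSH/REQ_FUA are only advertised to the block layer
         * together with MMC_BLK_REL_WR, because FUA is implemented here
         * by turning the write into a reliable write (see the do_rel_wr
         * handling in mmc_blk_rw_rq_prep() above).
         */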
        return md;

 err_putdisk:
        put_disk(md->disk);
 err_kfree:
        kfree(md);
 out:
        return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
        sector_t size;
        struct mmc_blk_data *md;

        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                /*
                 * The EXT_CSD sector count is in number of 512 byte
                 * sectors.
                 */
                size = card->ext_csd.sectors;
        } else {
                /*
                 * The CSD capacity field is in units of read_blkbits.
                 * set_capacity takes units of 512 bytes.
                 */
                size = card->csd.capacity << (card->csd.read_blkbits - 9);
        }
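
        /*
         * Worked example (illustrative numbers): a CSD with
         * read_blkbits = 11 (2 KiB read blocks) and capacity = 0x1000
         * units describes 0x1000 * 2048 bytes, i.e.
         * size = 0x1000 << (11 - 9) = 0x4000 512-byte sectors, which is
         * the unit set_capacity() expects.
         */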
        md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
                               MMC_BLK_DATA_AREA_MAIN);
        return md;
}
static int mmc_blk_alloc_part(struct mmc_card *card,
                              struct mmc_blk_data *md,
                              unsigned int part_type,
                              sector_t size,
                              bool default_ro,
                              const char *subname,
                              int area_type)
{
        char cap_str[10];
        struct mmc_blk_data *part_md;

        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
                                    subname, area_type);
        if (IS_ERR(part_md))
                return PTR_ERR(part_md);
        part_md->part_type = part_type;
        list_add(&part_md->part, &md->part);

        string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        pr_info("%s: %s %s partition %u %s\n",
                part_md->disk->disk_name, mmc_card_id(card),
                mmc_card_name(card), part_md->part_type, cap_str);

        return 0;
}
/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
        int idx, ret = 0;

        if (!mmc_card_mmc(card))
                return 0;

        for (idx = 0; idx < card->nr_parts; idx++) {
                if (card->part[idx].size) {
                        ret = mmc_blk_alloc_part(card, md,
                                card->part[idx].part_cfg,
                                card->part[idx].size >> 9,
                                card->part[idx].force_ro,
                                card->part[idx].name,
                                card->part[idx].area_type);
                        if (ret)
                                return ret;
                }
        }

        return ret;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
        struct mmc_card *card;

        if (md) {
                card = md->queue.card;
                if (md->disk->flags & GENHD_FL_UP) {
                        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
                        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
                                        card->ext_csd.boot_ro_lockable)
                                device_remove_file(disk_to_dev(md->disk),
                                        &md->power_ro_lock);

                        /* Stop new requests from getting into the queue */
                        del_gendisk(md->disk);
                }

                /* Then flush out any already in there */
                mmc_cleanup_queue(&md->queue);
                mmc_blk_put(md);
        }
}

static void mmc_blk_remove_parts(struct mmc_card *card,
                                 struct mmc_blk_data *md)
{
        struct list_head *pos, *q;
        struct mmc_blk_data *part_md;

        __clear_bit(md->name_idx, name_use);
        list_for_each_safe(pos, q, &md->part) {
                part_md = list_entry(pos, struct mmc_blk_data, part);
                list_del(pos);
                mmc_blk_remove_req(part_md);
        }
}
static int mmc_add_disk(struct mmc_blk_data *md)
{
        int ret;
        struct mmc_card *card = md->queue.card;

        add_disk(md->disk);
        md->force_ro.show = force_ro_show;
        md->force_ro.store = force_ro_store;
        sysfs_attr_init(&md->force_ro.attr);
        md->force_ro.attr.name = "force_ro";
        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
        if (ret)
                goto force_ro_fail;

        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
             card->ext_csd.boot_ro_lockable) {
                umode_t mode;

                if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
                        mode = S_IRUGO;
                else
                        mode = S_IRUGO | S_IWUSR;

                md->power_ro_lock.show = power_ro_lock_show;
                md->power_ro_lock.store = power_ro_lock_store;
                sysfs_attr_init(&md->power_ro_lock.attr);
                md->power_ro_lock.attr.mode = mode;
                md->power_ro_lock.attr.name =
                                        "ro_lock_until_next_power_on";
                ret = device_create_file(disk_to_dev(md->disk),
                                &md->power_ro_lock);
                if (ret)
                        goto power_ro_lock_fail;
        }
        return ret;

power_ro_lock_fail:
        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
        del_gendisk(md->disk);

        return ret;
}
#define CID_MANFID_SANDISK      0x2
#define CID_MANFID_TOSHIBA      0x11
#define CID_MANFID_MICRON       0x13

static const struct mmc_fixup blk_fixups[] =
{
        MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),

        /*
         * Some MMC cards experience performance degradation with CMD23
         * instead of CMD12-bounded multiblock transfers. For now we'll
         * black list what's bad...
         * - Certain Toshiba cards.
         *
         * N.B. This doesn't affect SD cards.
         */
        MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),

        /*
         * Some Micron MMC cards need a longer data read timeout than
         * indicated in CSD.
         */
        MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
                  MMC_QUIRK_LONG_READ_TIME),

        END_FIXUP
};
static int mmc_blk_probe(struct mmc_card *card)
{
        struct mmc_blk_data *md, *part_md;
        char cap_str[10];

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
                return -ENODEV;

        md = mmc_blk_alloc(card);
        if (IS_ERR(md))
                return PTR_ERR(md);

        string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        pr_info("%s: %s %s %s %s\n",
                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
                cap_str, md->read_only ? "(ro)" : "");

        if (mmc_blk_alloc_parts(card, md))
                goto out;

        mmc_set_drvdata(card, md);
        mmc_fixup_device(card, blk_fixups);

        if (mmc_add_disk(md))
                goto out;

        list_for_each_entry(part_md, &md->part, part) {
                if (mmc_add_disk(part_md))
                        goto out;
        }
        return 0;

 out:
        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);
        return 0;
}

static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        mmc_blk_remove_parts(card, md);
        mmc_claim_host(card->host);
        mmc_blk_part_switch(card, md);
        mmc_release_host(card->host);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_queue_suspend(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_suspend(&part_md->queue);
                }
        }
        return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                /*
                 * Resume involves the card going into idle state,
                 * so current partition is always the main one.
                 */
                md->part_curr = md->part_type;
                mmc_queue_resume(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_resume(&part_md->queue);
                }
        }
        return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume  NULL
#endif
static struct mmc_driver mmc_driver = {
        .drv            = {
                .name   = "mmcblk",
        },
        .probe          = mmc_blk_probe,
        .remove         = mmc_blk_remove,
        .suspend        = mmc_blk_suspend,
        .resume         = mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
        int res;

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n", perdev_minors);

        max_devices = 256 / perdev_minors;

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}

static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");