2 * Block driver for media (i.e., flash cards)
4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2008 Pierre Ossman
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
17 * Author: Andrew Christian
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
24 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/hdreg.h>
29 #include <linux/kdev_t.h>
30 #include <linux/blkdev.h>
31 #include <linux/mutex.h>
32 #include <linux/scatterlist.h>
33 #include <linux/string_helpers.h>
34 #include <linux/delay.h>
35 #include <linux/capability.h>
36 #include <linux/compat.h>
37 #include <linux/pm_runtime.h>
38 #include <linux/idr.h>
40 #include <linux/mmc/ioctl.h>
41 #include <linux/mmc/card.h>
42 #include <linux/mmc/host.h>
43 #include <linux/mmc/mmc.h>
44 #include <linux/mmc/sd.h>
46 #include <asm/uaccess.h>
50 MODULE_ALIAS("mmc:block");
51 #ifdef MODULE_PARAM_PREFIX
52 #undef MODULE_PARAM_PREFIX
54 #define MODULE_PARAM_PREFIX "mmcblk."
56 #define INAND_CMD38_ARG_EXT_CSD 113
57 #define INAND_CMD38_ARG_ERASE 0x00
58 #define INAND_CMD38_ARG_TRIM 0x01
59 #define INAND_CMD38_ARG_SECERASE 0x80
60 #define INAND_CMD38_ARG_SECTRIM1 0x81
61 #define INAND_CMD38_ARG_SECTRIM2 0x88
62 #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
63 #define MMC_SANITIZE_REQ_TIMEOUT 240000
64 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
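/*
 * Worked example (illustrative, not used by the code): the MMC_SWITCH (CMD6)
 * argument carries the EXT_CSD byte index in bits [23:16]. A sanitize request
 * built with write-byte access looks roughly like 0x03A50100 (index 0xA5 ==
 * 165 == EXT_CSD_SANITIZE_START, value 0x01), so
 * MMC_EXTRACT_INDEX_FROM_ARG(0x03A50100) == 0xA5, which is how the ioctl path
 * below recognises a sanitize operation.
 */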
66 #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
67 (rq_data_dir(req) == WRITE))
68 #define PACKED_CMD_VER 0x01
69 #define PACKED_CMD_WR 0x02
71 static DEFINE_MUTEX(block_mutex);
74 * The defaults come from config options but can be overridden by module
77 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
80 * We've only got one major, so the number of mmcblk devices is
81 * limited to (1 << 20) / number of minors per device. It is also
82 * limited by the MAX_DEVICES below.
84 static int max_devices;
86 #define MAX_DEVICES 256
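/*
 * Sizing sketch (assumes MINORBITS == 20, as used in mmc_blk_init() below):
 * with the usual CONFIG_MMC_BLOCK_MINORS default of 8, (1 << 20) / 8 would
 * allow 131072 devices, so the MAX_DEVICES cap of 256 is what actually limits
 * us; only at 4096 minors per device do the two limits coincide at 256.
 */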
88 static DEFINE_IDA(mmc_blk_ida);
89 static DEFINE_SPINLOCK(mmc_blk_lock);
92 * There is one mmc_blk_data per slot.
97 struct mmc_queue queue;
98 struct list_head part;
101 #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
102 #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
103 #define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
106 unsigned int read_only;
107 unsigned int part_type;
108 unsigned int reset_done;
109 #define MMC_BLK_READ BIT(0)
110 #define MMC_BLK_WRITE BIT(1)
111 #define MMC_BLK_DISCARD BIT(2)
112 #define MMC_BLK_SECDISCARD BIT(3)
115 * Only set in main mmc_blk_data associated
116 * with mmc_card with dev_set_drvdata, and keeps
117 * track of the currently selected device partition.
119 unsigned int part_curr;
120 struct device_attribute force_ro;
121 struct device_attribute power_ro_lock;
125 static DEFINE_MUTEX(open_lock);
128 MMC_PACKED_NR_IDX = -1,
130 MMC_PACKED_NR_SINGLE,
133 module_param(perdev_minors, int, 0444);
134 MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
136 static inline int mmc_blk_part_switch(struct mmc_card *card,
137 struct mmc_blk_data *md);
138 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
140 static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
142 struct mmc_packed *packed = mqrq->packed;
146 mqrq->cmd_type = MMC_PACKED_NONE;
147 packed->nr_entries = MMC_PACKED_NR_ZERO;
148 packed->idx_failure = MMC_PACKED_NR_IDX;
153 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
155 struct mmc_blk_data *md;
157 mutex_lock(&open_lock);
158 md = disk->private_data;
159 if (md && md->usage == 0)
163 mutex_unlock(&open_lock);
168 static inline int mmc_get_devidx(struct gendisk *disk)
170 int devidx = disk->first_minor / perdev_minors;
174 static void mmc_blk_put(struct mmc_blk_data *md)
176 mutex_lock(&open_lock);
178 if (md->usage == 0) {
179 int devidx = mmc_get_devidx(md->disk);
180 blk_cleanup_queue(md->queue.queue);
182 spin_lock(&mmc_blk_lock);
183 ida_remove(&mmc_blk_ida, devidx);
184 spin_unlock(&mmc_blk_lock);
189 mutex_unlock(&open_lock);
192 static ssize_t power_ro_lock_show(struct device *dev,
193 struct device_attribute *attr, char *buf)
196 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
197 struct mmc_card *card = md->queue.card;
200 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
202 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
205 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
212 static ssize_t power_ro_lock_store(struct device *dev,
213 struct device_attribute *attr, const char *buf, size_t count)
216 struct mmc_blk_data *md, *part_md;
217 struct mmc_card *card;
220 if (kstrtoul(buf, 0, &set))
226 md = mmc_blk_get(dev_to_disk(dev));
227 card = md->queue.card;
231 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
232 card->ext_csd.boot_ro_lock |
233 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
234 card->ext_csd.part_time);
236 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
238 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
243 pr_info("%s: Locking boot partition ro until next power on\n",
244 md->disk->disk_name);
245 set_disk_ro(md->disk, 1);
247 list_for_each_entry(part_md, &md->part, part)
248 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
249 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
250 set_disk_ro(part_md->disk, 1);
258 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
262 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
264 ret = snprintf(buf, PAGE_SIZE, "%d\n",
265 get_disk_ro(dev_to_disk(dev)) ^
271 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
272 const char *buf, size_t count)
276 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
277 unsigned long set = simple_strtoul(buf, &end, 0);
283 set_disk_ro(dev_to_disk(dev), set || md->read_only);
290 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
292 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
295 mutex_lock(&block_mutex);
298 check_disk_change(bdev);
301 if ((mode & FMODE_WRITE) && md->read_only) {
306 mutex_unlock(&block_mutex);
311 static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
313 struct mmc_blk_data *md = disk->private_data;
315 mutex_lock(&block_mutex);
317 mutex_unlock(&block_mutex);
321 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
323 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
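/*
 * The geometry is synthetic: the elided lines presumably set geo->heads = 4
 * and geo->sectors = 16, matching the 4 * 16 divisor above, purely to satisfy
 * legacy tools; e.g. a 4 GiB card (8388608 sectors) reports 131072 cylinders.
 */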
329 struct mmc_blk_ioc_data {
330 struct mmc_ioc_cmd ic;
335 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
336 struct mmc_ioc_cmd __user *user)
338 struct mmc_blk_ioc_data *idata;
341 idata = kmalloc(sizeof(*idata), GFP_KERNEL);
347 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
352 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
353 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
358 if (!idata->buf_bytes)
361 idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
367 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
368 idata->ic.data_ptr, idata->buf_bytes)) {
383 static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
384 struct mmc_blk_ioc_data *idata)
386 struct mmc_ioc_cmd *ic = &idata->ic;
388 if (copy_to_user(&(ic_ptr->response), ic->response,
389 sizeof(ic->response)))
392 if (!idata->ic.write_flag) {
393 if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
394 idata->buf, idata->buf_bytes))
401 static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
407 if (!status || !retries_max)
411 err = get_card_status(card, status, 5);
415 if (!R1_STATUS(*status) &&
416 (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
417 break; /* RPMB programming operation complete */
420 * Reschedule to give the MMC device a chance to continue
421 * processing the previous command without being polled too
424 usleep_range(1000, 5000);
425 } while (++retry_count < retries_max);
427 if (retry_count == retries_max)
433 static int ioctl_do_sanitize(struct mmc_card *card)
437 if (!mmc_can_sanitize(card)) {
438 pr_warn("%s: %s - SANITIZE is not supported\n",
439 mmc_hostname(card->host), __func__);
444 pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
445 mmc_hostname(card->host), __func__);
447 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
448 EXT_CSD_SANITIZE_START, 1,
449 MMC_SANITIZE_REQ_TIMEOUT);
452 pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
453 mmc_hostname(card->host), __func__, err);
455 pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
461 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
462 struct mmc_blk_ioc_data *idata)
464 struct mmc_command cmd = {0};
465 struct mmc_data data = {0};
466 struct mmc_request mrq = {NULL};
467 struct scatterlist sg;
472 if (!card || !md || !idata)
475 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
478 cmd.opcode = idata->ic.opcode;
479 cmd.arg = idata->ic.arg;
480 cmd.flags = idata->ic.flags;
482 if (idata->buf_bytes) {
485 data.blksz = idata->ic.blksz;
486 data.blocks = idata->ic.blocks;
488 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
490 if (idata->ic.write_flag)
491 data.flags = MMC_DATA_WRITE;
493 data.flags = MMC_DATA_READ;
495 /* data.flags must already be set before doing this. */
496 mmc_set_data_timeout(&data, card);
498 /* Allow overriding the timeout_ns for empirical tuning. */
499 if (idata->ic.data_timeout_ns)
500 data.timeout_ns = idata->ic.data_timeout_ns;
502 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
504 * Pretend this is a data transfer and rely on the
505 * host driver to compute timeout. When all host
506 * drivers support cmd.cmd_timeout for R1B, this
510 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
512 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
520 err = mmc_blk_part_switch(card, md);
524 if (idata->ic.is_acmd) {
525 err = mmc_app_cmd(card->host, card);
531 err = mmc_set_blockcount(card, data.blocks,
532 idata->ic.write_flag & (1 << 31));
537 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
538 (cmd.opcode == MMC_SWITCH)) {
539 err = ioctl_do_sanitize(card);
542 pr_err("%s: ioctl_do_sanitize() failed. err = %d",
548 mmc_wait_for_req(card->host, &mrq);
551 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
552 __func__, cmd.error);
556 dev_err(mmc_dev(card->host), "%s: data error %d\n",
557 __func__, data.error);
562 * According to the SD specs, some commands require a delay after
563 * issuing the command.
565 if (idata->ic.postsleep_min_us)
566 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
568 memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
572 * Ensure RPMB command has completed by polling CMD13
575 err = ioctl_rpmb_card_status_poll(card, &status, 5);
577 dev_err(mmc_dev(card->host),
578 "%s: Card Status=0x%08X, error %d\n",
579 __func__, status, err);
585 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
586 struct mmc_ioc_cmd __user *ic_ptr)
588 struct mmc_blk_ioc_data *idata;
589 struct mmc_blk_data *md;
590 struct mmc_card *card;
591 int err = 0, ioc_err = 0;
594 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
595 * whole block device, not on a partition. This prevents overspray
596 * between sibling partitions.
598 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
601 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
603 return PTR_ERR(idata);
605 md = mmc_blk_get(bdev->bd_disk);
611 card = md->queue.card;
619 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
621 /* Always switch back to main area after RPMB access */
622 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
623 mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
627 err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
634 return ioc_err ? ioc_err : err;
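/*
 * Hedged userspace sketch (not part of the driver) of how this ioctl is
 * typically driven; the command below (CMD13, SEND_STATUS) is only an
 * illustration, the flag macros are mirrored from the kernel's mmc headers
 * the way mmc-utils does, and the caller is assumed to hold CAP_SYS_RAWIO and
 * to have opened the whole device (e.g. /dev/mmcblk0), not a partition:
 *
 *	struct mmc_ioc_cmd ic = {
 *		.opcode = 13,				// MMC_SEND_STATUS
 *		.arg = rca << 16,			// card's relative address
 *		.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC,
 *	};
 *	int fd = open("/dev/mmcblk0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, MMC_IOC_CMD, &ic) == 0)
 *		printf("card status 0x%08x\n", ic.response[0]);
 */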
637 static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
638 struct mmc_ioc_multi_cmd __user *user)
640 struct mmc_blk_ioc_data **idata = NULL;
641 struct mmc_ioc_cmd __user *cmds = user->cmds;
642 struct mmc_card *card;
643 struct mmc_blk_data *md;
644 int i, err = 0, ioc_err = 0;
648 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
649 * whole block device, not on a partition. This prevents overspray
650 * between sibling partitions.
652 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
655 if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
656 sizeof(num_of_cmds)))
659 if (num_of_cmds > MMC_IOC_MAX_CMDS)
662 idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
666 for (i = 0; i < num_of_cmds; i++) {
667 idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
668 if (IS_ERR(idata[i])) {
669 err = PTR_ERR(idata[i]);
675 md = mmc_blk_get(bdev->bd_disk);
681 card = md->queue.card;
689 for (i = 0; i < num_of_cmds && !ioc_err; i++)
690 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
692 /* Always switch back to main area after RPMB access */
693 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
694 mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
698 /* copy to user if data and response */
699 for (i = 0; i < num_of_cmds && !err; i++)
700 err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
705 for (i = 0; i < num_of_cmds; i++) {
706 kfree(idata[i]->buf);
710 return ioc_err ? ioc_err : err;
713 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
714 unsigned int cmd, unsigned long arg)
718 return mmc_blk_ioctl_cmd(bdev,
719 (struct mmc_ioc_cmd __user *)arg);
720 case MMC_IOC_MULTI_CMD:
721 return mmc_blk_ioctl_multi_cmd(bdev,
722 (struct mmc_ioc_multi_cmd __user *)arg);
729 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
730 unsigned int cmd, unsigned long arg)
732 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
736 static const struct block_device_operations mmc_bdops = {
737 .open = mmc_blk_open,
738 .release = mmc_blk_release,
739 .getgeo = mmc_blk_getgeo,
740 .owner = THIS_MODULE,
741 .ioctl = mmc_blk_ioctl,
743 .compat_ioctl = mmc_blk_compat_ioctl,
747 static inline int mmc_blk_part_switch(struct mmc_card *card,
748 struct mmc_blk_data *md)
751 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
753 if (main_md->part_curr == md->part_type)
756 if (mmc_card_mmc(card)) {
757 u8 part_config = card->ext_csd.part_config;
759 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
760 part_config |= md->part_type;
762 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
763 EXT_CSD_PART_CONFIG, part_config,
764 card->ext_csd.part_time);
768 card->ext_csd.part_config = part_config;
771 main_md->part_curr = md->part_type;
775 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
781 struct mmc_request mrq = {NULL};
782 struct mmc_command cmd = {0};
783 struct mmc_data data = {0};
785 struct scatterlist sg;
787 cmd.opcode = MMC_APP_CMD;
788 cmd.arg = card->rca << 16;
789 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
791 err = mmc_wait_for_cmd(card->host, &cmd, 0);
794 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
797 memset(&cmd, 0, sizeof(struct mmc_command));
799 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
801 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
805 data.flags = MMC_DATA_READ;
808 mmc_set_data_timeout(&data, card);
813 blocks = kmalloc(4, GFP_KERNEL);
817 sg_init_one(&sg, blocks, 4);
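	/*
	 * ACMD22 (SEND_NUM_WR_BLOCKS) returns a single 32-bit, big-endian
	 * count of well-written blocks, which is why only 4 bytes are read
	 * here and why the result is converted with ntohl() below.
	 */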
819 mmc_wait_for_req(card->host, &mrq);
821 result = ntohl(*blocks);
824 if (cmd.error || data.error)
830 static int get_card_status(struct mmc_card *card, u32 *status, int retries)
832 struct mmc_command cmd = {0};
835 cmd.opcode = MMC_SEND_STATUS;
836 if (!mmc_host_is_spi(card->host))
837 cmd.arg = card->rca << 16;
838 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
839 err = mmc_wait_for_cmd(card->host, &cmd, retries);
841 *status = cmd.resp[0];
845 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
846 bool hw_busy_detect, struct request *req, int *gen_err)
848 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
853 err = get_card_status(card, &status, 5);
855 pr_err("%s: error %d requesting status\n",
856 req->rq_disk->disk_name, err);
860 if (status & R1_ERROR) {
861 pr_err("%s: %s: error sending status cmd, status %#x\n",
862 req->rq_disk->disk_name, __func__, status);
866 /* We may rely on the host hw to handle busy detection. */
867 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
872 * Timeout if the device never becomes ready for data and never
873 * leaves the program state.
875 if (time_after(jiffies, timeout)) {
876 pr_err("%s: Card stuck in programming state! %s %s\n",
877 mmc_hostname(card->host),
878 req->rq_disk->disk_name, __func__);
883 * Some cards mishandle the status bits,
884 * so make sure to check both the busy
885 * indication and the card state.
887 } while (!(status & R1_READY_FOR_DATA) ||
888 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
893 static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
894 struct request *req, int *gen_err, u32 *stop_status)
896 struct mmc_host *host = card->host;
897 struct mmc_command cmd = {0};
899 bool use_r1b_resp = rq_data_dir(req) == WRITE;
902 * Normally we use R1B responses for WRITE, but in cases where the host
903 * has specified a max_busy_timeout we need to validate it. A failure
904 * means we need to prevent the host from doing hw busy detection, which
905 * is done by converting to an R1 response instead.
907 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
908 use_r1b_resp = false;
910 cmd.opcode = MMC_STOP_TRANSMISSION;
912 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
913 cmd.busy_timeout = timeout_ms;
915 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
918 err = mmc_wait_for_cmd(host, &cmd, 5);
922 *stop_status = cmd.resp[0];
924 /* No need to check card status in case of READ. */
925 if (rq_data_dir(req) == READ)
928 if (!mmc_host_is_spi(host) &&
929 (*stop_status & R1_ERROR)) {
930 pr_err("%s: %s: general error sending stop command, resp %#x\n",
931 req->rq_disk->disk_name, __func__, *stop_status);
935 return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
938 #define ERR_NOMEDIUM 3
941 #define ERR_CONTINUE 0
943 static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
944 bool status_valid, u32 status)
948 /* response crc error, retry the r/w cmd */
949 pr_err("%s: %s sending %s command, card status %#x\n",
950 req->rq_disk->disk_name, "response CRC error",
955 pr_err("%s: %s sending %s command, card status %#x\n",
956 req->rq_disk->disk_name, "timed out", name, status);
958 /* If the status cmd initially failed, retry the r/w cmd */
960 pr_err("%s: status not valid, retrying timeout\n",
961 req->rq_disk->disk_name);
966 * If it was a r/w cmd crc error, or illegal command
967 * (eg, issued in wrong state) then retry - we should
968 * have corrected the state problem above.
970 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
971 pr_err("%s: command error, retrying timeout\n",
972 req->rq_disk->disk_name);
976 /* Otherwise abort the command */
980 /* We don't understand the error code the driver gave us */
981 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
982 req->rq_disk->disk_name, error, status);
988 * Initial r/w and stop cmd error recovery.
989 * We don't know whether the card received the r/w cmd or not, so try to
990 * restore things back to a sane state. Essentially, we do this as follows:
991 * - Obtain card status. If the first attempt to obtain card status fails,
992 * the status word will reflect the failed status cmd, not the failed
993 * r/w cmd. If we fail to obtain card status, it suggests we can no
994 * longer communicate with the card.
995 * - Check the card state. If the card received the cmd but there was a
996 * transient problem with the response, it might still be in a data transfer
997 * mode. Try to send it a stop command. If this fails, we can't recover.
998 * - If the r/w cmd failed due to a response CRC error, it was probably
999 * transient, so retry the cmd.
1000 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
1001 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
1002 * illegal cmd, retry.
1003 * Otherwise we don't understand what happened, so abort.
1005 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
1006 struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
1008 bool prev_cmd_status_valid = true;
1009 u32 status, stop_status = 0;
1012 if (mmc_card_removed(card))
1013 return ERR_NOMEDIUM;
1016 * Try to get card status which indicates both the card state
1017 * and why there was no response. If the first attempt fails,
1018 * we can't be sure the returned status is for the r/w command.
1020 for (retry = 2; retry >= 0; retry--) {
1021 err = get_card_status(card, &status, 0);
1025 /* Re-tune if needed */
1026 mmc_retune_recheck(card->host);
1028 prev_cmd_status_valid = false;
1029 pr_err("%s: error %d sending status command, %sing\n",
1030 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1033 /* We couldn't get a response from the card. Give up. */
1035 /* Check if the card is removed */
1036 if (mmc_detect_card_removed(card->host))
1037 return ERR_NOMEDIUM;
1041 /* Flag ECC errors */
1042 if ((status & R1_CARD_ECC_FAILED) ||
1043 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1044 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1047 /* Flag General errors */
1048 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1049 if ((status & R1_ERROR) ||
1050 (brq->stop.resp[0] & R1_ERROR)) {
1051 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1052 req->rq_disk->disk_name, __func__,
1053 brq->stop.resp[0], status);
1058 * Check the current card state. If it is in some data transfer
1059 * mode, tell it to stop (and hopefully transition back to TRAN.)
1061 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1062 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
1063 err = send_stop(card,
1064 DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
1065 req, gen_err, &stop_status);
1067 pr_err("%s: error %d sending stop command\n",
1068 req->rq_disk->disk_name, err);
1070 * If the stop cmd also timed out, the card is probably
1071 * not present, so abort. Other errors are bad news too.
1076 if (stop_status & R1_CARD_ECC_FAILED)
1080 /* Check for set block count errors */
1082 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1083 prev_cmd_status_valid, status);
1085 /* Check for r/w command errors */
1087 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1088 prev_cmd_status_valid, status);
1091 if (!brq->stop.error)
1092 return ERR_CONTINUE;
1094 /* Now for stop errors. These aren't fatal to the transfer. */
1095 pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
1096 req->rq_disk->disk_name, brq->stop.error,
1097 brq->cmd.resp[0], status);
1100 * Substitute in our own stop status as this will give the error
1101 * state which happened during the execution of the r/w command.
1104 brq->stop.resp[0] = stop_status;
1105 brq->stop.error = 0;
1107 return ERR_CONTINUE;
1110 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1115 if (md->reset_done & type)
1118 md->reset_done |= type;
1119 err = mmc_hw_reset(host);
1120 /* Ensure we switch back to the correct partition */
1121 if (err != -EOPNOTSUPP) {
1122 struct mmc_blk_data *main_md =
1123 dev_get_drvdata(&host->card->dev);
1126 main_md->part_curr = main_md->part_type;
1127 part_err = mmc_blk_part_switch(host->card, md);
1130 * We have failed to get back into the correct
1131 * partition, so we need to abort the whole request.
1139 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1141 md->reset_done &= ~type;
1144 int mmc_access_rpmb(struct mmc_queue *mq)
1146 struct mmc_blk_data *md = mq->data;
1148 * If this is an RPMB partition access, return true
1150 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1156 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1158 struct mmc_blk_data *md = mq->data;
1159 struct mmc_card *card = md->queue.card;
1160 unsigned int from, nr, arg;
1161 int err = 0, type = MMC_BLK_DISCARD;
1163 if (!mmc_can_erase(card)) {
1168 from = blk_rq_pos(req);
1169 nr = blk_rq_sectors(req);
1171 if (mmc_can_discard(card))
1172 arg = MMC_DISCARD_ARG;
1173 else if (mmc_can_trim(card))
1176 arg = MMC_ERASE_ARG;
1178 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1179 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1180 INAND_CMD38_ARG_EXT_CSD,
1181 arg == MMC_TRIM_ARG ?
1182 INAND_CMD38_ARG_TRIM :
1183 INAND_CMD38_ARG_ERASE,
1188 err = mmc_erase(card, from, nr, arg);
1190 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1193 mmc_blk_reset_success(md, type);
1194 blk_end_request(req, err, blk_rq_bytes(req));
1199 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1200 struct request *req)
1202 struct mmc_blk_data *md = mq->data;
1203 struct mmc_card *card = md->queue.card;
1204 unsigned int from, nr, arg;
1205 int err = 0, type = MMC_BLK_SECDISCARD;
1207 if (!(mmc_can_secure_erase_trim(card))) {
1212 from = blk_rq_pos(req);
1213 nr = blk_rq_sectors(req);
1215 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1216 arg = MMC_SECURE_TRIM1_ARG;
1218 arg = MMC_SECURE_ERASE_ARG;
1221 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1222 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1223 INAND_CMD38_ARG_EXT_CSD,
1224 arg == MMC_SECURE_TRIM1_ARG ?
1225 INAND_CMD38_ARG_SECTRIM1 :
1226 INAND_CMD38_ARG_SECERASE,
1232 err = mmc_erase(card, from, nr, arg);
1238 if (arg == MMC_SECURE_TRIM1_ARG) {
1239 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1240 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1241 INAND_CMD38_ARG_EXT_CSD,
1242 INAND_CMD38_ARG_SECTRIM2,
1248 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1256 if (err && !mmc_blk_reset(md, card->host, type))
1259 mmc_blk_reset_success(md, type);
1261 blk_end_request(req, err, blk_rq_bytes(req));
1266 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1268 struct mmc_blk_data *md = mq->data;
1269 struct mmc_card *card = md->queue.card;
1272 ret = mmc_flush_cache(card);
1276 blk_end_request_all(req, ret);
1282 * Reformat current write as a reliable write, supporting
1283 * both legacy and the enhanced reliable write MMC cards.
1284 * In each transfer we'll handle only as much as a single
1285 * reliable write can handle, thus finishing the request in
1286 * partial completions.
1288 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1289 struct mmc_card *card,
1290 struct request *req)
1292 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1293 /* Legacy mode imposes restrictions on transfers. */
1294 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1295 brq->data.blocks = 1;
1297 if (brq->data.blocks > card->ext_csd.rel_sectors)
1298 brq->data.blocks = card->ext_csd.rel_sectors;
1299 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1300 brq->data.blocks = 1;
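/*
 * Worked example of the legacy clamping above (illustrative): with
 * rel_sectors == 8, a 32-sector write starting at an 8-aligned address is
 * issued as 8-sector reliable chunks (partial completions finish the rest),
 * while a misaligned start or a write shorter than 8 sectors falls back to
 * single-sector transfers.
 */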
1304 #define CMD_ERRORS \
1305 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
1306 R1_ADDRESS_ERROR | /* Misaligned address */ \
1307 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
1308 R1_WP_VIOLATION | /* Tried to write to protected block */ \
1309 R1_CC_ERROR | /* Card controller error */ \
1310 R1_ERROR) /* General/unknown error */
1312 static int mmc_blk_err_check(struct mmc_card *card,
1313 struct mmc_async_req *areq)
1315 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1317 struct mmc_blk_request *brq = &mq_mrq->brq;
1318 struct request *req = mq_mrq->req;
1319 int need_retune = card->host->need_retune;
1320 int ecc_err = 0, gen_err = 0;
1323 * sbc.error indicates a problem with the set block count
1324 * command. No data will have been transferred.
1326 * cmd.error indicates a problem with the r/w command. No
1327 * data will have been transferred.
1329 * stop.error indicates a problem with the stop command. Data
1330 * may have been transferred, or may still be transferring.
1332 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1334 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1336 return MMC_BLK_RETRY;
1338 return MMC_BLK_ABORT;
1340 return MMC_BLK_NOMEDIUM;
1347 * Check for errors relating to the execution of the
1348 * initial command - such as address errors. No data
1349 * has been transferred.
1351 if (brq->cmd.resp[0] & CMD_ERRORS) {
1352 pr_err("%s: r/w command failed, status = %#x\n",
1353 req->rq_disk->disk_name, brq->cmd.resp[0]);
1354 return MMC_BLK_ABORT;
1358 * Everything else is either success, or a data error of some
1359 * kind. If it was a write, we may have transitioned to
1360 * program mode, which we have to wait for to complete.
1362 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1365 /* Check stop command response */
1366 if (brq->stop.resp[0] & R1_ERROR) {
1367 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1368 req->rq_disk->disk_name, __func__,
1373 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1376 return MMC_BLK_CMD_ERR;
1379 /* if general error occurs, retry the write operation. */
1381 pr_warn("%s: retrying write for general error\n",
1382 req->rq_disk->disk_name);
1383 return MMC_BLK_RETRY;
1386 if (brq->data.error) {
1387 if (need_retune && !brq->retune_retry_done) {
1388 pr_debug("%s: retrying because a re-tune was needed\n",
1389 req->rq_disk->disk_name);
1390 brq->retune_retry_done = 1;
1391 return MMC_BLK_RETRY;
1393 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1394 req->rq_disk->disk_name, brq->data.error,
1395 (unsigned)blk_rq_pos(req),
1396 (unsigned)blk_rq_sectors(req),
1397 brq->cmd.resp[0], brq->stop.resp[0]);
1399 if (rq_data_dir(req) == READ) {
1401 return MMC_BLK_ECC_ERR;
1402 return MMC_BLK_DATA_ERR;
1404 return MMC_BLK_CMD_ERR;
1408 if (!brq->data.bytes_xfered)
1409 return MMC_BLK_RETRY;
1411 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1412 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1413 return MMC_BLK_PARTIAL;
1415 return MMC_BLK_SUCCESS;
1418 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1419 return MMC_BLK_PARTIAL;
1421 return MMC_BLK_SUCCESS;
1424 static int mmc_blk_packed_err_check(struct mmc_card *card,
1425 struct mmc_async_req *areq)
1427 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1429 struct request *req = mq_rq->req;
1430 struct mmc_packed *packed = mq_rq->packed;
1431 int err, check, status;
1437 check = mmc_blk_err_check(card, areq);
1438 err = get_card_status(card, &status, 0);
1440 pr_err("%s: error %d sending status command\n",
1441 req->rq_disk->disk_name, err);
1442 return MMC_BLK_ABORT;
1445 if (status & R1_EXCEPTION_EVENT) {
1446 err = mmc_get_ext_csd(card, &ext_csd);
1448 pr_err("%s: error %d sending ext_csd\n",
1449 req->rq_disk->disk_name, err);
1450 return MMC_BLK_ABORT;
1453 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1454 EXT_CSD_PACKED_FAILURE) &&
1455 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1456 EXT_CSD_PACKED_GENERIC_ERROR)) {
1457 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1458 EXT_CSD_PACKED_INDEXED_ERROR) {
1459 packed->idx_failure =
1460 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1461 check = MMC_BLK_PARTIAL;
1463 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1464 "failure index: %d\n",
1465 req->rq_disk->disk_name, packed->nr_entries,
1466 packed->blocks, packed->idx_failure);
1474 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1475 struct mmc_card *card,
1477 struct mmc_queue *mq)
1479 u32 readcmd, writecmd;
1480 struct mmc_blk_request *brq = &mqrq->brq;
1481 struct request *req = mqrq->req;
1482 struct mmc_blk_data *md = mq->data;
1486 * Reliable writes are used to implement Forced Unit Access and
1487 * are supported only on MMCs.
1489 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1490 (rq_data_dir(req) == WRITE) &&
1491 (md->flags & MMC_BLK_REL_WR);
1493 memset(brq, 0, sizeof(struct mmc_blk_request));
1494 brq->mrq.cmd = &brq->cmd;
1495 brq->mrq.data = &brq->data;
1497 brq->cmd.arg = blk_rq_pos(req);
1498 if (!mmc_card_blockaddr(card))
1500 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1501 brq->data.blksz = 512;
1502 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1504 brq->data.blocks = blk_rq_sectors(req);
1507 * The block layer doesn't support all sector count
1508 * restrictions, so we need to be prepared for too big requests.
1511 if (brq->data.blocks > card->host->max_blk_count)
1512 brq->data.blocks = card->host->max_blk_count;
1514 if (brq->data.blocks > 1) {
1516 * After a read error, we redo the request one sector
1517 * at a time in order to accurately determine which
1518 * sectors can be read successfully.
1521 brq->data.blocks = 1;
1524 * Some controllers have HW issues while operating
1525 * in multiple I/O mode
1527 if (card->host->ops->multi_io_quirk)
1528 brq->data.blocks = card->host->ops->multi_io_quirk(card,
1529 (rq_data_dir(req) == READ) ?
1530 MMC_DATA_READ : MMC_DATA_WRITE,
1534 if (brq->data.blocks > 1 || do_rel_wr) {
1535 /* SPI multiblock writes terminate using a special
1536 * token, not a STOP_TRANSMISSION request.
1538 if (!mmc_host_is_spi(card->host) ||
1539 rq_data_dir(req) == READ)
1540 brq->mrq.stop = &brq->stop;
1541 readcmd = MMC_READ_MULTIPLE_BLOCK;
1542 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1544 brq->mrq.stop = NULL;
1545 readcmd = MMC_READ_SINGLE_BLOCK;
1546 writecmd = MMC_WRITE_BLOCK;
1548 if (rq_data_dir(req) == READ) {
1549 brq->cmd.opcode = readcmd;
1550 brq->data.flags = MMC_DATA_READ;
1552 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1555 brq->cmd.opcode = writecmd;
1556 brq->data.flags = MMC_DATA_WRITE;
1558 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1563 mmc_apply_rel_rw(brq, card, req);
1566 * Data tag is used only when writing metadata, to speed up
1567 * the write and any subsequent reads of that metadata
1569 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1570 (req->cmd_flags & REQ_META) &&
1571 (rq_data_dir(req) == WRITE) &&
1572 ((brq->data.blocks * brq->data.blksz) >=
1573 card->ext_csd.data_tag_unit_size);
1576 * Pre-defined multi-block transfers are preferable to
1577 * open-ended ones (and necessary for reliable writes).
1578 * However, it is not sufficient to just send CMD23,
1579 * and avoid the final CMD12, as on an error condition
1580 * CMD12 (stop) needs to be sent anyway. This, coupled
1581 * with Auto-CMD23 enhancements provided by some
1582 * hosts, means that the complexity of dealing
1583 * with this is best left to the host. If CMD23 is
1584 * supported by card and host, we'll fill sbc in and let
1585 * the host deal with handling it correctly. This means
1586 * that for hosts that don't expose MMC_CAP_CMD23, no
1587 * change of behavior will be observed.
1589 * N.B.: Some MMC cards experience performance degradation.
1590 * We'll avoid using CMD23-bounded multiblock writes for
1591 * these, while retaining features like reliable writes.
1593 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1594 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1596 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1597 brq->sbc.arg = brq->data.blocks |
1598 (do_rel_wr ? (1 << 31) : 0) |
1599 (do_data_tag ? (1 << 29) : 0);
1600 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1601 brq->mrq.sbc = &brq->sbc;
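/*
 * Sketch of the resulting CMD23 argument (per the sbc.arg assembly above):
 * bit 31 requests a reliable write, bit 29 tags the data, and the low bits
 * carry the block count, so a reliable, untagged 8-block write ends up with
 * an argument of 0x80000008.
 */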
1604 mmc_set_data_timeout(&brq->data, card);
1606 brq->data.sg = mqrq->sg;
1607 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1610 * Adjust the sg list so it is the same size as the
1613 if (brq->data.blocks != blk_rq_sectors(req)) {
1614 int i, data_size = brq->data.blocks << 9;
1615 struct scatterlist *sg;
1617 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1618 data_size -= sg->length;
1619 if (data_size <= 0) {
1620 sg->length += data_size;
1625 brq->data.sg_len = i;
1628 mqrq->mmc_active.mrq = &brq->mrq;
1629 mqrq->mmc_active.err_check = mmc_blk_err_check;
1631 mmc_queue_bounce_pre(mqrq);
1634 static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1635 struct mmc_card *card)
1637 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1638 unsigned int max_seg_sz = queue_max_segment_size(q);
1639 unsigned int len, nr_segs = 0;
1642 len = min(hdr_sz, max_seg_sz);
1650 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1652 struct request_queue *q = mq->queue;
1653 struct mmc_card *card = mq->card;
1654 struct request *cur = req, *next = NULL;
1655 struct mmc_blk_data *md = mq->data;
1656 struct mmc_queue_req *mqrq = mq->mqrq_cur;
1657 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1658 unsigned int req_sectors = 0, phys_segments = 0;
1659 unsigned int max_blk_count, max_phys_segs;
1660 bool put_back = true;
1661 u8 max_packed_rw = 0;
1664 if (!(md->flags & MMC_BLK_PACKED_CMD))
1667 if ((rq_data_dir(cur) == WRITE) &&
1668 mmc_host_packed_wr(card->host))
1669 max_packed_rw = card->ext_csd.max_packed_writes;
1671 if (max_packed_rw == 0)
1674 if (mmc_req_rel_wr(cur) &&
1675 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1678 if (mmc_large_sector(card) &&
1679 !IS_ALIGNED(blk_rq_sectors(cur), 8))
1682 mmc_blk_clear_packed(mqrq);
1684 max_blk_count = min(card->host->max_blk_count,
1685 card->host->max_req_size >> 9);
1686 if (unlikely(max_blk_count > 0xffff))
1687 max_blk_count = 0xffff;
1689 max_phys_segs = queue_max_segments(q);
1690 req_sectors += blk_rq_sectors(cur);
1691 phys_segments += cur->nr_phys_segments;
1693 if (rq_data_dir(cur) == WRITE) {
1694 req_sectors += mmc_large_sector(card) ? 8 : 1;
1695 phys_segments += mmc_calc_packed_hdr_segs(q, card);
1699 if (reqs >= max_packed_rw - 1) {
1704 spin_lock_irq(q->queue_lock);
1705 next = blk_fetch_request(q);
1706 spin_unlock_irq(q->queue_lock);
1712 if (mmc_large_sector(card) &&
1713 !IS_ALIGNED(blk_rq_sectors(next), 8))
1716 if (next->cmd_flags & REQ_DISCARD ||
1717 next->cmd_flags & REQ_FLUSH)
1720 if (rq_data_dir(cur) != rq_data_dir(next))
1723 if (mmc_req_rel_wr(next) &&
1724 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1727 req_sectors += blk_rq_sectors(next);
1728 if (req_sectors > max_blk_count)
1731 phys_segments += next->nr_phys_segments;
1732 if (phys_segments > max_phys_segs)
1735 list_add_tail(&next->queuelist, &mqrq->packed->list);
1741 spin_lock_irq(q->queue_lock);
1742 blk_requeue_request(q, next);
1743 spin_unlock_irq(q->queue_lock);
1747 list_add(&req->queuelist, &mqrq->packed->list);
1748 mqrq->packed->nr_entries = ++reqs;
1749 mqrq->packed->retries = reqs;
1754 mqrq->cmd_type = MMC_PACKED_NONE;
1758 static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1759 struct mmc_card *card,
1760 struct mmc_queue *mq)
1762 struct mmc_blk_request *brq = &mqrq->brq;
1763 struct request *req = mqrq->req;
1764 struct request *prq;
1765 struct mmc_blk_data *md = mq->data;
1766 struct mmc_packed *packed = mqrq->packed;
1767 bool do_rel_wr, do_data_tag;
1768 u32 *packed_cmd_hdr;
1774 mqrq->cmd_type = MMC_PACKED_WRITE;
1776 packed->idx_failure = MMC_PACKED_NR_IDX;
1778 packed_cmd_hdr = packed->cmd_hdr;
1779 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
1780 packed_cmd_hdr[0] = (packed->nr_entries << 16) |
1781 (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
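	/*
	 * Example header word 0 (from the expression above): a packed group of
	 * three writes yields (3 << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER
	 * == 0x00030201.
	 */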
1782 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1785 * Argument for each entry of packed group
1787 list_for_each_entry(prq, &packed->list, queuelist) {
1788 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1789 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1790 (prq->cmd_flags & REQ_META) &&
1791 (rq_data_dir(prq) == WRITE) &&
1792 ((brq->data.blocks * brq->data.blksz) >=
1793 card->ext_csd.data_tag_unit_size);
1794 /* Argument of CMD23 */
1795 packed_cmd_hdr[(i * 2)] =
1796 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1797 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1798 blk_rq_sectors(prq);
1799 /* Argument of CMD18 or CMD25 */
1800 packed_cmd_hdr[((i * 2)) + 1] =
1801 mmc_card_blockaddr(card) ?
1802 blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1803 packed->blocks += blk_rq_sectors(prq);
1807 memset(brq, 0, sizeof(struct mmc_blk_request));
1808 brq->mrq.cmd = &brq->cmd;
1809 brq->mrq.data = &brq->data;
1810 brq->mrq.sbc = &brq->sbc;
1811 brq->mrq.stop = &brq->stop;
1813 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1814 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
1815 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
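	/*
	 * Illustrative value: MMC_CMD23_ARG_PACKED sets bit 30, so a packed
	 * group of 24 data blocks plus one header block (on a 512-byte-sector
	 * card) gives a CMD23 argument of 0x40000019.
	 */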
1817 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1818 brq->cmd.arg = blk_rq_pos(req);
1819 if (!mmc_card_blockaddr(card))
1821 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1823 brq->data.blksz = 512;
1824 brq->data.blocks = packed->blocks + hdr_blocks;
1825 brq->data.flags = MMC_DATA_WRITE;
1827 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1829 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1831 mmc_set_data_timeout(&brq->data, card);
1833 brq->data.sg = mqrq->sg;
1834 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1836 mqrq->mmc_active.mrq = &brq->mrq;
1837 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1839 mmc_queue_bounce_pre(mqrq);
1842 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1843 struct mmc_blk_request *brq, struct request *req,
1846 struct mmc_queue_req *mq_rq;
1847 mq_rq = container_of(brq, struct mmc_queue_req, brq);
1850 * If this is an SD card and we're writing, we can first
1851 * mark the known good sectors as ok.
1853 * If the card is not SD, we can still ok written sectors
1854 * as reported by the controller (which might be less than
1855 * the real number of written sectors, but never more).
1857 if (mmc_card_sd(card)) {
1860 blocks = mmc_sd_num_wr_blocks(card);
1861 if (blocks != (u32)-1) {
1862 ret = blk_end_request(req, 0, blocks << 9);
1865 if (!mmc_packed_cmd(mq_rq->cmd_type))
1866 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
1871 static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1873 struct request *prq;
1874 struct mmc_packed *packed = mq_rq->packed;
1875 int idx = packed->idx_failure, i = 0;
1880 while (!list_empty(&packed->list)) {
1881 prq = list_entry_rq(packed->list.next);
1883 /* retry from error index */
1884 packed->nr_entries -= idx;
1888 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
1889 list_del_init(&prq->queuelist);
1890 mmc_blk_clear_packed(mq_rq);
1894 list_del_init(&prq->queuelist);
1895 blk_end_request(prq, 0, blk_rq_bytes(prq));
1899 mmc_blk_clear_packed(mq_rq);
1903 static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1905 struct request *prq;
1906 struct mmc_packed *packed = mq_rq->packed;
1910 while (!list_empty(&packed->list)) {
1911 prq = list_entry_rq(packed->list.next);
1912 list_del_init(&prq->queuelist);
1913 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1916 mmc_blk_clear_packed(mq_rq);
1919 static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1920 struct mmc_queue_req *mq_rq)
1922 struct request *prq;
1923 struct request_queue *q = mq->queue;
1924 struct mmc_packed *packed = mq_rq->packed;
1928 while (!list_empty(&packed->list)) {
1929 prq = list_entry_rq(packed->list.prev);
1930 if (prq->queuelist.prev != &packed->list) {
1931 list_del_init(&prq->queuelist);
1932 spin_lock_irq(q->queue_lock);
1933 blk_requeue_request(mq->queue, prq);
1934 spin_unlock_irq(q->queue_lock);
1936 list_del_init(&prq->queuelist);
1940 mmc_blk_clear_packed(mq_rq);
1943 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1945 struct mmc_blk_data *md = mq->data;
1946 struct mmc_card *card = md->queue.card;
1947 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1948 int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
1949 enum mmc_blk_status status;
1950 struct mmc_queue_req *mq_rq;
1951 struct request *req = rqc;
1952 struct mmc_async_req *areq;
1953 const u8 packed_nr = 2;
1956 if (!rqc && !mq->mqrq_prev->req)
1960 reqs = mmc_blk_prep_packed_list(mq, rqc);
1965 * When 4KB native sectors are enabled, only reads and writes
1966 * in multiples of 8 blocks are allowed
1968 if ((brq->data.blocks & 0x07) &&
1969 (card->ext_csd.data_sector_size == 4096)) {
1970 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1971 req->rq_disk->disk_name);
1972 mq_rq = mq->mqrq_cur;
1976 if (reqs >= packed_nr)
1977 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1980 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1981 areq = &mq->mqrq_cur->mmc_active;
1984 areq = mmc_start_req(card->host, areq, (int *) &status);
1986 if (status == MMC_BLK_NEW_REQUEST)
1987 mq->flags |= MMC_QUEUE_NEW_REQUEST;
1991 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1994 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1995 mmc_queue_bounce_post(mq_rq);
1998 case MMC_BLK_SUCCESS:
1999 case MMC_BLK_PARTIAL:
2001 * A block was successfully transferred.
2003 mmc_blk_reset_success(md, type);
2005 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2006 ret = mmc_blk_end_packed_req(mq_rq);
2009 ret = blk_end_request(req, 0,
2010 brq->data.bytes_xfered);
2014 * If the blk_end_request function returns non-zero even
2015 * though all data has been transferred and no errors
2016 * were returned by the host controller, it's a bug.
2018 if (status == MMC_BLK_SUCCESS && ret) {
2019 pr_err("%s BUG rq_tot %d d_xfer %d\n",
2020 __func__, blk_rq_bytes(req),
2021 brq->data.bytes_xfered);
2026 case MMC_BLK_CMD_ERR:
2027 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
2028 if (mmc_blk_reset(md, card->host, type))
2034 retune_retry_done = brq->retune_retry_done;
2039 if (!mmc_blk_reset(md, card->host, type))
2042 case MMC_BLK_DATA_ERR: {
2045 err = mmc_blk_reset(md, card->host, type);
2048 if (err == -ENODEV ||
2049 mmc_packed_cmd(mq_rq->cmd_type))
2053 case MMC_BLK_ECC_ERR:
2054 if (brq->data.blocks > 1) {
2055 /* Redo read one sector at a time */
2056 pr_warn("%s: retrying using single block read\n",
2057 req->rq_disk->disk_name);
2062 * After an error, we redo I/O one sector at a
2063 * time, so we only reach here after trying to
2064 * read a single sector.
2066 ret = blk_end_request(req, -EIO,
2071 case MMC_BLK_NOMEDIUM:
2074 pr_err("%s: Unhandled return value (%d)",
2075 req->rq_disk->disk_name, status);
2080 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2081 if (!mq_rq->packed->retries)
2083 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2084 mmc_start_req(card->host,
2085 &mq_rq->mmc_active, NULL);
2089 * In case of an incomplete request,
2090 * prepare it again and resend.
2092 mmc_blk_rw_rq_prep(mq_rq, card,
2094 mmc_start_req(card->host,
2095 &mq_rq->mmc_active, NULL);
2097 mq_rq->brq.retune_retry_done = retune_retry_done;
2104 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2105 mmc_blk_abort_packed_req(mq_rq);
2107 if (mmc_card_removed(card))
2108 req->cmd_flags |= REQ_QUIET;
2110 ret = blk_end_request(req, -EIO,
2111 blk_rq_cur_bytes(req));
2116 if (mmc_card_removed(card)) {
2117 rqc->cmd_flags |= REQ_QUIET;
2118 blk_end_request_all(rqc, -EIO);
2121 * If the current request is packed, it needs to be put back.
2123 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2124 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2126 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2127 mmc_start_req(card->host,
2128 &mq->mqrq_cur->mmc_active, NULL);
2135 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2138 struct mmc_blk_data *md = mq->data;
2139 struct mmc_card *card = md->queue.card;
2140 struct mmc_host *host = card->host;
2141 unsigned long flags;
2142 unsigned int cmd_flags = req ? req->cmd_flags : 0;
2144 if (req && !mq->mqrq_prev->req)
2145 /* claim host only for the first request */
2148 ret = mmc_blk_part_switch(card, md);
2151 blk_end_request_all(req, -EIO);
2157 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2158 if (cmd_flags & REQ_DISCARD) {
2159 /* complete ongoing async transfer before issuing discard */
2160 if (card->host->areq)
2161 mmc_blk_issue_rw_rq(mq, NULL);
2162 if (req->cmd_flags & REQ_SECURE)
2163 ret = mmc_blk_issue_secdiscard_rq(mq, req);
2165 ret = mmc_blk_issue_discard_rq(mq, req);
2166 } else if (cmd_flags & REQ_FLUSH) {
2167 /* complete ongoing async transfer before issuing flush */
2168 if (card->host->areq)
2169 mmc_blk_issue_rw_rq(mq, NULL);
2170 ret = mmc_blk_issue_flush(mq, req);
2172 if (!req && host->areq) {
2173 spin_lock_irqsave(&host->context_info.lock, flags);
2174 host->context_info.is_waiting_last_req = true;
2175 spin_unlock_irqrestore(&host->context_info.lock, flags);
2177 ret = mmc_blk_issue_rw_rq(mq, req);
2181 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2182 (cmd_flags & MMC_REQ_SPECIAL_MASK))
2184 * Release host when there are no more requests
2185 * and after a special request (discard, flush) is done.
2186 * In case of a special request, there is no reentry to
2187 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2193 static inline int mmc_blk_readonly(struct mmc_card *card)
2195 return mmc_card_readonly(card) ||
2196 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2199 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2200 struct device *parent,
2203 const char *subname,
2206 struct mmc_blk_data *md;
2210 if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
2211 return ERR_PTR(-ENOMEM);
2213 spin_lock(&mmc_blk_lock);
2214 ret = ida_get_new(&mmc_blk_ida, &devidx);
2215 spin_unlock(&mmc_blk_lock);
2220 return ERR_PTR(ret);
2222 if (devidx >= max_devices) {
2227 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2233 md->area_type = area_type;
2236 * Set the read-only status based on the supported commands
2237 * and the write protect switch.
2239 md->read_only = mmc_blk_readonly(card);
2241 md->disk = alloc_disk(perdev_minors);
2242 if (md->disk == NULL) {
2247 spin_lock_init(&md->lock);
2248 INIT_LIST_HEAD(&md->part);
2251 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2255 md->queue.issue_fn = mmc_blk_issue_rq;
2256 md->queue.data = md;
2258 md->disk->major = MMC_BLOCK_MAJOR;
2259 md->disk->first_minor = devidx * perdev_minors;
2260 md->disk->fops = &mmc_bdops;
2261 md->disk->private_data = md;
2262 md->disk->queue = md->queue.queue;
2263 md->disk->driverfs_dev = parent;
2264 set_disk_ro(md->disk, md->read_only || default_ro);
2265 md->disk->flags = GENHD_FL_EXT_DEVT;
2266 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2267 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2270 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2272 * - be set for removable media with permanent block devices
2273 * - be unset for removable block devices with permanent media
2275 * Since MMC block devices clearly fall under the second
2276 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2277 * should use the block device creation/destruction hotplug
2278 * messages to tell when the card is present.
2281 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2282 "mmcblk%u%s", card->host->index, subname ? subname : "");
2284 if (mmc_card_mmc(card))
2285 blk_queue_logical_block_size(md->queue.queue,
2286 card->ext_csd.data_sector_size);
2288 blk_queue_logical_block_size(md->queue.queue, 512);
2290 set_capacity(md->disk, size);
2292 if (mmc_host_cmd23(card->host)) {
2293 if (mmc_card_mmc(card) ||
2294 (mmc_card_sd(card) &&
2295 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2296 md->flags |= MMC_BLK_CMD23;
2299 if (mmc_card_mmc(card) &&
2300 md->flags & MMC_BLK_CMD23 &&
2301 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2302 card->ext_csd.rel_sectors)) {
2303 md->flags |= MMC_BLK_REL_WR;
2304 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2307 if (mmc_card_mmc(card) &&
2308 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2309 (md->flags & MMC_BLK_CMD23) &&
2310 card->ext_csd.packed_event_en) {
2311 if (!mmc_packed_init(&md->queue, card))
2312 md->flags |= MMC_BLK_PACKED_CMD;
2322 spin_lock(&mmc_blk_lock);
2323 ida_remove(&mmc_blk_ida, devidx);
2324 spin_unlock(&mmc_blk_lock);
2325 return ERR_PTR(ret);
2328 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2332 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2334 * The EXT_CSD sector count is in number of 512 byte
2337 size = card->ext_csd.sectors;
2340 * The CSD capacity field is in units of read_blkbits.
2341 * set_capacity takes units of 512 bytes.
2343 size = (typeof(sector_t))card->csd.capacity
2344 << (card->csd.read_blkbits - 9);
2347 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2348 MMC_BLK_DATA_AREA_MAIN);
2351 static int mmc_blk_alloc_part(struct mmc_card *card,
2352 struct mmc_blk_data *md,
2353 unsigned int part_type,
2356 const char *subname,
2360 struct mmc_blk_data *part_md;
2362 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2363 subname, area_type);
2364 if (IS_ERR(part_md))
2365 return PTR_ERR(part_md);
2366 part_md->part_type = part_type;
2367 list_add(&part_md->part, &md->part);
2369 string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
2370 cap_str, sizeof(cap_str));
2371 pr_info("%s: %s %s partition %u %s\n",
2372 part_md->disk->disk_name, mmc_card_id(card),
2373 mmc_card_name(card), part_md->part_type, cap_str);
2377 /* MMC Physical partitions consist of two boot partitions and
2378 * up to four general purpose partitions.
2379 * For each partition enabled in EXT_CSD a block device will be allocated
2380 * to provide access to the partition.
2383 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2387 if (!mmc_card_mmc(card))
2390 for (idx = 0; idx < card->nr_parts; idx++) {
2391 if (card->part[idx].size) {
2392 ret = mmc_blk_alloc_part(card, md,
2393 card->part[idx].part_cfg,
2394 card->part[idx].size >> 9,
2395 card->part[idx].force_ro,
2396 card->part[idx].name,
2397 card->part[idx].area_type);
2406 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2408 struct mmc_card *card;
2412 * Flush remaining requests and free queues. It
2413 * is freeing the queue that stops new requests
2414 * from being accepted.
2416 card = md->queue.card;
2417 mmc_cleanup_queue(&md->queue);
2418 if (md->flags & MMC_BLK_PACKED_CMD)
2419 mmc_packed_clean(&md->queue);
2420 if (md->disk->flags & GENHD_FL_UP) {
2421 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2422 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2423 card->ext_csd.boot_ro_lockable)
2424 device_remove_file(disk_to_dev(md->disk),
2425 &md->power_ro_lock);
2427 del_gendisk(md->disk);
2433 static void mmc_blk_remove_parts(struct mmc_card *card,
2434 struct mmc_blk_data *md)
2436 struct list_head *pos, *q;
2437 struct mmc_blk_data *part_md;
2439 list_for_each_safe(pos, q, &md->part) {
2440 part_md = list_entry(pos, struct mmc_blk_data, part);
2442 mmc_blk_remove_req(part_md);
2446 static int mmc_add_disk(struct mmc_blk_data *md)
2449 struct mmc_card *card = md->queue.card;
2452 md->force_ro.show = force_ro_show;
2453 md->force_ro.store = force_ro_store;
2454 sysfs_attr_init(&md->force_ro.attr);
2455 md->force_ro.attr.name = "force_ro";
2456 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2457 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2461 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2462 card->ext_csd.boot_ro_lockable) {
2465 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2468 mode = S_IRUGO | S_IWUSR;
2470 md->power_ro_lock.show = power_ro_lock_show;
2471 md->power_ro_lock.store = power_ro_lock_store;
2472 sysfs_attr_init(&md->power_ro_lock.attr);
2473 md->power_ro_lock.attr.mode = mode;
2474 md->power_ro_lock.attr.name =
2475 "ro_lock_until_next_power_on";
2476 ret = device_create_file(disk_to_dev(md->disk),
2477 &md->power_ro_lock);
2479 goto power_ro_lock_fail;
2484 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2486 del_gendisk(md->disk);
2491 #define CID_MANFID_SANDISK 0x2
2492 #define CID_MANFID_TOSHIBA 0x11
2493 #define CID_MANFID_MICRON 0x13
2494 #define CID_MANFID_SAMSUNG 0x15
2495 #define CID_MANFID_KINGSTON 0x70
2497 static const struct mmc_fixup blk_fixups[] =
2499 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2500 MMC_QUIRK_INAND_CMD38),
2501 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2502 MMC_QUIRK_INAND_CMD38),
2503 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2504 MMC_QUIRK_INAND_CMD38),
2505 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2506 MMC_QUIRK_INAND_CMD38),
2507 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2508 MMC_QUIRK_INAND_CMD38),
2511 * Some MMC cards experience performance degradation with CMD23
2512 * instead of CMD12-bounded multiblock transfers. For now we'll
2513 * blacklist what's bad...
2514 * - Certain Toshiba cards.
2516 * N.B. This doesn't affect SD cards.
2518 MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2519 MMC_QUIRK_BLK_NO_CMD23),
2520 MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2521 MMC_QUIRK_BLK_NO_CMD23),
2522 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2523 MMC_QUIRK_BLK_NO_CMD23),
2524 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2525 MMC_QUIRK_BLK_NO_CMD23),
2526 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2527 MMC_QUIRK_BLK_NO_CMD23),
2530 * Some Micron MMC cards need a longer data read timeout than
2533 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
2534 MMC_QUIRK_LONG_READ_TIME),
2537 * On these Samsung MoviNAND parts, performing secure erase or
2538 * secure trim can result in unrecoverable corruption due to a
2541 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2542 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2543 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2544 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2545 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2546 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2547 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2548 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2549 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2550 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2551 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2552 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2553 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2554 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2555 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2556 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2559 * On some Kingston eMMCs, performing trim can result in
2560 * unrecoverable data corruption occasionally due to a firmware bug.
2562 MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2563 MMC_QUIRK_TRIM_BROKEN),
2564 MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2565 MMC_QUIRK_TRIM_BROKEN),
2570 static int mmc_blk_probe(struct mmc_card *card)
2572 struct mmc_blk_data *md, *part_md;
2576 * Check that the card supports the command class(es) we need.
2578 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2581 mmc_fixup_device(card, blk_fixups);
2583 md = mmc_blk_alloc(card);
2587 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
2588 cap_str, sizeof(cap_str));
2589 pr_info("%s: %s %s %s %s\n",
2590 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2591 cap_str, md->read_only ? "(ro)" : "");
2593 if (mmc_blk_alloc_parts(card, md))
2596 dev_set_drvdata(&card->dev, md);
2598 if (mmc_add_disk(md))
2601 list_for_each_entry(part_md, &md->part, part) {
2602 if (mmc_add_disk(part_md))
2606 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2607 pm_runtime_use_autosuspend(&card->dev);
2610 * Don't enable runtime PM for SD-combo cards here. Leave that
2611 * decision to be taken during the SDIO init sequence instead.
2613 if (card->type != MMC_TYPE_SD_COMBO) {
2614 pm_runtime_set_active(&card->dev);
2615 pm_runtime_enable(&card->dev);
2621 mmc_blk_remove_parts(card, md);
2622 mmc_blk_remove_req(md);
2626 static void mmc_blk_remove(struct mmc_card *card)
2628 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2630 mmc_blk_remove_parts(card, md);
2631 pm_runtime_get_sync(&card->dev);
2632 mmc_claim_host(card->host);
2633 mmc_blk_part_switch(card, md);
2634 mmc_release_host(card->host);
2635 if (card->type != MMC_TYPE_SD_COMBO)
2636 pm_runtime_disable(&card->dev);
2637 pm_runtime_put_noidle(&card->dev);
2638 mmc_blk_remove_req(md);
2639 dev_set_drvdata(&card->dev, NULL);
2642 static int _mmc_blk_suspend(struct mmc_card *card)
2644 struct mmc_blk_data *part_md;
2645 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2648 mmc_queue_suspend(&md->queue);
2649 list_for_each_entry(part_md, &md->part, part) {
2650 mmc_queue_suspend(&part_md->queue);
2656 static void mmc_blk_shutdown(struct mmc_card *card)
2658 _mmc_blk_suspend(card);
2661 #ifdef CONFIG_PM_SLEEP
2662 static int mmc_blk_suspend(struct device *dev)
2664 struct mmc_card *card = mmc_dev_to_card(dev);
2666 return _mmc_blk_suspend(card);
2669 static int mmc_blk_resume(struct device *dev)
2671 struct mmc_blk_data *part_md;
2672 struct mmc_blk_data *md = dev_get_drvdata(dev);
2676 * Resume involves the card going into idle state,
2677 * so the current partition is always the main one.
2679 md->part_curr = md->part_type;
2680 mmc_queue_resume(&md->queue);
2681 list_for_each_entry(part_md, &md->part, part) {
2682 mmc_queue_resume(&part_md->queue);
2689 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2691 static struct mmc_driver mmc_driver = {
2694 .pm = &mmc_blk_pm_ops,
2696 .probe = mmc_blk_probe,
2697 .remove = mmc_blk_remove,
2698 .shutdown = mmc_blk_shutdown,
2701 static int __init mmc_blk_init(void)
2705 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2706 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2708 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
2710 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2714 res = mmc_register_driver(&mmc_driver);
2720 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2725 static void __exit mmc_blk_exit(void)
2727 mmc_unregister_driver(&mmc_driver);
2728 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2731 module_init(mmc_blk_init);
2732 module_exit(mmc_blk_exit);
2734 MODULE_LICENSE("GPL");
2735 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");