mirror of https://github.com/OpenIPC/firmware.git
--- linux-4.9.37/drivers/mmc/card/block.c  2017-07-12 16:42:41.000000000 +0300
+++ linux-4.9.y/drivers/mmc/card/block.c  2021-06-07 13:01:33.000000000 +0300
@@ -63,6 +63,8 @@
 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
 #define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+#define MMC_CMDQ_STOP_TIMEOUT_MS 100
+#define MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD 6 /* microseconds */
 
 #define mmc_req_rel_wr(req)     ((req->cmd_flags & REQ_FUA) && \
                                  (rq_data_dir(req) == WRITE))
@@ -103,6 +105,7 @@
 #define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */
 #define MMC_BLK_PACKED_CMD      (1 << 2)        /* MMC packed command support */
+#define MMC_BLK_CMD_QUEUE       (1 << 3)        /* MMC command queue support */
 
         unsigned int    usage;
         unsigned int    read_only;
@@ -519,21 +522,40 @@
 
         mrq.cmd = &cmd;
 
+        if (mmc_card_cmdq(card)) {
+                err = mmc_cmdq_halt_on_empty_queue(card->host);
+                if (err) {
+                        pr_err("%s: halt failed while doing %s err (%d)\n",
+                               mmc_hostname(card->host),
+                               __func__, err);
+                        return err;
+                }
+        }
+
+        if (mmc_card_doing_bkops(card)) {
+                err = mmc_stop_bkops(card);
+                if (err) {
+                        dev_err(mmc_dev(card->host),
+                                "%s: stop_bkops failed %d\n", __func__, err);
+                        goto cmd_rel_host;
+                }
+        }
+
         err = mmc_blk_part_switch(card, md);
         if (err)
-                return err;
+                goto cmd_rel_host;
 
         if (idata->ic.is_acmd) {
                 err = mmc_app_cmd(card->host, card);
                 if (err)
-                        return err;
+                        goto cmd_rel_host;
         }
 
         if (is_rpmb) {
                 err = mmc_set_blockcount(card, data.blocks,
                         idata->ic.write_flag & (1 << 31));
                 if (err)
-                        return err;
+                        goto cmd_rel_host;
         }
 
         if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
@@ -544,7 +566,7 @@
                 pr_err("%s: ioctl_do_sanitize() failed. err = %d",
                        __func__, err);
 
-                return err;
+                goto cmd_rel_host;
         }
 
         mmc_wait_for_req(card->host, &mrq);
@@ -552,12 +574,14 @@
         if (cmd.error) {
                 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                                                 __func__, cmd.error);
-                return cmd.error;
+                err = cmd.error;
+                goto cmd_rel_host;
         }
         if (data.error) {
                 dev_err(mmc_dev(card->host), "%s: data error %d\n",
                                                 __func__, data.error);
-                return data.error;
+                err = data.error;
+                goto cmd_rel_host;
         }
 
         /*
@@ -581,6 +605,13 @@
                                                         __func__, status, err);
         }
 
+cmd_rel_host:
+        if (mmc_card_cmdq(card)) {
+                if (mmc_cmdq_halt(card->host, false))
+                        pr_err("%s: %s: cmdq unhalt failed\n",
+                               mmc_hostname(card->host), __func__);
+        }
+
         return err;
 }
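
The reworked ioctl path above brackets a legacy (non-queued) command with a CMDQ halt and unhalt, and funnels every failure through the shared cmd_rel_host label so the queue is never left halted. A minimal standalone sketch of that single-exit idiom follows; halt(), unhalt() and do_step() are illustrative stand-ins, not functions from this patch.

/*
 * Illustrative sketch: single-exit cleanup around a halted queue.
 */
#include <stdio.h>

static int halt(void)    { puts("halt CMDQ");   return 0; }
static void unhalt(void) { puts("unhalt CMDQ"); }
static int do_step(int n) { return n == 2 ? -1 : 0; /* fail on step 2 */ }

static int issue_legacy_cmd(void)
{
        int err = halt();
        if (err)
                return err;             /* nothing to undo yet */

        for (int n = 1; n <= 3; n++) {
                err = do_step(n);
                if (err)
                        goto rel_host;  /* every later failure funnels here */
        }

rel_host:
        unhalt();                       /* runs on success and on error */
        return err;
}

int main(void) { return issue_legacy_cmd() ? 1 : 0; }
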
 
@@ -746,13 +777,64 @@
 #endif
 };
 
+static int mmc_blk_cmdq_switch(struct mmc_card *card,
+                               struct mmc_blk_data *md, bool enable)
+{
+        int ret = 0;
+        bool cmdq_mode = !!mmc_card_cmdq(card);
+        struct mmc_host *host = card->host;
+        struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+        if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE) ||
+            !card->ext_csd.cmdq_support ||
+            (enable && !(md->flags & MMC_BLK_CMD_QUEUE)) ||
+            (cmdq_mode == enable))
+                return 0;
+
+        if (enable) {
+                ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
+                if (ret) {
+                        pr_err("%s: failed (%d) to set block-size to %d\n",
+                               __func__, ret, MMC_CARD_CMDQ_BLK_SIZE);
+                        goto out;
+                }
+
+        } else {
+                if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) {
+                        ret = mmc_cmdq_halt(host, true);
+                        if (ret) {
+                                pr_err("%s: halt: failed: %d\n",
+                                       mmc_hostname(host), ret);
+                                goto out;
+                        }
+                }
+        }
+
+        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                         EXT_CSD_CMDQ, enable,
+                         card->ext_csd.generic_cmd6_time);
+        if (ret) {
+                pr_err("%s: cmdq mode %sable failed %d\n",
+                       md->disk->disk_name, enable ? "en" : "dis", ret);
+                goto out;
+        }
+
+        if (enable)
+                mmc_card_set_cmdq(card);
+        else
+                mmc_card_clr_cmdq(card);
+out:
+        return ret;
+}
+
 static inline int mmc_blk_part_switch(struct mmc_card *card,
                                       struct mmc_blk_data *md)
 {
         int ret;
         struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
 
-        if (main_md->part_curr == md->part_type)
+        if ((main_md->part_curr == md->part_type) &&
+            (card->part_curr == md->part_type))
                 return 0;
 
         if (mmc_card_mmc(card)) {
@@ -761,6 +843,13 @@
                 if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
                         mmc_retune_pause(card->host);
 
+                if (md->part_type) {
+                        /* disable CQ mode for non-user data partitions */
+                        ret = mmc_blk_cmdq_switch(card, md, false);
+                        if (ret)
+                                return ret;
+                }
+
                 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                 part_config |= md->part_type;
 
@@ -774,6 +863,7 @@
                 }
 
                 card->ext_csd.part_config = part_config;
+                card->part_curr = md->part_type;
 
         if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
                 mmc_retune_unpause(card->host);
@@ -2210,6 +2300,813 @@
                 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
 }
 
+/* prepare for non-data commands */
+static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
+                struct mmc_queue_req *mqrq, struct mmc_queue *mq)
+{
+        struct request *req = mqrq->req;
+        struct mmc_cmdq_req *cmdq_req = &mqrq->cmdq_req;
+
+        memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
+
+        cmdq_req->mrq.data = NULL;
+        cmdq_req->cmd_flags = req->cmd_flags;
+        cmdq_req->mrq.req = mqrq->req;
+        req->special = mqrq;
+        cmdq_req->cmdq_req_flags |= DCMD;
+        cmdq_req->mrq.cmdq_req = cmdq_req;
+
+        return &mqrq->cmdq_req;
+}
+
+#define IS_RT_CLASS_REQ(x)      \
+        (IOPRIO_PRIO_CLASS(req_get_ioprio(x)) == IOPRIO_CLASS_RT)
+
+static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep(
+                struct mmc_queue_req *mqrq, struct mmc_queue *mq)
+{
+        struct mmc_card *card = mq->card;
+        struct request *req = mqrq->req;
+        struct mmc_blk_data *md = mq->data;
+        bool do_rel_wr = mmc_req_rel_wr(req) && (md->flags & MMC_BLK_REL_WR);
+        bool do_data_tag;
+        bool read_dir = (rq_data_dir(req) == READ);
+        bool prio = IS_RT_CLASS_REQ(req);
+        struct mmc_cmdq_req *cmdq_rq = &mqrq->cmdq_req;
+
+        memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
+
+        cmdq_rq->tag = req->tag;
+        if (read_dir) {
+                cmdq_rq->cmdq_req_flags |= DIR;
+                cmdq_rq->data.flags = MMC_DATA_READ;
+        } else {
+                cmdq_rq->data.flags = MMC_DATA_WRITE;
+        }
+        if (prio)
+                cmdq_rq->cmdq_req_flags |= PRIO;
+
+        if (do_rel_wr)
+                cmdq_rq->cmdq_req_flags |= REL_WR;
+
+        cmdq_rq->data.blocks = blk_rq_sectors(req);
+        cmdq_rq->blk_addr = blk_rq_pos(req);
+        cmdq_rq->data.blksz = MMC_CARD_CMDQ_BLK_SIZE;
+
+        mmc_set_data_timeout(&cmdq_rq->data, card);
+
+        do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+                (req->cmd_flags & REQ_META) &&
+                (rq_data_dir(req) == WRITE) &&
+                ((cmdq_rq->data.blocks * cmdq_rq->data.blksz) >=
+                 card->ext_csd.data_tag_unit_size);
+        if (do_data_tag)
+                cmdq_rq->cmdq_req_flags |= DAT_TAG;
+        cmdq_rq->data.sg = mqrq->sg;
+        cmdq_rq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+        /*
+         * Adjust the sg list so it is the same size as the
+         * request.
+         */
+        if (cmdq_rq->data.blocks > card->host->max_blk_count)
+                cmdq_rq->data.blocks = card->host->max_blk_count;
+
+        if (cmdq_rq->data.blocks != blk_rq_sectors(req)) {
+                int i, data_size = cmdq_rq->data.blocks << 9;
+                struct scatterlist *sg;
+
+                for_each_sg(cmdq_rq->data.sg, sg, cmdq_rq->data.sg_len, i) {
+                        data_size -= sg->length;
+                        if (data_size <= 0) {
+                                sg->length += data_size;
+                                i++;
+                                break;
+                        }
+                }
+                cmdq_rq->data.sg_len = i;
+        }
+
+        mqrq->cmdq_req.cmd_flags = req->cmd_flags;
+        mqrq->cmdq_req.mrq.req = mqrq->req;
+        mqrq->cmdq_req.mrq.cmdq_req = &mqrq->cmdq_req;
+        mqrq->cmdq_req.mrq.data = &mqrq->cmdq_req.data;
+        mqrq->req->special = mqrq;
+
+        pr_debug("%s: %s: mrq: 0x%p req: 0x%p mqrq: 0x%p bytes to xf: %d mmc_cmdq_req: 0x%p card-addr: 0x%08x dir(r-1/w-0): %d\n",
+                 mmc_hostname(card->host), __func__, &mqrq->cmdq_req.mrq,
+                 mqrq->req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz),
+                 cmdq_rq, cmdq_rq->blk_addr,
+                 (cmdq_rq->cmdq_req_flags & DIR) ? 1 : 0);
+
+        return &mqrq->cmdq_req;
+}
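
mmc_blk_cmdq_rw_prep() above clamps data.blocks to the host's max_blk_count and then walks the scatterlist, shortening the final segment and dropping the rest so the mapped length matches the clamped byte count. A standalone sketch of the same trim, with a plain array standing in for struct scatterlist:

/* Illustrative sketch (not from the kernel): trim a segment list to a
 * clamped byte budget, mirroring the for_each_sg() loop above. */
#include <stdio.h>

int main(void)
{
        unsigned int seg[] = { 4096, 4096, 4096 };      /* segment lengths */
        unsigned int nsegs = 3;
        int data_size = 10240;  /* clamped transfer: 20 blocks << 9 */
        unsigned int i;

        for (i = 0; i < nsegs; i++) {
                data_size -= seg[i];
                if (data_size <= 0) {
                        seg[i] += data_size;    /* shorten the last segment */
                        i++;
                        break;
                }
        }
        nsegs = i;      /* drop any segments past the budget */

        for (i = 0; i < nsegs; i++)
                printf("seg[%u] = %u\n", i, seg[i]);
        /* prints 4096, 4096, 2048 -- total 10240 bytes */
        return 0;
}
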
+
+/*
+ * Complete reqs from block layer softirq context
+ * Invoked in irq context
+ */
+void mmc_blk_cmdq_req_done(struct mmc_request *mrq)
+{
+        struct request *req = mrq->req;
+
+        blk_complete_request(req);
+}
+EXPORT_SYMBOL(mmc_blk_cmdq_req_done);
+
+static int mmc_blk_cmdq_start_req(struct mmc_host *host,
+                                  struct mmc_cmdq_req *cmdq_req)
+{
+        struct mmc_request *mrq = &cmdq_req->mrq;
+
+        mrq->done = mmc_blk_cmdq_req_done;
+        return mmc_cmdq_start_req(host, cmdq_req);
+}
+
+static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+        struct mmc_queue_req *active_mqrq;
+        struct mmc_card *card = mq->card;
+        struct mmc_host *host = card->host;
+        struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+        struct mmc_cmdq_req *mc_rq;
+        u8 active_small_sector_read = 0;
+        int ret = 0;
+
+        BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
+        BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.data_active_reqs));
+        BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+        active_mqrq = &mq->mqrq_cmdq[req->tag];
+        active_mqrq->req = req;
+
+        mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq);
+
+        if (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) {
+                unsigned int sectors = blk_rq_sectors(req);
+
+                if (((sectors > 0) && (sectors < 8))
+                    && (rq_data_dir(req) == READ))
+                        active_small_sector_read = 1;
+        }
+        ret = mmc_blk_cmdq_start_req(card->host, mc_rq);
+        if (!ret && active_small_sector_read)
+                host->cmdq_ctx.active_small_sector_read_reqs++;
+        /*
+         * When in SVS2 on low load scenario and there are lots of requests
+         * queued for CMDQ we need to wait till the queue is empty to scale
+         * back up to Nominal even if there is a sudden increase in load.
+         * This impacts performance where lots of IO get executed in SVS2
+         * frequency since the queue is full. As SVS2 is a low load use case
+         * we can serialize the requests and not queue them in parallel
+         * without impacting other use cases. This makes sure the queue gets
+         * empty faster and we will be able to scale up to Nominal frequency
+         * when needed.
+         */
+        if (!ret)
+                wait_event_interruptible(ctx->queue_empty_wq,
+                                         (!ctx->active_reqs));
+
+        return ret;
+}
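
The issue path above claims a queue slot with test_and_set_bit() on the active_reqs and data_active_reqs bitmaps; BUG_ON() fires if the tag is already busy, and the completion path clears the bits again. A userspace approximation of that bookkeeping using C11 atomics (claim_tag/release_tag are illustrative names, not kernel API):

/* Illustrative sketch: atomic test-and-set tag bitmap. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong active_reqs;

static int claim_tag(int tag)
{
        unsigned long bit = 1UL << tag;
        /* nonzero if the bit was already set: tag already in use */
        return (atomic_fetch_or(&active_reqs, bit) & bit) != 0;
}

static void release_tag(int tag)
{
        atomic_fetch_and(&active_reqs, ~(1UL << tag));
}

int main(void)
{
        assert(!claim_tag(3));  /* first claim succeeds */
        assert(claim_tag(3));   /* double-claim is caught, like BUG_ON */
        release_tag(3);
        assert(!claim_tag(3));  /* reusable after release */
        puts("tag bookkeeping ok");
        return 0;
}
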
+
+/*
+ * Issues a flush (dcmd) request
+ */
+int mmc_blk_cmdq_issue_flush_rq(struct mmc_queue *mq, struct request *req)
+{
+        int err;
+        struct mmc_queue_req *active_mqrq;
+        struct mmc_card *card = mq->card;
+        struct mmc_host *host;
+        struct mmc_cmdq_req *cmdq_req;
+        struct mmc_cmdq_context_info *ctx_info;
+
+        BUG_ON(!card);
+        host = card->host;
+        BUG_ON(!host);
+        BUG_ON(req->tag > card->ext_csd.cmdq_depth);
+        BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+        ctx_info = &host->cmdq_ctx;
+
+        set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+
+        active_mqrq = &mq->mqrq_cmdq[req->tag];
+        active_mqrq->req = req;
+
+        cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
+        cmdq_req->cmdq_req_flags |= QBR;
+        cmdq_req->mrq.cmd = &cmdq_req->cmd;
+        cmdq_req->tag = req->tag;
+
+        err = mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);
+        if (err) {
+                pr_err("%s: failed (%d) preparing flush req\n",
+                       mmc_hostname(host), err);
+                return err;
+        }
+        err = mmc_blk_cmdq_start_req(card->host, cmdq_req);
+        return err;
+}
+EXPORT_SYMBOL(mmc_blk_cmdq_issue_flush_rq);
+
+static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card,
+                                           struct mmc_blk_data *md)
+{
+        struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
+        struct mmc_host *host = card->host;
+        struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+        u8 part_config = card->ext_csd.part_config;
+
+        if ((main_md->part_curr == md->part_type) &&
+            (card->part_curr == md->part_type))
+                return 0;
+
+        WARN_ON(!((card->host->caps2 & MMC_CAP2_CMD_QUEUE) &&
+                  card->ext_csd.cmdq_support &&
+                  (md->flags & MMC_BLK_CMD_QUEUE)));
+
+        if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state))
+                WARN_ON(mmc_cmdq_halt(host, true));
+
+        /* disable CQ mode in card */
+        if (mmc_card_cmdq(card)) {
+                WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                                   EXT_CSD_CMDQ, 0,
+                                   card->ext_csd.generic_cmd6_time));
+                mmc_card_clr_cmdq(card);
+        }
+
+        part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+        part_config |= md->part_type;
+
+        WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                           EXT_CSD_PART_CONFIG, part_config,
+                           card->ext_csd.part_time));
+
+        card->ext_csd.part_config = part_config;
+        card->part_curr = md->part_type;
+
+        main_md->part_curr = md->part_type;
+
+        WARN_ON(mmc_blk_cmdq_switch(card, md, true));
+        WARN_ON(mmc_cmdq_halt(host, false));
+
+        return 0;
+}
+
+static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
+                                                          struct request *req)
+{
+        struct mmc_blk_data *md = mq->data;
+        struct mmc_card *card = md->queue.card;
+        struct mmc_host *host = card->host;
+        struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+        struct mmc_cmdq_req *cmdq_req;
+        struct mmc_queue_req *active_mqrq;
+
+        BUG_ON(req->tag > card->ext_csd.cmdq_depth);
+        BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+        set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+
+        active_mqrq = &mq->mqrq_cmdq[req->tag];
+        active_mqrq->req = req;
+
+        cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
+        cmdq_req->cmdq_req_flags |= QBR;
+        cmdq_req->mrq.cmd = &cmdq_req->cmd;
+        cmdq_req->tag = req->tag;
+        return cmdq_req;
+}
+
+static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq,
+                                         struct request *req)
+{
+        struct mmc_blk_data *md = mq->data;
+        struct mmc_card *card = md->queue.card;
+        struct mmc_cmdq_req *cmdq_req = NULL;
+        unsigned int from, nr, arg;
+        int err = 0;
+
+        if (!mmc_can_erase(card)) {
+                err = -EOPNOTSUPP;
+                blk_end_request(req, err, blk_rq_bytes(req));
+                goto out;
+        }
+
+        from = blk_rq_pos(req);
+        nr = blk_rq_sectors(req);
+
+        if (mmc_can_discard(card))
+                arg = MMC_DISCARD_ARG;
+        else if (mmc_can_trim(card))
+                arg = MMC_TRIM_ARG;
+        else
+                arg = MMC_ERASE_ARG;
+
+        cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
+        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+                __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+                                       EXT_CSD_CMD_SET_NORMAL,
+                                       INAND_CMD38_ARG_EXT_CSD,
+                                       arg == MMC_TRIM_ARG ?
+                                       INAND_CMD38_ARG_TRIM :
+                                       INAND_CMD38_ARG_ERASE,
+                                       0, true, false);
+                err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+                if (err)
+                        goto clear_dcmd;
+        }
+        err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
+clear_dcmd:
+        blk_complete_request(req);
+out:
+        return err ? 1 : 0;
+}
+
+static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq,
+                                            struct request *req)
+{
+        struct mmc_blk_data *md = mq->data;
+        struct mmc_card *card = md->queue.card;
+        struct mmc_cmdq_req *cmdq_req = NULL;
+        unsigned int from, nr, arg;
+        int err = 0;
+
+        if (!(mmc_can_secure_erase_trim(card))) {
+                err = -EOPNOTSUPP;
+                blk_end_request(req, err, blk_rq_bytes(req));
+                goto out;
+        }
+
+        from = blk_rq_pos(req);
+        nr = blk_rq_sectors(req);
+
+        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+                arg = MMC_SECURE_TRIM1_ARG;
+        else
+                arg = MMC_SECURE_ERASE_ARG;
+
+        cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
+        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+                __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+                                       EXT_CSD_CMD_SET_NORMAL,
+                                       INAND_CMD38_ARG_EXT_CSD,
+                                       arg == MMC_SECURE_TRIM1_ARG ?
+                                       INAND_CMD38_ARG_SECTRIM1 :
+                                       INAND_CMD38_ARG_SECERASE,
+                                       0, true, false);
+                err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+                if (err)
+                        goto clear_dcmd;
+        }
+
+        err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
+        if (err)
+                goto clear_dcmd;
+
+        if (arg == MMC_SECURE_TRIM1_ARG) {
+                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+                        __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+                                               EXT_CSD_CMD_SET_NORMAL,
+                                               INAND_CMD38_ARG_EXT_CSD,
+                                               INAND_CMD38_ARG_SECTRIM2,
+                                               0, true, false);
+                        err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+                        if (err)
+                                goto clear_dcmd;
+                }
+
+                err = mmc_cmdq_erase(cmdq_req, card, from, nr,
+                                     MMC_SECURE_TRIM2_ARG);
+        }
+clear_dcmd:
+        blk_complete_request(req);
+out:
+        return err ? 1 : 0;
+}
+
+static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+        int ret;
+        struct mmc_blk_data *md = mq->data;
+        struct mmc_card *card = md->queue.card;
+
+        mmc_get_card(card);
+
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+        if (mmc_bus_needs_resume(card->host))
+                mmc_resume_bus(card->host);
+#endif
+        if (!card->host->cmdq_ctx.active_reqs && mmc_card_doing_bkops(card)) {
+                ret = mmc_cmdq_halt(card->host, true);
+                if (ret)
+                        goto out;
+                ret = mmc_stop_bkops(card);
+                if (ret) {
+                        pr_err("%s: %s: mmc_stop_bkops failed %d\n",
+                               md->disk->disk_name, __func__, ret);
+                        goto out;
+                }
+                ret = mmc_cmdq_halt(card->host, false);
+                if (ret)
+                        goto out;
+        }
+
+        ret = mmc_blk_cmdq_part_switch(card, md);
+        if (ret) {
+                pr_err("%s: %s: partition switch failed %d\n",
+                       md->disk->disk_name, __func__, ret);
+                goto out;
+        }
+
+        if (req) {
+                struct mmc_host *host = card->host;
+                struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+                if ((req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD) &&
+                    (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
+                    ctx->active_small_sector_read_reqs) {
+                        ret = wait_event_interruptible(ctx->queue_empty_wq,
+                                                       !ctx->active_reqs);
+                        if (ret) {
+                                pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n",
+                                       mmc_hostname(host),
+                                       __func__, ret);
+                                BUG_ON(1);
+                        }
+                        /* clear the counter now */
+                        ctx->active_small_sector_read_reqs = 0;
+                        /*
+                         * If there were small sector (less than 8 sectors)
+                         * read operations in progress then we have to wait
+                         * for the outstanding requests to finish and should
+                         * also have at least a 6 microsecond delay before
+                         * queuing the DCMD request.
+                         */
+                        udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD);
+                }
+
+                if (req_op(req) == REQ_OP_DISCARD ||
+                    req_op(req) == REQ_OP_SECURE_ERASE) {
+                        if (req_op(req) == REQ_OP_SECURE_ERASE &&
+                            !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+                                ret = mmc_blk_cmdq_issue_secdiscard_rq(mq, req);
+                        else
+                                ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
+                } else if (req_op(req) == REQ_OP_FLUSH) {
+                        ret = mmc_blk_cmdq_issue_flush_rq(mq, req);
+                } else {
+                        ret = mmc_blk_cmdq_issue_rw_rq(mq, req);
+                }
+        }
+
+        return ret;
+
+out:
+        if (req)
+                blk_end_request_all(req, ret);
+        mmc_put_card(card);
+
+        return ret;
+}
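
mmc_blk_cmdq_issue_rq() above routes each fetched request by req_op(): discard and secure erase become DCMD erase commands, flush becomes a DCMD flush, and everything else goes down the queued read/write path. A compact standalone sketch of that dispatch (the op values and handlers are stand-ins, not this patch's symbols):

/* Illustrative sketch: routing requests by operation. */
#include <stdio.h>

enum req_op { OP_READWRITE, OP_FLUSH, OP_DISCARD, OP_SECURE_ERASE };

static int issue_secdiscard(void) { puts("secure discard (DCMD)"); return 0; }
static int issue_discard(void)    { puts("discard (DCMD)");        return 0; }
static int issue_flush(void)      { puts("flush (DCMD)");          return 0; }
static int issue_rw(void)         { puts("queued read/write");     return 0; }

static int issue(enum req_op op, int sec_trim_broken)
{
        switch (op) {
        case OP_SECURE_ERASE:
                if (!sec_trim_broken)
                        return issue_secdiscard();
                /* fall through: degrade to a plain discard */
        case OP_DISCARD:
                return issue_discard();
        case OP_FLUSH:
                return issue_flush();
        default:
                return issue_rw();
        }
}

int main(void)
{
        issue(OP_SECURE_ERASE, 0);
        issue(OP_DISCARD, 0);
        issue(OP_FLUSH, 0);
        issue(OP_READWRITE, 0);
        return 0;
}
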
+
+static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all)
+{
+        int err = 0;
+
+        if (mmc_cmdq_halt(host, true)) {
+                pr_err("%s: halt failed\n", mmc_hostname(host));
+                goto reset;
+        }
+
+        if (clear_all)
+                mmc_cmdq_discard_queue(host, 0);
+reset:
+        host->cmdq_ops->disable(host, true);
+        err = mmc_cmdq_hw_reset(host);
+        if (err && err != -EOPNOTSUPP) {
+                pr_err("%s: failed to cmdq_hw_reset err = %d\n",
+                       mmc_hostname(host), err);
+                host->cmdq_ops->enable(host);
+                mmc_cmdq_halt(host, false);
+                goto out;
+        }
+        /*
+         * A CMDQ HW reset already leaves the CQE in the unhalted
+         * state; reflect the same in the software state of cmdq_ctx.
+         */
+        mmc_host_clr_halt(host);
+out:
+        return;
+}
+
+/**
+ * is_cmdq_dcmd_req - Checks if tag belongs to DCMD request.
+ * @q: request_queue pointer.
+ * @tag: tag number of request to check.
+ *
+ * This function checks if the request with tag number "tag"
+ * is a DCMD request or not based on cmdq_req_flags set.
+ *
+ * returns 1 if DCMD req, 0 if not, -ENOENT if the tag cannot
+ * be resolved to a request.
+ */
+static int is_cmdq_dcmd_req(struct request_queue *q, int tag)
+{
+        struct request *req;
+        struct mmc_queue_req *mq_rq;
+        struct mmc_cmdq_req *cmdq_req;
+
+        req = blk_queue_find_tag(q, tag);
+        if (WARN_ON(!req))
+                goto out;
+        mq_rq = req->special;
+        if (WARN_ON(!mq_rq))
+                goto out;
+        cmdq_req = &(mq_rq->cmdq_req);
+        return !!(cmdq_req->cmdq_req_flags & DCMD);
+out:
+        return -ENOENT;
+}
+
+/**
+ * mmc_blk_cmdq_reset_all - Reset everything for CMDQ block request.
+ * @host: mmc_host pointer.
+ * @err: error for which reset is performed.
+ *
+ * This function implements reset_all functionality for
+ * cmdq. It resets the controller, power cycles the card,
+ * and invalidates all busy tags (requeuing all requests
+ * back to the elevator).
+ */
+static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)
+{
+        struct mmc_request *mrq = host->err_mrq;
+        struct mmc_card *card = host->card;
+        struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+        struct request_queue *q;
+        int itag = 0;
+        int ret = 0;
+
+        if (WARN_ON(!mrq))
+                return;
+
+        q = mrq->req->q;
+        WARN_ON(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+        pr_debug("%s: %s: active_reqs = %lu\n",
+                 mmc_hostname(host), __func__,
+                 ctx_info->active_reqs);
+
+        mmc_blk_cmdq_reset(host, false);
+
+        for_each_set_bit(itag, &ctx_info->active_reqs,
+                         host->num_cq_slots) {
+                ret = is_cmdq_dcmd_req(q, itag);
+                if (WARN_ON(ret == -ENOENT))
+                        continue;
+                if (!ret) {
+                        WARN_ON(!test_and_clear_bit(itag,
+                                &ctx_info->data_active_reqs));
+                        mmc_cmdq_post_req(host, itag, err);
+                } else {
+                        clear_bit(CMDQ_STATE_DCMD_ACTIVE,
+                                  &ctx_info->curr_state);
+                }
+                WARN_ON(!test_and_clear_bit(itag,
+                        &ctx_info->active_reqs));
+                mmc_put_card(card);
+        }
+
+        spin_lock_irq(q->queue_lock);
+        blk_queue_invalidate_tags(q);
+        spin_unlock_irq(q->queue_lock);
+}
+
+static void mmc_blk_cmdq_shutdown(struct mmc_queue *mq)
+{
+        int err;
+        struct mmc_card *card = mq->card;
+        struct mmc_host *host = card->host;
+
+        mmc_get_card(card);
+        err = mmc_cmdq_halt(host, true);
+        if (err) {
+                pr_err("%s: halt: failed: %d\n", __func__, err);
+                goto out;
+        }
+
+        /* disable CQ mode in card */
+        if (mmc_card_cmdq(card)) {
+                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                                 EXT_CSD_CMDQ, 0,
+                                 card->ext_csd.generic_cmd6_time);
+                if (err) {
+                        pr_err("%s: failed to switch card to legacy mode: %d\n",
+                               __func__, err);
+                        goto out;
+                }
+                mmc_card_clr_cmdq(card);
+        }
+        host->cmdq_ops->disable(host, false);
+        host->card->cmdq_init = false;
+out:
+        mmc_put_card(card);
+}
+
+static enum blk_eh_timer_return mmc_blk_cmdq_req_timed_out(struct request *req)
+{
+        struct mmc_queue *mq = req->q->queuedata;
+        struct mmc_host *host = mq->card->host;
+        struct mmc_queue_req *mq_rq = req->special;
+        struct mmc_request *mrq;
+        struct mmc_cmdq_req *cmdq_req;
+        struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+
+        BUG_ON(!host);
+
+        /*
+         * The mmc_queue_req will be present only if the request has
+         * been issued to the LLD. The request could have been fetched
+         * from the block layer queue but still be waiting to be issued
+         * (e.g. clock scaling is waiting for an empty cmdq queue).
+         * Reset the timer in such cases to give the LLD more time.
+         */
+        if (!mq_rq) {
+                pr_warn("%s: restart timer for tag: %d\n", __func__, req->tag);
+                return BLK_EH_RESET_TIMER;
+        }
+
+        mrq = &mq_rq->cmdq_req.mrq;
+        cmdq_req = &mq_rq->cmdq_req;
+
+        BUG_ON(!mrq || !cmdq_req);
+
+        if (cmdq_req->cmdq_req_flags & DCMD)
+                mrq->cmd->error = -ETIMEDOUT;
+        else
+                mrq->data->error = -ETIMEDOUT;
+
+        if (mrq->cmd && mrq->cmd->error) {
+                if (!(req_op(req) == REQ_OP_FLUSH)) {
+                        /*
+                         * Notify completion for non-flush commands like
+                         * discard that wait for DCMD finish.
+                         */
+                        set_bit(CMDQ_STATE_REQ_TIMED_OUT,
+                                &ctx_info->curr_state);
+                        complete(&mrq->completion);
+                        return BLK_EH_NOT_HANDLED;
+                }
+        }
+
+        if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state) ||
+            test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state))
+                return BLK_EH_NOT_HANDLED;
+
+        set_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
+        return BLK_EH_HANDLED;
+}
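
The timeout handler above gives the block layer one of three verdicts: BLK_EH_RESET_TIMER when the request has not yet reached the LLD, BLK_EH_NOT_HANDLED when another context (a DCMD waiter or the error work) will complete the request, and BLK_EH_HANDLED when the block layer may complete it immediately. A condensed userspace model of that decision (the enum values are local stand-ins for the kernel's blk_eh_timer_return):

/* Illustrative sketch: the three timeout verdicts. */
#include <stdio.h>

enum eh_ret { EH_HANDLED, EH_NOT_HANDLED, EH_RESET_TIMER };

static enum eh_ret timed_out(int issued_to_lld, int completion_waited_on,
                             int err_work_already_pending)
{
        if (!issued_to_lld)
                return EH_RESET_TIMER;    /* not with the LLD yet: re-arm */
        if (completion_waited_on)
                return EH_NOT_HANDLED;    /* waiter (e.g. discard) finishes it */
        if (err_work_already_pending)
                return EH_NOT_HANDLED;    /* error work owns completion */
        return EH_HANDLED;                /* block layer completes it now */
}

int main(void)
{
        printf("%d %d %d %d\n",
               timed_out(0, 0, 0),        /* RESET_TIMER */
               timed_out(1, 1, 0),        /* NOT_HANDLED */
               timed_out(1, 0, 1),        /* NOT_HANDLED */
               timed_out(1, 0, 0));       /* HANDLED */
        return 0;
}
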
+
+/*
+ * mmc_blk_cmdq_err: error handling for cmdq error requests.
+ * Should be called in the context of the errored-out request,
+ * with claim_host and rpm acquired.
+ * This may be called with the CQ engine halted. Make sure to
+ * unhalt it after error recovery.
+ *
+ * TODO: Currently the cmdq error handler does reset_all in case
+ * of any error. Need to optimize error handling.
+ */
+static void mmc_blk_cmdq_err(struct mmc_queue *mq)
+{
+        struct mmc_host *host = mq->card->host;
+        struct mmc_request *mrq = host->err_mrq;
+        struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+        struct request_queue *q;
+        int err;
+
+        host->cmdq_ops->dumpstate(host);
+
+        if (WARN_ON(!mrq))
+                return;
+
+        q = mrq->req->q;
+        err = mmc_cmdq_halt(host, true);
+        if (err) {
+                pr_err("halt: failed: %d\n", err);
+                goto reset;
+        }
+
+        /* RED error - Fatal: requires reset */
+        if (mrq->cmdq_req->resp_err) {
+                err = mrq->cmdq_req->resp_err;
+                pr_crit("%s: Response error detected: Device in bad state\n",
+                        mmc_hostname(host));
+                goto reset;
+        }
+
+        /*
+         * In case of a software request time-out, we schedule err work
+         * only for the first errored-out request and handle all other
+         * requests in flight here.
+         */
+        if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state)) {
+                err = -ETIMEDOUT;
+        } else if (mrq->data && mrq->data->error) {
+                err = mrq->data->error;
+        } else if (mrq->cmd && mrq->cmd->error) {
+                /* DCMD commands */
+                err = mrq->cmd->error;
+        }
+
+reset:
+        mmc_blk_cmdq_reset_all(host, err);
+        if (mrq->cmdq_req->resp_err)
+                mrq->cmdq_req->resp_err = false;
+        mmc_cmdq_halt(host, false);
+
+        host->err_mrq = NULL;
+        clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
+        WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+        wake_up(&ctx_info->wait);
+}
+
+/* invoked by block layer in softirq context */
+void mmc_blk_cmdq_complete_rq(struct request *rq)
+{
+        struct mmc_queue_req *mq_rq = rq->special;
+        struct mmc_request *mrq = &mq_rq->cmdq_req.mrq;
+        struct mmc_host *host = mrq->host;
+        struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+        struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
+        struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
+        int err = 0;
+        bool is_dcmd = false;
+
+        if (mrq->cmd && mrq->cmd->error)
+                err = mrq->cmd->error;
+        else if (mrq->data && mrq->data->error)
+                err = mrq->data->error;
+
+        if (err || cmdq_req->resp_err) {
+                pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n",
+                       mmc_hostname(mrq->host), __func__, err,
+                       cmdq_req->resp_err);
+                if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+                        pr_err("%s: CQ in error state, ending current req: %d\n",
+                               __func__, err);
+                } else {
+                        set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state);
+                        BUG_ON(host->err_mrq != NULL);
+                        host->err_mrq = mrq;
+                        schedule_work(&mq->cmdq_err_work);
+                }
+                goto out;
+        }
+        /*
+         * In case of error, CMDQ is expected to be in a halted or
+         * disabled state, so it cannot receive completions for
+         * other requests.
+         */
+        BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+        /* clear pending request */
+        BUG_ON(!test_and_clear_bit(cmdq_req->tag,
+                                   &ctx_info->active_reqs));
+        if (cmdq_req->cmdq_req_flags & DCMD)
+                is_dcmd = true;
+        else
+                BUG_ON(!test_and_clear_bit(cmdq_req->tag,
+                                           &ctx_info->data_active_reqs));
+        if (!is_dcmd)
+                mmc_cmdq_post_req(host, cmdq_req->tag, err);
+        if (cmdq_req->cmdq_req_flags & DCMD) {
+                clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+                blk_end_request_all(rq, err);
+                goto out;
+        }
+
+        blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
+
+out:
+        if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+                wake_up(&ctx_info->wait);
+                mmc_put_card(host->card);
+        }
+
+        if (!ctx_info->active_reqs)
+                wake_up_interruptible(&host->cmdq_ctx.queue_empty_wq);
+
+        if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
+                complete(&mq->cmdq_shutdown_complete);
+
+        return;
+}
+
 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                                               struct device *parent,
                                               sector_t size,
@@ -2262,7 +3159,7 @@
         INIT_LIST_HEAD(&md->part);
         md->usage = 1;
 
-        ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+        ret = mmc_init_queue(&md->queue, card, &md->lock, subname, area_type);
         if (ret)
                 goto err_putdisk;
 
@@ -2318,7 +3215,16 @@
                 blk_queue_write_cache(md->queue.queue, true, true);
         }
 
-        if (mmc_card_mmc(card) &&
+        if (card->cmdq_init) {
+                md->flags |= MMC_BLK_CMD_QUEUE;
+                md->queue.cmdq_complete_fn = mmc_blk_cmdq_complete_rq;
+                md->queue.cmdq_issue_fn = mmc_blk_cmdq_issue_rq;
+                md->queue.cmdq_error_fn = mmc_blk_cmdq_err;
+                md->queue.cmdq_req_timed_out = mmc_blk_cmdq_req_timed_out;
+                md->queue.cmdq_shutdown = mmc_blk_cmdq_shutdown;
+        }
+
+        if (mmc_card_mmc(card) && !card->cmdq_init &&
             (area_type == MMC_BLK_DATA_AREA_MAIN) &&
             (md->flags & MMC_BLK_CMD23) &&
             card->ext_csd.packed_event_en) {
@@ -2431,6 +3337,8 @@
         mmc_cleanup_queue(&md->queue);
         if (md->flags & MMC_BLK_PACKED_CMD)
                 mmc_packed_clean(&md->queue);
+        if (md->flags & MMC_BLK_CMD_QUEUE)
+                mmc_cmdq_clean(&md->queue, card);
         if (md->disk->flags & GENHD_FL_UP) {
                 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
                 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2648,23 +3556,36 @@
         dev_set_drvdata(&card->dev, NULL);
 }
 
-static int _mmc_blk_suspend(struct mmc_card *card)
+static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
 {
         struct mmc_blk_data *part_md;
         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+        int rc = 0;
 
         if (md) {
-                mmc_queue_suspend(&md->queue);
+                rc = mmc_queue_suspend(&md->queue, wait);
+                if (rc)
+                        goto out;
                 list_for_each_entry(part_md, &md->part, part) {
-                        mmc_queue_suspend(&part_md->queue);
+                        rc = mmc_queue_suspend(&part_md->queue, wait);
+                        if (rc)
+                                goto out_resume;
                 }
         }
-        return 0;
+        goto out;
+
+ out_resume:
+        mmc_queue_resume(&md->queue);
+        list_for_each_entry(part_md, &md->part, part) {
+                mmc_queue_resume(&part_md->queue);
+        }
+ out:
+        return rc;
 }
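
_mmc_blk_suspend() above now suspends the main queue first and then each partition queue, and rolls the set back to the resumed state if any later suspend fails. The same suspend-with-rollback shape as a standalone sketch (suspend_q/resume_q are stand-ins, not kernel API):

/* Illustrative sketch: suspend a list of queues with rollback on failure. */
#include <stdio.h>

static int suspend_q(int i) { printf("suspend q%d\n", i); return i == 2; }
static void resume_q(int i) { printf("resume q%d\n", i); }

static int suspend_all(int n)
{
        int i, rc = 0;

        for (i = 0; i < n; i++) {
                rc = suspend_q(i);      /* fails on q2 in this demo */
                if (rc)
                        goto rollback;
        }
        return 0;

rollback:
        while (i-- > 0)
                resume_q(i);            /* undo only what was suspended */
        return rc;
}

int main(void) { return suspend_all(4) ? 1 : 0; }
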
 
 static void mmc_blk_shutdown(struct mmc_card *card)
 {
-        _mmc_blk_suspend(card);
+        _mmc_blk_suspend(card, 1);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -2672,7 +3593,7 @@
 {
         struct mmc_card *card = mmc_dev_to_card(dev);
 
-        return _mmc_blk_suspend(card);
+        return _mmc_blk_suspend(card, 0);
 }
 
 static int mmc_blk_resume(struct device *dev)