author	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 17:34:32 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 17:34:32 -0700
commit	be580e7522eecfcf31c70abdf6fa0ae77b2e293b (patch)
tree	1137d880a002ef342f9b1ab77331144c9ed956cf /drivers/mmc/core
parent	8d65b08debc7e62b2c6032d7fe7389d895b92cbc (diff)
parent	a627f025eb0534052ff451427c16750b3530634c (diff)
Merge tag 'mmc-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
Pull MMC updates from Ulf Hansson:
"MMC core:
- Continue to re-factor code to prepare for eMMC CMDQ and blkmq support
- Introduce queue semantics to prepare for eMMC CMDQ and blkmq support
- Add helper functions to manage temporary enable/disable of eMMC CMDQ
- Improve wait-busy detection for SDIO
MMC host:
- cavium: Add driver to support Cavium controllers
- cavium: Extend Cavium driver to support Octeon and ThunderX SOCs
- bcm2835: Add new driver for Broadcom BCM2835 controller
- sdhci-xenon: Add driver to support Marvell Xenon SDHCI controller
- sdhci-tegra: Add support for the Tegra186 variant
- sdhci-of-esdhc: Support for UHS-I SD cards
- sdhci-of-esdhc: Support for eMMC HS200 cards
- sdhci-cadence: Add eMMC HS400 enhanced strobe support
- sdhci-esdhc-imx: Reset tuning circuit when needed
- sdhci-pci: Modernize and clean-up some PM related code
- sdhci-pci: Avoid re-tuning at runtime PM for some Intel devices
- sdhci-pci|acpi: Use aggressive PM for some Intel BYT controllers
- sdhci: Re-factoring and modernizations
- sdhci: Optimize delay loops
- sdhci: Improve register dump print format
- sdhci: Add support for the Command Queue Engine
- meson-gx: Various improvements and clean-ups
- meson-gx: Add support for CMD23
- meson-gx: Basic tuning support to avoid CRC errors
- s3cmci: Enable probing via DT
- mediatek: Improve tuning support for eMMC HS200 and HS400 mode
- tmio: Improve DMA support
- tmio: Use correct response for CMD12
- dw_mmc: Minor improvements and clean-ups"
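[Editor's note] The "helper functions to manage temporary enable/disable of eMMC CMDQ" mentioned in the core list above are mmc_cmdq_enable()/mmc_cmdq_disable(), added to mmc_ops.c in the diff below. A minimal sketch of the pattern their callers (the RPMB partition switch and mmc_test) follow; example_rpmb_access() is an illustrative name, not part of the patch:

	/* Sketch: park the command queue around an RPMB access, then restore it.
	 * example_rpmb_access() is hypothetical; the helpers and the
	 * ext_csd.cmdq_en/reenable_cmdq fields are from this pull. */
	static int example_rpmb_access(struct mmc_card *card)
	{
		int err = 0;

		if (card->ext_csd.cmdq_en) {
			err = mmc_cmdq_disable(card);	/* CMD6 clears CMDQ_MODE_EN */
			if (err)
				return err;
		}

		/* ... issue the RPMB requests here ... */

		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			err = mmc_cmdq_enable(card);	/* restore the state found at init */

		return err;
	}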
* tag 'mmc-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (148 commits)
mmc: sdhci-of-esdhc: limit SD clock for ls1012a/ls1046a
mmc: sdhci-of-esdhc: poll ESDHC_CLOCK_STABLE bit with udelay
mmc: sdhci-xenon: Fix default value of LOGIC_TIMING_ADJUST for eMMC5.0 PHY
mmc: sdhci-xenon: Fix the work flow in xenon_remove().
MIPS: Octeon: cavium_octeon_defconfig: Enable Octeon MMC
mmc: sdhci-xenon: Remove redundant dev_err call in get_dt_pad_ctrl_data()
mmc: cavium: Use module_pci_driver to simplify the code
mmc: cavium: Add MMC support for Octeon SOCs.
mmc: cavium: Fix detection of block or byte addressing.
mmc: core: Export API to allow hosts to get the card address
mmc: sdio: Fix sdio wait busy implement limitation
mmc: sdhci-esdhc-imx: reset tuning circuit when power on mmc card
clk: apn806: fix spelling mistake: "mising" -> "missing"
mmc: sdhci-of-esdhc: add delay between tuning cycles
mmc: sdhci: Control the delay between tuning commands
mmc: sdhci-of-esdhc: add tuning support
mmc: sdhci-of-esdhc: add support for signal voltage switch
mmc: sdhci-of-esdhc: add peripheral clock support
mmc: sdhci-pci: Allow for 3 bytes from Intel DSM
mmc: cavium: Fix a shift wrapping bug
...
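[Editor's note] Among the commits above, "mmc: sdio: Fix sdio wait busy implement limitation" replaces mmc_is_io_op() with sdio_is_io_busy() (see sdio_ops.h at the end of the diff below), which decodes the CMD52/CMD53 argument so that I/O abort and bus suspend writes are exempted from the card-busy poll. A stand-alone sketch of that decode with an illustrative function name; the bit layout and CCCR offsets are from the SDIO specification:

	/* The register address sits in bits 25:9 of a CMD52/CMD53 argument.
	 * SDIO_CCCR_ABORT is 0x06 and SDIO_CCCR_SUSPEND is 0x0c in the CCCR. */
	static bool example_is_abort_or_suspend(u32 arg)
	{
		u32 addr = (arg >> 9) & 0x1FFFF;

		return addr == SDIO_CCCR_ABORT || addr == SDIO_CCCR_SUSPEND;
	}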
Diffstat (limited to 'drivers/mmc/core')
-rw-r--r--	drivers/mmc/core/block.c	300
-rw-r--r--	drivers/mmc/core/core.c	193
-rw-r--r--	drivers/mmc/core/mmc.c	9
-rw-r--r--	drivers/mmc/core/mmc_ops.c	36
-rw-r--r--	drivers/mmc/core/mmc_ops.h	2
-rw-r--r--	drivers/mmc/core/mmc_test.c	14
-rw-r--r--	drivers/mmc/core/queue.c	307
-rw-r--r--	drivers/mmc/core/queue.h	12
-rw-r--r--	drivers/mmc/core/sd.c	4
-rw-r--r--	drivers/mmc/core/sd_ops.c	19
-rw-r--r--	drivers/mmc/core/sd_ops.h	2
-rw-r--r--	drivers/mmc/core/sdio_io.c	54
-rw-r--r--	drivers/mmc/core/sdio_ops.c	9
-rw-r--r--	drivers/mmc/core/sdio_ops.h	10
14 files changed, 583 insertions, 388 deletions
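[Editor's note] Much of the block.c and queue.c churn below replaces the fixed mqrq_cur/mqrq_prev pair with a small slot allocator, mmc_queue_req_find()/mmc_queue_req_free(), driven by the new qslots bitmap in struct mmc_queue. A stand-alone sketch of that scheme using kernel bitops, with illustrative names (the real functions additionally track mqrq->req and mq->qcnt):

	static unsigned long example_qslots;	/* one bit per in-flight request */
	static int example_qdepth = 2;

	static int example_claim_slot(void)
	{
		int i = ffz(example_qslots);	/* first zero bit == first free slot */

		if (i >= example_qdepth)
			return -EBUSY;		/* queue full, caller requeues */
		__set_bit(i, &example_qslots);
		return i;			/* becomes mqrq->task_id */
	}

	static void example_release_slot(int i)
	{
		__clear_bit(i, &example_qslots);
	}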
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ff3da960c473..8273b078686d 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -129,6 +129,13 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md);
 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 
+static void mmc_blk_requeue(struct request_queue *q, struct request *req)
+{
+	spin_lock_irq(q->queue_lock);
+	blk_requeue_request(q, req);
+	spin_unlock_irq(q->queue_lock);
+}
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -721,10 +728,41 @@ static const struct block_device_operations mmc_bdops = {
 #endif
 };
 
+static int mmc_blk_part_switch_pre(struct mmc_card *card,
+				   unsigned int part_type)
+{
+	int ret = 0;
+
+	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+		if (card->ext_csd.cmdq_en) {
+			ret = mmc_cmdq_disable(card);
+			if (ret)
+				return ret;
+		}
+		mmc_retune_pause(card->host);
+	}
+
+	return ret;
+}
+
+static int mmc_blk_part_switch_post(struct mmc_card *card,
+				    unsigned int part_type)
+{
+	int ret = 0;
+
+	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+		mmc_retune_unpause(card->host);
+		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+			ret = mmc_cmdq_enable(card);
+	}
+
+	return ret;
+}
+
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md)
 {
-	int ret;
+	int ret = 0;
 	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
 
 	if (main_md->part_curr == md->part_type)
@@ -733,8 +771,9 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 	if (mmc_card_mmc(card)) {
 		u8 part_config = card->ext_csd.part_config;
 
-		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
-			mmc_retune_pause(card->host);
+		ret = mmc_blk_part_switch_pre(card, md->part_type);
+		if (ret)
+			return ret;
 
 		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 		part_config |= md->part_type;
@@ -743,19 +782,17 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 				 EXT_CSD_PART_CONFIG, part_config,
 				 card->ext_csd.part_time);
 		if (ret) {
-			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
-				mmc_retune_unpause(card->host);
+			mmc_blk_part_switch_post(card, md->part_type);
 			return ret;
 		}
 
 		card->ext_csd.part_config = part_config;
 
-		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
-			mmc_retune_unpause(card->host);
+		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
 	}
 
 	main_md->part_curr = md->part_type;
-	return 0;
+	return ret;
 }
 
 static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
@@ -1272,7 +1309,7 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 {
 	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
 		/* Legacy mode imposes restrictions on transfers. */
-		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
 			brq->data.blocks = 1;
 
 		if (brq->data.blocks > card->ext_csd.rel_sectors)
@@ -1396,36 +1433,39 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
 	return MMC_BLK_SUCCESS;
 }
 
-static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
-			       struct mmc_card *card,
-			       int disable_multi,
-			       struct mmc_queue *mq)
+static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
+			      int disable_multi, bool *do_rel_wr,
+			      bool *do_data_tag)
 {
-	u32 readcmd, writecmd;
+	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request *brq = &mqrq->brq;
 	struct request *req = mqrq->req;
-	struct mmc_blk_data *md = mq->blkdata;
-	bool do_data_tag;
 
 	/*
 	 * Reliable writes are used to implement Forced Unit Access and
 	 * are supported only on MMCs.
 	 */
-	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
-			 (rq_data_dir(req) == WRITE) &&
-			 (md->flags & MMC_BLK_REL_WR);
+	*do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+		     rq_data_dir(req) == WRITE &&
+		     (md->flags & MMC_BLK_REL_WR);
 
 	memset(brq, 0, sizeof(struct mmc_blk_request));
-	brq->mrq.cmd = &brq->cmd;
+
 	brq->mrq.data = &brq->data;
 
-	brq->cmd.arg = blk_rq_pos(req);
-	if (!mmc_card_blockaddr(card))
-		brq->cmd.arg <<= 9;
-	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-	brq->data.blksz = 512;
 	brq->stop.opcode = MMC_STOP_TRANSMISSION;
 	brq->stop.arg = 0;
+
+	if (rq_data_dir(req) == READ) {
+		brq->data.flags = MMC_DATA_READ;
+		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+	} else {
+		brq->data.flags = MMC_DATA_WRITE;
+		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	}
+
+	brq->data.blksz = 512;
 	brq->data.blocks = blk_rq_sectors(req);
 
 	/*
@@ -1456,6 +1496,68 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			brq->data.blocks);
 	}
 
+	if (*do_rel_wr)
+		mmc_apply_rel_rw(brq, card, req);
+
+	/*
+	 * Data tag is used only during writing meta data to speed
+	 * up write and any subsequent read of this meta data
+	 */
+	*do_data_tag = card->ext_csd.data_tag_unit_size &&
+		       (req->cmd_flags & REQ_META) &&
+		       (rq_data_dir(req) == WRITE) &&
+		       ((brq->data.blocks * brq->data.blksz) >=
+			card->ext_csd.data_tag_unit_size);
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	/*
+	 * Adjust the sg list so it is the same size as the
+	 * request.
+	 */
+	if (brq->data.blocks != blk_rq_sectors(req)) {
+		int i, data_size = brq->data.blocks << 9;
+		struct scatterlist *sg;
+
+		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+			data_size -= sg->length;
+			if (data_size <= 0) {
+				sg->length += data_size;
+				i++;
+				break;
+			}
+		}
+		brq->data.sg_len = i;
+	}
+
+	mqrq->areq.mrq = &brq->mrq;
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+			       struct mmc_card *card,
+			       int disable_multi,
+			       struct mmc_queue *mq)
+{
+	u32 readcmd, writecmd;
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct mmc_blk_data *md = mq->blkdata;
+	bool do_rel_wr, do_data_tag;
+
+	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+
+	brq->mrq.cmd = &brq->cmd;
+
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
 	if (brq->data.blocks > 1 || do_rel_wr) {
 		/* SPI multiblock writes terminate using a special
 		 * token, not a STOP_TRANSMISSION request.
@@ -1470,32 +1572,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 		readcmd = MMC_READ_SINGLE_BLOCK;
 		writecmd = MMC_WRITE_BLOCK;
 	}
-	if (rq_data_dir(req) == READ) {
-		brq->cmd.opcode = readcmd;
-		brq->data.flags = MMC_DATA_READ;
-		if (brq->mrq.stop)
-			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
-					MMC_CMD_AC;
-	} else {
-		brq->cmd.opcode = writecmd;
-		brq->data.flags = MMC_DATA_WRITE;
-		if (brq->mrq.stop)
-			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
-					MMC_CMD_AC;
-	}
-
-	if (do_rel_wr)
-		mmc_apply_rel_rw(brq, card, req);
-
-	/*
-	 * Data tag is used only during writing meta data to speed
-	 * up write and any subsequent read of this meta data
-	 */
-	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
-		(req->cmd_flags & REQ_META) &&
-		(rq_data_dir(req) == WRITE) &&
-		((brq->data.blocks * brq->data.blksz) >=
-		 card->ext_csd.data_tag_unit_size);
+	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
 
 	/*
 	 * Pre-defined multi-block transfers are preferable to
@@ -1526,34 +1603,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 		brq->mrq.sbc = &brq->sbc;
 	}
 
-	mmc_set_data_timeout(&brq->data, card);
-
-	brq->data.sg = mqrq->sg;
-	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
-
-	/*
-	 * Adjust the sg list so it is the same size as the
-	 * request.
-	 */
-	if (brq->data.blocks != blk_rq_sectors(req)) {
-		int i, data_size = brq->data.blocks << 9;
-		struct scatterlist *sg;
-
-		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
-			data_size -= sg->length;
-			if (data_size <= 0) {
-				sg->length += data_size;
-				i++;
-				break;
-			}
-		}
-		brq->data.sg_len = i;
-	}
-
-	mqrq->areq.mrq = &brq->mrq;
 	mqrq->areq.err_check = mmc_blk_err_check;
-
-	mmc_queue_bounce_pre(mqrq);
 }
 
 static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
@@ -1585,11 +1635,14 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 	return req_pending;
 }
 
-static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
+static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
+				 struct request *req,
+				 struct mmc_queue_req *mqrq)
 {
 	if (mmc_card_removed(card))
 		req->rq_flags |= RQF_QUIET;
 	while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
+	mmc_queue_req_free(mq, mqrq);
 }
 
 /**
@@ -1597,7 +1650,8 @@ static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
  * @mq: the queue with the card and host to restart
  * @req: a new request that want to be started after the current one
  */
-static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
+				   struct mmc_queue_req *mqrq)
 {
 	if (!req)
 		return;
@@ -1608,11 +1662,12 @@ static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req)
 	if (mmc_card_removed(mq->card)) {
 		req->rq_flags |= RQF_QUIET;
 		blk_end_request_all(req, -EIO);
+		mmc_queue_req_free(mq, mqrq);
 		return;
 	}
 	/* Else proceed and try to restart the current async request */
-	mmc_blk_rw_rq_prep(mq->mqrq_cur, mq->card, 0, mq);
-	mmc_start_areq(mq->card->host, &mq->mqrq_cur->areq, NULL);
+	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+	mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
 }
 
 static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
@@ -1622,13 +1677,23 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 	struct mmc_blk_request *brq;
 	int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
 	enum mmc_blk_status status;
+	struct mmc_queue_req *mqrq_cur = NULL;
 	struct mmc_queue_req *mq_rq;
 	struct request *old_req;
 	struct mmc_async_req *new_areq;
 	struct mmc_async_req *old_areq;
 	bool req_pending = true;
 
-	if (!new_req && !mq->mqrq_prev->req)
+	if (new_req) {
+		mqrq_cur = mmc_queue_req_find(mq, new_req);
+		if (!mqrq_cur) {
+			WARN_ON(1);
+			mmc_blk_requeue(mq->queue, new_req);
+			new_req = NULL;
+		}
+	}
+
+	if (!mq->qcnt)
 		return;
 
 	do {
@@ -1641,12 +1706,12 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			    !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
 				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
 					new_req->rq_disk->disk_name);
-				mmc_blk_rw_cmd_abort(card, new_req);
+				mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
 				return;
 			}
 
-			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
-			new_areq = &mq->mqrq_cur->areq;
+			mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
+			new_areq = &mqrq_cur->areq;
 		} else
 			new_areq = NULL;
 
@@ -1657,8 +1722,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			 * and there is nothing more to do until it is
 			 * complete.
 			 */
-			if (status == MMC_BLK_NEW_REQUEST)
-				mq->new_request = true;
 			return;
 		}
 
@@ -1691,7 +1754,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 				pr_err("%s BUG rq_tot %d d_xfer %d\n",
 				       __func__, blk_rq_bytes(old_req),
 				       brq->data.bytes_xfered);
-				mmc_blk_rw_cmd_abort(card, old_req);
+				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
 				return;
 			}
 			break;
@@ -1699,12 +1762,15 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
 			if (mmc_blk_reset(md, card->host, type)) {
 				if (req_pending)
-					mmc_blk_rw_cmd_abort(card, old_req);
-				mmc_blk_rw_try_restart(mq, new_req);
+					mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+				else
+					mmc_queue_req_free(mq, mq_rq);
+				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			if (!req_pending) {
-				mmc_blk_rw_try_restart(mq, new_req);
+				mmc_queue_req_free(mq, mq_rq);
+				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			break;
@@ -1716,8 +1782,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		case MMC_BLK_ABORT:
 			if (!mmc_blk_reset(md, card->host, type))
 				break;
-			mmc_blk_rw_cmd_abort(card, old_req);
-			mmc_blk_rw_try_restart(mq, new_req);
+			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 			return;
 		case MMC_BLK_DATA_ERR: {
 			int err;
@@ -1726,8 +1792,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			if (!err)
 				break;
 			if (err == -ENODEV) {
-				mmc_blk_rw_cmd_abort(card, old_req);
-				mmc_blk_rw_try_restart(mq, new_req);
+				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			/* Fall through */
@@ -1748,19 +1814,20 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 				req_pending = blk_end_request(old_req, -EIO,
 							      brq->data.blksz);
 			if (!req_pending) {
-				mmc_blk_rw_try_restart(mq, new_req);
+				mmc_queue_req_free(mq, mq_rq);
+				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			break;
 		case MMC_BLK_NOMEDIUM:
-			mmc_blk_rw_cmd_abort(card, old_req);
-			mmc_blk_rw_try_restart(mq, new_req);
+			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 			return;
 		default:
 			pr_err("%s: Unhandled return value (%d)",
 			       old_req->rq_disk->disk_name, status);
-			mmc_blk_rw_cmd_abort(card, old_req);
-			mmc_blk_rw_try_restart(mq, new_req);
+			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 			return;
 		}
 
@@ -1776,6 +1843,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			mq_rq->brq.retune_retry_done = retune_retry_done;
 		}
 	} while (req_pending);
+
+	mmc_queue_req_free(mq, mq_rq);
 }
 
 void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
@@ -1783,9 +1852,8 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	int ret;
 	struct mmc_blk_data *md = mq->blkdata;
 	struct mmc_card *card = md->queue.card;
-	bool req_is_special = mmc_req_is_special(req);
 
-	if (req && !mq->mqrq_prev->req)
+	if (req && !mq->qcnt)
 		/* claim host only for the first request */
 		mmc_get_card(card);
 
@@ -1797,20 +1865,19 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		goto out;
 	}
 
-	mq->new_request = false;
 	if (req && req_op(req) == REQ_OP_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		mmc_blk_issue_discard_rq(mq, req);
 	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
 		/* complete ongoing async transfer before issuing secure erase*/
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		mmc_blk_issue_secdiscard_rq(mq, req);
 	} else if (req && req_op(req) == REQ_OP_FLUSH) {
 		/* complete ongoing async transfer before issuing flush */
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		mmc_blk_issue_flush(mq, req);
 	} else {
@@ -1819,13 +1886,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	}
 
 out:
-	if ((!req && !mq->new_request) || req_is_special)
-		/*
-		 * Release host when there are no more requests
-		 * and after special request(discard, flush) is done.
-		 * In case sepecial request, there is no reentry to
-		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
-		 */
+	if (!mq->qcnt)
 		mmc_put_card(card);
 }
 
@@ -2105,6 +2166,7 @@ static int mmc_blk_probe(struct mmc_card *card)
 {
 	struct mmc_blk_data *md, *part_md;
 	char cap_str[10];
+	int ret;
 
 	/*
 	 * Check that the card supports the command class(es) we need.
@@ -2114,9 +2176,15 @@ static int mmc_blk_probe(struct mmc_card *card)
 
 	mmc_fixup_device(card, mmc_blk_fixups);
 
+	ret = mmc_queue_alloc_shared_queue(card);
+	if (ret)
+		return ret;
+
 	md = mmc_blk_alloc(card);
-	if (IS_ERR(md))
+	if (IS_ERR(md)) {
+		mmc_queue_free_shared_queue(card);
 		return PTR_ERR(md);
+	}
 
 	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
@@ -2154,6 +2222,7 @@ static int mmc_blk_probe(struct mmc_card *card)
  out:
 	mmc_blk_remove_parts(card, md);
 	mmc_blk_remove_req(md);
+	mmc_queue_free_shared_queue(card);
 	return 0;
 }
 
@@ -2171,6 +2240,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
+	mmc_queue_free_shared_queue(card);
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 926e0fde07d7..82c45ddfa202 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -172,14 +172,16 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 
 	trace_mmc_request_done(host, mrq);
 
-	if (err && cmd->retries && !mmc_card_removed(host->card)) {
-		/*
-		 * Request starter must handle retries - see
-		 * mmc_wait_for_req_done().
-		 */
-		if (mrq->done)
-			mrq->done(mrq);
-	} else {
+	/*
+	 * We list various conditions for the command to be considered
+	 * properly done:
+	 *
+	 * - There was no error, OK fine then
+	 * - We are not doing some kind of retry
+	 * - The card was removed (...so just complete everything no matter
+	 *   if there are errors or retries)
+	 */
+	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
 		mmc_should_fail_request(host, mrq);
 
 		if (!host->ongoing_mrq)
@@ -211,10 +213,13 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 				mrq->stop->resp[0], mrq->stop->resp[1],
 				mrq->stop->resp[2], mrq->stop->resp[3]);
 		}
-
-		if (mrq->done)
-			mrq->done(mrq);
 	}
+	/*
+	 * Request starter must handle retries - see
+	 * mmc_wait_for_req_done().
+	 */
+	if (mrq->done)
+		mrq->done(mrq);
 }
 EXPORT_SYMBOL(mmc_request_done);
 
@@ -234,8 +239,10 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 	/*
 	 * For sdio rw commands we must wait for card busy otherwise some
 	 * sdio devices won't work properly.
+	 * And bypass I/O abort, reset and bus suspend operations.
	 */
-	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
+	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
+	    host->ops->card_busy) {
 		int tries = 500; /* Wait aprox 500ms at maximum */
 
 		while (host->ops->card_busy(host) && --tries)
@@ -262,26 +269,19 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 	host->ops->request(host, mrq);
 }
 
-static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
 {
-#ifdef CONFIG_MMC_DEBUG
-	unsigned int i, sz;
-	struct scatterlist *sg;
-#endif
-	mmc_retune_hold(host);
-
-	if (mmc_card_removed(host->card))
-		return -ENOMEDIUM;
-
 	if (mrq->sbc) {
 		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
 			 mmc_hostname(host), mrq->sbc->opcode,
 			 mrq->sbc->arg, mrq->sbc->flags);
 	}
 
-	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
-		 mmc_hostname(host), mrq->cmd->opcode,
-		 mrq->cmd->arg, mrq->cmd->flags);
+	if (mrq->cmd) {
+		pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
+			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
+			 mrq->cmd->flags);
+	}
 
 	if (mrq->data) {
 		pr_debug("%s:     blksz %d blocks %d flags %08x "
@@ -297,11 +297,20 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 			 mmc_hostname(host), mrq->stop->opcode,
 			 mrq->stop->arg, mrq->stop->flags);
 	}
+}
 
-	WARN_ON(!host->claimed);
+static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
+{
+#ifdef CONFIG_MMC_DEBUG
+	unsigned int i, sz;
+	struct scatterlist *sg;
+#endif
 
-	mrq->cmd->error = 0;
-	mrq->cmd->mrq = mrq;
+	if (mrq->cmd) {
+		mrq->cmd->error = 0;
+		mrq->cmd->mrq = mrq;
+		mrq->cmd->data = mrq->data;
+	}
 
 	if (mrq->sbc) {
 		mrq->sbc->error = 0;
 		mrq->sbc->mrq = mrq;
@@ -318,8 +327,6 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 		if (sz != mrq->data->blocks * mrq->data->blksz)
 			return -EINVAL;
 #endif
-
-		mrq->cmd->data = mrq->data;
 		mrq->data->error = 0;
 		mrq->data->mrq = mrq;
 		if (mrq->stop) {
@@ -328,6 +335,27 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 			mrq->stop->mrq = mrq;
 		}
 	}
+
+	return 0;
+}
+
+static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+{
+	int err;
+
+	mmc_retune_hold(host);
+
+	if (mmc_card_removed(host->card))
+		return -ENOMEDIUM;
+
+	mmc_mrq_pr_debug(host, mrq);
+
+	WARN_ON(!host->claimed);
+
+	err = mmc_mrq_prep(host, mrq);
+	if (err)
+		return err;
+
 	led_trigger_event(host->led, LED_FULL);
 	__mmc_start_request(host, mrq);
 
@@ -485,56 +513,6 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 	return err;
 }
 
-/*
- * mmc_wait_for_data_req_done() - wait for request completed
- * @host: MMC host to prepare the command.
- * @mrq: MMC request to wait for
- *
- * Blocks MMC context till host controller will ack end of data request
- * execution or new request notification arrives from the block layer.
- * Handles command retries.
- *
- * Returns enum mmc_blk_status after checking errors.
- */
-static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
-						      struct mmc_request *mrq)
-{
-	struct mmc_command *cmd;
-	struct mmc_context_info *context_info = &host->context_info;
-	enum mmc_blk_status status;
-
-	while (1) {
-		wait_event_interruptible(context_info->wait,
-					 (context_info->is_done_rcv ||
-					  context_info->is_new_req));
-
-		if (context_info->is_done_rcv) {
-			context_info->is_done_rcv = false;
-			cmd = mrq->cmd;
-
-			if (!cmd->error || !cmd->retries ||
-			    mmc_card_removed(host->card)) {
-				status = host->areq->err_check(host->card,
-							       host->areq);
-				break; /* return status */
-			} else {
-				mmc_retune_recheck(host);
-				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
-					mmc_hostname(host),
-					cmd->opcode, cmd->error);
-				cmd->retries--;
-				cmd->error = 0;
-				__mmc_start_request(host, mrq);
-				continue; /* wait for done/new event again */
-			}
-		}
-
-		return MMC_BLK_NEW_REQUEST;
-	}
-	mmc_retune_release(host);
-	return status;
-}
-
 void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 {
 	struct mmc_command *cmd;
@@ -639,14 +617,44 @@ static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
  */
 static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
 {
+	struct mmc_context_info *context_info = &host->context_info;
 	enum mmc_blk_status status;
 
 	if (!host->areq)
 		return MMC_BLK_SUCCESS;
 
-	status = mmc_wait_for_data_req_done(host, host->areq->mrq);
-	if (status == MMC_BLK_NEW_REQUEST)
-		return status;
+	while (1) {
+		wait_event_interruptible(context_info->wait,
+					 (context_info->is_done_rcv ||
+					  context_info->is_new_req));
+
+		if (context_info->is_done_rcv) {
+			struct mmc_command *cmd;
+
+			context_info->is_done_rcv = false;
+			cmd = host->areq->mrq->cmd;
+
+			if (!cmd->error || !cmd->retries ||
+			    mmc_card_removed(host->card)) {
+				status = host->areq->err_check(host->card,
+							       host->areq);
+				break; /* return status */
+			} else {
+				mmc_retune_recheck(host);
+				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
+					mmc_hostname(host),
+					cmd->opcode, cmd->error);
+				cmd->retries--;
+				cmd->error = 0;
+				__mmc_start_request(host, host->areq->mrq);
+				continue; /* wait for done/new event again */
+			}
+		}
+
+		return MMC_BLK_NEW_REQUEST;
+	}
+
+	mmc_retune_release(host);
 
 	/*
 	 * Check BKOPS urgency for each R1 response
@@ -683,7 +691,7 @@ struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
 {
 	enum mmc_blk_status status;
 	int start_err = 0;
-	struct mmc_async_req *data = host->areq;
+	struct mmc_async_req *previous = host->areq;
 
 	/* Prepare a new request */
 	if (areq)
@@ -691,13 +699,12 @@ struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
 
 	/* Finalize previous request */
 	status = mmc_finalize_areq(host);
+	if (ret_stat)
+		*ret_stat = status;
 
 	/* The previous request is still going on... */
-	if (status == MMC_BLK_NEW_REQUEST) {
-		if (ret_stat)
-			*ret_stat = status;
+	if (status == MMC_BLK_NEW_REQUEST)
 		return NULL;
-	}
 
 	/* Fine so far, start the new request! */
 	if (status == MMC_BLK_SUCCESS && areq)
@@ -716,9 +723,7 @@ struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
 	else
 		host->areq = areq;
 
-	if (ret_stat)
-		*ret_stat = status;
-	return data;
+	return previous;
 }
 EXPORT_SYMBOL(mmc_start_areq);
 
@@ -2555,6 +2560,12 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
 }
 EXPORT_SYMBOL(mmc_calc_max_discard);
 
+bool mmc_card_is_blockaddr(struct mmc_card *card)
+{
+	return card ? mmc_card_blockaddr(card) : false;
+}
+EXPORT_SYMBOL(mmc_card_is_blockaddr);
+
 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
 {
 	struct mmc_command cmd = {};
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b502601df228..2c87dede5841 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -790,6 +790,7 @@ MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
 MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
 
 static ssize_t mmc_fwrev_show(struct device *dev,
 			      struct device_attribute *attr,
@@ -845,6 +846,7 @@ static struct attribute *mmc_std_attrs[] = {
 	&dev_attr_rel_sectors.attr,
 	&dev_attr_ocr.attr,
 	&dev_attr_dsr.attr,
+	&dev_attr_cmdq_en.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(mmc_std);
@@ -1788,6 +1790,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	}
 
 	/*
+	 * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
+	 * disabled for a time, so a flag is needed to indicate to re-enable the
+	 * Command Queue.
+	 */
+	card->reenable_cmdq = card->ext_csd.cmdq_en;
+
+	/*
 	 * The mandatory minimum values are defined for packed command.
 	 * read: 5, write: 3
 	 */
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index fe80f26d6971..78f75f00efc5 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -305,7 +305,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 int mmc_send_csd(struct mmc_card *card, u32 *csd)
 {
 	int ret, i;
-	u32 *csd_tmp;
+	__be32 *csd_tmp;
 
 	if (!mmc_host_is_spi(card->host))
 		return mmc_send_cxd_native(card->host, card->rca << 16,
@@ -319,7 +319,7 @@ int mmc_send_csd(struct mmc_card *card, u32 *csd)
 	if (ret)
 		goto err;
 
-	for (i = 0;i < 4;i++)
+	for (i = 0; i < 4; i++)
 		csd[i] = be32_to_cpu(csd_tmp[i]);
 
 err:
@@ -330,7 +330,7 @@ err:
 int mmc_send_cid(struct mmc_host *host, u32 *cid)
 {
 	int ret, i;
-	u32 *cid_tmp;
+	__be32 *cid_tmp;
 
 	if (!mmc_host_is_spi(host)) {
 		if (!host->card)
@@ -347,7 +347,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid)
 	if (ret)
 		goto err;
 
-	for (i = 0;i < 4;i++)
+	for (i = 0; i < 4; i++)
 		cid[i] = be32_to_cpu(cid_tmp[i]);
 
 err:
@@ -838,3 +838,31 @@ int mmc_can_ext_csd(struct mmc_card *card)
 {
 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
 }
+
+static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
+{
+	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
+	int err;
+
+	if (!card->ext_csd.cmdq_support)
+		return -EOPNOTSUPP;
+
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
+			 val, card->ext_csd.generic_cmd6_time);
+	if (!err)
+		card->ext_csd.cmdq_en = enable;
+
+	return err;
+}
+
+int mmc_cmdq_enable(struct mmc_card *card)
+{
+	return mmc_cmdq_switch(card, true);
+}
+EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
+
+int mmc_cmdq_disable(struct mmc_card *card)
+{
+	return mmc_cmdq_switch(card, false);
+}
+EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 74beea8a9c7e..978bd2e60f8a 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -46,6 +46,8 @@ int mmc_read_bkops_status(struct mmc_card *card);
 void mmc_start_bkops(struct mmc_card *card, bool from_exception);
 int mmc_can_reset(struct mmc_card *card);
 int mmc_flush_cache(struct mmc_card *card);
+int mmc_cmdq_enable(struct mmc_card *card);
+int mmc_cmdq_disable(struct mmc_card *card);
 
 #endif
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index f99ac3123fd2..fd1b4b8510b9 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -26,6 +26,7 @@
 #include "card.h"
 #include "host.h"
 #include "bus.h"
+#include "mmc_ops.h"
 
 #define RESULT_OK		0
 #define RESULT_FAIL		1
@@ -3264,6 +3265,14 @@ static int mmc_test_probe(struct mmc_card *card)
 	if (ret)
 		return ret;
 
+	if (card->ext_csd.cmdq_en) {
+		mmc_claim_host(card->host);
+		ret = mmc_cmdq_disable(card);
+		mmc_release_host(card->host);
+		if (ret)
+			return ret;
+	}
+
 	dev_info(&card->dev, "Card claimed for testing.\n");
 
 	return 0;
@@ -3271,6 +3280,11 @@ static int mmc_test_probe(struct mmc_card *card)
 
 static void mmc_test_remove(struct mmc_card *card)
 {
+	if (card->reenable_cmdq) {
+		mmc_claim_host(card->host);
+		mmc_cmdq_enable(card);
+		mmc_release_host(card->host);
+	}
 	mmc_test_free_result(card);
 	mmc_test_free_dbgfs_file(card);
 }
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4c54ad34e17a..5c37b6be3e7b 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -40,6 +40,35 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	return BLKPREP_OK;
 }
 
+struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
+					 struct request *req)
+{
+	struct mmc_queue_req *mqrq;
+	int i = ffz(mq->qslots);
+
+	if (i >= mq->qdepth)
+		return NULL;
+
+	mqrq = &mq->mqrq[i];
+	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
+		test_bit(mqrq->task_id, &mq->qslots));
+	mqrq->req = req;
+	mq->qcnt += 1;
+	__set_bit(mqrq->task_id, &mq->qslots);
+
+	return mqrq;
+}
+
+void mmc_queue_req_free(struct mmc_queue *mq,
+			struct mmc_queue_req *mqrq)
+{
+	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
+		!test_bit(mqrq->task_id, &mq->qslots));
+	mqrq->req = NULL;
+	mq->qcnt -= 1;
+	__clear_bit(mqrq->task_id, &mq->qslots);
+}
+
 static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
@@ -50,7 +79,7 @@ static int mmc_queue_thread(void *d)
 
 	down(&mq->thread_sem);
 	do {
-		struct request *req = NULL;
+		struct request *req;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -63,38 +92,17 @@ static int mmc_queue_thread(void *d)
 			 * Dispatch queue is empty so set flags for
 			 * mmc_request_fn() to wake us up.
 			 */
-			if (mq->mqrq_prev->req)
+			if (mq->qcnt)
 				cntx->is_waiting_last_req = true;
 			else
 				mq->asleep = true;
 		}
-		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
-		if (req || mq->mqrq_prev->req) {
-			bool req_is_special = mmc_req_is_special(req);
-
+		if (req || mq->qcnt) {
 			set_current_state(TASK_RUNNING);
 			mmc_blk_issue_rq(mq, req);
 			cond_resched();
-			if (mq->new_request) {
-				mq->new_request = false;
-				continue; /* fetch again */
-			}
-
-			/*
-			 * Current request becomes previous request
-			 * and vice versa.
-			 * In case of special requests, current request
-			 * has been finished. Do not assign it to previous
-			 * request.
-			 */
-			if (req_is_special)
-				mq->mqrq_cur->req = NULL;
-
-			mq->mqrq_prev->brq.mrq.data = NULL;
-			mq->mqrq_prev->req = NULL;
-			swap(mq->mqrq_prev, mq->mqrq_cur);
 		} else {
 			if (kthread_should_stop()) {
 				set_current_state(TASK_RUNNING);
@@ -141,17 +149,13 @@ static void mmc_request_fn(struct request_queue *q)
 		wake_up_process(mq->thread);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+static struct scatterlist *mmc_alloc_sg(int sg_len)
 {
 	struct scatterlist *sg;
 
 	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
-	if (!sg)
-		*err = -ENOMEM;
-	else {
-		*err = 0;
+	if (sg)
 		sg_init_table(sg, sg_len);
-	}
 
 	return sg;
 }
@@ -175,80 +179,178 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
-					unsigned int bouncesz)
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+	kfree(mqrq->bounce_sg);
+	mqrq->bounce_sg = NULL;
+
+	kfree(mqrq->sg);
+	mqrq->sg = NULL;
+
+	kfree(mqrq->bounce_buf);
+	mqrq->bounce_buf = NULL;
+}
+
+static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
 {
 	int i;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->mqrq[i].bounce_buf)
-			goto out_err;
-	}
+	for (i = 0; i < qdepth; i++)
+		mmc_queue_req_free_bufs(&mqrq[i]);
+}
 
-	return true;
+static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
+{
+	mmc_queue_reqs_free_bufs(mqrq, qdepth);
+	kfree(mqrq);
+}
 
-out_err:
-	while (--i >= 0) {
-		kfree(mq->mqrq[i].bounce_buf);
-		mq->mqrq[i].bounce_buf = NULL;
+static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
+{
+	struct mmc_queue_req *mqrq;
+	int i;
+
+	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
+	if (mqrq) {
+		for (i = 0; i < qdepth; i++)
+			mqrq[i].task_id = i;
 	}
-	pr_warn("%s: unable to allocate bounce buffers\n",
-		mmc_card_name(mq->card));
-	return false;
+
+	return mqrq;
 }
 
-static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
-				      unsigned int bouncesz)
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
+				       unsigned int bouncesz)
 {
-	int i, ret;
+	int i;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
-		if (ret)
-			return ret;
+	for (i = 0; i < qdepth; i++) {
+		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mqrq[i].bounce_buf)
+			return -ENOMEM;
 
-		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
-		if (ret)
-			return ret;
+		mqrq[i].sg = mmc_alloc_sg(1);
+		if (!mqrq[i].sg)
+			return -ENOMEM;
+
+		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
+		if (!mqrq[i].bounce_sg)
+			return -ENOMEM;
 	}
 
 	return 0;
 }
+
+static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
+				   unsigned int bouncesz)
+{
+	int ret;
+
+	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
+	if (ret)
+		mmc_queue_reqs_free_bufs(mqrq, qdepth);
+
+	return !ret;
+}
+
+static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
+{
+	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
+
+	if (host->max_segs != 1)
+		return 0;
+
+	if (bouncesz > host->max_req_size)
+		bouncesz = host->max_req_size;
+	if (bouncesz > host->max_seg_size)
+		bouncesz = host->max_seg_size;
+	if (bouncesz > host->max_blk_count * 512)
+		bouncesz = host->max_blk_count * 512;
+
+	if (bouncesz <= 512)
+		return 0;
+
+	return bouncesz;
+}
+#else
+static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
+					  int qdepth, unsigned int bouncesz)
+{
+	return false;
+}
+
+static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
+{
+	return 0;
+}
 #endif
 
-static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
+static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
+			       int max_segs)
 {
-	int i, ret;
+	int i;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
-		if (ret)
-			return ret;
+	for (i = 0; i < qdepth; i++) {
+		mqrq[i].sg = mmc_alloc_sg(max_segs);
+		if (!mqrq[i].sg)
+			return -ENOMEM;
 	}
 
 	return 0;
 }
 
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+void mmc_queue_free_shared_queue(struct mmc_card *card)
 {
-	kfree(mqrq->bounce_sg);
-	mqrq->bounce_sg = NULL;
+	if (card->mqrq) {
+		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
+		card->mqrq = NULL;
+	}
+}
 
-	kfree(mqrq->sg);
-	mqrq->sg = NULL;
+static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
+{
+	struct mmc_host *host = card->host;
+	struct mmc_queue_req *mqrq;
+	unsigned int bouncesz;
+	int ret = 0;
 
-	kfree(mqrq->bounce_buf);
-	mqrq->bounce_buf = NULL;
+	if (card->mqrq)
+		return -EINVAL;
+
+	mqrq = mmc_queue_alloc_mqrqs(qdepth);
+	if (!mqrq)
+		return -ENOMEM;
+
+	card->mqrq = mqrq;
+	card->qdepth = qdepth;
+
+	bouncesz = mmc_queue_calc_bouncesz(host);
+
+	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
+		bouncesz = 0;
+		pr_warn("%s: unable to allocate bounce buffers\n",
+			mmc_card_name(card));
+	}
+
+	card->bouncesz = bouncesz;
+
+	if (!bouncesz) {
+		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
+		if (ret)
+			goto out_err;
+	}
+
+	return ret;
+
+out_err:
+	mmc_queue_free_shared_queue(card);
+	return ret;
 }
 
-static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+int mmc_queue_alloc_shared_queue(struct mmc_card *card)
 {
-	int i;
-
-	for (i = 0; i < mq->qdepth; i++)
-		mmc_queue_req_free_bufs(&mq->mqrq[i]);
+	return __mmc_queue_alloc_shared_queue(card, 2);
 }
 
 /**
@@ -265,7 +367,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
-	bool bounce = false;
 	int ret = -ENOMEM;
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
@@ -276,13 +377,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
-	mq->qdepth = 2;
-	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
-			   GFP_KERNEL);
-	if (!mq->mqrq)
-		goto blk_cleanup;
-	mq->mqrq_cur = &mq->mqrq[0];
-	mq->mqrq_prev = &mq->mqrq[1];
+	mq->mqrq = card->mqrq;
+	mq->qdepth = card->qdepth;
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -291,44 +387,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-	if (host->max_segs == 1) {
-		unsigned int bouncesz;
-
-		bouncesz = MMC_QUEUE_BOUNCESZ;
-
-		if (bouncesz > host->max_req_size)
-			bouncesz = host->max_req_size;
-		if (bouncesz > host->max_seg_size)
-			bouncesz = host->max_seg_size;
-		if (bouncesz > (host->max_blk_count * 512))
-			bouncesz = host->max_blk_count * 512;
-
-		if (bouncesz > 512 &&
-		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
-			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-			blk_queue_max_segments(mq->queue, bouncesz / 512);
-			blk_queue_max_segment_size(mq->queue, bouncesz);
-
-			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
-			if (ret)
-				goto cleanup_queue;
-			bounce = true;
-		}
-	}
-#endif
-
-	if (!bounce) {
+	if (card->bouncesz) {
+		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
+		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
+		blk_queue_max_segment_size(mq->queue, card->bouncesz);
+	} else {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
-		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
-		if (ret)
-			goto cleanup_queue;
 	}
 
 	sema_init(&mq->thread_sem, 1);
@@ -343,11 +412,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 	return 0;
 
- cleanup_queue:
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
+cleanup_queue:
 	mq->mqrq = NULL;
-blk_cleanup:
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -369,10 +435,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 		blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
 	mq->mqrq = NULL;
-
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index e298f100101b..871796c3f406 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -34,23 +34,25 @@ struct mmc_queue_req {
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
 	struct mmc_async_req	areq;
+	int			task_id;
 };
 
 struct mmc_queue {
 	struct mmc_card		*card;
 	struct task_struct	*thread;
 	struct semaphore	thread_sem;
-	bool			new_request;
 	bool			suspended;
 	bool			asleep;
 	struct mmc_blk_data	*blkdata;
 	struct request_queue	*queue;
 	struct mmc_queue_req	*mqrq;
-	struct mmc_queue_req	*mqrq_cur;
-	struct mmc_queue_req	*mqrq_prev;
 	int			qdepth;
+	int			qcnt;
+	unsigned long		qslots;
 };
 
+extern int mmc_queue_alloc_shared_queue(struct mmc_card *card);
+extern void mmc_queue_free_shared_queue(struct mmc_card *card);
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
 			  const char *);
 extern void mmc_cleanup_queue(struct mmc_queue *);
@@ -64,4 +66,8 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
 extern int mmc_access_rpmb(struct mmc_queue *);
 
+extern struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *,
+						struct request *);
+extern void mmc_queue_req_free(struct mmc_queue *, struct mmc_queue_req *);
+
 #endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 89531b48ae84..d109634fbfce 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -225,7 +225,7 @@ static int mmc_decode_scr(struct mmc_card *card)
 static int mmc_read_ssr(struct mmc_card *card)
 {
 	unsigned int au, es, et, eo;
-	u32 *raw_ssr;
+	__be32 *raw_ssr;
 	int i;
 
 	if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -853,7 +853,7 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
 		/*
 		 * Fetch SCR from card.
		 */
-		err = mmc_app_send_scr(card, card->raw_scr);
+		err = mmc_app_send_scr(card);
		if (err)
			return err;
 
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 9d5824a37586..47056d8d1bac 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -232,14 +232,14 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
 	return 0;
 }
 
-int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
+int mmc_app_send_scr(struct mmc_card *card)
 {
 	int err;
 	struct mmc_request mrq = {};
 	struct mmc_command cmd = {};
 	struct mmc_data data = {};
 	struct scatterlist sg;
-	void *data_buf;
+	__be32 *scr;
 
 	/* NOTE: caller guarantees scr is heap-allocated */
 
@@ -250,8 +250,8 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
 	/* dma onto stack is unsafe/nonportable, but callers to this
 	 * routine normally provide temporary on-stack buffers ...
 	 */
-	data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
-	if (data_buf == NULL)
+	scr = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
+	if (!scr)
 		return -ENOMEM;
 
 	mrq.cmd = &cmd;
@@ -267,23 +267,22 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
 	data.sg = &sg;
 	data.sg_len = 1;
 
-	sg_init_one(&sg, data_buf, 8);
+	sg_init_one(&sg, scr, 8);
 
 	mmc_set_data_timeout(&data, card);
 
 	mmc_wait_for_req(card->host, &mrq);
 
-	memcpy(scr, data_buf, sizeof(card->raw_scr));
-	kfree(data_buf);
+	card->raw_scr[0] = be32_to_cpu(scr[0]);
+	card->raw_scr[1] = be32_to_cpu(scr[1]);
+
+	kfree(scr);
 
 	if (cmd.error)
 		return cmd.error;
 	if (data.error)
 		return data.error;
 
-	scr[0] = be32_to_cpu(scr[0]);
-	scr[1] = be32_to_cpu(scr[1]);
-
 	return 0;
 }
 
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index 784f8e6b6baa..0e6c3d51e66d 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -22,7 +22,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width);
 int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
 int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
 int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca);
-int mmc_app_send_scr(struct mmc_card *card, u32 *scr);
+int mmc_app_send_scr(struct mmc_card *card);
 int mmc_sd_switch(struct mmc_card *card, int mode, int group,
 	u8 value, u8 *resp);
 int mmc_app_sd_status(struct mmc_card *card, void *ssr);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 74195d772f5a..d40744bbafa9 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -373,19 +373,16 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
 	u8 val;
 
 	if (!func) {
-		*err_ret = -EINVAL;
+		if (err_ret)
+			*err_ret = -EINVAL;
 		return 0xFF;
 	}
 
-	if (err_ret)
-		*err_ret = 0;
-
 	ret = mmc_io_rw_direct(func->card, 0, func->num, addr, 0, &val);
-	if (ret) {
-		if (err_ret)
-			*err_ret = ret;
+	if (err_ret)
+		*err_ret = ret;
+	if (ret)
 		return 0xFF;
-	}
 
 	return val;
 }
@@ -407,7 +404,8 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
 	int ret;
 
 	if (!func) {
-		*err_ret = -EINVAL;
+		if (err_ret)
+			*err_ret = -EINVAL;
 		return;
 	}
 
@@ -441,7 +439,7 @@ u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
 	if (err_ret)
 		*err_ret = ret;
 	if (ret)
-		val = 0xff;
+		return 0xff;
 
 	return val;
 }
@@ -529,15 +527,11 @@ u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret)
 {
 	int ret;
 
-	if (err_ret)
-		*err_ret = 0;
-
 	ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 2);
-	if (ret) {
-		if (err_ret)
-			*err_ret = ret;
+	if (err_ret)
+		*err_ret = ret;
+	if (ret)
 		return 0xFFFF;
-	}
 
 	return le16_to_cpup((__le16 *)func->tmpbuf);
 }
@@ -581,15 +575,11 @@ u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret)
 {
 	int ret;
 
-	if (err_ret)
-		*err_ret = 0;
-
 	ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 4);
-	if (ret) {
-		if (err_ret)
-			*err_ret = ret;
+	if (err_ret)
+		*err_ret = ret;
+	if (ret)
 		return 0xFFFFFFFF;
-	}
 
 	return le32_to_cpup((__le32 *)func->tmpbuf);
 }
@@ -635,19 +625,16 @@ unsigned char sdio_f0_readb(struct sdio_func *func, unsigned int addr,
 	unsigned char val;
 
 	if (!func) {
-		*err_ret = -EINVAL;
+		if (err_ret)
+			*err_ret = -EINVAL;
 		return 0xFF;
 	}
 
-	if (err_ret)
-		*err_ret = 0;
-
 	ret = mmc_io_rw_direct(func->card, 0, 0, addr, 0, &val);
-	if (ret) {
-		if (err_ret)
-			*err_ret = ret;
+	if (err_ret)
+		*err_ret = ret;
+	if (ret)
 		return 0xFF;
-	}
 
 	return val;
 }
@@ -673,7 +660,8 @@ void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
 	int ret;
 
 	if (!func) {
-		*err_ret = -EINVAL;
+		if (err_ret)
+			*err_ret = -EINVAL;
 		return;
 	}
 
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 3c0d3ab4324c..abaaba38514f 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -152,7 +152,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
 
 	left_size = data.blksz * data.blocks;
-	nents = (left_size - 1) / seg_size + 1;
+	nents = DIV_ROUND_UP(left_size, seg_size);
 	if (nents > 1) {
 		if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))
 			return -ENOMEM;
@@ -161,10 +161,9 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 		data.sg_len = nents;
 
 		for_each_sg(data.sg, sg_ptr, data.sg_len, i) {
-			sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)),
-				    min(seg_size, left_size),
-				    offset_in_page(buf + (i * seg_size)));
-			left_size = left_size - seg_size;
+			sg_set_buf(sg_ptr, buf + i * seg_size,
+				   min(seg_size, left_size));
+			left_size -= seg_size;
 		}
 	} else {
 		data.sg = &sg;
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
index bed8a8377fec..ee35cb4d170e 100644
--- a/drivers/mmc/core/sdio_ops.h
+++ b/drivers/mmc/core/sdio_ops.h
@@ -26,9 +26,15 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 int sdio_reset(struct mmc_host *host);
 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz);
 
-static inline bool mmc_is_io_op(u32 opcode)
+static inline bool sdio_is_io_busy(u32 opcode, u32 arg)
 {
-	return opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED;
+	u32 addr;
+
+	addr = (arg >> 9) & 0x1FFFF;
+
+	return (opcode == SD_IO_RW_EXTENDED ||
+		(opcode == SD_IO_RW_DIRECT &&
+		 !(addr == SDIO_CCCR_ABORT || addr == SDIO_CCCR_SUSPEND)));
}
 
#endif
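[Editor's note] A pattern repeated in mmc_ops.c, sd.c and sd_ops.c above is typing raw CSD/CID/SCR/SSR buffers as __be32 instead of u32: the card sends big-endian words, and the explicit type lets sparse verify that every word goes through be32_to_cpu(). The idea in isolation, with illustrative names:

	/* Convert a raw big-endian register dump to host order; with the
	 * buffer typed __be32, a missing be32_to_cpu() is a sparse warning. */
	static void example_fixup_words(const __be32 *raw, u32 *out, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			out[i] = be32_to_cpu(raw[i]);
	}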