Diffstat (limited to 'drivers/nvme/host/pci.c')
-rw-r--r-- | drivers/nvme/host/pci.c | 147
1 file changed, 106 insertions, 41 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ca2ee806d74b..d8585df2c2fd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -500,22 +500,13 @@ static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
 	nvmeq->last_sq_tail = nvmeq->sq_tail;
 }
 
-/**
- * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
- * @nvmeq: The queue to use
- * @cmd: The command to send
- * @write_sq: whether to write to the SQ doorbell
- */
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
-			    bool write_sq)
+static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
+				    struct nvme_command *cmd)
 {
-	spin_lock(&nvmeq->sq_lock);
 	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
-	       cmd, sizeof(*cmd));
+	       absolute_pointer(cmd), sizeof(*cmd));
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
 		nvmeq->sq_tail = 0;
-	nvme_write_sq_db(nvmeq, write_sq);
-	spin_unlock(&nvmeq->sq_lock);
 }
 
 static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
@@ -912,52 +903,32 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
 	return BLK_STS_OK;
 }
 
-/*
- * NOTE: ns is NULL when called on the admin queue.
- */
-static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
-			 const struct blk_mq_queue_data *bd)
+static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 {
-	struct nvme_ns *ns = hctx->queue->queuedata;
-	struct nvme_queue *nvmeq = hctx->driver_data;
-	struct nvme_dev *dev = nvmeq->dev;
-	struct request *req = bd->rq;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_command *cmnd = &iod->cmd;
 	blk_status_t ret;
 
 	iod->aborted = 0;
 	iod->npages = -1;
 	iod->nents = 0;
 
-	/*
-	 * We should not need to do this, but we're still using this to
-	 * ensure we can drain requests on a dying queue.
-	 */
-	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
-		return BLK_STS_IOERR;
-
-	if (!nvme_check_ready(&dev->ctrl, req, true))
-		return nvme_fail_nonready_command(&dev->ctrl, req);
-
-	ret = nvme_setup_cmd(ns, req);
+	ret = nvme_setup_cmd(req->q->queuedata, req);
 	if (ret)
 		return ret;
 
 	if (blk_rq_nr_phys_segments(req)) {
-		ret = nvme_map_data(dev, req, cmnd);
+		ret = nvme_map_data(dev, req, &iod->cmd);
 		if (ret)
 			goto out_free_cmd;
 	}
 
 	if (blk_integrity_rq(req)) {
-		ret = nvme_map_metadata(dev, req, cmnd);
+		ret = nvme_map_metadata(dev, req, &iod->cmd);
 		if (ret)
 			goto out_unmap_data;
 	}
 
 	blk_mq_start_request(req);
-	nvme_submit_cmd(nvmeq, cmnd, bd->last);
 	return BLK_STS_OK;
 out_unmap_data:
 	nvme_unmap_data(dev, req);
@@ -966,6 +937,96 @@ out_free_cmd:
 	return ret;
 }
 
+/*
+ * NOTE: ns is NULL when called on the admin queue.
+ */
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+			 const struct blk_mq_queue_data *bd)
+{
+	struct nvme_queue *nvmeq = hctx->driver_data;
+	struct nvme_dev *dev = nvmeq->dev;
+	struct request *req = bd->rq;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	blk_status_t ret;
+
+	/*
+	 * We should not need to do this, but we're still using this to
+	 * ensure we can drain requests on a dying queue.
+	 */
+	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+		return BLK_STS_IOERR;
+
+	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
+		return nvme_fail_nonready_command(&dev->ctrl, req);
+
+	ret = nvme_prep_rq(dev, req);
+	if (unlikely(ret))
+		return ret;
+	spin_lock(&nvmeq->sq_lock);
+	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+	nvme_write_sq_db(nvmeq, bd->last);
+	spin_unlock(&nvmeq->sq_lock);
+	return BLK_STS_OK;
+}
+
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+{
+	spin_lock(&nvmeq->sq_lock);
+	while (!rq_list_empty(*rqlist)) {
+		struct request *req = rq_list_pop(rqlist);
+		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+	}
+	nvme_write_sq_db(nvmeq, true);
+	spin_unlock(&nvmeq->sq_lock);
+}
+
+static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
+{
+	/*
+	 * We should not need to do this, but we're still using this to
+	 * ensure we can drain requests on a dying queue.
+	 */
+	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+		return false;
+	if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
+		return false;
+
+	req->mq_hctx->tags->rqs[req->tag] = req;
+	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
+}
+
+static void nvme_queue_rqs(struct request **rqlist)
+{
+	struct request *req, *next, *prev = NULL;
+	struct request *requeue_list = NULL;
+
+	rq_list_for_each_safe(rqlist, req, next) {
+		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+		if (!nvme_prep_rq_batch(nvmeq, req)) {
+			/* detach 'req' and add to remainder list */
+			rq_list_move(rqlist, &requeue_list, req, prev);
+
+			req = prev;
+			if (!req)
+				continue;
+		}
+
+		if (!next || req->mq_hctx != next->mq_hctx) {
+			/* detach rest of list, and submit */
+			req->rq_next = NULL;
+			nvme_submit_cmds(nvmeq, rqlist);
+			*rqlist = next;
+			prev = NULL;
+		} else
+			prev = req;
+	}
+
+	*rqlist = requeue_list;
+}
+
 static __always_inline void nvme_pci_unmap_rq(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -1140,7 +1201,11 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 
 	c.common.opcode = nvme_admin_async_event;
 	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
-	nvme_submit_cmd(nvmeq, &c, true);
+
+	spin_lock(&nvmeq->sq_lock);
+	nvme_sq_copy_cmd(nvmeq, &c);
+	nvme_write_sq_db(nvmeq, true);
+	spin_unlock(&nvmeq->sq_lock);
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1371,7 +1436,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	}
 
 	abort_req->end_io_data = NULL;
-	blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio);
+	blk_execute_rq_nowait(abort_req, false, abort_endio);
 
 	/*
 	 * The aborted req will be completed on receiving the abort req.
@@ -1663,6 +1728,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
 
 static const struct blk_mq_ops nvme_mq_ops = {
 	.queue_rq	= nvme_queue_rq,
+	.queue_rqs	= nvme_queue_rqs,
 	.complete	= nvme_pci_complete_rq,
 	.commit_rqs	= nvme_commit_rqs,
 	.init_hctx	= nvme_init_hctx,
@@ -2416,9 +2482,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	req->end_io_data = nvmeq;
 
 	init_completion(&nvmeq->delete_done);
-	blk_execute_rq_nowait(NULL, req, false,
-			opcode == nvme_admin_delete_cq ?
-			nvme_del_cq_end : nvme_del_queue_end);
+	blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
+			nvme_del_cq_end : nvme_del_queue_end);
 	return 0;
 }
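The heart of the change is visible in nvme_queue_rqs()/nvme_submit_cmds() above: each request is prepared individually by nvme_prep_rq(), but consecutive requests that target the same hardware queue are copied into the submission queue under a single sq_lock acquisition, and the doorbell is written once per run instead of once per command. The stand-alone, user-space sketch below models only that grouping idea; every name in it (struct fake_req, queue_id, submit_batch(), queue_rqs()) is made up for illustration and is not kernel API, and the requeue handling that the real nvme_queue_rqs() does for requests that fail preparation is omitted.

/*
 * User-space model of the batching idea: group a singly linked request
 * list into runs that share a queue, take the "lock" once per run, copy
 * the already-prepared commands, and ring the doorbell once per run.
 * Illustrative only; none of these names exist in the kernel.
 */
#include <stdio.h>

struct fake_req {
	struct fake_req *next;
	int queue_id;		/* stands in for req->mq_hctx */
	int data;		/* stands in for the prepared command */
};

/* Copy one already-prepared command into the queue's ring (modeled as a print). */
static void copy_cmd(const struct fake_req *req)
{
	printf("  copy cmd %d into SQ of queue %d\n", req->data, req->queue_id);
}

/*
 * Issue a run of requests that all target the same queue: one lock
 * round-trip and one doorbell write, mirroring nvme_submit_cmds().
 */
static void submit_batch(const struct fake_req *head)
{
	if (!head)
		return;
	printf("lock SQ %d\n", head->queue_id);
	for (const struct fake_req *req = head; req; req = req->next)
		copy_cmd(req);
	printf("ring doorbell for SQ %d once, unlock\n", head->queue_id);
}

/*
 * Walk the whole list and cut it into per-queue runs, the way
 * nvme_queue_rqs() detaches a run whenever the next request belongs
 * to a different hardware queue.
 */
static void queue_rqs(struct fake_req *list)
{
	struct fake_req *head = list;

	while (head) {
		struct fake_req *tail = head;

		/* extend the run while the next request targets the same queue */
		while (tail->next && tail->next->queue_id == head->queue_id)
			tail = tail->next;

		struct fake_req *next_run = tail->next;
		tail->next = NULL;	/* detach the run, like req->rq_next = NULL */
		submit_batch(head);
		head = next_run;
	}
}

int main(void)
{
	/* three requests for queue 0, then two for queue 1 */
	struct fake_req r[5] = {
		{ &r[1], 0, 10 }, { &r[2], 0, 11 }, { &r[3], 0, 12 },
		{ &r[4], 1, 20 }, { NULL,  1, 21 },
	};

	queue_rqs(&r[0]);
	return 0;
}

Built with any C99 compiler, the sketch prints one lock/doorbell pair for the run of three queue-0 requests and another for the two queue-1 requests. Splitting command preparation out of the submission path, as the diff does with nvme_prep_rq(), is what makes this possible: prep can run without holding sq_lock, so the lock is only held while memcpy'ing commands and the doorbell MMIO happens once per batch rather than once per request.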