author		Linus Torvalds <torvalds@linux-foundation.org>	2023-06-02 13:13:50 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-06-02 13:13:50 -0400
commit		2b7497739fc8b4f411b70b78044a9fc21d34d18d
tree		5b838bd7af5e28ba9fe176cc7358c4d946cbcaca
parent		26d147799001edfe479c0b015ba1cb038def5ae7
parent		2e45a49531fef55f4abbd6738c052545f53f43d4
Merge tag 'block-6.4-2023-06-02' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:
"Just an NVMe pull request with (mostly) KATO fixes, a regression fix
for zoned device revalidation, and a fix for an md raid5 regression"
* tag 'block-6.4-2023-06-02' of git://git.kernel.dk/linux:
nvme: fix the name of Zone Append for verbose logging
nvme: improve handling of long keep alives
nvme: check IO start time when deciding to defer KA
nvme: double KA polling frequency to avoid KATO with TBKAS on
nvme: fix miss command type check
block: fix revalidate performance regression
md/raid5: fix miscalculation of 'end_sector' in raid5_read_one_chunk()
-rw-r--r--	block/blk-settings.c		3
-rw-r--r--	drivers/md/raid5.c		2
-rw-r--r--	drivers/nvme/host/constants.c	2
-rw-r--r--	drivers/nvme/host/core.c	52
-rw-r--r--	drivers/nvme/host/ioctl.c	2
-rw-r--r--	drivers/nvme/host/nvme.h	3
-rw-r--r--	drivers/nvme/target/passthru.c	2
7 files changed, 56 insertions, 10 deletions
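
As background for the NVMe keep-alive (KA) changes in the diff below: the host sends Keep Alive commands at half of the Keep Alive Timeout (KATO), and with Traffic Based Keep Alive (TBKAS) the keep-alive worker has to run twice as often again, because a single command completion can postpone the actual keep-alive command by up to two work periods. A standalone sketch of that arithmetic (not kernel code; the HZ value, the CTRATT bit value, and the 10-second KATO are illustrative assumptions):

#include <stdio.h>

#define HZ 250				/* illustrative tick rate */
#define NVME_CTRL_ATTR_TBKAS (1 << 6)	/* TBKAS bit in CTRATT (assumed value) */

/* Mirrors the period calculation nvme_keep_alive_work_period() introduces
 * in core.c below. */
static unsigned long ka_work_period(unsigned long kato_secs, unsigned int ctratt)
{
	unsigned long delay = kato_secs * HZ / 2;	/* half of KATO */

	if (ctratt & NVME_CTRL_ATTR_TBKAS)
		delay /= 2;	/* quarter of KATO under TBKAS */
	return delay;
}

int main(void)
{
	/* KATO = 10s: 5s work period normally, 2.5s with TBKAS enabled. */
	printf("plain: %lu jiffies\n", ka_work_period(10, 0));
	printf("tbkas: %lu jiffies\n", ka_work_period(10, NVME_CTRL_ATTR_TBKAS));
	return 0;
}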
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 896b4654ab00..4dd59059b788 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -915,6 +915,7 @@ static bool disk_has_partitions(struct gendisk *disk)
 void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 {
 	struct request_queue *q = disk->queue;
+	unsigned int old_model = q->limits.zoned;
 
 	switch (model) {
 	case BLK_ZONED_HM:
@@ -952,7 +953,7 @@ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 		 */
 		blk_queue_zone_write_granularity(q,
 						queue_logical_block_size(q));
-	} else {
+	} else if (old_model != BLK_ZONED_NONE) {
 		disk_clear_zone_settings(disk);
 	}
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4739ed891e75..9ea285fbc4a6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5516,7 +5516,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 
 	sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
 				      &dd_idx, NULL);
-	end_sector = bio_end_sector(raid_bio);
+	end_sector = sector + bio_sectors(raid_bio);
 
 	rcu_read_lock();
 	if (r5c_big_stripe_cached(conf, sector))
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index bc523ca02254..5e4f8848dce0 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -21,7 +21,7 @@ static const char * const nvme_ops[] = {
 	[nvme_cmd_resv_release] = "Reservation Release",
 	[nvme_cmd_zone_mgmt_send] = "Zone Management Send",
 	[nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
-	[nvme_cmd_zone_append] = "Zone Management Append",
+	[nvme_cmd_zone_append] = "Zone Append",
 };
 
 static const char * const nvme_admin_ops[] = {
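To see why the raid5_read_one_chunk() hunk above matters: raid5_compute_sector() translates the bio's logical start sector into a sector on a member device, so the end of the checked range must come from that translated start plus the bio's length; bio_end_sector() would instead yield the untranslated logical end. A toy model (the mapping below is invented for illustration and ignores the real striping and parity layout):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Invented stand-in for raid5_compute_sector(): map an array-logical
 * sector to a sector on one member device (3 data disks, no parity). */
static sector_t toy_compute_sector(sector_t logical)
{
	return logical / 3;
}

int main(void)
{
	sector_t bi_sector = 4096;	/* logical start of the bio */
	sector_t nr_sectors = 8;	/* bio_sectors(): length in sectors */
	sector_t sector = toy_compute_sector(bi_sector);

	/* Old code: bio_end_sector() mixed the logical end (4104) with the
	 * translated device start. */
	sector_t old_end = bi_sector + nr_sectors;
	/* Fixed code: device start plus length gives a consistent range. */
	sector_t new_end = sector + nr_sectors;

	printf("device range: [%llu, %llu), not [%llu, %llu)\n",
	       sector, new_end, sector, old_end);
	return 0;
}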
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1f0cbb77b249..3ec38e2b9173 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -397,7 +397,16 @@ void nvme_complete_rq(struct request *req)
 	trace_nvme_complete_rq(req);
 	nvme_cleanup_cmd(req);
 
-	if (ctrl->kas)
+	/*
+	 * Completions of long-running commands should not be able to
+	 * defer sending of periodic keep alives, since the controller
+	 * may have completed processing such commands a long time ago
+	 * (arbitrarily close to command submission time).
+	 * req->deadline - req->timeout is the command submission time
+	 * in jiffies.
+	 */
+	if (ctrl->kas &&
+	    req->deadline - req->timeout >= ctrl->ka_last_check_time)
 		ctrl->comp_seen = true;
 
 	switch (nvme_decide_disposition(req)) {
@@ -1115,7 +1124,7 @@ u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 }
 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
 
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
 		       struct nvme_command *cmd, int status)
 {
 	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
@@ -1132,6 +1141,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
 		nvme_queue_scan(ctrl);
 		flush_work(&ctrl->scan_work);
 	}
+	if (ns)
+		return;
 
 	switch (cmd->common.opcode) {
 	case nvme_admin_set_features:
@@ -1161,9 +1172,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
 * The host should send Keep Alive commands at half of the Keep Alive Timeout
 * accounting for transport roundtrip times [..].
 */
+static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
+{
+	unsigned long delay = ctrl->kato * HZ / 2;
+
+	/*
+	 * When using Traffic Based Keep Alive, we need to run
+	 * nvme_keep_alive_work at twice the normal frequency, as one
+	 * command completion can postpone sending a keep alive command
+	 * by up to twice the delay between runs.
+	 */
+	if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
+		delay /= 2;
+	return delay;
+}
+
 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 {
-	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
+	queue_delayed_work(nvme_wq, &ctrl->ka_work,
+			   nvme_keep_alive_work_period(ctrl));
 }
 
 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
@@ -1172,6 +1199,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
 	struct nvme_ctrl *ctrl = rq->end_io_data;
 	unsigned long flags;
 	bool startka = false;
+	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+	unsigned long delay = nvme_keep_alive_work_period(ctrl);
+
+	/*
+	 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
+	 * at the desired frequency.
+	 */
+	if (rtt <= delay) {
+		delay -= rtt;
+	} else {
+		dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
+			 jiffies_to_msecs(rtt));
+		delay = 0;
+	}
 
 	blk_mq_free_request(rq);
 
@@ -1182,6 +1223,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
 		return RQ_END_IO_NONE;
 	}
 
+	ctrl->ka_last_check_time = jiffies;
 	ctrl->comp_seen = false;
 	spin_lock_irqsave(&ctrl->lock, flags);
 	if (ctrl->state == NVME_CTRL_LIVE ||
@@ -1189,7 +1231,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
 		startka = true;
 	spin_unlock_irqrestore(&ctrl->lock, flags);
 	if (startka)
-		nvme_queue_keep_alive_work(ctrl);
+		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
 
 	return RQ_END_IO_NONE;
 }
@@ -1200,6 +1242,8 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	bool comp_seen = ctrl->comp_seen;
 	struct request *rq;
 
+	ctrl->ka_last_check_time = jiffies;
+
 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
 		dev_dbg(ctrl->device,
 			"reschedule traffic based keep-alive timer\n");
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 81c5c9e38477..f15e7330b75a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -254,7 +254,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	blk_mq_free_request(req);
 
 	if (effects)
-		nvme_passthru_end(ctrl, effects, cmd, ret);
+		nvme_passthru_end(ctrl, ns, effects, cmd, ret);
 
 	return ret;
 }
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a2d4f59e0535..8657811f8b88 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -328,6 +328,7 @@ struct nvme_ctrl {
 	struct delayed_work ka_work;
 	struct delayed_work failfast_work;
 	struct nvme_command ka_cmd;
+	unsigned long ka_last_check_time;
 	struct work_struct fw_act_work;
 	unsigned long events;
 
@@ -1077,7 +1078,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		       u8 opcode);
 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
 int nvme_execute_rq(struct request *rq, bool at_head);
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
 		       struct nvme_command *cmd, int status);
 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
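The nvme_complete_rq() change above keys off the request's submission time, reconstructed as req->deadline - req->timeout: a completion only counts as traffic (comp_seen) if the command was submitted at or after the last keep-alive check, so long-running commands can no longer defer keep-alives indefinitely. A worked example with invented jiffies values:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long ka_last_check_time = 10000;	/* last KA check (jiffies) */

	/* A long-running command: submitted well before the last KA check,
	 * completing only now. deadline and timeout are invented values. */
	unsigned long deadline = 10500, timeout = 3000;
	unsigned long submitted = deadline - timeout;	/* 7500 */

	/* Completion counts as fresh traffic only if submitted recently. */
	bool comp_seen = submitted >= ka_last_check_time;

	printf("submitted=%lu last_check=%lu comp_seen=%d\n",
	       submitted, ka_last_check_time, comp_seen);	/* comp_seen=0 */
	return 0;
}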
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 511c980d538d..71a9c1cc57f5 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -243,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 	blk_mq_free_request(rq);
 
 	if (effects)
-		nvme_passthru_end(ctrl, effects, req->cmd, status);
+		nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
 }
 
 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
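
Lastly, the RTT compensation added in nvme_keep_alive_end_io() above, reduced to its core: the keep-alive command's round-trip time is subtracted from the next work delay so the worker keeps a steady cadence, clamping at zero (the real code also warns) when the RTT exceeds a whole period. A minimal sketch with invented values:

#include <stdio.h>

static unsigned long next_ka_delay(unsigned long period, unsigned long rtt)
{
	/* Subtract the keep-alive RTT; never schedule in the past. */
	return rtt <= period ? period - rtt : 0;
}

int main(void)
{
	/* period = 1250 jiffies (e.g. KATO 10s, TBKAS, HZ = 250 above). */
	printf("normal rtt: %lu\n", next_ka_delay(1250, 40));	/* 1210 */
	printf("long rtt:   %lu\n", next_ka_delay(1250, 2000));	/* 0 */
	return 0;
}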