author     Linus Torvalds <torvalds@linux-foundation.org>   2024-11-18 16:50:08 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-11-18 16:50:08 -0800
commit     77a0cfafa9af9c0d5b43534eb90d530c189edca1 (patch)
tree       30e513ad7c9732213cede4c00bb1651e1a08b023 /drivers/nvme/host
parent     3d1b536c13f7cd966aa660d1730855b26d01c9ae (diff)
parent     88d47f629313730f26a3b00224d1e1a5e3b7bb79 (diff)
Merge tag 'for-6.13/block-20241118' of git://git.kernel.dk/linux
Pull block updates from Jens Axboe:
- NVMe updates via Keith:
- Use uring_cmd helper (Pavel)
- Host Memory Buffer allocation enhancements (Christoph)
- Target persistent reservation support (Guixin)
- Persistent reservation tracing (Guixin)
- NVMe 2.1 specification support (Keith)
- Rotational Meta Support (Matias, Wang, Keith)
- Volatile cache detection enhancement (Guixin)
- MD updates via Song:
- Maintainers update
- raid5 sync IO fix
- Enhance handling of faulty and blocked devices
- raid5-ppl atomic improvement
- md-bitmap fix
- Support for manually defining embedded partition tables
- Zone append fixes and cleanups
- Stop sending the queued requests in the plug list to the driver's
  ->queue_rqs() handler in reverse order (see the sketch after this list)
- Zoned write plug cleanups
- Clean up disk stats tracking and add support for disk stats for
  passthrough IO
- Add preparatory support for file system atomic writes
- Add lockdep support for queue freezing. It has already found a bunch
  of issues, and some fixes for those are included here. More will be
  coming.
- Fix race between queue stopping/quiescing and IO queueing
- ublk recovery improvements
- Fix ublk mmap for 64k pages
- Various fixes and cleanups
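
To make the plug-list ordering item above concrete, here is a minimal sketch of
the struct rq_list based ->queue_rqs() shape this series converts drivers to;
it mirrors the nvme_queue_rqs() rewrite in the nvme-pci hunk further down. The
example_hw_queue type and the example_prep_rq()/example_submit_batch() helpers
are hypothetical stand-ins for a driver's own queue handling, not real kernel
symbols.

#include <linux/blk-mq.h>

struct example_hw_queue;	/* hypothetical per-hctx driver context */
static bool example_prep_rq(struct example_hw_queue *hwq, struct request *req);
static void example_submit_batch(struct example_hw_queue *hwq,
				 struct rq_list *list);

/*
 * ->queue_rqs() now receives a struct rq_list that is already in issue
 * order; popping from the head and appending with rq_list_add_tail()
 * preserves that order instead of reversing it.
 */
static void example_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list submit_list = { };
	struct rq_list requeue_list = { };
	struct example_hw_queue *hwq = NULL;
	struct request *req;

	while ((req = rq_list_pop(rqlist))) {
		/* flush the pending batch when the hardware context changes */
		if (hwq && hwq != req->mq_hctx->driver_data)
			example_submit_batch(hwq, &submit_list);
		hwq = req->mq_hctx->driver_data;

		if (example_prep_rq(hwq, req))
			rq_list_add_tail(&submit_list, req);
		else
			rq_list_add_tail(&requeue_list, req);
	}

	if (hwq)
		example_submit_batch(hwq, &submit_list);

	/* hand back anything that could not be prepared */
	*rqlist = requeue_list;
}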
* tag 'for-6.13/block-20241118' of git://git.kernel.dk/linux: (118 commits)
MAINTAINERS: Update git tree for mdraid subsystem
block: make struct rq_list available for !CONFIG_BLOCK
block/genhd: use seq_put_decimal_ull for diskstats decimal values
block: don't reorder requests in blk_mq_add_to_batch
block: don't reorder requests in blk_add_rq_to_plug
block: add a rq_list type
block: remove rq_list_move
virtio_blk: reverse request order in virtio_queue_rqs
nvme-pci: reverse request order in nvme_queue_rqs
btrfs: validate queue limits
block: export blk_validate_limits
nvmet: add tracing of reservation commands
nvme: parse reservation commands's action and rtype to string
nvmet: report ns's vwc not present
md/raid5: Increase r5conf.cache_name size
block: remove the ioprio field from struct request
block: remove the write_hint field from struct request
nvme: check ns's volatile write cache not present
nvme: add rotational support
nvme: use command set independent id ns if available
...
Diffstat (limited to 'drivers/nvme/host')
-rw-r--r--   drivers/nvme/host/apple.c        2
-rw-r--r--   drivers/nvme/host/core.c        38
-rw-r--r--   drivers/nvme/host/ioctl.c       21
-rw-r--r--   drivers/nvme/host/multipath.c    2
-rw-r--r--   drivers/nvme/host/nvme.h         1
-rw-r--r--   drivers/nvme/host/pci.c        120
-rw-r--r--   drivers/nvme/host/trace.c       58
-rw-r--r--   drivers/nvme/host/zns.c          2
8 files changed, 172 insertions, 72 deletions
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index b1387dc459a3..7cd1102a8d2c 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -649,7 +649,7 @@ static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
 
 	found = apple_nvme_poll_cq(q, &iob);
 
-	if (!rq_list_empty(iob.req_list))
+	if (!rq_list_empty(&iob.req_list))
 		apple_nvme_complete_batch(&iob);
 
 	return found;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 855b42c92284..1a8d32a4a5c3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -42,6 +42,8 @@ struct nvme_ns_info {
 	bool is_readonly;
 	bool is_ready;
 	bool is_removed;
+	bool is_rotational;
+	bool no_vwc;
 };
 
 unsigned int admin_timeout = 60;
@@ -1639,6 +1641,8 @@ static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
 		info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
 		info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
 		info->is_ready = id->nstat & NVME_NSTAT_NRDY;
+		info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL;
+		info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT;
 	}
 	kfree(id);
 	return ret;
@@ -2185,11 +2189,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 	    ns->head->ids.csi == NVME_CSI_ZNS)
 		nvme_update_zone_info(ns, &lim, &zi);
 
-	if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+	if ((ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) && !info->no_vwc)
 		lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
 	else
 		lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
 
+	if (info->is_rotational)
+		lim.features |= BLK_FEAT_ROTATIONAL;
+
 	/*
 	 * Register a metadata profile for PI, or the plain non-integrity NVMe
 	 * metadata masquerading as Type 0 if supported, otherwise reject block
@@ -3636,6 +3643,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
 	head->ns_id = info->nsid;
 	head->ids = info->ids;
 	head->shared = info->is_shared;
+	head->rotational = info->is_rotational;
 	ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
 	ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
 	kref_init(&head->ref);
@@ -4017,7 +4025,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
 	struct nvme_ns_info info = { .nsid = nsid };
 	struct nvme_ns *ns;
-	int ret;
+	int ret = 1;
 
 	if (nvme_identify_ns_descs(ctrl, &info))
 		return;
@@ -4034,9 +4042,10 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	 * set up a namespace.  If not fall back to the legacy version.
 	 */
 	if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
-	    (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS))
+	    (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS) ||
+	    ctrl->vs >= NVME_VS(2, 0, 0))
 		ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
-	else
+	if (ret > 0)
 		ret = nvme_ns_info_from_identify(ctrl, &info);
 
 	if (info.is_removed)
@@ -4895,7 +4904,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
 	srcu_idx = srcu_read_lock(&ctrl->srcu);
 	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
 				 srcu_read_lock_held(&ctrl->srcu))
-		blk_mq_unfreeze_queue(ns->queue);
+		blk_mq_unfreeze_queue_non_owner(ns->queue);
 	srcu_read_unlock(&ctrl->srcu, srcu_idx);
 	clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
 }
@@ -4940,7 +4949,12 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
 	srcu_idx = srcu_read_lock(&ctrl->srcu);
 	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
 				 srcu_read_lock_held(&ctrl->srcu))
-		blk_freeze_queue_start(ns->queue);
+		/*
+		 * Typical non_owner use case is from pci driver, in which
+		 * start_freeze is called from timeout work function, but
+		 * unfreeze is done in reset work context
+		 */
+		blk_freeze_queue_start_non_owner(ns->queue);
 	srcu_read_unlock(&ctrl->srcu, srcu_idx);
 }
 EXPORT_SYMBOL_GPL(nvme_start_freeze);
@@ -5036,6 +5050,8 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
+	BUILD_BUG_ON(sizeof(struct nvme_endurance_group_log) != 512);
+	BUILD_BUG_ON(sizeof(struct nvme_rotational_media_log) != 512);
 	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
@@ -5044,22 +5060,20 @@ static inline void _nvme_check_size(void)
 
 static int __init nvme_core_init(void)
 {
+	unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS;
 	int result = -ENOMEM;
 
 	_nvme_check_size();
 
-	nvme_wq = alloc_workqueue("nvme-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	nvme_wq = alloc_workqueue("nvme-wq", wq_flags, 0);
 	if (!nvme_wq)
 		goto out;
 
-	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	nvme_reset_wq = alloc_workqueue("nvme-reset-wq", wq_flags, 0);
 	if (!nvme_reset_wq)
 		goto destroy_wq;
 
-	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	nvme_delete_wq = alloc_workqueue("nvme-delete-wq", wq_flags, 0);
 	if (!nvme_delete_wq)
 		goto destroy_reset_wq;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index a96976b22fa7..6522ae16531c 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -114,7 +114,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
+		struct io_uring_cmd *ioucmd, unsigned int flags)
 {
 	struct request_queue *q = req->q;
 	struct nvme_ns *ns = q->queuedata;
@@ -152,8 +152,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		bio_set_dev(bio, bdev);
 
 	if (has_metadata) {
-		ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
-						meta_seed);
+		ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);
 		if (ret)
 			goto out_unmap;
 	}
@@ -170,7 +169,7 @@ out:
 
 static int nvme_submit_user_cmd(struct request_queue *q,
 		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
-		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+		void __user *meta_buffer, unsigned meta_len,
 		u64 *result, unsigned timeout, unsigned int flags)
 {
 	struct nvme_ns *ns = q->queuedata;
@@ -187,7 +186,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	req->timeout = timeout;
 	if (ubuffer && bufflen) {
 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-				meta_len, meta_seed, NULL, flags);
+				meta_len, NULL, flags);
 		if (ret)
 			return ret;
 	}
@@ -268,7 +267,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.lbatm = cpu_to_le16(io.appmask);
 
 	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
-			meta_len, lower_32_bits(io.slba), NULL, 0, 0);
+			meta_len, NULL, 0, 0);
 }
 
 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -323,7 +322,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
 			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
-			cmd.metadata_len, 0, &result, timeout, 0);
+			cmd.metadata_len, &result, timeout, 0);
 
 	if (status >= 0) {
 		if (put_user(result, &ucmd->result))
@@ -370,7 +369,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
 			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
-			cmd.metadata_len, 0, &cmd.result, timeout, flags);
+			cmd.metadata_len, &cmd.result, timeout, flags);
 
 	if (status >= 0) {
 		if (put_user(cmd.result, &ucmd->result))
@@ -402,7 +401,7 @@ struct nvme_uring_cmd_pdu {
 static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
 		struct io_uring_cmd *ioucmd)
 {
-	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+	return io_uring_cmd_to_pdu(ioucmd, struct nvme_uring_cmd_pdu);
 }
 
 static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
@@ -507,7 +506,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	if (d.addr && d.data_len) {
 		ret = nvme_map_user_request(req, d.addr, d.data_len,
 			nvme_to_user_ptr(d.metadata),
-			d.metadata_len, 0, ioucmd, vec);
+			d.metadata_len, ioucmd, vec);
 		if (ret)
 			return ret;
 	}
@@ -635,8 +634,6 @@ static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	int ret;
 
-	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
-
 	ret = nvme_uring_cmd_checks(issue_flags);
 	if (ret)
 		return ret;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 6a15873055b9..f04cfe3fb936 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -635,8 +635,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
 	if (head->ids.csi == NVME_CSI_ZNS)
 		lim.features |= BLK_FEAT_ZONED;
-	else
-		lim.max_zone_append_sectors = 0;
 
 	head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
 	if (IS_ERR(head->disk))
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 093cb423f536..900719c4c70c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -474,6 +474,7 @@ struct nvme_ns_head {
 	struct list_head	entry;
 	struct kref		ref;
 	bool			shared;
+	bool			rotational;
 	bool			passthru_err_log_enabled;
 	struct nvme_effects_log *effects;
 	u64			nuse;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4b9fda0b1d9a..5f2e3ad2cc52 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -141,6 +141,7 @@ struct nvme_dev {
 	struct nvme_ctrl ctrl;
 	u32 last_ps;
 	bool hmb;
+	struct sg_table *hmb_sgt;
 
 	mempool_t *iod_mempool;
@@ -153,6 +154,7 @@ struct nvme_dev {
 	/* host memory buffer support: */
 	u64 host_mem_size;
 	u32 nr_host_mem_descs;
+	u32 host_mem_descs_size;
 	dma_addr_t host_mem_descs_dma;
 	struct nvme_host_mem_buf_desc *host_mem_descs;
 	void **host_mem_desc_bufs;
@@ -902,11 +904,12 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
 {
+	struct request *req;
+
 	spin_lock(&nvmeq->sq_lock);
-	while (!rq_list_empty(*rqlist)) {
-		struct request *req = rq_list_pop(rqlist);
+	while ((req = rq_list_pop(rqlist))) {
 		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
 		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
@@ -929,33 +932,26 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
 	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
 }
 
-static void nvme_queue_rqs(struct request **rqlist)
+static void nvme_queue_rqs(struct rq_list *rqlist)
 {
-	struct request *req, *next, *prev = NULL;
-	struct request *requeue_list = NULL;
-
-	rq_list_for_each_safe(rqlist, req, next) {
-		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-
-		if (!nvme_prep_rq_batch(nvmeq, req)) {
-			/* detach 'req' and add to remainder list */
-			rq_list_move(rqlist, &requeue_list, req, prev);
+	struct rq_list submit_list = { };
+	struct rq_list requeue_list = { };
+	struct nvme_queue *nvmeq = NULL;
+	struct request *req;
 
-			req = prev;
-			if (!req)
-				continue;
-		}
+	while ((req = rq_list_pop(rqlist))) {
+		if (nvmeq && nvmeq != req->mq_hctx->driver_data)
+			nvme_submit_cmds(nvmeq, &submit_list);
+		nvmeq = req->mq_hctx->driver_data;
 
-		if (!next || req->mq_hctx != next->mq_hctx) {
-			/* detach rest of list, and submit */
-			req->rq_next = NULL;
-			nvme_submit_cmds(nvmeq, rqlist);
-			*rqlist = next;
-			prev = NULL;
-		} else
-			prev = req;
+		if (nvme_prep_rq_batch(nvmeq, req))
+			rq_list_add_tail(&submit_list, req);
+		else
+			rq_list_add_tail(&requeue_list, req);
 	}
 
+	if (nvmeq)
+		nvme_submit_cmds(nvmeq, &submit_list);
 	*rqlist = requeue_list;
 }
@@ -1083,7 +1079,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
 	DEFINE_IO_COMP_BATCH(iob);
 
 	if (nvme_poll_cq(nvmeq, &iob)) {
-		if (!rq_list_empty(iob.req_list))
+		if (!rq_list_empty(&iob.req_list))
 			nvme_pci_complete_batch(&iob);
 		return IRQ_HANDLED;
 	}
@@ -1951,7 +1947,7 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 	return ret;
 }
 
-static void nvme_free_host_mem(struct nvme_dev *dev)
+static void nvme_free_host_mem_multi(struct nvme_dev *dev)
 {
 	int i;
 
@@ -1966,18 +1962,54 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 
 	kfree(dev->host_mem_desc_bufs);
 	dev->host_mem_desc_bufs = NULL;
-	dma_free_coherent(dev->dev,
-			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+}
+
+static void nvme_free_host_mem(struct nvme_dev *dev)
+{
+	if (dev->hmb_sgt)
+		dma_free_noncontiguous(dev->dev, dev->host_mem_size,
+				dev->hmb_sgt, DMA_BIDIRECTIONAL);
+	else
+		nvme_free_host_mem_multi(dev);
+
+	dma_free_coherent(dev->dev, dev->host_mem_descs_size,
 			dev->host_mem_descs, dev->host_mem_descs_dma);
 	dev->host_mem_descs = NULL;
+	dev->host_mem_descs_size = 0;
 	dev->nr_host_mem_descs = 0;
 }
 
-static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size)
+{
+	dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size,
+				DMA_BIDIRECTIONAL, GFP_KERNEL, 0);
+	if (!dev->hmb_sgt)
+		return -ENOMEM;
+
+	dev->host_mem_descs = dma_alloc_coherent(dev->dev,
+			sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
+			GFP_KERNEL);
+	if (!dev->host_mem_descs) {
+		dma_free_noncontiguous(dev->dev, dev->host_mem_size,
+				dev->hmb_sgt, DMA_BIDIRECTIONAL);
+		dev->hmb_sgt = NULL;
+		return -ENOMEM;
+	}
+	dev->host_mem_size = size;
+	dev->host_mem_descs_size = sizeof(*dev->host_mem_descs);
+	dev->nr_host_mem_descs = 1;
+
+	dev->host_mem_descs[0].addr =
+		cpu_to_le64(dev->hmb_sgt->sgl->dma_address);
+	dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE);
+	return 0;
+}
+
+static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
 		u32 chunk_size)
 {
 	struct nvme_host_mem_buf_desc *descs;
-	u32 max_entries, len;
+	u32 max_entries, len, descs_size;
 	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
@@ -1990,8 +2022,9 @@ static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
 	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
 		max_entries = dev->ctrl.hmmaxd;
 
-	descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
-			&descs_dma, GFP_KERNEL);
+	descs_size = max_entries * sizeof(*descs);
+	descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma,
+			GFP_KERNEL);
 	if (!descs)
 		goto out;
@@ -2020,6 +2053,7 @@ static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
 	dev->host_mem_size = size;
 	dev->host_mem_descs = descs;
 	dev->host_mem_descs_dma = descs_dma;
+	dev->host_mem_descs_size = descs_size;
 	dev->host_mem_desc_bufs = bufs;
 
 	return 0;
@@ -2034,8 +2068,7 @@ out_free_bufs:
 	kfree(bufs);
 out_free_descs:
-	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
-			descs_dma);
+	dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
 out:
 	dev->host_mem_descs = NULL;
 	return -ENOMEM;
@@ -2047,9 +2080,18 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 	u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
 	u64 chunk_size;
 
+	/*
+	 * If there is an IOMMU that can merge pages, try a virtually
+	 * non-contiguous allocation for a single segment first.
+	 */
+	if (!(PAGE_SIZE & dma_get_merge_boundary(dev->dev))) {
+		if (!nvme_alloc_host_mem_single(dev, preferred))
+			return 0;
+	}
+
 	/* start big and work our way down */
 	for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
-		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
+		if (!nvme_alloc_host_mem_multi(dev, preferred, chunk_size)) {
 			if (!min || dev->host_mem_size >= min)
 				return 0;
 			nvme_free_host_mem(dev);
@@ -2097,8 +2139,10 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 		}
 
 		dev_info(dev->ctrl.device,
-			"allocated %lld MiB host memory buffer.\n",
-			dev->host_mem_size >> ilog2(SZ_1M));
+			"allocated %lld MiB host memory buffer (%u segment%s).\n",
+			dev->host_mem_size >> ilog2(SZ_1M),
+			dev->nr_host_mem_descs,
+			str_plural(dev->nr_host_mem_descs));
 	}
 
 	ret = nvme_set_host_mem(dev, enable_bits);
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 87c437fc070d..ad25ad1e4041 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -228,27 +228,61 @@ static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
 
 static const char *nvme_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
 {
+	static const char * const rrega_strs[] = {
+		[0x00] = "register",
+		[0x01] = "unregister",
+		[0x02] = "replace",
+	};
 	const char *ret = trace_seq_buffer_ptr(p);
 	u8 rrega = cdw10[0] & 0x7;
 	u8 iekey = (cdw10[0] >> 3) & 0x1;
 	u8 ptpl = (cdw10[3] >> 6) & 0x3;
+	const char *rrega_str;
+
+	if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega])
+		rrega_str = rrega_strs[rrega];
+	else
+		rrega_str = "reserved";
 
-	trace_seq_printf(p, "rrega=%u, iekey=%u, ptpl=%u",
-			 rrega, iekey, ptpl);
+	trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u",
+			 rrega, rrega_str, iekey, ptpl);
 	trace_seq_putc(p, 0);
 
 	return ret;
 }
 
+static const char * const rtype_strs[] = {
+	[0x00] = "reserved",
+	[0x01] = "write exclusive",
+	[0x02] = "exclusive access",
+	[0x03] = "write exclusive registrants only",
+	[0x04] = "exclusive access registrants only",
+	[0x05] = "write exclusive all registrants",
+	[0x06] = "exclusive access all registrants",
+};
+
 static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
 {
+	static const char * const racqa_strs[] = {
+		[0x00] = "acquire",
+		[0x01] = "preempt",
+		[0x02] = "preempt and abort",
+	};
 	const char *ret = trace_seq_buffer_ptr(p);
 	u8 racqa = cdw10[0] & 0x7;
 	u8 iekey = (cdw10[0] >> 3) & 0x1;
 	u8 rtype = cdw10[1];
+	const char *racqa_str = "reserved";
+	const char *rtype_str = "reserved";
 
-	trace_seq_printf(p, "racqa=%u, iekey=%u, rtype=%u",
-			 racqa, iekey, rtype);
+	if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa])
+		racqa_str = racqa_strs[racqa];
+
+	if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+		rtype_str = rtype_strs[rtype];
+
+	trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s",
+			 racqa, racqa_str, iekey, rtype, rtype_str);
 	trace_seq_putc(p, 0);
 
 	return ret;
@@ -256,13 +290,25 @@ static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
 
 static const char *nvme_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
 {
+	static const char * const rrela_strs[] = {
+		[0x00] = "release",
+		[0x01] = "clear",
+	};
 	const char *ret = trace_seq_buffer_ptr(p);
 	u8 rrela = cdw10[0] & 0x7;
 	u8 iekey = (cdw10[0] >> 3) & 0x1;
 	u8 rtype = cdw10[1];
+	const char *rrela_str = "reserved";
+	const char *rtype_str = "reserved";
+
+	if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela])
+		rrela_str = rrela_strs[rrela];
+
+	if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+		rtype_str = rtype_strs[rtype];
 
-	trace_seq_printf(p, "rrela=%u, iekey=%u, rtype=%u",
-			 rrela, iekey, rtype);
+	trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s",
+			 rrela, rrela_str, iekey, rtype, rtype_str);
 	trace_seq_putc(p, 0);
 
 	return ret;
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 9a06f9d98cd6..382949e18c6a 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -111,7 +111,7 @@ void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
 	lim->features |= BLK_FEAT_ZONED;
 	lim->max_open_zones = zi->max_open_zones;
 	lim->max_active_zones = zi->max_active_zones;
-	lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+	lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append;
 	lim->chunk_sectors = ns->head->zsze =
 		nvme_lba_to_sect(ns->head, zi->zone_size);
 }