path: root/drivers/nvme/host
Diffstat (limited to 'drivers/nvme/host')
-rw-r--r--  drivers/nvme/host/apple.c      |  2
-rw-r--r--  drivers/nvme/host/core.c       | 92
-rw-r--r--  drivers/nvme/host/fc.c         | 36
-rw-r--r--  drivers/nvme/host/multipath.c  |  2
-rw-r--r--  drivers/nvme/host/nvme.h       | 44
-rw-r--r--  drivers/nvme/host/pci.c        | 38
-rw-r--r--  drivers/nvme/host/sysfs.c      |  2
-rw-r--r--  drivers/nvme/host/tcp.c        | 88
8 files changed, 178 insertions, 126 deletions
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 4319ab50c10d..1de11b722f04 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1251,7 +1251,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
anv->admin_tagset.numa_node = NUMA_NO_NODE;
anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
- anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
anv->admin_tagset.driver_data = &anv->adminq;
ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
@@ -1275,7 +1274,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
anv->tagset.timeout = NVME_IO_TIMEOUT;
anv->tagset.numa_node = NUMA_NO_NODE;
anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
- anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
anv->tagset.driver_data = &anv->ioq;
ret = blk_mq_alloc_tag_set(&anv->tagset);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d169a30eb935..818d4e49aab5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -885,6 +885,12 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_OK;
}
+static void nvme_set_app_tag(struct request *req, struct nvme_command *cmnd)
+{
+ cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag);
+ cmnd->rw.lbatm = cpu_to_le16(0xffff);
+}
+
static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
struct request *req)
{
@@ -1017,18 +1023,17 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
control |= NVME_RW_PRINFO_PRACT;
}
- switch (ns->head->pi_type) {
- case NVME_NS_DPS_PI_TYPE3:
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
control |= NVME_RW_PRINFO_PRCHK_GUARD;
- break;
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- control |= NVME_RW_PRINFO_PRCHK_GUARD |
- NVME_RW_PRINFO_PRCHK_REF;
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) {
+ control |= NVME_RW_PRINFO_PRCHK_REF;
if (op == nvme_cmd_zone_append)
control |= NVME_RW_APPEND_PIREMAP;
nvme_set_ref_tag(ns, cmnd, req);
- break;
+ }
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) {
+ control |= NVME_RW_PRINFO_PRCHK_APP;
+ nvme_set_app_tag(req, cmnd);
}
}
@@ -1695,7 +1700,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
&result);
- if (status < 0)
+
+ /*
+ * It's either a kernel error or the host observed a connection
+ * loss. In either case it's not possible to communicate with the
+ * controller, so enter the error code path.
+ */
+ if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
return status;
/*
@@ -2002,6 +2013,7 @@ static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
lim->atomic_write_hw_boundary = boundary;
lim->atomic_write_hw_unit_min = bs;
lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+ lim->features |= BLK_FEAT_ATOMIC_WRITES;
}
static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
@@ -2034,7 +2046,7 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
* or smaller than a sector size yet, so catch this early and don't
* allow block I/O.
*/
- if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) {
+ if (blk_validate_block_size(bs)) {
bs = (1 << 9);
valid = false;
}
@@ -2126,14 +2138,16 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
struct queue_limits lim;
+ unsigned int memflags;
int ret;
- blk_mq_freeze_queue(ns->disk->queue);
lim = queue_limits_start_update(ns->disk->queue);
nvme_set_ctrl_limits(ns->ctrl, &lim);
+
+ memflags = blk_mq_freeze_queue(ns->disk->queue);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
- blk_mq_unfreeze_queue(ns->disk->queue);
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
/* Hide the block-interface for these devices */
if (!ret)
@@ -2148,6 +2162,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
struct nvme_id_ns_nvm *nvm = NULL;
struct nvme_zone_info zi = {};
struct nvme_id_ns *id;
+ unsigned int memflags;
sector_t capacity;
unsigned lbaf;
int ret;
@@ -2177,12 +2192,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
goto out;
}
- blk_mq_freeze_queue(ns->disk->queue);
+ lim = queue_limits_start_update(ns->disk->queue);
+
+ memflags = blk_mq_freeze_queue(ns->disk->queue);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
-
- lim = queue_limits_start_update(ns->disk->queue);
nvme_set_ctrl_limits(ns->ctrl, &lim);
nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
nvme_set_chunk_sectors(ns, id, &lim);
@@ -2212,7 +2227,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ret = queue_limits_commit_update(ns->disk->queue, &lim);
if (ret) {
- blk_mq_unfreeze_queue(ns->disk->queue);
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
goto out;
}
@@ -2228,7 +2243,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->head->features |= NVME_NS_DEAC;
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
- blk_mq_unfreeze_queue(ns->disk->queue);
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
if (blk_queue_is_zoned(ns->queue)) {
ret = blk_revalidate_disk_zones(ns->disk);
@@ -2284,8 +2299,10 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
if (!ret && nvme_ns_head_multipath(ns->head)) {
struct queue_limits *ns_lim = &ns->disk->queue->limits;
struct queue_limits lim;
+ unsigned int memflags;
- blk_mq_freeze_queue(ns->head->disk->queue);
+ lim = queue_limits_start_update(ns->head->disk->queue);
+ memflags = blk_mq_freeze_queue(ns->head->disk->queue);
/*
* queue_limits mixes values that are the hardware limitations
* for bio splitting with what is the device configuration.
@@ -2301,7 +2318,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
* the splitting limits in to make sure we still obey possibly
* lower limitations of other controllers.
*/
- lim = queue_limits_start_update(ns->head->disk->queue);
lim.logical_block_size = ns_lim->logical_block_size;
lim.physical_block_size = ns_lim->physical_block_size;
lim.io_min = ns_lim->io_min;
@@ -2318,7 +2334,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
- blk_mq_unfreeze_queue(ns->head->disk->queue);
+ blk_mq_unfreeze_queue(ns->head->disk->queue, memflags);
}
return ret;
@@ -3092,7 +3108,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
struct nvme_effects_log **log)
{
- struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
+ struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
int ret;
if (cel)
@@ -3109,7 +3125,11 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
return ret;
}
- xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+ old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ kfree(cel);
+ return xa_err(old);
+ }
out:
*log = cel;
return 0;
@@ -3171,6 +3191,25 @@ free_data:
return ret;
}
+static int nvme_init_effects_log(struct nvme_ctrl *ctrl,
+ u8 csi, struct nvme_effects_log **log)
+{
+ struct nvme_effects_log *effects, *old;
+
+ effects = kzalloc(sizeof(*effects), GFP_KERNEL);
+ if (!effects)
+ return -ENOMEM;
+
+ old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ kfree(effects);
+ return xa_err(old);
+ }
+
+ *log = effects;
+ return 0;
+}
+
static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
{
struct nvme_effects_log *log = ctrl->effects;
@@ -3217,10 +3256,9 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
}
if (!ctrl->effects) {
- ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
- if (!ctrl->effects)
- return -ENOMEM;
- xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
+ ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
+ if (ret < 0)
+ return ret;
}
nvme_init_known_nvm_effects(ctrl);
@@ -4564,7 +4602,6 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
/* Reserved for fabric connect and keep alive */
set->reserved_tags = 2;
set->numa_node = ctrl->numa_node;
- set->flags = BLK_MQ_F_NO_SCHED;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
@@ -4639,7 +4676,6 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
/* Reserved for fabric connect */
set->reserved_tags = 1;
set->numa_node = ctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
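The core.c hunks above all follow the same sequence introduced by the blk_mq_freeze_queue() memflags conversion: start the queue_limits update first, freeze the queue only around the commit, and hand the returned memflags cookie back to blk_mq_unfreeze_queue(). Below is a minimal sketch of that sequence, not part of this patch; the helper name and the 4096-byte value are made up for illustration, while the APIs are the ones visible in the hunks above.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static int example_update_limits(struct gendisk *disk)
{
	struct queue_limits lim;
	unsigned int memflags;
	int ret;

	/* Start the limits update before freezing the queue ... */
	lim = queue_limits_start_update(disk->queue);
	lim.logical_block_size = 4096;	/* hypothetical new value */

	/* ... so the frozen window only covers the commit itself. */
	memflags = blk_mq_freeze_queue(disk->queue);
	ret = queue_limits_commit_update(disk->queue, &lim);

	/* Unfreeze must receive the memflags cookie freeze returned. */
	blk_mq_unfreeze_queue(disk->queue, memflags);
	return ret;
}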
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b81af7919e94..f4f1866fbd5b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -16,7 +16,6 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
-#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
@@ -782,11 +781,19 @@ restart:
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
+ enum nvme_ctrl_state state;
+ unsigned long flags;
+
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: controller connectivity lost. Awaiting "
"Reconnect", ctrl->cnum);
- switch (nvme_ctrl_state(&ctrl->ctrl)) {
+ spin_lock_irqsave(&ctrl->lock, flags);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
+ state = nvme_ctrl_state(&ctrl->ctrl);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ switch (state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
/*
@@ -2080,7 +2087,8 @@ done:
nvme_fc_complete_rq(rq);
check_error:
- if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ if (terminate_assoc &&
+ nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING)
queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
@@ -2534,6 +2542,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
/*
* if an error (io timeout, etc) while (re)connecting, the remote
* port requested terminating of the association (disconnect_ls)
@@ -2541,9 +2551,8 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
* the controller. Abort any ios on the association and let the
* create_association error path resolve things.
*/
- if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ if (state == NVME_CTRL_CONNECTING) {
__nvme_fc_abort_outstanding_ios(ctrl, true);
- set_bit(ASSOC_FAILED, &ctrl->flags);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport error during (re)connect\n",
ctrl->cnum);
@@ -2551,7 +2560,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
}
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
- if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ if (state != NVME_CTRL_LIVE)
return;
dev_warn(ctrl->ctrl.device,
@@ -3168,12 +3177,18 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
else
ret = nvme_fc_recreate_io_queues(ctrl);
}
- if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
- ret = -EIO;
if (ret)
goto out_term_aen_ops;
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (!test_bit(ASSOC_FAILED, &ctrl->flags))
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ else
+ ret = -EIO;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ if (ret)
+ goto out_term_aen_ops;
ctrl->ctrl.nr_reconnects = 0;
@@ -3579,8 +3594,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
- !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
goto fail_ctrl;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a85d190942bd..2a7635565083 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -60,7 +60,7 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry)
if (h->disk)
- blk_mq_unfreeze_queue(h->disk->queue);
+ blk_mq_unfreeze_queue_nomemrestore(h->disk->queue);
}
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 611b02c8a8b3..7be92d07430e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -173,6 +173,11 @@ enum nvme_quirks {
* MSI (but not MSI-X) interrupts are broken and never fire.
*/
NVME_QUIRK_BROKEN_MSI = (1 << 21),
+
+ /*
+ * Align dma pool segment size to 512 bytes
+ */
+ NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22),
};
/*
@@ -1182,43 +1187,4 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
-#ifdef CONFIG_NVME_VERBOSE_ERRORS
-const char *nvme_get_error_status_str(u16 status);
-const char *nvme_get_opcode_str(u8 opcode);
-const char *nvme_get_admin_opcode_str(u8 opcode);
-const char *nvme_get_fabrics_opcode_str(u8 opcode);
-#else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const char *nvme_get_error_status_str(u16 status)
-{
- return "I/O Error";
-}
-static inline const char *nvme_get_opcode_str(u8 opcode)
-{
- return "I/O Cmd";
-}
-static inline const char *nvme_get_admin_opcode_str(u8 opcode)
-{
- return "Admin Cmd";
-}
-
-static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
-{
- return "Fabrics Cmd";
-}
-#endif /* CONFIG_NVME_VERBOSE_ERRORS */
-
-static inline const char *nvme_opcode_str(int qid, u8 opcode)
-{
- return qid ? nvme_get_opcode_str(opcode) :
- nvme_get_admin_opcode_str(opcode);
-}
-
-static inline const char *nvme_fabrics_opcode_str(
- int qid, const struct nvme_command *cmd)
-{
- if (nvme_is_fabrics(cmd))
- return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
-
- return nvme_opcode_str(qid, cmd->common.opcode);
-}
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1a5ba80f1811..9197a5b173fd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -8,7 +8,6 @@
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
@@ -373,7 +372,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
/*
* Ensure that the doorbell is updated before reading the event
* index from memory. The controller needs to provide similar
- * ordering to ensure the envent index is updated before reading
+ * ordering to ensure the event index is updated before reading
* the doorbell.
*/
mb();
@@ -463,7 +462,7 @@ static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL && offset)
- blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
+ blk_mq_map_hw_queues(map, dev->dev, offset);
else
blk_mq_map_queues(map);
qoff += map->nr_queues;
@@ -1148,13 +1147,13 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
}
}
-static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
- struct io_comp_batch *iob)
+static inline bool nvme_poll_cq(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob)
{
- int found = 0;
+ bool found = false;
while (nvme_cqe_pending(nvmeq)) {
- found++;
+ found = true;
/*
* load-load control dependency between phase and the rest of
* the cqe requires a full read memory barrier
@@ -2086,8 +2085,8 @@ static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size)
sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
GFP_KERNEL);
if (!dev->host_mem_descs) {
- dma_free_noncontiguous(dev->dev, dev->host_mem_size,
- dev->hmb_sgt, DMA_BIDIRECTIONAL);
+ dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt,
+ DMA_BIDIRECTIONAL);
dev->hmb_sgt = NULL;
return -ENOMEM;
}
@@ -2154,14 +2153,6 @@ static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
return 0;
out_free_bufs:
- while (--i >= 0) {
- size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
-
- dma_free_attrs(dev->dev, size, bufs[i],
- le64_to_cpu(descs[i].addr),
- DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
- }
-
kfree(bufs);
out_free_descs:
dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
@@ -2834,15 +2825,20 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
+ size_t small_align = 256;
+
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
NVME_CTRL_PAGE_SIZE,
NVME_CTRL_PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
+ if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
+ small_align = 512;
+
/* Optimisation for I/Os between 4k and 128k */
dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
- 256, 256, 0);
+ 256, small_align, 0);
if (!dev->prp_small_pool) {
dma_pool_destroy(dev->prp_page_pool);
return -ENOMEM;
@@ -3143,7 +3139,9 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
* because of high power consumption (> 2 Watt) in s2idle
* sleep. Only some boards with Intel CPU are affected.
*/
- if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
+ dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
@@ -3607,7 +3605,7 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
- .driver_data = NVME_QUIRK_QDEPTH_ONE },
+ .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
{ PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_BOGUS_NID, },
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index b68a9e5f1ea3..3a41b9ab0f13 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -792,7 +792,7 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-const struct attribute_group nvme_tls_attrs_group = {
+static const struct attribute_group nvme_tls_attrs_group = {
.attrs = nvme_tls_attrs,
.is_visible = nvme_tls_attrs_are_visible,
};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 28c76a3e1bd2..841238f38fdd 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -54,6 +54,8 @@ MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
#endif
+static atomic_t nvme_tcp_cpu_queues[NR_CPUS];
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
* sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
@@ -127,6 +129,7 @@ enum nvme_tcp_queue_flags {
NVME_TCP_Q_ALLOCATED = 0,
NVME_TCP_Q_LIVE = 1,
NVME_TCP_Q_POLLING = 2,
+ NVME_TCP_Q_IO_CPU_SET = 3,
};
enum nvme_tcp_recv_state {
@@ -1562,23 +1565,56 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
ctrl->io_queues[HCTX_TYPE_POLL];
}
+/**
+ * Track the number of queues assigned to each cpu using a global per-cpu
+ * counter and select the least used cpu from the mq_map. Our goal is to spread
+ * different controllers' I/O threads across different cpu cores.
+ *
+ * Note that the accounting is not 100% perfect, but it doesn't need to be:
+ * we simply make a best effort to select the best candidate cpu core that we
+ * find at any given point.
+ */
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
- int qid = nvme_tcp_queue_id(queue);
- int n = 0;
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int qid = nvme_tcp_queue_id(queue) - 1;
+ unsigned int *mq_map = NULL;
+ int cpu, min_queues = INT_MAX, io_cpu;
+
+ if (wq_unbound)
+ goto out;
if (nvme_tcp_default_queue(queue))
- n = qid - 1;
+ mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
else if (nvme_tcp_read_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+ mq_map = set->map[HCTX_TYPE_READ].mq_map;
else if (nvme_tcp_poll_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
- ctrl->io_queues[HCTX_TYPE_READ] - 1;
- if (wq_unbound)
- queue->io_cpu = WORK_CPU_UNBOUND;
- else
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ mq_map = set->map[HCTX_TYPE_POLL].mq_map;
+
+ if (WARN_ON(!mq_map))
+ goto out;
+
+ /* Search for the least used cpu from the mq_map */
+ io_cpu = WORK_CPU_UNBOUND;
+ for_each_online_cpu(cpu) {
+ int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
+
+ if (mq_map[cpu] != qid)
+ continue;
+ if (num_queues < min_queues) {
+ io_cpu = cpu;
+ min_queues = num_queues;
+ }
+ }
+ if (io_cpu != WORK_CPU_UNBOUND) {
+ queue->io_cpu = io_cpu;
+ atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
+ set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
+ }
+out:
+ dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
+ qid, queue->io_cpu);
}
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
@@ -1722,7 +1758,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
queue->sock->sk->sk_allocation = GFP_ATOMIC;
queue->sock->sk->sk_use_task_frag = false;
- nvme_tcp_set_queue_io_cpu(queue);
+ queue->io_cpu = WORK_CPU_UNBOUND;
queue->request = NULL;
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
@@ -1844,6 +1880,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
return;
+ if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags))
+ atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]);
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
@@ -1878,9 +1917,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
nvme_tcp_init_recv_ctx(queue);
nvme_tcp_setup_sock_ops(queue);
- if (idx)
+ if (idx) {
+ nvme_tcp_set_queue_io_cpu(queue);
ret = nvmf_connect_io_queue(nctrl, idx);
- else
+ } else
ret = nvmf_connect_admin_queue(nctrl);
if (!ret) {
@@ -2024,14 +2064,6 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
return __nvme_tcp_alloc_io_queues(ctrl);
}
-static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
-{
- nvme_tcp_stop_io_queues(ctrl);
- if (remove)
- nvme_remove_io_tag_set(ctrl);
- nvme_tcp_free_io_queues(ctrl);
-}
-
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
int ret, nr_queues;
@@ -2176,9 +2208,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
- if (remove)
+ if (remove) {
nvme_unquiesce_io_queues(ctrl);
- nvme_tcp_destroy_io_queues(ctrl, remove);
+ nvme_remove_io_tag_set(ctrl);
+ }
+ nvme_tcp_free_io_queues(ctrl);
}
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
@@ -2267,7 +2301,9 @@ destroy_io:
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
- nvme_tcp_destroy_io_queues(ctrl, new);
+ if (new)
+ nvme_remove_io_tag_set(ctrl);
+ nvme_tcp_free_io_queues(ctrl);
}
destroy_admin:
nvme_stop_keep_alive(ctrl);
@@ -2849,6 +2885,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
static int __init nvme_tcp_init_module(void)
{
unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+ int cpu;
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
@@ -2866,6 +2903,9 @@ static int __init nvme_tcp_init_module(void)
if (!nvme_tcp_wq)
return -ENOMEM;
+ for_each_possible_cpu(cpu)
+ atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
+
nvmf_register_transport(&nvme_tcp_transport);
return 0;
}
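The selection policy that nvme_tcp_set_queue_io_cpu() implements above can be illustrated outside the kernel. The sketch below is a simplified userspace model, assuming a fixed NR_CPUS, plain int counters instead of atomic_t, and a hypothetical pick_io_cpu() helper; it only demonstrates the least-used-CPU walk over the mq_map, not the real locking, hotplug, or wq_unbound handling.

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 8

static int cpu_queues[NR_CPUS];	/* stands in for nvme_tcp_cpu_queues[] */

static int pick_io_cpu(const int *mq_map, int qid)
{
	int cpu, io_cpu = -1, min_queues = INT_MAX;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (mq_map[cpu] != qid)
			continue;	/* CPU not mapped to this hw queue */
		if (cpu_queues[cpu] < min_queues) {
			io_cpu = cpu;
			min_queues = cpu_queues[cpu];
		}
	}
	if (io_cpu >= 0)
		cpu_queues[io_cpu]++;	/* account the newly placed queue */
	return io_cpu;
}

int main(void)
{
	/* two hw queues, each mapped to four CPUs */
	const int mq_map[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

	printf("queue 0 -> cpu %d\n", pick_io_cpu(mq_map, 0));
	printf("queue 0 -> cpu %d\n", pick_io_cpu(mq_map, 0));
	printf("queue 1 -> cpu %d\n", pick_io_cpu(mq_map, 1));
	return 0;
}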