author		Palmer Dabbelt <palmer@rivosinc.com>	2024-11-11 07:35:09 -0800
committer	Palmer Dabbelt <palmer@rivosinc.com>	2024-11-11 07:35:09 -0800
commit		64f7b77f0bd9271861ed9e410e9856b6b0b21c48 (patch)
tree		e769c7d3d612098540221cfe79b6575f20db3a3c /drivers/nvme
parent		075fde581896bde171d43a994df8617b9728eae7 (diff)
parent		ab83647fadae2f1f723119dc066b39a461d6d288 (diff)
Merge patch series "Zacas/Zabha support and qspinlocks"
Alexandre Ghiti <alexghiti@rivosinc.com> says:

This implements the [cmp]xchgXX() macros using the Zacas and Zabha
extensions and finally uses those newly introduced macros to add support
for qspinlocks: note that this implementation of qspinlocks satisfies the
forward progress guarantee. It also uses Ziccrse to provide the qspinlock
implementation.

Thanks to Guo and Leonardo for their work!

* b4-shazam-merge: (13 commits)
  riscv: Add qspinlock support
  dt-bindings: riscv: Add Ziccrse ISA extension description
  riscv: Add ISA extension parsing for Ziccrse
  asm-generic: ticket-lock: Add separate ticket-lock.h
  asm-generic: ticket-lock: Reuse arch_spinlock_t of qspinlock
  riscv: Implement xchg8/16() using Zabha
  riscv: Implement arch_cmpxchg128() using Zacas
  riscv: Improve zacas fully-ordered cmpxchg()
  riscv: Implement cmpxchg8/16() using Zabha
  dt-bindings: riscv: Add Zabha ISA extension description
  riscv: Implement cmpxchg32/64() using Zacas
  riscv: Do not fail to build on byte/halfword operations with Zawrs
  riscv: Move cpufeature.h macros into their own header

Link: https://lore.kernel.org/r/20241103145153.105097-1-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
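For context, a minimal sketch of what a Zacas-backed 32-bit compare-and-swap
can look like on RISC-V — an illustration of the technique the series uses,
not the exact macros it merges. amocas.w takes the expected value in rd,
stores the new value on a match, and returns the prior memory contents in rd:

	static inline unsigned int cmpxchg32_zacas(unsigned int *ptr,
						   unsigned int old,
						   unsigned int new)
	{
		unsigned int ret = old;

		/* .aqrl gives acquire+release ordering on the operation */
		asm volatile("amocas.w.aqrl %0, %2, %1"
			     : "+&r" (ret), "+A" (*ptr)
			     : "r" (new)
			     : "memory");
		return ret;	/* equals 'old' iff the swap happened */
	}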
Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/common/auth.c	|  2
-rw-r--r--	drivers/nvme/host/auth.c	|  2
-rw-r--r--	drivers/nvme/host/core.c	| 43
-rw-r--r--	drivers/nvme/host/hwmon.c	|  2
-rw-r--r--	drivers/nvme/host/multipath.c	| 40
-rw-r--r--	drivers/nvme/host/nvme.h	|  1
-rw-r--r--	drivers/nvme/host/pci.c		| 19
-rw-r--r--	drivers/nvme/host/pr.c		|  2
-rw-r--r--	drivers/nvme/host/rdma.c	|  2
-rw-r--r--	drivers/nvme/host/tcp.c		|  7
-rw-r--r--	drivers/nvme/host/trace.c	|  2
-rw-r--r--	drivers/nvme/target/admin-cmd.c	|  2
-rw-r--r--	drivers/nvme/target/auth.c	|  2
-rw-r--r--	drivers/nvme/target/loop.c	| 13
-rw-r--r--	drivers/nvme/target/passthru.c	|  6
-rw-r--r--	drivers/nvme/target/rdma.c	| 58
-rw-r--r--	drivers/nvme/target/trace.c	|  2
17 files changed, 124 insertions, 81 deletions
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index a3455f1d67fa..9b7126e1a19d 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -8,7 +8,7 @@
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/scatterlist.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include <linux/nvme.h>
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 371e14f0a203..5ea0e21709da 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -6,7 +6,7 @@
#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ba6508455e18..84cb859a911d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -22,7 +22,7 @@
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <linux/ratelimit.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "nvme.h"
#include "fabrics.h"
@@ -1292,14 +1292,12 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}
-static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
- blk_status_t status)
+static void nvme_keep_alive_finish(struct request *rq,
+ blk_status_t status, struct nvme_ctrl *ctrl)
{
- struct nvme_ctrl *ctrl = rq->end_io_data;
- unsigned long flags;
- bool startka = false;
unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
unsigned long delay = nvme_keep_alive_work_period(ctrl);
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
/*
* Subtract off the keepalive RTT so nvme_keep_alive_work runs
@@ -1313,25 +1311,17 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
delay = 0;
}
- blk_mq_free_request(rq);
-
if (status) {
dev_err(ctrl->device,
"failed nvme_keep_alive_end_io error=%d\n",
status);
- return RQ_END_IO_NONE;
+ return;
}
ctrl->ka_last_check_time = jiffies;
ctrl->comp_seen = false;
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->state == NVME_CTRL_LIVE ||
- ctrl->state == NVME_CTRL_CONNECTING)
- startka = true;
- spin_unlock_irqrestore(&ctrl->lock, flags);
- if (startka)
+ if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
- return RQ_END_IO_NONE;
}
static void nvme_keep_alive_work(struct work_struct *work)
@@ -1340,6 +1330,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
struct nvme_ctrl, ka_work);
bool comp_seen = ctrl->comp_seen;
struct request *rq;
+ blk_status_t status;
ctrl->ka_last_check_time = jiffies;
@@ -1362,9 +1353,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
nvme_init_request(rq, &ctrl->ka_cmd);
rq->timeout = ctrl->kato * HZ;
- rq->end_io = nvme_keep_alive_end_io;
- rq->end_io_data = ctrl;
- blk_execute_rq_nowait(rq, false);
+ status = blk_execute_rq(rq, false);
+ nvme_keep_alive_finish(rq, status, ctrl);
+ blk_mq_free_request(rq);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -2458,8 +2449,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
else
ctrl->ctrl_config = NVME_CC_CSS_NVM;
- if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
- ctrl->ctrl_config |= NVME_CC_CRIME;
+ /*
+ * Setting CRIME results in CSTS.RDY before the media is ready. This
+ * makes it possible for media related commands to return the error
+ * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is
+ * restructured to handle retries, disable CC.CRIME.
+ */
+ ctrl->ctrl_config &= ~NVME_CC_CRIME;
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
@@ -2489,10 +2485,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
* devices are known to get this wrong. Use the larger of the
* two values.
*/
- if (ctrl->ctrl_config & NVME_CC_CRIME)
- ready_timeout = NVME_CRTO_CRIMT(crto);
- else
- ready_timeout = NVME_CRTO_CRWMT(crto);
+ ready_timeout = NVME_CRTO_CRWMT(crto);
if (ready_timeout < timeout)
dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
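The keep-alive hunks above drop the asynchronous ->end_io callback in favor
of synchronous execution from the ka_work worker. A sketch of the resulting
flow, using only the calls shown in the diff (blk_execute_rq() blocks until
the request completes and returns its blk_status_t):

	/* inside nvme_keep_alive_work(), after nvme_init_request() */
	rq->timeout = ctrl->kato * HZ;
	status = blk_execute_rq(rq, false);	  /* blocks; not at head */
	nvme_keep_alive_finish(rq, status, ctrl); /* requeues ka_work if LIVE */
	blk_mq_free_request(rq);		  /* worker owns the request */

Because the completion now runs in process context, the old spinlocked state
check collapses to a plain nvme_ctrl_state() read.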
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 8df73a0b3980..89a1a1043d63 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -6,7 +6,7 @@
#include <linux/hwmon.h>
#include <linux/units.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "nvme.h"
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 48e7a8906d01..6a15873055b9 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -431,7 +431,6 @@ static bool nvme_available_path(struct nvme_ns_head *head)
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
- /* fallthru */
return true;
default:
break;
@@ -580,6 +579,20 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
return ret;
}
+static void nvme_partition_scan_work(struct work_struct *work)
+{
+ struct nvme_ns_head *head =
+ container_of(work, struct nvme_ns_head, partition_scan_work);
+
+ if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+ &head->disk->state)))
+ return;
+
+ mutex_lock(&head->disk->open_mutex);
+ bdev_disk_changed(head->disk, false);
+ mutex_unlock(&head->disk->open_mutex);
+}
+
static void nvme_requeue_work(struct work_struct *work)
{
struct nvme_ns_head *head =
@@ -606,6 +619,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
bio_list_init(&head->requeue_list);
spin_lock_init(&head->requeue_lock);
INIT_WORK(&head->requeue_work, nvme_requeue_work);
+ INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
/*
* Add a multipath node if the subsystem supports multiple controllers.
@@ -629,6 +643,16 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
return PTR_ERR(head->disk);
head->disk->fops = &nvme_ns_head_ops;
head->disk->private_data = head;
+
+ /*
+ * We need to suppress the partition scan from occurring within the
+ * controller's scan_work context. If a path error occurs here, the IO
+ * will wait until a path becomes available or all paths are torn down,
+ * but that action also occurs within scan_work, so it would deadlock.
+ * Defer the partition scan to a different context that does not block
+ * scan_work.
+ */
+ set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
return 0;
@@ -655,6 +679,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
return;
}
nvme_add_ns_head_cdev(head);
+ kblockd_schedule_work(&head->partition_scan_work);
}
mutex_lock(&head->lock);
@@ -974,14 +999,14 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
return;
if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
+ /*
+ * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+ * to allow multipath to fail all I/O.
+ */
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
del_gendisk(head->disk);
}
- /*
- * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
- * to allow multipath to fail all I/O.
- */
- synchronize_srcu(&head->srcu);
- kblockd_schedule_work(&head->requeue_work);
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -991,6 +1016,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
+ flush_work(&head->partition_scan_work);
put_disk(head->disk);
}
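Taken together, the multipath hunks implement a deferred-rescan pattern; a
condensed sketch using the same calls as above, shown in lifecycle order:

	/* at allocation: keep scan_work from scanning partitions itself */
	set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);

	/* when a path goes live: rescan from a kblockd work item instead */
	kblockd_schedule_work(&head->partition_scan_work);

	/* in the work item: drop the suppression, rescan under open_mutex */
	if (test_and_clear_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state)) {
		mutex_lock(&head->disk->open_mutex);
		bdev_disk_changed(head->disk, false);
		mutex_unlock(&head->disk->open_mutex);
	}

This keeps partition-scan I/O out of scan_work, so an I/O that must wait for
a usable path can no longer deadlock against the very context that would
provide one.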
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 313a4f978a2c..093cb423f536 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -494,6 +494,7 @@ struct nvme_ns_head {
struct bio_list requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
+ struct work_struct partition_scan_work;
struct mutex lock;
unsigned long flags;
#define NVME_NSHEAD_DISK_LIVE 0
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7990c3f22ecf..4b9fda0b1d9a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2506,17 +2506,29 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
return 1;
}
-static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+static bool nvme_pci_update_nr_queues(struct nvme_dev *dev)
{
if (!dev->ctrl.tagset) {
nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
- return;
+ return true;
+ }
+
+ /* Give up if we are racing with nvme_dev_disable() */
+ if (!mutex_trylock(&dev->shutdown_lock))
+ return false;
+
+ /* Check if nvme_dev_disable() has been executed already */
+ if (!dev->online_queues) {
+ mutex_unlock(&dev->shutdown_lock);
+ return false;
}
blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
/* free previously allocated queues that are no longer usable */
nvme_free_queues(dev, dev->online_queues);
+ mutex_unlock(&dev->shutdown_lock);
+ return true;
}
static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2797,7 +2809,8 @@ static void nvme_reset_work(struct work_struct *work)
nvme_dbbuf_set(dev);
nvme_unquiesce_io_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_pci_update_nr_queues(dev);
+ if (!nvme_pci_update_nr_queues(dev))
+ goto out;
nvme_unfreeze(&dev->ctrl);
} else {
dev_warn(dev->ctrl.device, "IO queues lost\n");
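The pci.c change turns nvme_pci_update_nr_queues() into a predicate so the
reset path can notice it lost a race with nvme_dev_disable(). The guard
reduces to a standard trylock-then-recheck sketch:

	if (!mutex_trylock(&dev->shutdown_lock))
		return false;		/* disable is running; give up */
	if (!dev->online_queues) {
		mutex_unlock(&dev->shutdown_lock);
		return false;		/* disable already ran */
	}
	/* ... safe to resize the tagset and free unused queues here ... */
	mutex_unlock(&dev->shutdown_lock);
	return true;

On a false return, nvme_reset_work() bails out (goto out) rather than
unfreezing queues that no longer exist.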
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index 7347ddf85f00..dc7922f22600 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -5,7 +5,7 @@
*/
#include <linux/blkdev.h>
#include <linux/pr.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "nvme.h"
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c8fd0e8f0237..24a2759798d0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -18,7 +18,7 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 89c44413c593..3e416af2659f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2644,10 +2644,11 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
len = nvmf_get_address(ctrl, buf, size);
+ if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ return len;
+
mutex_lock(&queue->queue_lock);
- if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
- goto done;
ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
if (ret > 0) {
if (len > 0)
@@ -2655,7 +2656,7 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
(len) ? "," : "", &src_addr);
}
-done:
+
mutex_unlock(&queue->queue_lock);
return len;
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 0288315f0050..87c437fc070d 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -4,7 +4,7 @@
* Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "trace.h"
static const char *nvme_trace_delete_sq(struct trace_seq *p, u8 *cdw10)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 954d4c074770..081f0473cd9e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -9,7 +9,7 @@
#include <linux/part_stat.h>
#include <generated/utsrelease.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "nvmet.h"
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 7897d02c681d..29f8639cfe7f 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -15,7 +15,7 @@
#include <linux/ctype.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "nvmet.h"
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index e32790d8fc26..a9d112d34d4f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -265,6 +265,13 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return;
+ /*
+ * It's possible that some requests might have been added
+ * after admin queue is stopped/quiesced. So now start the
+ * queue to flush these requests to the completion.
+ */
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
nvme_remove_admin_tag_set(&ctrl->ctrl);
}
@@ -297,6 +304,12 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}
ctrl->ctrl.queue_count = 1;
+ /*
+ * It's possible that some requests might have been added
+ * after io queue is stopped/quiesced. So now start the
+ * queue to flush these requests to the completion.
+ */
+ nvme_unquiesce_io_queues(&ctrl->ctrl);
}
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
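Both loop.c hunks add the same teardown step: restart (unquiesce) a queue
that was quiesced so that any requests queued in the meantime are flushed to
completion instead of being stranded when the tag set goes away. For the
admin queue, the ordering from the hunk above is:

	nvme_unquiesce_admin_queue(&ctrl->ctrl);	/* flush late requests */
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	nvme_remove_admin_tag_set(&ctrl->ctrl);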
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 24d0e2418d2e..0f9b280c438d 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -535,10 +535,6 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
break;
case nvme_admin_identify:
switch (req->cmd->identify.cns) {
- case NVME_ID_CNS_CTRL:
- req->execute = nvmet_passthru_execute_cmd;
- req->p.use_workqueue = true;
- return NVME_SC_SUCCESS;
case NVME_ID_CNS_CS_CTRL:
switch (req->cmd->identify.csi) {
case NVME_CSI_ZNS:
@@ -547,7 +543,9 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
return NVME_SC_SUCCESS;
}
return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+ case NVME_ID_CNS_CTRL:
case NVME_ID_CNS_NS:
+ case NVME_ID_CNS_NS_DESC_LIST:
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1b6264fa5803..1afd93026f9b 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -16,7 +16,7 @@
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
@@ -39,6 +39,8 @@
#define NVMET_RDMA_BACKLOG 128
+#define NVMET_RDMA_DISCRETE_RSP_TAG -1
+
struct nvmet_rdma_srq;
struct nvmet_rdma_cmd {
@@ -75,7 +77,7 @@ struct nvmet_rdma_rsp {
u32 invalidate_rkey;
struct list_head wait_list;
- struct list_head free_list;
+ int tag;
};
enum nvmet_rdma_queue_state {
@@ -98,8 +100,7 @@ struct nvmet_rdma_queue {
struct nvmet_sq nvme_sq;
struct nvmet_rdma_rsp *rsps;
- struct list_head free_rsps;
- spinlock_t rsps_lock;
+ struct sbitmap rsp_tags;
struct nvmet_rdma_cmd *cmds;
struct work_struct release_work;
@@ -172,7 +173,8 @@ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
- struct nvmet_rdma_rsp *r);
+ struct nvmet_rdma_rsp *r,
+ int tag);
static const struct nvmet_fabrics_ops nvmet_rdma_ops;
@@ -210,15 +212,12 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
- struct nvmet_rdma_rsp *rsp;
- unsigned long flags;
+ struct nvmet_rdma_rsp *rsp = NULL;
+ int tag;
- spin_lock_irqsave(&queue->rsps_lock, flags);
- rsp = list_first_entry_or_null(&queue->free_rsps,
- struct nvmet_rdma_rsp, free_list);
- if (likely(rsp))
- list_del(&rsp->free_list);
- spin_unlock_irqrestore(&queue->rsps_lock, flags);
+ tag = sbitmap_get(&queue->rsp_tags);
+ if (tag >= 0)
+ rsp = &queue->rsps[tag];
if (unlikely(!rsp)) {
int ret;
@@ -226,13 +225,12 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
if (unlikely(!rsp))
return NULL;
- ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+ ret = nvmet_rdma_alloc_rsp(queue->dev, rsp,
+ NVMET_RDMA_DISCRETE_RSP_TAG);
if (unlikely(ret)) {
kfree(rsp);
return NULL;
}
-
- rsp->allocated = true;
}
return rsp;
@@ -241,17 +239,13 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
- unsigned long flags;
-
- if (unlikely(rsp->allocated)) {
+ if (unlikely(rsp->tag == NVMET_RDMA_DISCRETE_RSP_TAG)) {
nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
kfree(rsp);
return;
}
- spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
- list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
- spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
+ sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag);
}
static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
@@ -404,7 +398,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
}
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
- struct nvmet_rdma_rsp *r)
+ struct nvmet_rdma_rsp *r, int tag)
{
/* NVMe CQE / RDMA SEND */
r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
@@ -432,6 +426,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
r->read_cqe.done = nvmet_rdma_read_data_done;
/* Data Out / RDMA WRITE */
r->write_cqe.done = nvmet_rdma_write_data_done;
+ r->tag = tag;
return 0;
@@ -454,21 +449,23 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
struct nvmet_rdma_device *ndev = queue->dev;
int nr_rsps = queue->recv_queue_size * 2;
- int ret = -EINVAL, i;
+ int ret = -ENOMEM, i;
+
+ if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL,
+ NUMA_NO_NODE, false, true))
+ goto out;
queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
GFP_KERNEL);
if (!queue->rsps)
- goto out;
+ goto out_free_sbitmap;
for (i = 0; i < nr_rsps; i++) {
struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
- ret = nvmet_rdma_alloc_rsp(ndev, rsp);
+ ret = nvmet_rdma_alloc_rsp(ndev, rsp, i);
if (ret)
goto out_free;
-
- list_add_tail(&rsp->free_list, &queue->free_rsps);
}
return 0;
@@ -477,6 +474,8 @@ out_free:
while (--i >= 0)
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps);
+out_free_sbitmap:
+ sbitmap_free(&queue->rsp_tags);
out:
return ret;
}
@@ -489,6 +488,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
for (i = 0; i < nr_rsps; i++)
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps);
+ sbitmap_free(&queue->rsp_tags);
}
static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
@@ -1447,8 +1447,6 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
INIT_LIST_HEAD(&queue->rsp_wait_list);
INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
spin_lock_init(&queue->rsp_wr_wait_lock);
- INIT_LIST_HEAD(&queue->free_rsps);
- spin_lock_init(&queue->rsps_lock);
INIT_LIST_HEAD(&queue->queue_list);
queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
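The target/rdma.c conversion replaces the spinlock-protected free list with
an sbitmap tag allocator. A minimal standalone sketch of the sbitmap calls
involved, matching their use in the patch (init_tags/use_tag are hypothetical
names for illustration):

	#include <linux/sbitmap.h>

	static struct sbitmap tags;

	static int init_tags(unsigned int depth)
	{
		/* shift = -1 lets sbitmap pick the word granularity;
		 * round_robin = false, alloc_hint = true, as in the patch */
		return sbitmap_init_node(&tags, depth, -1, GFP_KERNEL,
					 NUMA_NO_NODE, false, true);
	}

	static void use_tag(void)
	{
		int tag = sbitmap_get(&tags);	/* -1 when exhausted */

		if (tag < 0)
			return;			/* fall back to kmalloc path */
		/* ... rsp = &rsps[tag]; do work ... */
		sbitmap_clear_bit(&tags, tag);	/* release the tag */
	}

Since sbitmap_get() returns -1 on exhaustion, the patch reuses that value
(NVMET_RDMA_DISCRETE_RSP_TAG) to mark responses allocated outside the
pre-sized pool, replacing the old rsp->allocated flag.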
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 8d1806a82887..9a3548179a8e 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -4,7 +4,7 @@
* Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "trace.h"
static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10)