path: root/drivers/nvme/host/rdma.c
author		Mark Brown <broonie@kernel.org>	2020-09-17 18:25:39 +0100
committer	Mark Brown <broonie@kernel.org>	2020-09-17 18:25:39 +0100
commit		0199f866615921ddc5d22fbbab7510e8b403d40c (patch)
tree		c8eb5b58efd4b22ef136f29ca9f62564ec57c1b2 /drivers/nvme/host/rdma.c
parent		2b37a18b58ed12b711591ec54c2b2a0e2068cf6e (diff)
parent		b014e9fae7e7de4329a7092ade4256982c5ce974 (diff)
Merge series "Support ROHM BD9576MUF and BD9573MUF PMICs" from Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>:
Initial support for ROHM BD9576MUF and BD9573MUF PMICs. These PMICs are
primarily intended to be used to power the R-Car family processors.
BD9576MUF includes some additional safety features the BD9573MUF does not
have. This initial version of the drivers does not utilize these features,
and for now the SW behaviour is identical.

Please note that this version of the drivers has only been tested on
BD9576MUF, but according to the data-sheets the relevant registers should
be the same, so the drivers should also work on BD9573MUF.

This patch series includes MFD, watchdog and regulator drivers with basic
functionality such as:
- enabling and pinging the watchdog
- configuring the watchdog timeout / window from device-tree
- reading regulator states/voltages
- enabling/disabling VOUT1 (VD50) when control mode B is used

This patch series does not bring interrupt support. BD9576MUF and
BD9573MUF are designed to keep the IRQ line low for the whole duration of
an error condition; the IRQ can't be 'acked'. So proper IRQ support would
require some IRQ limiter implementation (delayed unmask?) in order to not
hog the CPU.

---

Matti Vaittinen (6):
  dt_bindings: mfd: Add ROHM BD9576MUF and BD9573MUF PMICs
  dt_bindings: regulator: Add ROHM BD9576MUF and BD9573MUF PMICs
  mfd: Support ROHM BD9576MUF and BD9573MUF
  wdt: Support wdt on ROHM BD9576MUF and BD9573MUF
  regulator: Support ROHM BD9576MUF and BD9573MUF
  MAINTAINERS: Add ROHM BD9576MUF and BD9573MUF drivers

 .../bindings/mfd/rohm,bd9576-pmic.yaml     | 129 +++++++
 .../regulator/rohm,bd9576-regulator.yaml   |  33 ++
 MAINTAINERS                                |   4 +
 drivers/mfd/Kconfig                        |  11 +
 drivers/mfd/Makefile                       |   1 +
 drivers/mfd/rohm-bd9576.c                  | 130 +++++++
 drivers/regulator/Kconfig                  |  10 +
 drivers/regulator/Makefile                 |   1 +
 drivers/regulator/bd9576-regulator.c       | 337 ++++++++++++++++++
 drivers/watchdog/Kconfig                   |  13 +
 drivers/watchdog/Makefile                  |   1 +
 drivers/watchdog/bd9576_wdt.c              | 295 +++++++++++++++
 include/linux/mfd/rohm-bd957x.h            |  61 ++++
 include/linux/mfd/rohm-generic.h           |   2 +
 14 files changed, 1028 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml
 create mode 100644 Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml
 create mode 100644 drivers/mfd/rohm-bd9576.c
 create mode 100644 drivers/regulator/bd9576-regulator.c
 create mode 100644 drivers/watchdog/bd9576_wdt.c
 create mode 100644 include/linux/mfd/rohm-bd957x.h

base-commit: f4d51dffc6c01a9e94650d95ce0104964f8ae822
--
2.21.0

--
Matti Vaittinen, Linux device drivers
ROHM Semiconductors, Finland SWDC
Kiviharjunlenkki 1E
90220 OULU
FINLAND

~~~ "I don't think so," said Rene Descartes. Just then he vanished ~~~
Simon says - in Latin please.
~~~ "non cogito me" dixit Rene Descarte, deinde evanescavit ~~~
Thanks to Simon Glass for the translation =]
Diffstat (limited to 'drivers/nvme/host/rdma.c')
-rw-r--r--	drivers/nvme/host/rdma.c	72
1 file changed, 53 insertions(+), 19 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 44c76ffbb264..8e5ffe2f117d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -122,6 +122,7 @@ struct nvme_rdma_ctrl {
 	struct sockaddr_storage src_addr;
 
 	struct nvme_ctrl	ctrl;
+	struct mutex		teardown_lock;
 	bool			use_inline_data;
 	u32			io_queues[HCTX_MAX_TYPES];
 };
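
The teardown_lock added here serializes the admin/io queue teardown sequences against the new timeout handler further down, so only one context at a time can stop a queue and complete its requests. A minimal sketch of the pattern; my_ctrl and my_teardown are hypothetical names, not part of this patch:

	#include <linux/mutex.h>

	struct my_ctrl {
		struct mutex teardown_lock;	/* guards stop-queue + complete sequences */
	};

	static void my_teardown(struct my_ctrl *ctrl)
	{
		mutex_lock(&ctrl->teardown_lock);
		/* stop queues, then cancel/complete outstanding requests */
		mutex_unlock(&ctrl->teardown_lock);
	}
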
@@ -975,7 +976,15 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 	if (!new) {
 		nvme_start_queues(&ctrl->ctrl);
-		nvme_wait_freeze(&ctrl->ctrl);
+		if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
+			/*
+			 * If we timed out waiting for freeze we are likely to
+			 * be stuck. Fail the controller initialization just
+			 * to be safe.
+			 */
+			ret = -ENODEV;
+			goto out_wait_freeze_timed_out;
+		}
 		blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
 			ctrl->ctrl.queue_count - 1);
 		nvme_unfreeze(&ctrl->ctrl);
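
Rather than blocking forever in nvme_wait_freeze(), the reconnect path now bounds the wait and fails controller initialization if the freeze never completes (for example because the controller stopped answering while requests still hold freeze references). A hedged sketch of the bounded-wait shape; the my_* names and MY_IO_TIMEOUT are illustrative only:

	/* my_wait_frozen_timeout() returns zero on timeout,
	 * like nvme_wait_freeze_timeout() */
	if (!my_wait_frozen_timeout(ctrl, MY_IO_TIMEOUT)) {
		ret = -ENODEV;	/* likely stuck: unwind instead of hanging */
		goto out_unwind;
	}
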
@@ -983,6 +992,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 	return 0;
 
+out_wait_freeze_timed_out:
+	nvme_stop_queues(&ctrl->ctrl);
+	nvme_rdma_stop_io_queues(ctrl);
 out_cleanup_connect_q:
 	if (new)
 		blk_cleanup_queue(ctrl->ctrl.connect_q);
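
The new out_wait_freeze_timed_out label slots into the existing error-label chain: it undoes the most recent steps (stop the block queues, stop the RDMA queues) and then falls through to the older cleanup labels. Generic shape of the goto-unwind idiom, not this driver's code; step_a/step_b/undo_step_a are hypothetical:

	static int my_setup(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			return ret;
		ret = step_b();
		if (ret)
			goto undo_a;	/* unwind in reverse order of setup */
		return 0;
	undo_a:
		undo_step_a();
		return ret;
	}
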
@@ -997,6 +1009,7 @@ out_free_io_queues:
 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		bool remove)
 {
+	mutex_lock(&ctrl->teardown_lock);
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	if (ctrl->ctrl.admin_tagset) {
@@ -1007,11 +1020,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	if (remove)
 		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_destroy_admin_queue(ctrl, remove);
+	mutex_unlock(&ctrl->teardown_lock);
 }
 
 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 		bool remove)
 {
+	mutex_lock(&ctrl->teardown_lock);
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_start_freeze(&ctrl->ctrl);
 		nvme_stop_queues(&ctrl->ctrl);
@@ -1025,6 +1040,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 			nvme_start_queues(&ctrl->ctrl);
 		nvme_rdma_destroy_io_queues(ctrl, remove);
 	}
+	mutex_unlock(&ctrl->teardown_lock);
 }
 
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
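
For reference, freezing and quiescing are different gates: nvme_start_freeze() blocks new submissions from entering the namespace queues, while nvme_stop_queues() quiesces dispatch of what is already queued; the transport queues are stopped before outstanding requests are cancelled. A rough sketch of that ordering with generic, hypothetical names:

	static void my_teardown_io(struct my_ctrl *ctrl)
	{
		my_start_freeze(ctrl);		/* gate new submissions */
		my_quiesce_queues(ctrl);	/* stop blk-mq dispatch */
		my_stop_transport_queues(ctrl);	/* no more wire completions */
		my_cancel_inflight(ctrl);	/* fail whatever is left */
	}
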
@@ -1180,6 +1196,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
 		return;
 
+	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
 	queue_work(nvme_reset_wq, &ctrl->err_work);
 }
@@ -1189,7 +1206,7 @@ static void nvme_rdma_end_request(struct nvme_rdma_request *req)
 	if (!refcount_dec_and_test(&req->ref))
 		return;
 
-	if (!nvme_end_request(rq, req->status, req->result))
+	if (!nvme_try_complete_req(rq, req->status, req->result))
 		nvme_rdma_complete_rq(rq);
 }
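
nvme_end_request() was renamed nvme_try_complete_req() in this cycle; the behaviour is unchanged. The surrounding pattern completes a request exactly once, in whichever completion context drops the last reference. A hedged sketch with generic names (my_request, my_try_complete and my_complete_directly are illustrative only):

	static void my_end_request(struct my_request *req)
	{
		/* both send and receive completions hold a reference */
		if (!refcount_dec_and_test(&req->ref))
			return;
		/* if the block layer did not take over completion, do it here */
		if (!my_try_complete(req))
			my_complete_directly(req);
	}
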
@@ -1915,7 +1932,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
 		nvme_rdma_destroy_queue_ib(queue);
-		/* fall through */
+		fallthrough;
 	case RDMA_CM_EVENT_ADDR_ERROR:
 		dev_dbg(queue->ctrl->ctrl.device,
 			"CM error event %d\n", ev->event);
@@ -1946,6 +1963,22 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	return 0;
 }
 
+static void nvme_rdma_complete_timed_out(struct request *rq)
+{
+	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_rdma_queue *queue = req->queue;
+	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+	/* fence other contexts that may complete the command */
+	mutex_lock(&ctrl->teardown_lock);
+	nvme_rdma_stop_queue(queue);
+	if (!blk_mq_request_completed(rq)) {
+		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+		blk_mq_complete_request(rq);
+	}
+	mutex_unlock(&ctrl->teardown_lock);
+}
+
 static enum blk_eh_timer_return
 nvme_rdma_timeout(struct request *rq, bool reserved)
 {
@@ -1956,29 +1989,29 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
 		 rq->tag, nvme_rdma_queue_idx(queue));
 
-	/*
-	 * Restart the timer if a controller reset is already scheduled. Any
-	 * timed out commands would be handled before entering the connecting
-	 * state.
-	 */
-	if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
-		return BLK_EH_RESET_TIMER;
-
 	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
 		/*
-		 * Teardown immediately if controller times out while starting
-		 * or we are already started error recovery. all outstanding
-		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 * If we are resetting, connecting or deleting we should
+		 * complete immediately because we may block controller
+		 * teardown or setup sequence
+		 * - ctrl disable/shutdown fabrics requests
+		 * - connect requests
+		 * - initialization admin requests
+		 * - I/O requests that entered after unquiescing and
+		 *   the controller stopped responding
+		 *
+		 * All other requests should be cancelled by the error
+		 * recovery work, so it's fine that we fail it here.
 		 */
-		flush_work(&ctrl->err_work);
-		nvme_rdma_teardown_io_queues(ctrl, false);
-		nvme_rdma_teardown_admin_queue(ctrl, false);
+		nvme_rdma_complete_timed_out(rq);
 		return BLK_EH_DONE;
 	}
 
-	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+	/*
+	 * LIVE state should trigger the normal error recovery which will
+	 * handle completing this request.
+	 */
 	nvme_rdma_error_recovery(ctrl);
-
 	return BLK_EH_RESET_TIMER;
 }
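
Two notes on the rewritten timeout handler. First, nvme_rdma_complete_timed_out() stops the RDMA queue before testing blk_mq_request_completed(), so no completion can race in from the wire after the check, and teardown_lock excludes a concurrent teardown that might complete the same request. Second, the return values follow the blk-mq timeout contract: BLK_EH_DONE means the driver owns (or has already done) the completion, while BLK_EH_RESET_TIMER rearms the timer so the error-recovery work can complete the request. A generic sketch of that contract; controller_is_live(), complete_with_error() and start_error_recovery() are hypothetical:

	#include <linux/blk-mq.h>

	static enum blk_eh_timer_return my_timeout(struct request *rq, bool reserved)
	{
		if (!controller_is_live(rq)) {
			complete_with_error(rq);	/* driver completes it: done */
			return BLK_EH_DONE;
		}
		start_error_recovery(rq);		/* recovery path will complete it */
		return BLK_EH_RESET_TIMER;		/* rearm while recovery runs */
	}
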
@@ -2278,6 +2311,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		return ERR_PTR(-ENOMEM);
 	ctrl->ctrl.opts = opts;
 	INIT_LIST_HEAD(&ctrl->list);
+	mutex_init(&ctrl->teardown_lock);
 
 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
 		opts->trsvcid =