From 16cc33b23732d3ec55e428ddadb39c225f23de7e Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Mon, 29 Nov 2021 08:24:34 -0800
Subject: [PATCH 1/6] nvme: show subsys nqn for duplicate cntlids

The driver-assigned nvme handle isn't persistent across reboots, so it
is not enough information to match up where the collisions are
occurring. Add the subsys nqn string to the output so that it can more
easily be identified later.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=215099
Signed-off-by: Keith Busch
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/core.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4c63564adeaa..d476ad65def3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2696,8 +2696,9 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
 		if (tmp->cntlid == ctrl->cntlid) {
 			dev_err(ctrl->device,
-				"Duplicate cntlid %u with %s, rejecting\n",
-				ctrl->cntlid, dev_name(tmp->device));
+				"Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+				ctrl->cntlid, dev_name(tmp->device),
+				subsys->subnqn);
 			return false;
 		}

From d39ad2a45c0e38def3e0c95f5b90d9af4274c939 Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Tue, 30 Nov 2021 08:14:54 -0800
Subject: [PATCH 2/6] nvme: disable namespace access for unsupported metadata

The only fabrics target that supports metadata handling through the
separate integrity buffer is RDMA. It is currently usable only if the
size is 8 bytes per block and formatted for protection information. If
an RDMA target were to export a namespace with a different format
(e.g., 4k+64B), the driver would not be able to submit valid read/write
commands for that namespace.

Suppress setting the metadata feature in the namespace so that the
gendisk capacity will be set to 0. This will prevent read/write access
through the block stack, but will continue to allow ioctl passthrough
commands.

Cc: Max Gurtovoy
Cc: Sagi Grimberg
Signed-off-by: Keith Busch
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/core.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d476ad65def3..4ee7d2f8b8d8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1749,9 +1749,20 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
 		 */
 		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
 			return -EINVAL;
-		if (ctrl->max_integrity_segments)
-			ns->features |=
-				(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+
+		ns->features |= NVME_NS_EXT_LBAS;
+
+		/*
+		 * The current fabrics transport drivers support namespace
+		 * metadata formats only if nvme_ns_has_pi() returns true.
+		 * Suppress support for all other formats so the namespace will
+		 * have a 0 capacity and not be usable through the block stack.
+		 *
+		 * Note, this check will need to be modified if any drivers
+		 * gain the ability to use other metadata formats.
+		 */
+		if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+			ns->features |= NVME_NS_METADATA_SUPPORTED;
 	} else {
 		/*
 		 * For PCIe controllers, we can't easily remap the separate
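
For readers matching the new check against the formats it rejects, here
is a minimal, standalone C sketch of the gating in the fabrics branch of
nvme_configure_metadata() above. The struct and helper names (ns_fmt,
metadata_usable) are simplified stand-ins invented for illustration, not
kernel code; only the 8-byte T10 tuple test in ns_has_pi() mirrors what
nvme_ns_has_pi() checks.

/*
 * Toy model of the patch 2 gating: extended-LBA fabrics namespaces only
 * keep block-layer metadata support when the format carries 8-byte T10
 * protection information.
 */
#include <stdbool.h>
#include <stdio.h>

#define T10_PI_TUPLE_SIZE 8	/* sizeof(struct t10_pi_tuple) in the kernel */

struct ns_fmt {
	unsigned ms;		/* metadata bytes per block, from Identify Namespace */
	unsigned pi_type;	/* 0 = no protection information */
	bool ext_lbas;		/* metadata contiguous with data (FLBAS bit 4) */
};

/* Mirrors the spirit of nvme_ns_has_pi(): PI only works as an 8B tuple. */
static bool ns_has_pi(const struct ns_fmt *f)
{
	return f->pi_type && f->ms == T10_PI_TUPLE_SIZE;
}

/*
 * Returns true when the block stack may use the metadata; false means the
 * namespace gets 0 capacity and stays reachable via passthrough only.
 */
static bool metadata_usable(const struct ns_fmt *f, unsigned max_integrity_segs)
{
	if (!f->ext_lbas)
		return false;	/* the fabrics branch requires extended LBAs */
	return max_integrity_segs && ns_has_pi(f);
}

int main(void)
{
	struct ns_fmt pi8  = { .ms = 8,  .pi_type = 1, .ext_lbas = true };
	struct ns_fmt ms64 = { .ms = 64, .pi_type = 0, .ext_lbas = true };

	printf("4k+8B with PI : %s\n", metadata_usable(&pi8, 1) ? "usable" : "blocked");
	printf("4k+64B no PI  : %s\n", metadata_usable(&ms64, 1) ? "usable" : "blocked");
	return 0;
}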
From 793fcab83f38661e22e6f7c682dfba6fd0d97bb2 Mon Sep 17 00:00:00 2001
From: Niklas Cassel
Date: Fri, 26 Nov 2021 10:42:44 +0000
Subject: [PATCH 3/6] nvme: report write pointer for a full zone as zone start + zone len

The write pointer (wp) in NVMe ZNS is invalid for a zone in the full
state. The same also holds true for ZAC/ZBC.

The current behavior for NVMe is to simply propagate the wp reported by
the drive, even for full zones. Since the wp is invalid for a full
zone, the wp reported by the drive may be any value.

The way that the sd_zbc driver handles a full zone is to always report
the wp as zone start + zone len, regardless of what the drive reported.
null_blk also follows this convention.

Do the same for NVMe, so that a BLKREPORTZONE ioctl reports the write
pointer for a full zone in a consistent way, regardless of the
interface of the underlying zoned block device.

blkzone report before patch:
start: 0x000040000, len 0x040000, cap 0x03e000, wptr 0xfffffffffffbfff8
reset:0 non-seq:0, zcond:14(fu) [type: 2(SEQ_WRITE_REQUIRED)]

blkzone report after patch:
start: 0x000040000, len 0x040000, cap 0x03e000, wptr 0x040000
reset:0 non-seq:0, zcond:14(fu) [type: 2(SEQ_WRITE_REQUIRED)]

Signed-off-by: Niklas Cassel
Reviewed-by: Keith Busch
Reviewed-by: Damien Le Moal
Reviewed-by: Johannes Thumshirn
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/zns.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index bfc259e0d7b8..9f81beb4df4e 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -166,7 +166,10 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
 	zone.len = ns->zsze;
 	zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
 	zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
-	zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+	if (zone.cond == BLK_ZONE_COND_FULL)
+		zone.wp = zone.start + zone.len;
+	else
+		zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));

 	return cb(&zone, idx, data);
 }

From c7c15ae3dc50c0ab46c5cbbf8d2f3d3307e51f37 Mon Sep 17 00:00:00 2001
From: Hou Tao
Date: Fri, 3 Dec 2021 19:47:15 +0800
Subject: [PATCH 4/6] nvme-multipath: set ana_log_size to 0 after free ana_log_buf

Set ana_log_size to 0 when ana_log_buf is freed, to make sure that
nvme_mpath_init_identify() does the right thing when retrying after an
earlier failure.

Signed-off-by: Hou Tao
Reviewed-by: Keith Busch
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/multipath.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 7f2071f2460c..13e5d503ed07 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -866,7 +866,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	}
 	if (ana_log_size > ctrl->ana_log_size) {
 		nvme_mpath_stop(ctrl);
-		kfree(ctrl->ana_log_buf);
+		nvme_mpath_uninit(ctrl);
 		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
 		if (!ctrl->ana_log_buf)
 			return -ENOMEM;
@@ -886,4 +886,5 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
 	kfree(ctrl->ana_log_buf);
 	ctrl->ana_log_buf = NULL;
+	ctrl->ana_log_size = 0;
 }
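
The invariant patch 4 restores, reduced to a standalone C toy: a cached
buffer and its recorded size must be updated together, or the grow-only
reallocation path will skip allocating on a retry and leave a NULL
buffer behind. The ana_cache type and function names below are
illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

struct ana_cache {
	char   *buf;
	size_t  size;
};

static void cache_uninit(struct ana_cache *c)
{
	free(c->buf);
	c->buf = NULL;
	c->size = 0;	/* the fix: without this, buf and size disagree */
}

/* Reallocates only when the required size grew, like the ANA log path. */
static int cache_init(struct ana_cache *c, size_t needed)
{
	if (needed > c->size) {
		cache_uninit(c);
		c->buf = malloc(needed);
		if (!c->buf)
			return -1;	/* size stayed 0, so a retry reallocates */
		c->size = needed;
	}
	return 0;
}

int main(void)
{
	struct ana_cache c = { 0 };

	if (cache_init(&c, 4096) == 0)
		printf("buf=%p size=%zu\n", (void *)c.buf, c.size);
	cache_uninit(&c);
	return 0;
}

Had cache_uninit() only freed the buffer, a failed cache_init() followed
by a retry with the same size would find needed > c->size false and
return success with c->buf still NULL, which is exactly the retry bug in
nvme_mpath_init_identify().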
From 8b77fa6fdce0fc7147bab91b1011048758290ca4 Mon Sep 17 00:00:00 2001
From: Ruozhu Li
Date: Thu, 4 Nov 2021 15:13:32 +0800
Subject: [PATCH 5/6] nvme: fix use after free when disconnecting a reconnecting ctrl

A crash happens when trying to disconnect a reconnecting ctrl:

1) The network was cut off when the connection was just established.
   Scan work hung there waiting for some I/Os to complete. Those I/Os
   were retried because we return BLK_STS_RESOURCE to the block layer
   while reconnecting.

2) After a while, I tried to disconnect this connection. This procedure
   also hung because it tried to obtain ctrl->scan_lock. It should be
   noted that at this point we had already switched the controller
   state to NVME_CTRL_DELETING.

3) In nvme_check_ready(), we always return true when ctrl->state is
   NVME_CTRL_DELETING, so those retried I/Os were issued to the bottom
   device, which had already been freed.

To fix this, when ctrl->state is NVME_CTRL_DELETING, issue the command
to the bottom device only if the queue state is live. If not, return a
host path error to the block layer.

Signed-off-by: Ruozhu Li
Reviewed-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/core.c | 1 +
 drivers/nvme/host/nvme.h | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4ee7d2f8b8d8..1af8a4513708 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -666,6 +666,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
 		struct request *rq)
 {
 	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+	    ctrl->state != NVME_CTRL_DELETING &&
 	    ctrl->state != NVME_CTRL_DEAD &&
 	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))

diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b334af8aa264..9b095ee01364 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -709,7 +709,7 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		return true;
 	if (ctrl->ops->flags & NVME_F_FABRICS &&
 	    ctrl->state == NVME_CTRL_DELETING)
-		return true;
+		return queue_live;
 	return __nvme_check_ready(ctrl, rq, queue_live);
 }
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,

From 30e32f300be6d0160fd1b3fc6d0f62917acd9be2 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 8 Dec 2021 15:35:06 +0200
Subject: [PATCH 6/6] nvmet-tcp: fix possible list corruption for unexpected command failure

nvmet_tcp_handle_req_failure() needs to understand whether to prepare
for incoming data or for the next PDU. However, if we misidentify this,
we will wait for 0-length data and queue the response even though
nvmet_req_init() already did that.

The particular command was a namespace management command with no data,
which was incorrectly categorized as a command with in-capsule data.

Also, add a code comment describing what we are trying to do here.

Signed-off-by: Sagi Grimberg
Reviewed-by: Keith Busch
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/tcp.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index cb6a473c3eaf..7c1c43ce466b 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -922,7 +922,14 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
 	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
 	int ret;

-	if (!nvme_is_write(cmd->req.cmd) ||
+	/*
+	 * This command has not been processed yet, hence we are trying to
+	 * figure out if there is still pending data left to receive. If
+	 * we don't, we can simply prepare for the next pdu and bail out,
+	 * otherwise we will need to prepare a buffer and receive the
+	 * stale data before continuing forward.
+	 */
+	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
 	    data_len > cmd->req.port->inline_data_size) {
 		nvmet_prepare_receive_pdu(queue);
 		return;
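
The corrected classification from patch 6, as a standalone C predicate.
Note that namespace management (opcode 0x0d) has the low opcode bit set,
so the write-direction test alone misfires on it; the new !data_len
check is what keeps such zero-length commands out of the drain path. The
helper names below are invented for illustration; nvme_is_write_op()
mirrors the bit test in the kernel's nvme_is_write().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* NVMe encodes the data transfer direction in the opcode's low bit. */
static bool nvme_is_write_op(uint8_t opcode)
{
	return opcode & 1;
}

/*
 * True only when there is stale in-capsule data to drain before the
 * next PDU: a write-direction command with a nonzero SGL length that
 * fits within the port's inline data size.
 */
static bool has_incapsule_data(uint8_t opcode, uint32_t sgl_len,
			       uint32_t inline_data_size)
{
	return nvme_is_write_op(opcode) && sgl_len &&
	       sgl_len <= inline_data_size;
}

int main(void)
{
	/*
	 * Namespace management (0x0d) has the write bit set but carries
	 * no data here; the sgl_len check keeps it out of the drain path.
	 */
	printf("ns-mgmt, len 0   -> %d\n", has_incapsule_data(0x0d, 0, 8192));
	/* A 4KB write fits in the capsule: drain it before responding. */
	printf("write,  len 4096 -> %d\n", has_incapsule_data(0x01, 4096, 8192));
	return 0;
}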