From 9134ae2a2546cb96abddcd4469a79c77ee3a4480 Mon Sep 17 00:00:00 2001
From: Prabhath Sajeepa
Date: Mon, 9 Mar 2020 15:07:53 -0600
Subject: [PATCH 1/2] nvme-rdma: Avoid double freeing of async event data

A timeout of the Identify command, which is issued as part of admin
queue creation, can result in the async event data being freed both in
the nvme_rdma_timeout() handler and in the error handling path of
nvme_rdma_configure_admin_queue(), causing a NULL pointer dereference.

Call Trace:
 ? nvme_rdma_setup_ctrl+0x223/0x800 [nvme_rdma]
 nvme_rdma_create_ctrl+0x2ba/0x3f7 [nvme_rdma]
 nvmf_dev_write+0xa54/0xcc6 [nvme_fabrics]
 __vfs_write+0x1b/0x40
 vfs_write+0xb2/0x1b0
 ksys_write+0x61/0xd0
 __x64_sys_write+0x1a/0x20
 do_syscall_64+0x60/0x1e0
 entry_SYSCALL_64_after_hwframe+0x44/0xa9

Reviewed-by: Roland Dreier
Reviewed-by: Max Gurtovoy
Reviewed-by: Christoph Hellwig
Signed-off-by: Prabhath Sajeepa
Signed-off-by: Keith Busch
---
 drivers/nvme/host/rdma.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3e85c5cacefd..0fe08c4dfd2f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -850,9 +850,11 @@ out_free_tagset:
 	if (new)
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 out_free_async_qe:
-	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
-		sizeof(struct nvme_command), DMA_TO_DEVICE);
-	ctrl->async_event_sqe.data = NULL;
+	if (ctrl->async_event_sqe.data) {
+		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+			sizeof(struct nvme_command), DMA_TO_DEVICE);
+		ctrl->async_event_sqe.data = NULL;
+	}
 out_free_queue:
 	nvme_rdma_free_queue(&ctrl->queues[0]);
 	return error;
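The fix is the classic guarded-free idiom: every cleanup path checks the
pointer before freeing and clears it afterwards, so whichever of the two
paths runs second sees NULL and skips the free. A minimal userspace
sketch of the same idiom (the struct and helper names here are invented
for illustration; this is not the kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the driver's async event queue entry. */
struct async_qe {
	void *data;	/* DMA buffer in the real driver */
};

/* Guarded free: safe to reach from multiple teardown paths. */
static void qe_free(struct async_qe *qe)
{
	if (qe->data) {
		free(qe->data);
		qe->data = NULL;	/* mark as already freed */
	}
}

int main(void)
{
	struct async_qe qe = { .data = malloc(64) };

	/* First path (e.g. a timeout handler) frees the buffer... */
	qe_free(&qe);
	/* ...then a second path (e.g. setup error handling) reaches
	 * the same cleanup. With the guard this is a harmless no-op. */
	qe_free(&qe);

	puts("no double free");
	return 0;
}

Note that the check-and-clear is not atomic; the idiom is safe only
because the two paths are serialized, as they are here. It is not a
substitute for locking between truly concurrent frees.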
From 98fd5c723730f560e5bea919a64ac5b83d45eb72 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 12 Mar 2020 16:06:38 -0700
Subject: [PATCH 2/2] nvmet-tcp: set MSG_MORE only if we actually have more to
 send

When we send PDU data, we want to optimize the TCP stack operation if
we have more data to send. So we set MSG_MORE when any of the following
holds:
- we have more fragments coming in the batch, or
- we have more data to send in this PDU, or
- a data digest trailer still follows the data, or
- the SUCCESS flag optimization is not in effect, so an NVMe completion
  PDU follows (the optimization omits the completion and is used when
  sq_head pointer updates are disabled).

This addresses a regression in QD=1 with the SUCCESS flag optimization,
where we unconditionally set MSG_MORE even when we didn't actually have
more data to send.

Fixes: 70583295388a ("nvmet-tcp: implement C2HData SUCCESS optimization")
Reported-by: Mark Wunderlich
Tested-by: Mark Wunderlich
Signed-off-by: Sagi Grimberg
Signed-off-by: Keith Busch
---
 drivers/nvme/target/tcp.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index af674fc0bb1e..5bb5342b8d0c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -515,7 +515,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 	return 1;
 }
 
-static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	int ret;
@@ -523,9 +523,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
 	while (cmd->cur_sg) {
 		struct page *page = sg_page(cmd->cur_sg);
 		u32 left = cmd->cur_sg->length - cmd->offset;
+		int flags = MSG_DONTWAIT;
+
+		if ((!last_in_batch && cmd->queue->send_list_len) ||
+		    cmd->wbytes_done + left < cmd->req.transfer_len ||
+		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
+			flags |= MSG_MORE;
 
 		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-				left, MSG_DONTWAIT | MSG_MORE);
+				left, flags);
 		if (ret <= 0)
 			return ret;
 
@@ -660,7 +666,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
 	}
 
 	if (cmd->state == NVMET_TCP_SEND_DATA) {
-		ret = nvmet_try_send_data(cmd);
+		ret = nvmet_try_send_data(cmd, last_in_batch);
 		if (ret <= 0)
 			goto done_send;
 	}
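For clarity, here is the new flags computation pulled out as a
standalone predicate, a sketch only (parameter names mirror the kernel
fields from the diff, but this is not the kernel function itself):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>	/* MSG_DONTWAIT, MSG_MORE */

/* Returns the send flags for one data fragment. MSG_MORE is set only
 * if something else will follow this fragment on the wire. */
static int data_send_flags(bool last_in_batch, int send_list_len,
			   size_t wbytes_done, size_t fragment_len,
			   size_t transfer_len, bool data_digest,
			   bool sqhd_disabled)
{
	int flags = MSG_DONTWAIT;

	if ((!last_in_batch && send_list_len) ||	 /* more cmds queued */
	    wbytes_done + fragment_len < transfer_len || /* more PDU data */
	    data_digest ||				 /* digest trailer */
	    !sqhd_disabled)				 /* completion PDU */
		flags |= MSG_MORE;

	return flags;
}

int main(void)
{
	/* The QD=1 regression case: last command in the batch, last
	 * fragment of the PDU, no digest, SUCCESS optimization on.
	 * Nothing follows, so MSG_MORE must be clear. */
	int flags = data_send_flags(true, 0, 4096, 4096, 8192, false, true);

	printf("MSG_MORE is %s\n", (flags & MSG_MORE) ? "set" : "clear");
	return 0;
}

With MSG_MORE clear on the final fragment, the TCP stack pushes the
segment immediately instead of waiting for more data, which is what the
fix restores for the QD=1 case.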