nvme-rdma: add clean action for failed reconnection

A crash happens when a failed reconnection is injected.
If reconnection fails after the io queues have been started, the queues
are unquiesced and new requests continue to be delivered. The
reconnection error handling path frees the queues directly without
canceling the suspended requests. The suspended requests then time out,
and the timeout handling crashes because it uses a queue that has
already been freed.

Add queue syncing and cancellation of the suspended requests to the
reconnection error handling.

Signed-off-by: Chao Leng <lengchao@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 958dc1d32c (parent 2547906982)
Author: Chao Leng <lengchao@huawei.com>, 2021-01-21 11:32:37 +08:00
Committed by: Christoph Hellwig <hch@lst.de>
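
For context, here is a condensed sketch of the teardown order that the
reconnection error path follows once this patch is applied. It is taken
from the nvme_rdma_setup_ctrl hunk in the diff below; the comments are
editorial annotations of what each helper does, not part of the patch.

destroy_io:
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);		/* quiesce io queues: stop new submissions */
		nvme_sync_io_queues(&ctrl->ctrl);	/* wait for pending timeout work to finish */
		nvme_rdma_stop_io_queues(ctrl);		/* tear down the RDMA io queues */
		nvme_cancel_tagset(&ctrl->ctrl);	/* fail the suspended io requests */
		nvme_rdma_destroy_io_queues(ctrl, new);
	}
destroy_admin:
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);	/* quiesce the admin queue */
	blk_sync_queue(ctrl->ctrl.admin_q);		/* wait for admin timeout work to finish */
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_cancel_admin_tagset(&ctrl->ctrl);		/* fail the suspended admin requests */
	nvme_rdma_destroy_admin_queue(ctrl, new);

Quiescing and syncing before the queues are freed ensures that no request
can still be dispatched or timed out against a freed queue, and canceling
the tagsets completes the suspended requests instead of letting them time
out.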

@@ -919,12 +919,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
 	error = nvme_init_identify(&ctrl->ctrl);
 	if (error)
-		goto out_stop_queue;
+		goto out_quiesce_queue;
 
 	return 0;
 
+out_quiesce_queue:
+	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_sync_queue(ctrl->ctrl.admin_q);
 out_stop_queue:
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
+	nvme_cancel_admin_tagset(&ctrl->ctrl);
 out_cleanup_queue:
 	if (new)
 		blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -1001,8 +1005,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 
 out_wait_freeze_timed_out:
 	nvme_stop_queues(&ctrl->ctrl);
+	nvme_sync_io_queues(&ctrl->ctrl);
 	nvme_rdma_stop_io_queues(ctrl);
 out_cleanup_connect_q:
+	nvme_cancel_tagset(&ctrl->ctrl);
 	if (new)
 		blk_cleanup_queue(ctrl->ctrl.connect_q);
 out_free_tag_set:
@@ -1144,10 +1150,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 	return 0;
 
 destroy_io:
-	if (ctrl->ctrl.queue_count > 1)
+	if (ctrl->ctrl.queue_count > 1) {
+		nvme_stop_queues(&ctrl->ctrl);
+		nvme_sync_io_queues(&ctrl->ctrl);
+		nvme_rdma_stop_io_queues(ctrl);
+		nvme_cancel_tagset(&ctrl->ctrl);
 		nvme_rdma_destroy_io_queues(ctrl, new);
+	}
 destroy_admin:
+	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
+	nvme_cancel_admin_tagset(&ctrl->ctrl);
 	nvme_rdma_destroy_admin_queue(ctrl, new);
 	return ret;
 }