Merge tag 'nvme-5.13-2021-06-03' of git://git.infradead.org/nvme into block-5.13

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 5.13:

 - fix corruption in RDMA in-capsule SGLs (Sagi Grimberg)
 - nvme-loop reset fixes (Hannes Reinecke)
 - nvmet fix for freeing unallocated p2pmem (Max Gurtovoy)"

* tag 'nvme-5.13-2021-06-03' of git://git.infradead.org/nvme:
  nvmet: fix freeing unallocated p2pmem
  nvme-loop: do not warn for deleted controllers during reset
  nvme-loop: check for NVME_LOOP_Q_LIVE in nvme_loop_destroy_admin_queue()
  nvme-loop: clear NVME_LOOP_Q_LIVE when nvme_loop_configure_admin_queue() fails
  nvme-loop: reset queue count to 1 in nvme_loop_destroy_io_queues()
  nvme-rdma: fix in-capsule data send for chained sgls
commit e369edbb0d
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
 		int count)
 {
 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
-	struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
 	struct ib_sge *sge = &req->sge[1];
+	struct scatterlist *sgl;
 	u32 len = 0;
 	int i;
 
-	for (i = 0; i < count; i++, sgl++, sge++) {
+	for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
 		sge->addr = sg_dma_address(sgl);
 		sge->length = sg_dma_len(sgl);
 		sge->lkey = queue->device->pd->local_dma_lkey;
 		len += sge->length;
+		sge++;
 	}
 
 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
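Why this fixes corruption: when the data scatterlist is chained, its entries are not one contiguous array, so a bare sgl++ can step onto a chain-link slot and then off the end of an allocation chunk instead of hopping to the next one; for_each_sg() advances with sg_next(), which follows the links. Below is a minimal user-space model of that difference. The types and sg_next_model() are simplified stand-ins, not kernel code (the real struct scatterlist packs the chain pointer and end marker into page_link):

#include <stdio.h>

struct sg_entry {
	struct sg_entry *chain;	/* set on a link slot pointing at the next chunk */
	unsigned int length;	/* payload length; 0 on a link slot */
	int is_last;		/* end-of-list marker, like sg_is_last() */
};

/* Model of sg_next(): stop at the end marker, follow chain links. */
static struct sg_entry *sg_next_model(struct sg_entry *sg)
{
	if (sg->is_last)
		return NULL;
	sg++;
	if (sg->chain)
		sg = sg->chain;	/* hop into the next allocation chunk */
	return sg;
}

int main(void)
{
	struct sg_entry chunk2[2] = {
		{ NULL, 512, 0 },
		{ NULL, 1024, 1 },	/* last real entry */
	};
	struct sg_entry chunk1[3] = {
		{ NULL, 4096, 0 },
		{ NULL, 2048, 0 },
		{ chunk2, 0, 0 },	/* link slot, carries no data */
	};
	struct sg_entry *sg;
	unsigned int len = 0;
	int i;

	/* for_each_sg()-style walk over the 4 payload entries */
	for (i = 0, sg = chunk1; i < 4; i++, sg = sg_next_model(sg))
		len += sg->length;

	/* A bare sg++ walk would have counted the link slot and then
	 * run off the end of chunk1 instead of crossing into chunk2. */
	printf("chain-aware walk saw %u bytes\n", len);	/* 7680 */
	return 0;
}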
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1005,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
 	return req->transfer_len - req->metadata_len;
 }
 
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+		struct nvmet_req *req)
 {
-	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
 			nvmet_data_transfer_len(req));
 	if (!req->sg)
 		goto out_err;
 
 	if (req->metadata_len) {
-		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
 			&req->metadata_sg_cnt, req->metadata_len);
 		if (!req->metadata_sg)
 			goto out_free_sg;
 	}
 
+	req->p2p_dev = p2p_dev;
+
 	return 0;
 out_free_sg:
 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
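Passing the device in explicitly means req->p2p_dev no longer has to be pre-populated for the allocation to work; after this hunk the field carries exactly one meaning, assigned just before the successful return above: this request's SGLs really were allocated from that device's p2pmem.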
@@ -1025,25 +1029,19 @@ out_err:
 	return -ENOMEM;
 }
 
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
 {
-	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
-		return false;
-
-	if (req->sq->ctrl && req->sq->qid && req->ns) {
-		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
-						 req->ns->nsid);
-		if (req->p2p_dev)
-			return true;
-	}
-
-	req->p2p_dev = NULL;
-	return false;
+	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+	    !req->sq->ctrl || !req->sq->qid || !req->ns)
+		return NULL;
+	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
 }
 
 int nvmet_req_alloc_sgls(struct nvmet_req *req)
 {
-	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
 		return 0;
 
 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
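Taken together, the nvmet hunks enforce a simple ownership rule: req->p2p_dev is published only after every p2p allocation has succeeded, so the free path can treat it as the authoritative "this request holds p2pmem" flag instead of a lookup result that may never have been used. A minimal user-space sketch of that pattern, with hypothetical names (allocator, request_alloc, request_free) standing in for the pci_p2pmem_* calls:

#include <stdlib.h>

struct allocator { int id; };	/* hypothetical stand-in for a p2pmem pci_dev */

struct request {
	void *buf;			/* data SGL stand-in */
	void *meta;			/* metadata SGL stand-in */
	struct allocator *owner;	/* non-NULL only when buf/meta hold pool memory */
};

static void *alloc_from(struct allocator *a, size_t len) { (void)a; return malloc(len); }
static void free_to(struct allocator *a, void *p) { (void)a; free(p); }

static int request_alloc(struct allocator *a, struct request *req,
			 size_t len, size_t meta_len)
{
	req->buf = alloc_from(a, len);
	if (!req->buf)
		return -1;

	if (meta_len) {
		req->meta = alloc_from(a, meta_len);
		if (!req->meta)
			goto out_free_buf;
	}

	req->owner = a;		/* publish ownership only after full success */
	return 0;

out_free_buf:
	free_to(a, req->buf);
	req->buf = NULL;
	return -1;
}

static void request_free(struct request *req)
{
	if (!req->owner)	/* never published: nothing came from the pool */
		return;
	free_to(req->owner, req->buf);
	if (req->meta)
		free_to(req->owner, req->meta);
	req->owner = NULL;	/* mirror of the req->p2p_dev reset in the next hunk */
}

int main(void)
{
	struct allocator pool = { 0 };
	struct request req = { 0 };	/* zeroed, like a freshly initialized request */

	if (request_alloc(&pool, &req, 4096, 8) == 0)
		request_free(&req);	/* returns memory to the right pool, once */
	request_free(&req);		/* no-op: ownership already cleared */
	return 0;
}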
@@ -1072,6 +1070,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
 		if (req->metadata_sg)
 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+		req->p2p_dev = NULL;
 	} else {
 		sgl_free(req->sg);
 		if (req->metadata_sg)
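The free side completes the rule: the p2p branch is taken only when req->p2p_dev is set, and clearing it once the buffers have gone back means a reused request structure can no longer masquerade as one that still owns p2pmem.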
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
-	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+		return;
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
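The guard makes the destroy path idempotent: whichever caller actually clears the LIVE bit performs the teardown, and anyone arriving later returns early, which is what makes overlapping reset/delete paths safe. A compile-and-run model of the pattern, using a C11 atomic exchange in place of the kernel's test_and_clear_bit():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int queue_live = 1;	/* queue starts out live */

static void queue_destroy(void)
{
	/* read-and-clear in one atomic step, like test_and_clear_bit() */
	if (!atomic_exchange(&queue_live, 0))
		return;		/* someone else already tore it down */
	printf("tearing down queue (runs exactly once)\n");
}

int main(void)
{
	queue_destroy();	/* performs the teardown */
	queue_destroy();	/* second call is a harmless no-op */
	return 0;
}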
@@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
 		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
 	}
+	ctrl->ctrl.queue_count = 1;
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
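ctrl->ctrl.queue_count counts the admin queue as well as the I/O queues, so once the I/O queues are destroyed only the admin queue remains; resetting the count to 1 here keeps later reset/reconnect cycles from acting on a stale I/O queue count.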
@@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	return 0;
 
 out_cleanup_queue:
+	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
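NVME_LOOP_Q_LIVE is set earlier in nvme_loop_configure_admin_queue(); now that nvme_loop_destroy_admin_queue() only acts when it can clear that bit, the error path must drop the flag itself, otherwise a later destroy would tear down the already-cleaned-up admin queue a second time.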
@@ -462,8 +465,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	nvme_loop_shutdown_ctrl(ctrl);
 
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-		/* state change failure should never happen */
-		WARN_ON_ONCE(1);
+		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+			/* state change failure for non-deleted ctrl? */
+			WARN_ON_ONCE(1);
 		return;
 	}
 
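A controller that is being deleted is legitimately not allowed to enter CONNECTING, so the reset work now reserves the WARN for the genuinely unexpected case: a state-change failure on a controller that is not in DELETING or DELETING_NOIO.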