block-5.6-20200320

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl51dZoQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpt1GD/4qD5KahkC9cdRRcliGoYrY5CLJbEx+Hwlu
 QNJKggGJKMs3f4KqD1JwGsZCppD9bPTD2qgjs8Hjkw6HCOabXx8elQBKwyhjVglu
 5ogv981fmbzPau3n5gQ1llqjF6aslKpSkA9arQPgKov7gIoa2U3Gc1ZIO5Mz/X/T
 J0Z4TqmjMhbjAyP9BqfVIyQyDR9WGvO4U/9XmTclKU4Rex7lT5JRiGVF0ZpqfXDQ
 pkJOaqsltZVXN0J6Uy2e0qL5nkWIFhfrqjvoBG2V/ivt9zOfiPzmt9DLNl3S/QyU
 TYtNvAg6wuw/DBOdDsLoHztQUWbBqUMhn6892ADc6786TwFZv0/Ytv+CqL2mxbYy
 wImli5cnowWNevkNFpm3RLAMA0Oi8NiULb31AmRP23OyGSPB51JWhpnlqEylRnz2
 aa8KkA+VHiuaMnsII6Caq6tXsXyNfoDPGYvy5vCIyzZXPTvvH4i7rPr0QYb6VjAa
 v89mGE+Nx/eiC9FFEzPCXU5tgA6AMiMzoqXodRoSUSl7Lm+iGm4pPhUX4EIia1NG
 6Jc1A4cOQTjM8mptJKPIEbAovHsVMRUext5pinQYtMB58R16ZQfpdzQY1ojJUllk
 u2nWPyGswifMfpJ7daMhhYx/z6yQNy/MOqEPG8dNyc96R6OJoFGe+Wp41/9oP4Ly
 OjnPSFygKA==
 =q5rs
 -----END PGP SIGNATURE-----

Merge tag 'block-5.6-20200320' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Just two NVMe fabrics fixes that should go into 5.6"

* tag 'block-5.6-20200320' of git://git.kernel.dk/linux-block:
  nvmet-tcp: set MSG_MORE only if we actually have more to send
  nvme-rdma: Avoid double freeing of async event data
Linus Torvalds 2020-03-21 12:08:26 -07:00
commit b74b991fb8
2 changed files with 14 additions and 6 deletions

drivers/nvme/host/rdma.c

@@ -850,9 +850,11 @@ out_free_tagset:
 	if (new)
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 out_free_async_qe:
-	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
-		sizeof(struct nvme_command), DMA_TO_DEVICE);
-	ctrl->async_event_sqe.data = NULL;
+	if (ctrl->async_event_sqe.data) {
+		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+			sizeof(struct nvme_command), DMA_TO_DEVICE);
+		ctrl->async_event_sqe.data = NULL;
+	}
 out_free_queue:
 	nvme_rdma_free_queue(&ctrl->queues[0]);
 	return error;
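
The rdma.c hunk above closes a possible double free: the error-unwind label can be reached after the async event SQE buffer has already been released on another path, so the free is now guarded by a check on the data pointer, which is cleared once the buffer is gone. A minimal user-space sketch of the same free-once idiom, with hypothetical names standing in for the DMA helpers:

#include <stdlib.h>

/* Hypothetical stand-in for the DMA-mapped async event buffer. */
struct async_qe {
	void *data;
};

/* Free at most once: only act on a live pointer, and clear it afterwards
 * so any later unwind path that reaches the same cleanup becomes a no-op. */
static void qe_free_once(struct async_qe *qe)
{
	if (qe->data) {
		free(qe->data);
		qe->data = NULL;
	}
}

int main(void)
{
	struct async_qe qe = { .data = malloc(64) };

	qe_free_once(&qe);	/* first caller releases the buffer */
	qe_free_once(&qe);	/* second path is now harmless */
	return 0;
}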

drivers/nvme/target/tcp.c

@@ -515,7 +515,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 	return 1;
 }
 
-static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	int ret;
@@ -523,9 +523,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
 	while (cmd->cur_sg) {
 		struct page *page = sg_page(cmd->cur_sg);
 		u32 left = cmd->cur_sg->length - cmd->offset;
+		int flags = MSG_DONTWAIT;
+
+		if ((!last_in_batch && cmd->queue->send_list_len) ||
+		    cmd->wbytes_done + left < cmd->req.transfer_len ||
+		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
+			flags |= MSG_MORE;
 
 		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-					left, MSG_DONTWAIT | MSG_MORE);
+					left, flags);
 		if (ret <= 0)
 			return ret;
 
@@ -660,7 +666,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
 	}
 
 	if (cmd->state == NVMET_TCP_SEND_DATA) {
-		ret = nvmet_try_send_data(cmd);
+		ret = nvmet_try_send_data(cmd, last_in_batch);
 		if (ret <= 0)
 			goto done_send;
 	}
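
The tcp.c hunks above stop corking the final data segment: MSG_MORE is set only when more bytes are known to follow, because another command is queued in the batch, the current transfer is not finished, or a digest or response still has to go out, so the last segment is pushed to the wire immediately instead of lingering in the TCP stack. A stripped-down user-space sketch of that flag selection over plain send(2); the struct and field names are invented for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Hypothetical per-command send state, loosely mirroring the fields the
 * fix consults: bytes already written, total transfer length, and whether
 * anything else (digest, response) will still be sent on this connection. */
struct send_state {
	size_t bytes_done;
	size_t transfer_len;
	bool   more_to_follow;	/* digest or response still pending */
	int    queued_cmds;	/* commands waiting behind this one in the batch */
};

/* Cork the segment with MSG_MORE only when something else is known to
 * follow; the last segment of the last command goes out uncorked so it
 * is not delayed by the TCP stack. */
ssize_t send_segment(int fd, const void *buf, size_t len,
		     const struct send_state *st, bool last_in_batch)
{
	int flags = MSG_DONTWAIT;

	if ((!last_in_batch && st->queued_cmds) ||
	    st->bytes_done + len < st->transfer_len ||
	    st->more_to_follow)
		flags |= MSG_MORE;

	return send(fd, buf, len, flags);
}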