Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few select fixes that should go into this series. Mainly for NVMe,
  but also a single stable fix for nbd from Josef"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nbd: handle interrupted sendmsg with a sndtimeo set
  nvme-rdma: Fix error status return in tagset allocation failure
  nvme-rdma: Fix possible double free in reconnect flow
  nvmet: synchronize sqhd update
  nvme-fc: retry initial controller connections 3 times
  nvme-fc: fix iowait hang
commit 3b5a9a8e65
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
@@ -386,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
 	return result;
 }
 
+/*
+ * Different settings for sk->sk_sndtimeo can result in different return values
+ * if there is a signal pending when we enter sendmsg, because reasons?
+ */
+static inline int was_interrupted(int result)
+{
+	return result == -ERESTARTSYS || result == -EINTR;
+}
+
 /* always call with the tx_lock held */
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
@@ -458,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	result = sock_xmit(nbd, index, 1, &from,
 			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
 	if (result <= 0) {
-		if (result == -ERESTARTSYS) {
+		if (was_interrupted(result)) {
 			/* If we havne't sent anything we can just return BUSY,
 			 * however if we have sent something we need to make
 			 * sure we only allow this req to be sent until we are
@@ -502,7 +511,7 @@ send_pages:
 		}
 		result = sock_xmit(nbd, index, 1, &from, flags, &sent);
 		if (result <= 0) {
-			if (result == -ERESTARTSYS) {
+			if (was_interrupted(result)) {
 				/* We've already sent the header, we
 				 * have no choice but to set pending and
 				 * return BUSY.
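Editor's note on the nbd fix above: whether an interrupted in-kernel sendmsg surfaces as -ERESTARTSYS or -EINTR depends on whether a send timeout (sk->sk_sndtimeo) is armed (sock_intr_errno() returns -EINTR when a finite timeout is set), so the driver has to accept both as "a signal arrived mid-send". The same split is visible from userspace: with SA_RESTART, a signal during a blocking send() is normally invisible, but once SO_SNDTIMEO is set the call fails with EINTR. A minimal userspace sketch of that behavior (assumes Linux and a connected socket fd; send_with_sndtimeo() is illustrative, not part of the patch):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	(void)sig;	/* exists only to interrupt the blocking send() */
}

ssize_t send_with_sndtimeo(int fd, const void *buf, size_t len)
{
	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
	struct sigaction sa;
	ssize_t n;

	/* Once a send timeout is armed, a pending signal makes the
	 * kernel return -EINTR instead of -ERESTARTSYS, so even an
	 * SA_RESTART handler no longer restarts the call. */
	setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sa.sa_flags = SA_RESTART;
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);		/* fire a signal one second into the send */

	n = send(fd, buf, len, 0);
	if (n < 0 && errno == EINTR)
		fprintf(stderr, "send() interrupted despite SA_RESTART\n");
	return n;
}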
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
@@ -2545,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
 	nvme_fc_abort_aen_ops(ctrl);
 
 	/* wait for all io that had to be aborted */
-	spin_lock_irqsave(&ctrl->lock, flags);
+	spin_lock_irq(&ctrl->lock);
 	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
 	ctrl->flags &= ~FCCTRL_TERMIO;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
+	spin_unlock_irq(&ctrl->lock);
 
 	nvme_fc_term_aen_ops(ctrl);
 
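The irqsave-to-irq switch here is not cosmetic: wait_event_lock_irq() drops and retakes the lock with spin_unlock_irq()/spin_lock_irq() while sleeping, which unconditionally re-enables interrupts, so the saved-flags variant only gave a false impression of restoring interrupt state. A generic sketch of the correct pairing (struct waiter_ctx and drain_inflight() are hypothetical; note the waitqueue must be initialized first, which is exactly what the init_waitqueue_head() hunk below adds for ioabort_wait):

#include <linux/spinlock.h>
#include <linux/wait.h>

struct waiter_ctx {
	spinlock_t		lock;
	wait_queue_head_t	wq;	/* must be init_waitqueue_head()'d */
	int			inflight;
};

/* Sleep until all in-flight items drain; the condition is checked
 * under ctx->lock, which wait_event_lock_irq() releases while asleep. */
static void drain_inflight(struct waiter_ctx *ctx)
{
	spin_lock_irq(&ctx->lock);
	wait_event_lock_irq(ctx->wq, ctx->inflight == 0, ctx->lock);
	spin_unlock_irq(&ctx->lock);
}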
@@ -2734,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 {
 	struct nvme_fc_ctrl *ctrl;
 	unsigned long flags;
-	int ret, idx;
+	int ret, idx, retry;
 
 	if (!(rport->remoteport.port_role &
 	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -2760,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ctrl->rport = rport;
 	ctrl->dev = lport->dev;
 	ctrl->cnum = idx;
+	init_waitqueue_head(&ctrl->ioabort_wait);
 
 	get_device(ctrl->dev);
 	kref_init(&ctrl->ref);
@@ -2825,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
 	spin_unlock_irqrestore(&rport->lock, flags);
 
-	ret = nvme_fc_create_association(ctrl);
+	/*
+	 * It's possible that transactions used to create the association
+	 * may fail. Examples: CreateAssociation LS or CreateIOConnection
+	 * LS gets dropped/corrupted/fails; or a frame gets dropped or a
+	 * command times out for one of the actions to init the controller
+	 * (Connect, Get/Set_Property, Set_Features, etc). Many of these
+	 * transport errors (frame drop, LS failure) inherently must kill
+	 * the association. The transport is coded so that any command used
+	 * to create the association (prior to a LIVE state transition
+	 * while NEW or RECONNECTING) will fail if it completes in error or
+	 * times out.
+	 *
+	 * As such: as the connect request was mostly likely due to a
+	 * udev event that discovered the remote port, meaning there is
+	 * not an admin or script there to restart if the connect
+	 * request fails, retry the initial connection creation up to
+	 * three times before giving up and declaring failure.
+	 */
+	for (retry = 0; retry < 3; retry++) {
+		ret = nvme_fc_create_association(ctrl);
+		if (!ret)
+			break;
+	}
+
 	if (ret) {
+		/* couldn't schedule retry - fail out */
+		dev_err(ctrl->ctrl.device,
+			"NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
+
 		ctrl->ctrl.opts = NULL;
+
 		/* initiate nvme ctrl ref counting teardown */
 		nvme_uninit_ctrl(&ctrl->ctrl);
 		nvme_put_ctrl(&ctrl->ctrl);
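As a usage note, the loop above is the general "nobody will retry for us" connect pattern described in the comment. A hedged standalone sketch (connect_fn is a hypothetical stand-in for nvme_fc_create_association(); the committed loop retries back-to-back, and adding a delay between attempts would be a separate design choice):

#include <linux/errno.h>

/* Attempt a transient-failure-prone connect up to max_attempts times,
 * returning 0 on the first success or the last error seen. */
static int connect_with_retries(void *ctx, int (*connect_fn)(void *),
				int max_attempts)
{
	int ret = -EIO;
	int attempt;

	for (attempt = 0; attempt < max_attempts; attempt++) {
		ret = connect_fn(ctx);
		if (!ret)
			break;
	}
	return ret;
}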
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
@@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 	if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
 		return;
 
+	if (nvme_rdma_queue_idx(queue) == 0) {
+		nvme_rdma_free_qe(queue->device->dev,
+			&queue->ctrl->async_event_sqe,
+			sizeof(struct nvme_command), DMA_TO_DEVICE);
+	}
+
 	nvme_rdma_destroy_queue_ib(queue);
 	rdma_destroy_id(queue->cm_id);
 }
@@ -739,8 +745,6 @@ out:
 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		bool remove)
 {
-	nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
-		sizeof(struct nvme_command), DMA_TO_DEVICE);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	if (remove) {
 		blk_cleanup_queue(ctrl->ctrl.admin_q);
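Taken together, these two hunks are the double-free fix: the async_event_sqe free moves out of nvme_rdma_destroy_admin_queue() (which the reconnect flow can reach more than once) into nvme_rdma_free_queue(), where the NVME_RDMA_Q_DELETING test_and_set_bit() guard guarantees the teardown body runs at most once. A generic sketch of that idempotent-teardown pattern (my_queue, Q_DELETING and the free step are hypothetical):

#include <linux/bitops.h>
#include <linux/types.h>

enum { Q_DELETING = 0 };

struct my_queue {
	unsigned long	flags;
	/* ... owned resources ... */
};

static void my_queue_teardown(struct my_queue *q)
{
	/* Atomically claim the teardown; a concurrent or repeated caller
	 * sees the bit already set and returns without double-freeing. */
	if (test_and_set_bit(Q_DELETING, &q->flags))
		return;

	/* free q's resources exactly once here */
}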
@@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
 	if (new) {
 		ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
-		if (IS_ERR(ctrl->ctrl.admin_tagset))
+		if (IS_ERR(ctrl->ctrl.admin_tagset)) {
+			error = PTR_ERR(ctrl->ctrl.admin_tagset);
 			goto out_free_queue;
+		}
 
 		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
 		if (IS_ERR(ctrl->ctrl.admin_q)) {
@@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 
 	if (new) {
 		ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
-		if (IS_ERR(ctrl->ctrl.tagset))
+		if (IS_ERR(ctrl->ctrl.tagset)) {
+			ret = PTR_ERR(ctrl->ctrl.tagset);
 			goto out_free_io_queues;
+		}
 
 		ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
 		if (IS_ERR(ctrl->ctrl.connect_q)) {
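Both tagset hunks fix the same bug shape: on allocation failure the IS_ERR() branch jumped to the unwind label without converting the ERR_PTR into a status, so the function could exit with a stale (possibly zero, i.e. success) return code. A minimal sketch of the corrected pattern (my_ctrl and alloc_tagset_fn are hypothetical stand-ins):

#include <linux/blk-mq.h>
#include <linux/err.h>

struct my_ctrl {
	struct blk_mq_tag_set	*tagset;
};

static int my_configure(struct my_ctrl *ctrl,
		struct blk_mq_tag_set *(*alloc_tagset_fn)(struct my_ctrl *))
{
	int ret = 0;

	ctrl->tagset = alloc_tagset_fn(ctrl);
	if (IS_ERR(ctrl->tagset)) {
		/* Convert the encoded pointer to an errno *before* the
		 * unwind jump, or the caller sees a bogus status. */
		ret = PTR_ERR(ctrl->tagset);
		goto out;
	}

	/* ... further setup ... */
out:
	return ret;
}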
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
@@ -387,12 +387,21 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 
 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
+	u32 old_sqhd, new_sqhd;
+	u16 sqhd;
+
 	if (status)
 		nvmet_set_status(req, status);
 
-	if (req->sq->size)
-		req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
-	req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
+	if (req->sq->size) {
+		do {
+			old_sqhd = req->sq->sqhd;
+			new_sqhd = (old_sqhd + 1) % req->sq->size;
+		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
+				old_sqhd);
+	}
+	sqhd = req->sq->sqhd & 0x0000FFFF;
+	req->rsp->sq_head = cpu_to_le16(sqhd);
 	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
 	req->rsp->command_id = req->cmd->common.command_id;
 
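This hunk replaces a racy read-modify-write of sqhd: two concurrent completions could read the same value, both increment it, and publish a duplicate (or skipped) head pointer. The fix is a classic cmpxchg() retry loop. A standalone sketch of that lockless wrap-around increment (the READ_ONCE() is an addition here for clarity; the committed code reads the field directly):

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/types.h>

/* Advance *sqhd by one slot, wrapping at qsize, without a lock.
 * cmpxchg() fails if another completion raced in between our read and
 * our update, in which case we retry with the fresh value. */
static u32 sqhd_advance(u32 *sqhd, u32 qsize)
{
	u32 old, new;

	do {
		old = READ_ONCE(*sqhd);
		new = (old + 1) % qsize;
	} while (cmpxchg(sqhd, old, new) != old);

	return new;
}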
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
@@ -74,7 +74,7 @@ struct nvmet_sq {
 	struct percpu_ref	ref;
 	u16			qid;
 	u16			size;
-	u16			sqhd;
+	u32			sqhd;
 	struct completion	free_done;
 	struct completion	confirm_done;
 };
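Finally, the header change widens sqhd from u16 to u32 even though NVMe's SQ head pointer is a 16-bit field on the wire; presumably this is because cmpxchg() is not guaranteed to work on 16-bit quantities on every architecture the kernel supports. The completion path therefore masks the value back down when reporting it, as the core.c hunk above shows:

/* Only the low 16 bits are wire-visible; mask before byte-swapping. */
u16 sqhd = req->sq->sqhd & 0x0000FFFF;
req->rsp->sq_head = cpu_to_le16(sqhd);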