scsi: virtio_scsi: fix IO hang caused by automatic irq vector affinity
Since commit 84676c1f21 ("genirq/affinity: assign vectors to all possible
CPUs") it is possible to end up in a scenario where only offline CPUs are
mapped to an interrupt vector. This is only an issue for the legacy I/O
path since with blk-mq/scsi-mq an I/O can't be submitted to a hardware
queue if the queue isn't mapped to an online CPU.

Fix this issue by forcing virtio-scsi to use blk-mq.

[mkp: commit desc]

Cc: Omar Sandoval <osandov@fb.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: James Bottomley <james.bottomley@hansenpartnership.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Don Brace <don.brace@microsemi.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Laurence Oberman <loberman@redhat.com>
Fixes: 84676c1f21 ("genirq/affinity: assign vectors to all possible CPUs")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 2f31115e94
commit b5b6e8c8d3
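The whole fix comes down to one new flag in the driver's host templates, plus deleting the legacy virtqueue-selection code that the flag makes unreachable. A minimal sketch of the idea, showing only the fields touched by this patch (the rest of the template is elided):

/*
 * Sketch, not the full driver: with .force_blk_mq set, the SCSI core
 * always uses blk-mq/scsi-mq for this host, so commands are only ever
 * dispatched to hardware queues mapped to online CPUs, and the old
 * virtscsi_pick_vq() path (seqcount + per-target request counter) can
 * be removed.
 */
static struct scsi_host_template virtscsi_host_template_multi = {
	/* ... existing callbacks (.queuecommand, .target_alloc, ...) ... */
	.map_queues		= virtscsi_map_queues,
	.track_queue_depth	= 1,
	.force_blk_mq		= 1,	/* the actual fix */
};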
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -91,9 +91,6 @@ struct virtio_scsi_vq {
 struct virtio_scsi_target_state {
 	seqcount_t tgt_seq;
 
-	/* Count of outstanding requests. */
-	atomic_t reqs;
-
 	/* Currently active virtqueue for requests sent to this target. */
 	struct virtio_scsi_vq *req_vq;
 };
@@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
 	struct virtio_scsi_cmd *cmd = buf;
 	struct scsi_cmnd *sc = cmd->sc;
 	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
-	struct virtio_scsi_target_state *tgt =
-				scsi_target(sc->device)->hostdata;
 
 	dev_dbg(&sc->device->sdev_gendev,
 		"cmd %p response %u status %#02x sense_len %u\n",
@@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
 	}
 
 	sc->scsi_done(sc);
-
-	atomic_dec(&tgt->reqs);
 }
 
 static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -580,10 +573,7 @@ static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
 					 struct scsi_cmnd *sc)
 {
 	struct virtio_scsi *vscsi = shost_priv(sh);
-	struct virtio_scsi_target_state *tgt =
-				scsi_target(sc->device)->hostdata;
 
-	atomic_inc(&tgt->reqs);
 	return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
 }
 
@@ -596,55 +586,11 @@ static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
 	return &vscsi->req_vqs[hwq];
 }
 
-static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
-					       struct virtio_scsi_target_state *tgt)
-{
-	struct virtio_scsi_vq *vq;
-	unsigned long flags;
-	u32 queue_num;
-
-	local_irq_save(flags);
-	if (atomic_inc_return(&tgt->reqs) > 1) {
-		unsigned long seq;
-
-		do {
-			seq = read_seqcount_begin(&tgt->tgt_seq);
-			vq = tgt->req_vq;
-		} while (read_seqcount_retry(&tgt->tgt_seq, seq));
-	} else {
-		/* no writes can be concurrent because of atomic_t */
-		write_seqcount_begin(&tgt->tgt_seq);
-
-		/* keep previous req_vq if a reader just arrived */
-		if (unlikely(atomic_read(&tgt->reqs) > 1)) {
-			vq = tgt->req_vq;
-			goto unlock;
-		}
-
-		queue_num = smp_processor_id();
-		while (unlikely(queue_num >= vscsi->num_queues))
-			queue_num -= vscsi->num_queues;
-		tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
- unlock:
-		write_seqcount_end(&tgt->tgt_seq);
-	}
-	local_irq_restore(flags);
-
-	return vq;
-}
-
 static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
 				       struct scsi_cmnd *sc)
 {
 	struct virtio_scsi *vscsi = shost_priv(sh);
-	struct virtio_scsi_target_state *tgt =
-				scsi_target(sc->device)->hostdata;
-	struct virtio_scsi_vq *req_vq;
-
-	if (shost_use_blk_mq(sh))
-		req_vq = virtscsi_pick_vq_mq(vscsi, sc);
-	else
-		req_vq = virtscsi_pick_vq(vscsi, tgt);
+	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
 
 	return virtscsi_queuecommand(vscsi, req_vq, sc);
 }
@@ -775,7 +721,6 @@ static int virtscsi_target_alloc(struct scsi_target *starget)
 		return -ENOMEM;
 
 	seqcount_init(&tgt->tgt_seq);
-	atomic_set(&tgt->reqs, 0);
 	tgt->req_vq = &vscsi->req_vqs[0];
 
 	starget->hostdata = tgt;
@@ -823,6 +768,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
 	.target_alloc = virtscsi_target_alloc,
 	.target_destroy = virtscsi_target_destroy,
 	.track_queue_depth = 1,
+	.force_blk_mq = 1,
 };
 
 static struct scsi_host_template virtscsi_host_template_multi = {
@@ -844,6 +790,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
 	.target_destroy = virtscsi_target_destroy,
 	.map_queues = virtscsi_map_queues,
 	.track_queue_depth = 1,
+	.force_blk_mq = 1,
 };
 
 #define virtscsi_config_get(vdev, fld) \