block,scsi: fixup blk_get_request dead queue scenarios
The blk_get_request function may fail in low-memory conditions or during device removal (even if __GFP_WAIT is set). To distinguish between these errors, modify the blk_get_request call stack to return the appropriate ERR_PTR. Verify that all callers check the return status and use IS_ERR() instead of a simple NULL pointer check. For consistency, make a similar change to the blk_mq_alloc_request leg of blk_get_request; it may fail if the queue is dead or the caller was unwilling to wait.

Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
Acked-by: Jiri Kosina <jkosina@suse.cz> [for pktdvd]
Acked-by: Boaz Harrosh <bharrosh@panasas.com> [for osd]
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit a492f07545
parent eb571eeade
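The same caller-side conversion recurs throughout the diff below: a NULL test after blk_get_request() becomes an IS_ERR() test, and the propagated error comes from PTR_ERR(). A minimal sketch of that pattern follows; my_issue_cmd() is a hypothetical helper invented for illustration, not part of this patch.

/*
 * Caller-side sketch only. my_issue_cmd() is hypothetical; the
 * IS_ERR()/PTR_ERR() handling mirrors the conversions in this patch.
 */
#include <linux/blkdev.h>
#include <linux/err.h>

static int my_issue_cmd(struct request_queue *q)
{
        struct request *rq;

        rq = blk_get_request(q, WRITE, __GFP_WAIT);
        if (IS_ERR(rq))                 /* was: if (!rq) */
                return PTR_ERR(rq);     /* -ENODEV (dead queue) or -ENOMEM */

        blk_rq_set_block_pc(rq);
        /* ... set up rq->cmd[], timeout, etc., then issue the request ... */
        blk_put_request(rq);
        return 0;
}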
@@ -933,9 +933,9 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Get a free request from @q. This function may fail under memory
  * pressure or if @q is dead.
  *
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *__get_request(struct request_list *rl, int rw_flags,
                                      struct bio *bio, gfp_t gfp_mask)
@@ -949,7 +949,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
         int may_queue;
 
         if (unlikely(blk_queue_dying(q)))
-                return NULL;
+                return ERR_PTR(-ENODEV);
 
         may_queue = elv_may_queue(q, rw_flags);
         if (may_queue == ELV_MQUEUE_NO)
@@ -974,7 +974,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
                                          * process is not a "batcher", and not
                                          * exempted by the IO scheduler
                                          */
-                                        return NULL;
+                                        return ERR_PTR(-ENOMEM);
                                 }
                         }
                 }
@@ -992,7 +992,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
          * allocated with any setting of ->nr_requests
          */
         if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
-                return NULL;
+                return ERR_PTR(-ENOMEM);
 
         q->nr_rqs[is_sync]++;
         rl->count[is_sync]++;
@@ -1097,7 +1097,7 @@ fail_alloc:
 rq_starved:
         if (unlikely(rl->count[is_sync] == 0))
                 rl->starved[is_sync] = 1;
-        return NULL;
+        return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -1110,9 +1110,9 @@ rq_starved:
  * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
  * function keeps retrying under memory pressure and fails iff @q is dead.
  *
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, int rw_flags,
                                    struct bio *bio, gfp_t gfp_mask)
@@ -1125,12 +1125,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
         rq = __get_request(rl, rw_flags, bio, gfp_mask);
-        if (rq)
+        if (!IS_ERR(rq))
                 return rq;
 
         if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
                 blk_put_rl(rl);
-                return NULL;
+                return rq;
         }
 
         /* wait on @rl and retry */
@@ -1167,7 +1167,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 
         spin_lock_irq(q->queue_lock);
         rq = get_request(q, rw, NULL, gfp_mask);
-        if (!rq)
+        if (IS_ERR(rq))
                 spin_unlock_irq(q->queue_lock);
         /* q->queue_lock is unlocked at this point */
 
@@ -1219,8 +1219,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
 {
         struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
 
-        if (unlikely(!rq))
-                return ERR_PTR(-ENOMEM);
+        if (IS_ERR(rq))
+                return rq;
 
         blk_rq_set_block_pc(rq);
 
@@ -1615,8 +1615,8 @@ get_rq:
          * Returns with the queue unlocked.
          */
         req = get_request(q, rw_flags, bio, GFP_NOIO);
-        if (unlikely(!req)) {
-                bio_endio(bio, -ENODEV);        /* @q is dead */
+        if (IS_ERR(req)) {
+                bio_endio(bio, PTR_ERR(req));   /* @q is dead */
                 goto out_unlock;
         }
 
@@ -218,9 +218,11 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
         struct blk_mq_hw_ctx *hctx;
         struct request *rq;
         struct blk_mq_alloc_data alloc_data;
+        int ret;
 
-        if (blk_mq_queue_enter(q))
-                return NULL;
+        ret = blk_mq_queue_enter(q);
+        if (ret)
+                return ERR_PTR(ret);
 
         ctx = blk_mq_get_ctx(q);
         hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -240,6 +242,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                 ctx = alloc_data.ctx;
         }
         blk_mq_put_ctx(ctx);
+        if (!rq)
+                return ERR_PTR(-EWOULDBLOCK);
         return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -270,8 +270,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
          * map scatter-gather elements separately and string them to request
          */
         rq = blk_get_request(q, rw, GFP_KERNEL);
-        if (!rq)
-                return ERR_PTR(-ENOMEM);
+        if (IS_ERR(rq))
+                return rq;
         blk_rq_set_block_pc(rq);
 
         ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
@@ -285,8 +285,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
                 }
 
                 next_rq = blk_get_request(q, READ, GFP_KERNEL);
-                if (!next_rq) {
-                        ret = -ENOMEM;
+                if (IS_ERR(next_rq)) {
+                        ret = PTR_ERR(next_rq);
                         goto out;
                 }
                 rq->next_rq = next_rq;
@@ -318,8 +318,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
                 at_head = 1;
 
         rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
-        if (!rq)
-                return -ENOMEM;
+        if (IS_ERR(rq))
+                return PTR_ERR(rq);
         blk_rq_set_block_pc(rq);
 
         if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
@@ -448,8 +448,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
         }
 
         rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
-        if (!rq) {
-                err = -ENODEV;
+        if (IS_ERR(rq)) {
+                err = PTR_ERR(rq);
                 goto error_free_buffer;
         }
 
@@ -539,8 +539,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
         int err;
 
         rq = blk_get_request(q, WRITE, __GFP_WAIT);
-        if (!rq)
-                return -ENODEV;
+        if (IS_ERR(rq))
+                return PTR_ERR(rq);
         blk_rq_set_block_pc(rq);
         rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
         rq->cmd[0] = cmd;
@@ -722,8 +722,8 @@ static int pd_special_command(struct pd_unit *disk,
         int err = 0;
 
         rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
-        if (!rq)
-                return -ENODEV;
+        if (IS_ERR(rq))
+                return PTR_ERR(rq);
 
         rq->cmd_type = REQ_TYPE_SPECIAL;
         rq->special = func;
@@ -704,8 +704,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 
         rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
                              WRITE : READ, __GFP_WAIT);
-        if (!rq)
-                return -ENODEV;
+        if (IS_ERR(rq))
+                return PTR_ERR(rq);
         blk_rq_set_block_pc(rq);
 
         if (cgc->buflen) {
@@ -568,7 +568,7 @@ static struct carm_request *carm_get_special(struct carm_host *host)
                 return NULL;
 
         rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
-        if (!rq) {
+        if (IS_ERR(rq)) {
                 spin_lock_irqsave(&host->lock, flags);
                 carm_put_request(host, crq);
                 spin_unlock_irqrestore(&host->lock, flags);
@@ -2180,8 +2180,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                 len = nr * CD_FRAMESIZE_RAW;
 
                 rq = blk_get_request(q, READ, GFP_KERNEL);
-                if (!rq) {
-                        ret = -ENOMEM;
+                if (IS_ERR(rq)) {
+                        ret = PTR_ERR(rq);
                         break;
                 }
                 blk_rq_set_block_pc(rq);
@@ -46,7 +46,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
          * timeout has expired, so power management will be reenabled.
          */
         rq = blk_get_request(q, READ, GFP_NOWAIT);
-        if (unlikely(!rq))
+        if (IS_ERR(rq))
                 goto out;
 
         rq->cmd[0] = REQ_UNPARK_HEADS;
@@ -115,7 +115,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
 
         rq = blk_get_request(q, rw, GFP_NOIO);
 
-        if (!rq) {
+        if (IS_ERR(rq)) {
                 sdev_printk(KERN_INFO, sdev,
                             "%s: blk_get_request failed\n", __func__);
                 return NULL;
@@ -275,7 +275,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
 
         rq = blk_get_request(sdev->request_queue,
                         (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
-        if (!rq) {
+        if (IS_ERR(rq)) {
                 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
                 return NULL;
         }
@@ -117,7 +117,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 
 retry:
         req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
-        if (!req)
+        if (IS_ERR(req))
                 return SCSI_DH_RES_TEMP_UNAVAIL;
 
         blk_rq_set_block_pc(req);
@@ -247,7 +247,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
         struct request *req;
 
         req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
-        if (!req)
+        if (IS_ERR(req))
                 return SCSI_DH_RES_TEMP_UNAVAIL;
 
         blk_rq_set_block_pc(req);
@@ -274,7 +274,7 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
 
         rq = blk_get_request(q, rw, GFP_NOIO);
 
-        if (!rq) {
+        if (IS_ERR(rq)) {
                 sdev_printk(KERN_INFO, sdev,
                                 "get_rdac_req: blk_get_request failed.\n");
                 return NULL;
@@ -1567,8 +1567,8 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
         struct request *req;
 
         req = blk_get_request(q, has_write ? WRITE : READ, flags);
-        if (unlikely(!req))
-                return ERR_PTR(-ENOMEM);
+        if (IS_ERR(req))
+                return req;
 
         blk_rq_set_block_pc(req);
         return req;
@@ -362,7 +362,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
         int write = (data_direction == DMA_TO_DEVICE);
 
         req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
-        if (!req)
+        if (IS_ERR(req))
                 return DRIVER_ERROR << 24;
 
         blk_rq_set_block_pc(req);
@@ -1960,7 +1960,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
          * request becomes available
          */
         req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
-        if (!req)
+        if (IS_ERR(req))
                 return;
 
         blk_rq_set_block_pc(req);
@@ -221,7 +221,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
         int ret = DRIVER_ERROR << 24;
 
         req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
-        if (!req)
+        if (IS_ERR(req))
                 return ret;
         blk_rq_set_block_pc(req);
 
@@ -1711,9 +1711,9 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
         }
 
         rq = blk_get_request(q, rw, GFP_ATOMIC);
-        if (!rq) {
+        if (IS_ERR(rq)) {
                 kfree(long_cmdp);
-                return -ENOMEM;
+                return PTR_ERR(rq);
         }
 
         blk_rq_set_block_pc(rq);
@@ -490,7 +490,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 
         req = blk_get_request(SRpnt->stp->device->request_queue, write,
                               GFP_KERNEL);
-        if (!req)
+        if (IS_ERR(req))
                 return DRIVER_ERROR << 24;
 
         blk_rq_set_block_pc(req);
@@ -1050,7 +1050,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
         req = blk_get_request(pdv->pdv_sd->request_queue,
                         (data_direction == DMA_TO_DEVICE),
                         GFP_KERNEL);
-        if (!req) {
+        if (IS_ERR(req)) {
                 pr_err("PSCSI: blk_get_request() failed\n");
                 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                 goto fail;