xen-blkfront: use blk_mq_alloc_disk and blk_cleanup_disk
Use blk_mq_alloc_disk and blk_cleanup_disk to simplify the gendisk and
request_queue allocation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Link: https://lore.kernel.org/r/20210602065345.355274-26-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 3b62c140e9
parent 693874035e
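For context, the allocation pattern this series converts drivers to looks roughly as follows. This is a minimal sketch rather than the xen-blkfront code: the mydev_* names, the single hardware queue, and the fixed queue depth of 128 are invented for illustration, while blk_mq_alloc_tag_set(), blk_mq_alloc_disk() and blk_mq_free_tag_set() are the block-layer API the commit itself uses.

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/string.h>

struct mydev {
	struct blk_mq_tag_set tag_set;
	struct gendisk *gd;
};

/* A real driver fills in .queue_rq and friends; elided in this sketch. */
static const struct blk_mq_ops mydev_mq_ops = {
};

static int mydev_alloc_disk(struct mydev *dev)
{
	int err;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops = &mydev_mq_ops;
	dev->tag_set.nr_hw_queues = 1;	/* hypothetical: one hw queue */
	dev->tag_set.queue_depth = 128;	/* hypothetical fixed depth */
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	err = blk_mq_alloc_tag_set(&dev->tag_set);
	if (err)
		return err;

	/*
	 * One call allocates the gendisk together with its request_queue
	 * and stores the second argument in queue->queuedata, replacing
	 * the old blk_mq_init_queue() + alloc_disk() two-step.
	 */
	dev->gd = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(dev->gd)) {
		err = PTR_ERR(dev->gd);
		blk_mq_free_tag_set(&dev->tag_set);
		return err;
	}
	return 0;
}

Because blk_mq_alloc_disk() hands back the disk with gd->queue already set up from the tag set, the driver no longer needs a separate helper such as xlvbd_init_blk_queue() to create and wire up the queue, which is what the first hunk below deletes.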
@@ -968,48 +968,6 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 	blk_queue_dma_alignment(rq, 511);
 }
 
-static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-				unsigned int physical_sector_size)
-{
-	struct request_queue *rq;
-	struct blkfront_info *info = gd->private_data;
-
-	memset(&info->tag_set, 0, sizeof(info->tag_set));
-	info->tag_set.ops = &blkfront_mq_ops;
-	info->tag_set.nr_hw_queues = info->nr_rings;
-	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
-		/*
-		 * When indirect descriptior is not supported, the I/O request
-		 * will be split between multiple request in the ring.
-		 * To avoid problems when sending the request, divide by
-		 * 2 the depth of the queue.
-		 */
-		info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
-	} else
-		info->tag_set.queue_depth = BLK_RING_SIZE(info);
-	info->tag_set.numa_node = NUMA_NO_NODE;
-	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	info->tag_set.cmd_size = sizeof(struct blkif_req);
-	info->tag_set.driver_data = info;
-
-	if (blk_mq_alloc_tag_set(&info->tag_set))
-		return -EINVAL;
-	rq = blk_mq_init_queue(&info->tag_set);
-	if (IS_ERR(rq)) {
-		blk_mq_free_tag_set(&info->tag_set);
-		return PTR_ERR(rq);
-	}
-
-	rq->queuedata = info;
-	info->rq = gd->queue = rq;
-	info->gd = gd;
-	info->sector_size = sector_size;
-	info->physical_sector_size = physical_sector_size;
-	blkif_set_queue_limits(info);
-
-	return 0;
-}
-
 static const char *flush_info(struct blkfront_info *info)
 {
 	if (info->feature_flush && info->feature_fua)
@@ -1146,12 +1104,36 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 
 	err = xlbd_reserve_minors(minor, nr_minors);
 	if (err)
-		goto out;
-	err = -ENODEV;
+		return err;
 
-	gd = alloc_disk(nr_minors);
-	if (gd == NULL)
-		goto release;
+	memset(&info->tag_set, 0, sizeof(info->tag_set));
+	info->tag_set.ops = &blkfront_mq_ops;
+	info->tag_set.nr_hw_queues = info->nr_rings;
+	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
+		/*
+		 * When indirect descriptior is not supported, the I/O request
+		 * will be split between multiple request in the ring.
+		 * To avoid problems when sending the request, divide by
+		 * 2 the depth of the queue.
+		 */
+		info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
+	} else
+		info->tag_set.queue_depth = BLK_RING_SIZE(info);
+	info->tag_set.numa_node = NUMA_NO_NODE;
+	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	info->tag_set.cmd_size = sizeof(struct blkif_req);
+	info->tag_set.driver_data = info;
+
+	err = blk_mq_alloc_tag_set(&info->tag_set);
+	if (err)
+		goto out_release_minors;
+
+	gd = blk_mq_alloc_disk(&info->tag_set, info);
+	if (IS_ERR(gd)) {
+		err = PTR_ERR(gd);
+		goto out_free_tag_set;
+	}
 
 	strcpy(gd->disk_name, DEV_NAME);
 	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@@ -1164,14 +1146,16 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 
 	gd->major = XENVBD_MAJOR;
 	gd->first_minor = minor;
+	gd->minors = nr_minors;
 	gd->fops = &xlvbd_block_fops;
 	gd->private_data = info;
 	set_capacity(gd, capacity);
 
-	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
-		del_gendisk(gd);
-		goto release;
-	}
+	info->rq = gd->queue;
+	info->gd = gd;
+	info->sector_size = sector_size;
+	info->physical_sector_size = physical_sector_size;
+	blkif_set_queue_limits(info);
 
 	xlvbd_flush(info);
 
@@ -1186,9 +1170,10 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 
 	return 0;
 
-release:
+out_free_tag_set:
+	blk_mq_free_tag_set(&info->tag_set);
+out_release_minors:
 	xlbd_release_minors(minor, nr_minors);
-out:
 	return err;
 }
 
@@ -1217,12 +1202,9 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 	nr_minors = info->gd->minors;
 	xlbd_release_minors(minor, nr_minors);
 
-	blk_cleanup_queue(info->rq);
-	blk_mq_free_tag_set(&info->tag_set);
 	info->rq = NULL;
-
-	put_disk(info->gd);
+	blk_cleanup_disk(info->gd);
 	info->gd = NULL;
+	blk_mq_free_tag_set(&info->tag_set);
 }
 
 /* Already hold rinfo->ring_lock. */
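On the release side, this last hunk collapses the old blk_cleanup_queue() + put_disk() pair into a single blk_cleanup_disk() call, and the tag set is now freed after the disk rather than before it. As a hedged sketch of that ordering, reusing the hypothetical mydev driver from the example above:

static void mydev_free_disk(struct mydev *dev)
{
	/*
	 * blk_cleanup_disk() tears down the request_queue and drops the
	 * gendisk reference in one call.  The tag set must outlive the
	 * queue, so it is freed last; this mirrors the ordering the new
	 * xlvbd_release_gendisk() uses.
	 */
	blk_cleanup_disk(dev->gd);
	dev->gd = NULL;
	blk_mq_free_tag_set(&dev->tag_set);
}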