nvme: add common helpers to allocate and free tagsets

Add common helpers to allocate and tear down the admin and I/O tag sets,
including the special queues allocated with them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
commit fe60e8c534
parent 3c69ed7aa5
Author: Christoph Hellwig <hch@lst.de>
Date:   2022-09-04 15:18:30 +03:00

2 changed files with 110 additions and 0 deletions
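
As a usage illustration (not part of this patch), here is a minimal sketch of how a fabrics transport could call the new helpers from its setup and teardown paths. The foo_* names (struct foo_ctrl, struct foo_request, foo_admin_mq_ops, foo_mq_ops) and the BLK_MQ_F_* flag choices are hypothetical placeholders, not taken from this commit:

#include "nvme.h"	/* drivers/nvme/host/nvme.h, declares the new helpers */

/* Placeholder per-command private data carried in each request's PDU. */
struct foo_request {
	struct nvme_request	req;	/* must stay first so nvme_req() works */
	/* ... transport-specific per-command state ... */
};

/* Placeholder controller; only the fields used below are shown. */
struct foo_ctrl {
	struct nvme_ctrl	ctrl;
	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;
};

/* Placeholder blk_mq_ops; a real transport fills in .queue_rq etc. */
static const struct blk_mq_ops foo_admin_mq_ops = { };
static const struct blk_mq_ops foo_mq_ops = { };

static int foo_configure_admin_queue(struct foo_ctrl *fctrl)
{
	/*
	 * One call now sets up the admin tag set plus ctrl->admin_q, and
	 * also ctrl->fabrics_q for fabrics controllers.
	 */
	return nvme_alloc_admin_tag_set(&fctrl->ctrl, &fctrl->admin_tag_set,
			&foo_admin_mq_ops, BLK_MQ_F_NO_SCHED,
			sizeof(struct foo_request));
}

static int foo_configure_io_queues(struct foo_ctrl *fctrl)
{
	/* Likewise for the I/O tag set and, on fabrics, ctrl->connect_q. */
	return nvme_alloc_io_tag_set(&fctrl->ctrl, &fctrl->tag_set,
			&foo_mq_ops, BLK_MQ_F_SHOULD_MERGE,
			sizeof(struct foo_request));
}

static void foo_teardown(struct foo_ctrl *fctrl)
{
	/*
	 * Tear down the I/O side first; the admin queue is typically needed
	 * until the end (e.g. for shutdown), so it goes last.
	 */
	nvme_remove_io_tag_set(&fctrl->ctrl);
	nvme_remove_admin_tag_set(&fctrl->ctrl);
}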

drivers/nvme/host/core.c

@@ -4796,6 +4796,108 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int flags,
		unsigned int cmd_size)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	if (ctrl->ops->flags & NVME_F_FABRICS)
		set->reserved_tags = NVMF_RESERVED_TAGS;
	set->numa_node = ctrl->numa_node;
	set->flags = flags;
	set->cmd_size = cmd_size;
	set->driver_data = ctrl;
	set->nr_hw_queues = 1;
	set->timeout = NVME_ADMIN_TIMEOUT;
	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	ctrl->admin_q = blk_mq_init_queue(set);
	if (IS_ERR(ctrl->admin_q)) {
		ret = PTR_ERR(ctrl->admin_q);
		goto out_free_tagset;
	}

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->fabrics_q = blk_mq_init_queue(set);
		if (IS_ERR(ctrl->fabrics_q)) {
			ret = PTR_ERR(ctrl->fabrics_q);
			goto out_cleanup_admin_q;
		}
	}

	ctrl->admin_tagset = set;
	return 0;

out_cleanup_admin_q:
	blk_mq_destroy_queue(ctrl->admin_q);
out_free_tagset:
	blk_mq_free_tag_set(ctrl->admin_tagset);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);

void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
{
	blk_mq_destroy_queue(ctrl->admin_q);
	if (ctrl->ops->flags & NVME_F_FABRICS)
		blk_mq_destroy_queue(ctrl->fabrics_q);
	blk_mq_free_tag_set(ctrl->admin_tagset);
}
EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);

int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int flags,
		unsigned int cmd_size)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = ctrl->sqsize + 1;
	set->reserved_tags = NVMF_RESERVED_TAGS;
	set->numa_node = ctrl->numa_node;
	set->flags = flags;
	set->cmd_size = cmd_size;
	set->driver_data = ctrl;
	set->nr_hw_queues = ctrl->queue_count - 1;
	set->timeout = NVME_IO_TIMEOUT;
	if (ops->map_queues)
		set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->connect_q = blk_mq_init_queue(set);
		if (IS_ERR(ctrl->connect_q)) {
			ret = PTR_ERR(ctrl->connect_q);
			goto out_free_tag_set;
		}
	}

	ctrl->tagset = set;
	return 0;

out_free_tag_set:
	blk_mq_free_tag_set(set);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);

void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
{
	if (ctrl->ops->flags & NVME_F_FABRICS)
		blk_mq_destroy_queue(ctrl->connect_q);
	blk_mq_free_tag_set(ctrl->tagset);
}
EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);

drivers/nvme/host/nvme.h

@@ -737,6 +737,14 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int flags,
		unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int flags,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);