blk-mq-rdma: pass in queue map to blk_mq_rdma_map_queues
Will be used by nvme-rdma for queue map separation support.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent
23454d59cc
commit
e42b3867de
@ -29,24 +29,24 @@
|
|||||||
* @set->nr_hw_queues, or @dev does not provide an affinity mask for a
|
* @set->nr_hw_queues, or @dev does not provide an affinity mask for a
|
||||||
* vector, we fallback to the naive mapping.
|
* vector, we fallback to the naive mapping.
|
||||||
*/
|
*/
|
||||||
int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
|
int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
|
||||||
struct ib_device *dev, int first_vec)
|
struct ib_device *dev, int first_vec)
|
||||||
{
|
{
|
||||||
const struct cpumask *mask;
|
const struct cpumask *mask;
|
||||||
unsigned int queue, cpu;
|
unsigned int queue, cpu;
|
||||||
|
|
||||||
for (queue = 0; queue < set->nr_hw_queues; queue++) {
|
for (queue = 0; queue < map->nr_queues; queue++) {
|
||||||
mask = ib_get_vector_affinity(dev, first_vec + queue);
|
mask = ib_get_vector_affinity(dev, first_vec + queue);
|
||||||
if (!mask)
|
if (!mask)
|
||||||
goto fallback;
|
goto fallback;
|
||||||
|
|
||||||
for_each_cpu(cpu, mask)
|
for_each_cpu(cpu, mask)
|
||||||
set->map[0].mq_map[cpu] = queue;
|
map->mq_map[cpu] = map->queue_offset + queue;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
fallback:
|
fallback:
|
||||||
return blk_mq_map_queues(&set->map[0]);
|
return blk_mq_map_queues(map);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
|
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
|
||||||
|
@ -1751,7 +1751,7 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
|
|||||||
{
|
{
|
||||||
struct nvme_rdma_ctrl *ctrl = set->driver_data;
|
struct nvme_rdma_ctrl *ctrl = set->driver_data;
|
||||||
|
|
||||||
return blk_mq_rdma_map_queues(set, ctrl->device->dev, 0);
|
return blk_mq_rdma_map_queues(&set->map[0], ctrl->device->dev, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct blk_mq_ops nvme_rdma_mq_ops = {
|
static const struct blk_mq_ops nvme_rdma_mq_ops = {
|
||||||
|
struct blk_mq_tag_set;
/*
 * Forward-declare at file scope: without this, the struct named only
 * inside the prototype's parameter list would have prototype scope
 * (C11 6.2.1) and trigger a -Wvisibility-style warning.
 */
struct blk_mq_queue_map;
struct ib_device;

int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
		struct ib_device *dev, int first_vec);

#endif /* _LINUX_BLK_MQ_RDMA_H */
|
Loading…
Reference in New Issue
Block a user