2019-04-30 18:42:39 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2017-07-13 08:09:43 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2017 Sagi Grimberg.
|
|
|
|
*/
|
|
|
|
#include <linux/blk-mq.h>
|
|
|
|
#include <linux/blk-mq-rdma.h>
|
|
|
|
#include <rdma/ib_verbs.h>
|
|
|
|
|
|
|
|
/**
|
|
|
|
* blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
|
2019-05-31 00:00:49 +00:00
|
|
|
* @map: CPU to hardware queue map.
|
|
|
|
* @dev: rdma device to provide a mapping for.
|
2017-07-13 08:09:43 +00:00
|
|
|
* @first_vec: first interrupt vectors to use for queues (usually 0)
|
|
|
|
*
|
|
|
|
* This function assumes the rdma device @dev has at least as many available
|
|
|
|
 * interrupt vectors as @map has queues. It will then query its affinity mask
|
|
|
|
 * and build a queue mapping that maps a queue to the CPUs that have irq affinity
|
|
|
|
* for the corresponding vector.
|
|
|
|
*
|
|
|
|
 * In case either the driver passed a @dev with fewer vectors than
|
|
|
|
 * @map->nr_queues, or @dev does not provide an affinity mask for a
|
|
|
|
* vector, we fallback to the naive mapping.
|
|
|
|
*/
|
2022-08-15 17:00:43 +00:00
|
|
|
void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
|
2017-07-13 08:09:43 +00:00
|
|
|
struct ib_device *dev, int first_vec)
|
|
|
|
{
|
|
|
|
const struct cpumask *mask;
|
|
|
|
unsigned int queue, cpu;
|
|
|
|
|
2018-12-12 07:38:54 +00:00
|
|
|
for (queue = 0; queue < map->nr_queues; queue++) {
|
2017-07-13 08:09:43 +00:00
|
|
|
mask = ib_get_vector_affinity(dev, first_vec + queue);
|
|
|
|
if (!mask)
|
|
|
|
goto fallback;
|
|
|
|
|
|
|
|
for_each_cpu(cpu, mask)
|
2018-12-12 07:38:54 +00:00
|
|
|
map->mq_map[cpu] = map->queue_offset + queue;
|
2017-07-13 08:09:43 +00:00
|
|
|
}
|
|
|
|
|
2022-08-15 17:00:43 +00:00
|
|
|
return;
|
2017-07-13 08:09:43 +00:00
|
|
|
|
|
|
|
fallback:
|
2022-08-15 17:00:43 +00:00
|
|
|
blk_mq_map_queues(map);
|
2017-07-13 08:09:43 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
|