blk-mq: don't schedule block kworker on isolated CPUs
Kernel parameters 'isolcpus=' and 'nohz_full=' are used to isolate CPUs for specific tasks, and block IO is not expected to disturb these CPUs; the blk-mq kworker should not be scheduled on isolated CPUs. Running the blk-mq kworker on isolated CPUs can also cause long block IO latency.

The kernel workqueue only respects CPU isolation for WQ_UNBOUND; for a bound workqueue, the responsibility is on the user, because the CPU is passed as a workqueue API parameter, as in mod_delayed_work_on(cpu), queue_delayed_work_on(cpu) and queue_work_on(cpu).

So avoid running the blk-mq kworker on isolated CPUs by removing isolated CPUs from hctx->cpumask. Meanwhile, use the queue map instead of hctx->cpumask to check whether all CPUs in a hw queue are offline; this avoids any cost in the fast IO code path and is safe since hctx->cpumask is only used in these two cases.

Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Andrew Theurer <atheurer@redhat.com>
Cc: Joe Mario <jmario@redhat.com>
Cc: Sebastian Jug <sejug@redhat.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Tejun Heo <tj@kernel.org>
Tested-by: Joe Mario <jmario@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Ewan D. Milne <emilne@redhat.com>
Link: https://lore.kernel.org/r/20240322021244.1056223-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 7d8d35791b
commit a46c27026d
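Taken together, the patch does two things: blk_mq_map_swqueue() strips isolated CPUs out of each hw queue's hctx->cpumask, and blk_mq_hctx_next_cpu() falls back to an unbound worker when that leaves the mask empty. The same logic can be modeled outside the kernel; the minimal userspace C sketch below uses plain bitmasks as illustrative stand-ins for the kernel's cpumask API (NR_CPUS, CPU_UNBOUND and all helpers here are hypothetical names, not kernel symbols):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS     8
#define CPU_UNBOUND (-1)            /* stand-in for WORK_CPU_UNBOUND */

/* Stand-in for cpu_is_isolated(): a set bit in isol_mask marks an
 * isolated CPU (as if booted with isolcpus=/nohz_full=). */
static bool is_isolated(uint32_t isol_mask, int cpu)
{
        return isol_mask & (1u << cpu);
}

/* Models the blk_mq_map_swqueue() hunk below: drop isolated CPUs from
 * the hw queue's CPU mask so no bound kworker lands on them. */
static uint32_t filter_isolated(uint32_t hctx_mask, uint32_t isol_mask)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (is_isolated(isol_mask, cpu))
                        hctx_mask &= ~(1u << cpu);
        return hctx_mask;
}

/* Models the blk_mq_hctx_next_cpu() hunk below: pick a CPU from the
 * mask, or go unbound when the filtering above emptied it. */
static int next_cpu(uint32_t hctx_mask)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (hctx_mask & (1u << cpu))
                        return cpu;
        return CPU_UNBOUND;
}

int main(void)
{
        uint32_t hctx_mask = 0x0c;      /* hctx serves CPUs 2-3 */
        uint32_t isol_mask = 0x0c;      /* booted with isolcpus=2,3 */

        hctx_mask = filter_isolated(hctx_mask, isol_mask);
        printf("next cpu: %d\n", next_cpu(hctx_mask));  /* -1 => unbound */
        return 0;
}

Booting with isolcpus=2,3 corresponds to isol_mask = 0x0c here: once every CPU in the hctx is isolated, the work item is left to an unbound worker instead of being scheduled on an isolated CPU.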
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -28,6 +28,7 @@
 #include <linux/prefetch.h>
 #include <linux/blk-crypto.h>
 #include <linux/part_stat.h>
+#include <linux/sched/isolation.h>

 #include <trace/events/block.h>

@@ -2163,6 +2164,15 @@ static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
 	return cpu;
 }

+/*
+ * ->next_cpu is always calculated from hctx->cpumask, so simply use
+ * it for speeding up the check
+ */
+static bool blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx *hctx)
+{
+	return hctx->next_cpu >= nr_cpu_ids;
+}
+
 /*
  * It'd be great if the workqueue API had a way to pass
  * in a mask and had some smarts for more clever placement.
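Why next_cpu >= nr_cpu_ids works as an emptiness test: ->next_cpu is seeded from hctx->cpumask by blk_mq_first_mapped_cpu(), and the kernel's cpumask iterators return nr_cpu_ids when no bit qualifies, so an emptied mask leaves the stored value permanently out of range. A toy model of that "one past the end" convention (plain unsigned masks, illustrative names only):

#include <stdio.h>

#define NR_CPU_IDS 8

/* Toy cpumask_first(): lowest set bit, or NR_CPU_IDS for an empty
 * mask -- the same convention the kernel iterators use. */
static int mask_first(unsigned int mask)
{
        for (int cpu = 0; cpu < NR_CPU_IDS; cpu++)
                if (mask & (1u << cpu))
                        return cpu;
        return NR_CPU_IDS;
}

int main(void)
{
        unsigned int empty = 0x0, covered = 0x10;

        /* next_cpu is seeded from the mask, so an emptied mask leaves
         * it out of range -- what blk_mq_hctx_empty_cpumask() tests. */
        printf("%d\n", mask_first(empty)   >= NR_CPU_IDS);  /* 1: empty */
        printf("%d\n", mask_first(covered) >= NR_CPU_IDS);  /* 0: CPU 4 */
        return 0;
}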
@@ -2174,7 +2184,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 	bool tried = false;
 	int next_cpu = hctx->next_cpu;

-	if (hctx->queue->nr_hw_queues == 1)
+	/* Switch to unbound if no allowable CPUs in this hctx */
+	if (hctx->queue->nr_hw_queues == 1 || blk_mq_hctx_empty_cpumask(hctx))
 		return WORK_CPU_UNBOUND;

 	if (--hctx->next_cpu_batch <= 0) {
@@ -3483,14 +3494,30 @@ static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
 	return data.has_rq;
 }

-static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
-		struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx,
+		unsigned int this_cpu)
 {
-	if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
-		return false;
-	if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
-		return false;
-	return true;
+	enum hctx_type type = hctx->type;
+	int cpu;
+
+	/*
+	 * hctx->cpumask has to rule out isolated CPUs, but userspace still
+	 * might submit IOs on these isolated CPUs, so use the queue map to
+	 * check if all CPUs mapped to this hctx are offline
+	 */
+	for_each_online_cpu(cpu) {
+		struct blk_mq_hw_ctx *h = blk_mq_map_queue_type(hctx->queue,
+				type, cpu);
+
+		if (h != hctx)
+			continue;
+
+		/* this hctx has at least one online CPU */
+		if (this_cpu != cpu)
+			return true;
+	}
+
+	return false;
 }

 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
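Note the inversion here: the old blk_mq_last_cpu_in_hctx() consulted hctx->cpumask, but once isolated CPUs are filtered out of that mask, a task pinned to an isolated CPU can still submit IO through this hctx. The hotplug check therefore walks the queue map instead, so an hctx only counts as dead when every CPU it serves is offline. A small userspace model of the new check (the queue_map array and all names are illustrative stand-ins for blk_mq_map_queue_type(), not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Illustrative queue map: queue_map[cpu] is the hctx that IO submitted
 * on that CPU is routed to (stand-in for blk_mq_map_queue_type()). */
static const int queue_map[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Modeled on blk_mq_hctx_has_online_cpu(): walk every online CPU via
 * the queue map -- not the isolation-filtered hctx->cpumask -- and
 * report whether any CPU besides the one going down still serves hctx. */
static bool hctx_has_online_cpu(const bool *online, int hctx, int going_down)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!online[cpu] || queue_map[cpu] != hctx)
                        continue;
                if (cpu != going_down)
                        return true;    /* another online CPU remains */
        }
        return false;
}

int main(void)
{
        /* CPUs 0-4 online; CPU 4 is the only online CPU mapped to hctx 1. */
        bool online[NR_CPUS] = { true, true, true, true,
                                 true, false, false, false };

        printf("%d\n", hctx_has_online_cpu(online, 1, 4)); /* 0: quiesce   */
        printf("%d\n", hctx_has_online_cpu(online, 0, 1)); /* 1: nothing to do */
        return 0;
}

In the example, CPU 4 is the last online CPU routed to hctx 1, so taking it offline makes the check return false and blk_mq_hctx_notify_offline() proceeds to quiesce that hw queue.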
@@ -3498,8 +3525,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
 			struct blk_mq_hw_ctx, cpuhp_online);

-	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
-	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
+	if (blk_mq_hctx_has_online_cpu(hctx, cpu))
 		return 0;

 	/*
@@ -3907,6 +3933,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	}

 	queue_for_each_hw_ctx(q, hctx, i) {
+		int cpu;
+
 		/*
 		 * If no software queues are mapped to this hardware queue,
 		 * disable it and free the request entries.
@@ -3933,6 +3961,15 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		 */
 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

+		/*
+		 * Rule out isolated CPUs from hctx->cpumask to avoid
+		 * running block kworker on isolated CPUs
+		 */
+		for_each_cpu(cpu, hctx->cpumask) {
+			if (cpu_is_isolated(cpu))
+				cpumask_clear_cpu(cpu, hctx->cpumask);
+		}
+
 		/*
 		 * Initialize batch roundrobin counts
 		 */