Merge tag 'sched_urgent_for_v5.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Borislav Petkov:

 - Use the correct static key checking primitive on the IRQ exit path

 - Two fixes for the new forceidle balancer

* tag 'sched_urgent_for_v5.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  entry: Fix compile error in dynamic_irqentry_exit_cond_resched()
  sched: Teach the forced-newidle balancer about CPU affinity limitation.
  sched/core: Fix forceidle balancing
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -392,7 +392,7 @@ DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
 DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
 void dynamic_irqentry_exit_cond_resched(void)
 {
-	if (!static_key_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
 		return;
 	raw_irqentry_exit_cond_resched();
 }
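The compile error came from using a primitive the static-key API does not provide: there is no static_key_unlikely() in include/linux/jump_label.h, and a key defined with DEFINE_STATIC_KEY_TRUE() is tested with static_branch_likely()/static_branch_unlikely(). A minimal user-space sketch of the pattern being fixed (illustrative stand-ins only; the real mechanism patches the branch site in the instruction stream at runtime):

#include <stdbool.h>
#include <stdio.h>

/*
 * User-space stand-in for the kernel's static-branch pattern: in the
 * kernel, a key declared with DEFINE_STATIC_KEY_TRUE() is tested with
 * static_branch_likely()/static_branch_unlikely(); a plain bool read
 * stands in for the runtime-patched branch here.
 */
static bool sk_dynamic_irqentry_exit_cond_resched = true;

static void raw_irqentry_exit_cond_resched(void)
{
	puts("cond_resched: reschedule if needed");
}

static void dynamic_irqentry_exit_cond_resched(void)
{
	/* kernel: if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched)) */
	if (!sk_dynamic_irqentry_exit_cond_resched)
		return;
	raw_irqentry_exit_cond_resched();
}

int main(void)
{
	dynamic_irqentry_exit_cond_resched();
	return 0;
}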
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5752,6 +5752,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
 
 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
 
+static void queue_core_balance(struct rq *rq);
+
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -5801,7 +5803,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		}
 
 		rq->core_pick = NULL;
-		return next;
+		goto out;
 	}
 
 	put_prev_task_balance(rq, prev, rf);
@@ -5851,7 +5853,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 			 */
 			WARN_ON_ONCE(fi_before);
 			task_vruntime_update(rq, next, false);
-			goto done;
+			goto out_set_next;
 		}
 	}
@@ -5970,8 +5972,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		resched_curr(rq_i);
 	}
 
-done:
+out_set_next:
 	set_next_task(rq, next);
+out:
+	if (rq->core->core_forceidle_count && next == rq->idle)
+		queue_core_balance(rq);
+
 	return next;
 }
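The relabeling matters because pick_next_task() had exit paths (the early "return next;" and the old "done:" label) that bypassed any force-idle follow-up; with "out_set_next:" and the new "out:" label, every exit passes the core_forceidle_count check, so a CPU that ends up running its idle task while siblings are forced idle reliably queues a balance attempt. A compilable control-flow sketch under simplified, stand-in types (not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the scheduler types (illustrative only). */
struct task { const char *name; };
struct rq {
	int core_forceidle_count;	/* siblings forced idle on this core */
	struct task *idle;
	struct task *picked;
};

static void queue_core_balance(struct rq *rq)
{
	(void)rq;
	puts("core balance queued");
}

/* Skeleton of the relabeled exits: every path now funnels past "out:". */
static struct task *pick_next_task_sketch(struct rq *rq, bool early_exit)
{
	struct task *next = rq->picked;

	if (early_exit)
		goto out;		/* was "return next;", skipping the check */

	/* ... core-wide task selection would run here ... */

	/* out_set_next: (was "done:") set_next_task(rq, next); */
out:
	if (rq->core_forceidle_count && next == rq->idle)
		queue_core_balance(rq);
	return next;
}

int main(void)
{
	struct task idle = { "idle" };
	struct rq rq = { 1, &idle, &idle };

	pick_next_task_sketch(&rq, true);	/* the early exit balances too now */
	return 0;
}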
@@ -6000,7 +6006,7 @@ static bool try_steal_cookie(int this, int that)
 		if (p == src->core_pick || p == src->curr)
 			goto next;
 
-		if (!cpumask_test_cpu(this, &p->cpus_mask))
+		if (!is_cpu_allowed(p, this))
 			goto next;
 
 		if (p->core_occupation > dst->idle->core_occupation)
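The raw p->cpus_mask test only checks static affinity, so the forced-newidle balancer could try to steal a task that must not be moved right now (for example, one in a migrate-disabled section); is_cpu_allowed(), a real helper in kernel/sched/core.c, layers such run-time constraints on top of the affinity mask. A simplified sketch of the distinction (stub names like cpu_active_stub and is_cpu_allowed_sketch are invented for illustration; the real helper's rules for kthreads and hotplug states are more involved):

#include <stdbool.h>

/* Simplified stand-ins modelling only the spirit of the fix. */
struct task {
	unsigned long cpus_mask;	/* bit n set => CPU n allowed by affinity */
	bool migration_disabled;	/* task is in a migrate-disabled section */
};

static bool cpu_active_stub(int cpu)
{
	return cpu >= 0 && cpu < 8;	/* pretend CPUs 0-7 are active */
}

/* The old test: affinity mask only. */
static bool cpumask_test_cpu_sketch(int cpu, unsigned long mask)
{
	return mask & (1UL << cpu);
}

/* The new test: affinity AND "may the task actually move there now?". */
static bool is_cpu_allowed_sketch(const struct task *p, int cpu)
{
	if (!cpumask_test_cpu_sketch(cpu, p->cpus_mask))
		return false;
	if (p->migration_disabled)	/* must not be pulled to another CPU */
		return false;
	return cpu_active_stub(cpu);
}

int main(void)
{
	struct task p = { .cpus_mask = 0x4UL, .migration_disabled = true };

	/* The raw mask allows CPU 2, but the migrate-disabled state does not. */
	return is_cpu_allowed_sketch(&p, 2) ? 1 : 0;
}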
@@ -6066,7 +6072,7 @@ static void sched_core_balance(struct rq *rq)
 
 static DEFINE_PER_CPU(struct callback_head, core_balance_head);
 
-void queue_core_balance(struct rq *rq)
+static void queue_core_balance(struct rq *rq)
 {
 	if (!sched_core_enabled(rq))
 		return;
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -434,7 +434,6 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
 {
 	update_idle_core(rq);
 	schedstat_inc(rq->sched_goidle);
-	queue_core_balance(rq);
 }
 
 #ifdef CONFIG_SMP
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1232,8 +1232,6 @@ static inline bool sched_group_cookie_match(struct rq *rq,
 	return false;
 }
 
-extern void queue_core_balance(struct rq *rq);
-
 static inline bool sched_core_enqueued(struct task_struct *p)
 {
 	return !RB_EMPTY_NODE(&p->core_node);
@@ -1267,10 +1265,6 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
 	return &rq->__lock;
 }
 
-static inline void queue_core_balance(struct rq *rq)
-{
-}
-
 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
 {
 	return true;