Merge branch 'sched/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into for-6.11

d329605287 ("sched/fair: set_load_weight() must also call reweight_task()
for SCHED_IDLE tasks") applied to sched/core changes how reweight_task() is
called, causing conflicts with e83edbf88f ("sched: Add
sched_class->reweight_task()"). Resolve the conflicts by taking the
set_load_weight() changes from d329605287 and updating
sched_class->reweight_task() to take a pointer to struct load_weight
instead of int prio.

Signed-off-by: Tejun Heo <tj@kernel.org>
commit 7b9f6c864a
Author: Tejun Heo <tj@kernel.org>
Date:   2024-07-08 07:01:58 -10:00

5 changed files with 44 additions and 46 deletions
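
For orientation, here is a minimal userspace sketch of the new callback shape
described above. The struct layout, the mock sched_class and the demo values
are illustrative stand-ins, not the kernel's definitions:

/* Mock of the reworked ->reweight_task() contract; not kernel code. */
#include <stdio.h>

struct load_weight {
        unsigned long weight;
        unsigned long inv_weight;
};

struct rq;                      /* opaque in this sketch */
struct task_struct;             /* opaque in this sketch */

struct sched_class {
        /* Was: void (*reweight_task)(struct rq *, struct task_struct *, int newprio); */
        void (*reweight_task)(struct rq *rq, struct task_struct *p,
                              const struct load_weight *lw);
};

static void demo_reweight(struct rq *rq, struct task_struct *p,
                          const struct load_weight *lw)
{
        (void)rq;
        (void)p;
        printf("reweight: weight=%lu inv_weight=%lu\n",
               lw->weight, lw->inv_weight);
}

int main(void)
{
        struct sched_class demo_class = { .reweight_task = demo_reweight };
        /* The caller (set_load_weight() in the kernel/sched/core.c hunk below)
         * now fills a local load_weight and passes a pointer instead of a
         * priority; the values here are the nice-0 weight and its inverse. */
        struct load_weight lw = { .weight = 1024, .inv_weight = 4194304 };

        demo_class.reweight_task(NULL, NULL, &lw);
        return 0;
}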

kernel/sched/core.c

@@ -1339,27 +1339,24 @@ int tg_nop(struct task_group *tg, void *data)
 void set_load_weight(struct task_struct *p, bool update_load)
 {
         int prio = p->static_prio - MAX_RT_PRIO;
-        struct load_weight *load = &p->se.load;
+        struct load_weight lw;
 
-        /*
-         * SCHED_IDLE tasks get minimal weight:
-         */
         if (task_has_idle_policy(p)) {
-                load->weight = scale_load(WEIGHT_IDLEPRIO);
-                load->inv_weight = WMULT_IDLEPRIO;
-                return;
+                lw.weight = scale_load(WEIGHT_IDLEPRIO);
+                lw.inv_weight = WMULT_IDLEPRIO;
+        } else {
+                lw.weight = scale_load(sched_prio_to_weight[prio]);
+                lw.inv_weight = sched_prio_to_wmult[prio];
         }
 
         /*
          * SCHED_OTHER tasks have to update their load when changing their
          * weight
          */
-        if (update_load && p->sched_class->reweight_task) {
-                p->sched_class->reweight_task(task_rq(p), p, prio);
-        } else {
-                load->weight = scale_load(sched_prio_to_weight[prio]);
-                load->inv_weight = sched_prio_to_wmult[prio];
-        }
+        if (update_load && p->sched_class->reweight_task)
+                p->sched_class->reweight_task(task_rq(p), p, &lw);
+        else
+                p->se.load = lw;
 }
 
 #ifdef CONFIG_UCLAMP_TASK

kernel/sched/ext.c

@@ -3403,7 +3403,8 @@ void sched_ext_free(struct task_struct *p)
         }
 }
 
-static void reweight_task_scx(struct rq *rq, struct task_struct *p, int newprio)
+static void reweight_task_scx(struct rq *rq, struct task_struct *p,
+                              const struct load_weight *lw)
 {
         lockdep_assert_rq_held(task_rq(p));
 

kernel/sched/fair.c

@@ -3835,15 +3835,15 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
         }
 }
 
-static void reweight_task_fair(struct rq *rq, struct task_struct *p, int prio)
+static void reweight_task_fair(struct rq *rq, struct task_struct *p,
+                               const struct load_weight *lw)
 {
         struct sched_entity *se = &p->se;
         struct cfs_rq *cfs_rq = cfs_rq_of(se);
         struct load_weight *load = &se->load;
-        unsigned long weight = scale_load(sched_prio_to_weight[prio]);
 
-        reweight_entity(cfs_rq, se, weight);
-        load->inv_weight = sched_prio_to_wmult[prio];
+        reweight_entity(cfs_rq, se, lw->weight);
+        load->inv_weight = lw->inv_weight;
 }
 
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);

kernel/sched/psi.c

@@ -218,28 +218,32 @@ void __init psi_init(void)
         group_init(&psi_system);
 }
 
-static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
+static u32 test_states(unsigned int *tasks, u32 state_mask)
 {
-        switch (state) {
-        case PSI_IO_SOME:
-                return unlikely(tasks[NR_IOWAIT]);
-        case PSI_IO_FULL:
-                return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
-        case PSI_MEM_SOME:
-                return unlikely(tasks[NR_MEMSTALL]);
-        case PSI_MEM_FULL:
-                return unlikely(tasks[NR_MEMSTALL] &&
-                                tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
-        case PSI_CPU_SOME:
-                return unlikely(tasks[NR_RUNNING] > oncpu);
-        case PSI_CPU_FULL:
-                return unlikely(tasks[NR_RUNNING] && !oncpu);
-        case PSI_NONIDLE:
-                return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
-                       tasks[NR_RUNNING];
-        default:
-                return false;
+        const bool oncpu = state_mask & PSI_ONCPU;
+
+        if (tasks[NR_IOWAIT]) {
+                state_mask |= BIT(PSI_IO_SOME);
+                if (!tasks[NR_RUNNING])
+                        state_mask |= BIT(PSI_IO_FULL);
         }
+
+        if (tasks[NR_MEMSTALL]) {
+                state_mask |= BIT(PSI_MEM_SOME);
+                if (tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING])
+                        state_mask |= BIT(PSI_MEM_FULL);
+        }
+
+        if (tasks[NR_RUNNING] > oncpu)
+                state_mask |= BIT(PSI_CPU_SOME);
+
+        if (tasks[NR_RUNNING] && !oncpu)
+                state_mask |= BIT(PSI_CPU_FULL);
+
+        if (tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING])
+                state_mask |= BIT(PSI_NONIDLE);
+
+        return state_mask;
 }
 
 static void get_recent_times(struct psi_group *group, int cpu,
@@ -770,7 +774,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
 {
         struct psi_group_cpu *groupc;
         unsigned int t, m;
-        enum psi_states s;
         u32 state_mask;
 
         groupc = per_cpu_ptr(group->pcpu, cpu);
@@ -841,10 +844,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
                 return;
         }
 
-        for (s = 0; s < NR_PSI_STATES; s++) {
-                if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
-                        state_mask |= (1 << s);
-        }
+        state_mask = test_states(groupc->tasks, state_mask);
 
         /*
          * Since we care about lost potential, a memstall is FULL
@@ -1194,7 +1194,7 @@ void psi_cgroup_restart(struct psi_group *group)
         /*
          * After we disable psi_group->enabled, we don't actually
          * stop percpu tasks accounting in each psi_group_cpu,
-         * instead only stop test_state() loop, record_times()
+         * instead only stop test_states() loop, record_times()
          * and averaging worker, see psi_group_change() for details.
          *
          * When disable cgroup PSI, this function has nothing to sync
@@ -1202,7 +1202,7 @@ void psi_cgroup_restart(struct psi_group *group)
          * would see !psi_group->enabled and only do task accounting.
          *
          * When re-enable cgroup PSI, this function use psi_group_change()
-         * to get correct state mask from test_state() loop on tasks[],
+         * to get correct state mask from test_states() loop on tasks[],
          * and restart groupc->state_start from now, use .clear = .set = 0
          * here since no task status really changed.
          */
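
The psi.c hunks above fold the per-state test_state() switch into a single
test_states() pass that ORs every pressure-state bit into one mask. Below is
a rough userspace sketch of that one-pass pattern; the enum values, the
PSI_ONCPU flag bit and the tasks[] layout are simplified stand-ins for the
kernel's psi types:

#include <stdio.h>

#define BIT(n)          (1u << (n))
#define PSI_ONCPU       BIT(31)         /* made-up flag bit for this sketch */

enum { NR_IOWAIT, NR_MEMSTALL, NR_RUNNING, NR_MEMSTALL_RUNNING, NR_TASK_COUNTS };
enum { PSI_IO_SOME, PSI_IO_FULL, PSI_MEM_SOME, PSI_MEM_FULL,
       PSI_CPU_SOME, PSI_CPU_FULL, PSI_NONIDLE };

static unsigned int test_states(const unsigned int *tasks, unsigned int state_mask)
{
        const unsigned int oncpu = !!(state_mask & PSI_ONCPU);

        if (tasks[NR_IOWAIT]) {
                state_mask |= BIT(PSI_IO_SOME);
                if (!tasks[NR_RUNNING])
                        state_mask |= BIT(PSI_IO_FULL);
        }
        if (tasks[NR_MEMSTALL]) {
                state_mask |= BIT(PSI_MEM_SOME);
                if (tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING])
                        state_mask |= BIT(PSI_MEM_FULL);
        }
        if (tasks[NR_RUNNING] > oncpu)
                state_mask |= BIT(PSI_CPU_SOME);
        if (tasks[NR_RUNNING] && !oncpu)
                state_mask |= BIT(PSI_CPU_FULL);
        if (tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING])
                state_mask |= BIT(PSI_NONIDLE);
        return state_mask;
}

int main(void)
{
        /* One runnable task that is also in a memstall and not on-CPU:
         * expect MEM_SOME, MEM_FULL, CPU_SOME, CPU_FULL and NONIDLE,
         * i.e. 0x7c with the enum values above. */
        unsigned int tasks[NR_TASK_COUNTS] = {
                [NR_MEMSTALL] = 1, [NR_RUNNING] = 1, [NR_MEMSTALL_RUNNING] = 1,
        };

        printf("state_mask = %#x\n", test_states(tasks, 0));
        return 0;
}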

kernel/sched/sched.h

@@ -2389,7 +2389,7 @@ struct sched_class {
         void (*switched_from)(struct rq *this_rq, struct task_struct *task);
         void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
         void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
-                              int newprio);
+                              const struct load_weight *lw);
         void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
                               int oldprio);