Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes: various scheduler metrics corner case fixes, a
  sched_features deadlock fix, and a topology fix for certain NUMA
  systems"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix kernel-doc notation warning
  sched/fair: Fix load_balance redo for !imbalance
  sched/fair: Fix scale_rt_capacity() for SMT
  sched/fair: Fix vruntime_normalized() for remote non-migration wakeup
  sched/pelt: Fix update_blocked_averages() for RT and DL classes
  sched/topology: Set correct NUMA topology type
  sched/debug: Fix potential deadlock when writing to sched_features
commit 4314daa55b
@@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-	static_key_disable(&sched_feat_keys[i]);
+	static_key_disable_cpuslocked(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-	static_key_enable(&sched_feat_keys[i]);
+	static_key_enable_cpuslocked(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 
 	/* Ensure the static_key remains in a consistent state */
 	inode = file_inode(filp);
+	cpus_read_lock();
 	inode_lock(inode);
 	ret = sched_feat_set(cmp);
 	inode_unlock(inode);
+	cpus_read_unlock();
 	if (ret < 0)
 		return ret;
 
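The two hunks above belong to the sched_features deadlock fix: static_key_enable()/static_key_disable() take cpus_read_lock() internally, so calling them under inode_lock() nested the hotplug lock inside the inode lock, inverting the order used elsewhere. The write path now takes cpus_read_lock() itself, before inode_lock(), and switches to the *_cpuslocked static-key variants, which expect the caller to already hold the hotplug lock. Below is a minimal user-space sketch of the resulting lock-ordering rule; the mutex and function names are invented for illustration and are not kernel APIs.

/*
 * Illustrative user-space model of the lock ordering fixed above.
 * The names (hotplug_lock, file_lock, toggle_feature) are invented
 * for this sketch; build with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for cpus_read_lock() */
static pthread_mutex_t file_lock    = PTHREAD_MUTEX_INITIALIZER; /* stands in for inode_lock()     */

static void toggle_feature(void)
{
	/*
	 * A "_cpuslocked"-style helper assumes the caller already holds the
	 * hotplug lock, so it takes nothing here.  The buggy arrangement
	 * acquired hotplug_lock at this point, i.e. after file_lock, while
	 * other paths take hotplug_lock before file_lock: classic ABBA.
	 */
	printf("feature toggled under both locks\n");
}

static void *writer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&hotplug_lock);	/* outer lock first ...  */
	pthread_mutex_lock(&file_lock);		/* ... inner lock second */
	toggle_feature();
	pthread_mutex_unlock(&file_lock);
	pthread_mutex_unlock(&hotplug_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);
	pthread_join(t, NULL);
	return 0;
}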
@@ -3362,6 +3362,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
  * @cfs_rq: cfs_rq to attach to
  * @se: sched_entity to attach
+ * @flags: migration hints
  *
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
@@ -7263,6 +7264,7 @@ static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq, *pos;
+	const struct sched_class *curr_class;
 	struct rq_flags rf;
 	bool done = true;
 
@@ -7299,8 +7301,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
 	}
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 	/* Don't need periodic decay once load/util_avg are null */
 	if (others_have_blocked(rq))
@@ -7365,13 +7369,16 @@ static inline void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq = &rq->cfs;
+	const struct sched_class *curr_class;
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
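Both variants of update_blocked_averages() previously passed a hard-coded 0 as the "running" argument of update_rt_rq_load_avg() and update_dl_rq_load_avg(), so the utilization of an RT or DL task that was executing at that moment decayed as if the CPU were idle. Comparing rq->curr->sched_class against the RT/DL classes keeps the currently running class accumulating instead. The snippet below is a toy, self-contained model of a decaying average gated on a running flag, with invented names and an arbitrary decay factor; it is not the kernel's PELT math, it only shows why the flag matters.

/*
 * Toy model of a decaying utilization average.  Names and the decay
 * factor are invented; the kernel's PELT uses 1024 us segments and a
 * y^32 = 0.5 geometric series.
 */
#include <stdio.h>

struct toy_avg {
	double util;	/* tracked utilization, 0.0 .. 1.0 */
};

/* One update period: decay the history, then credit this period if running. */
static void toy_update(struct toy_avg *a, int running)
{
	const double decay = 0.978;	/* arbitrary per-period decay */

	a->util = a->util * decay + (running ? (1.0 - decay) : 0.0);
}

int main(void)
{
	struct toy_avg busy  = { .util = 0.5 };
	struct toy_avg wrong = { .util = 0.5 };

	for (int i = 0; i < 100; i++) {
		toy_update(&busy, 1);	/* class is actually running        */
		toy_update(&wrong, 0);	/* bug: always reported as not running */
	}
	/* 'busy' climbs toward 1.0, 'wrong' decays toward 0 despite running. */
	printf("running=1: %.3f   running=0: %.3f\n", busy.util, wrong.util);
	return 0;
}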
@@ -7482,10 +7489,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 	return load_idx;
 }
 
-static unsigned long scale_rt_capacity(int cpu)
+static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+	unsigned long max = arch_scale_cpu_capacity(sd, cpu);
 	unsigned long used, free;
 	unsigned long irq;
 
@@ -7507,7 +7514,7 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-	unsigned long capacity = scale_rt_capacity(cpu);
+	unsigned long capacity = scale_rt_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
 
 	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
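Passing the sched_domain down to scale_rt_capacity() matters on SMT: the default arch_scale_cpu_capacity() returns the full SCHED_CAPACITY_SCALE for a NULL domain, so an SMT sibling was credited with a whole core's capacity before the RT/DL/IRQ pressure was subtracted; with the domain available it can report the per-thread (SMT-scaled) maximum instead. The sketch below only illustrates the "remaining capacity" subtraction with made-up numbers; the real helpers and scaling factors live in the scheduler and architecture code.

/*
 * Sketch of the "capacity left for CFS" arithmetic.  The numbers are
 * invented; the kernel expresses capacity on a 1024 = SCHED_CAPACITY_SCALE
 * scale and derives the per-thread maximum from the topology.
 */
#include <stdio.h>

#define CAPACITY_SCALE 1024UL

static unsigned long remaining_capacity(unsigned long max, unsigned long used)
{
	/* If RT/DL/IRQ pressure exceeds the maximum, nothing usable is left. */
	if (used >= max)
		return 1;
	return max - used;
}

int main(void)
{
	unsigned long used       = 300;			/* time eaten by RT/DL/IRQ        */
	unsigned long full_core  = CAPACITY_SCALE;	/* what a NULL domain reported     */
	unsigned long smt_thread = 589;			/* illustrative per-thread maximum */

	/* With the full-core maximum, an SMT sibling looked far bigger than it is. */
	printf("old (full core):  %lu\n", remaining_capacity(full_core, used));
	printf("new (SMT thread): %lu\n", remaining_capacity(smt_thread, used));
	return 0;
}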
@@ -8269,7 +8276,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 force_balance:
 	/* Looks like there is an imbalance. Compute it */
 	calculate_imbalance(env, &sds);
-	return sds.busiest;
+	return env->imbalance ? sds.busiest : NULL;
 
 out_balanced:
 	env->imbalance = 0;
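calculate_imbalance() can end up with env->imbalance == 0 even after find_busiest_group() decided to force a balance. Returning sds.busiest in that case made detach_tasks() bail out without moving anything or clearing LBF_ALL_PINNED, so the busiest CPU was treated as holding only pinned tasks, dropped from the balance mask, and the whole pass redone for nothing. Returning NULL lets load_balance() take its normal "no busiest group" exit. The sketch below is a stripped-down, user-space caricature of that retry loop, with invented names, just to show how a non-NULL group paired with a zero imbalance keeps the loop spinning.

/*
 * Caricature of the load_balance() redo path.  All names
 * (find_busiest, detach_some_tasks, ...) are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct group { int id; };

static struct group g = { .id = 1 };

/* Old behaviour: report a busiest group even when the imbalance is zero. */
static struct group *find_busiest(long *imbalance, bool fixed)
{
	*imbalance = 0;			/* nothing to move this time     */
	return fixed ? NULL : &g;	/* fix: no imbalance -> no group */
}

static int detach_some_tasks(long imbalance)
{
	return imbalance > 0;		/* zero imbalance moves nothing  */
}

static void balance(bool fixed)
{
	int redo = 0;
	long imbalance;

	for (;;) {
		struct group *busiest = find_busiest(&imbalance, fixed);

		if (!busiest)
			break;				/* out_balanced          */
		if (detach_some_tasks(imbalance))
			break;				/* moved something, done */
		/*
		 * Nothing moved: the real code assumes pinned tasks, drops the
		 * busiest CPU from the mask and redoes the pass.
		 */
		if (++redo > 3)
			break;				/* cap the demo          */
	}
	printf("%s: %d pointless redo(s)\n", fixed ? "fixed" : "old", redo);
}

int main(void)
{
	balance(false);
	balance(true);
	return 0;
}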
@@ -9638,7 +9645,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
 	 * - A task which has been woken up by try_to_wake_up() and
 	 *   waiting for actually being woken up by sched_ttwu_pending().
 	 */
-	if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+	if (!se->sum_exec_runtime ||
+	    (p->state == TASK_WAKING && p->sched_remote_wakeup))
 		return true;
 
 	return false;
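vruntime_normalized() tells detach_task_cfs_rq() whether se->vruntime is still absolute (it still contains the old cfs_rq's min_vruntime, as after a plain sleep) or already relative. A task can sit in TASK_WAKING while parked on another CPU's wake list; its vruntime has been normalized only if that wakeup also migrated it, which is what p->sched_remote_wakeup records, whereas a non-migration wakeup still carries an absolute vruntime, so claiming it is normalized would skip the adjustment. The toy model below, with invented names, only shows the invariant the predicate protects: min_vruntime must be subtracted exactly once.

/*
 * Toy model of "absolute vs. normalized" vruntime bookkeeping.  All
 * names are invented; this is not the kernel's code, just the
 * exactly-once invariant the predicate above has to preserve.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	long long vruntime;	/* absolute until normalized             */
	bool normalized;	/* true once min_vruntime was subtracted */
};

/* A migrating path subtracts the old queue's min_vruntime. */
static void toy_migrate(struct toy_task *t, long long min_vruntime)
{
	t->vruntime -= min_vruntime;
	t->normalized = true;
}

/*
 * Detaching from the old queue must subtract min_vruntime exactly once.
 * Claiming a task is normalized when it is not (the bug fixed above for
 * non-migration wakeups) skips this and leaves vruntime far too large.
 */
static void toy_detach(struct toy_task *t, long long min_vruntime)
{
	if (!t->normalized) {
		t->vruntime -= min_vruntime;
		t->normalized = true;
	}
}

int main(void)
{
	long long min_vruntime = 1000000;
	struct toy_task migrated = { .vruntime = 1000500 };
	struct toy_task sleeper  = { .vruntime = 1000500 };	/* woken without migrating */

	toy_migrate(&migrated, min_vruntime);
	toy_detach(&migrated, min_vruntime);	/* already relative, untouched  */
	toy_detach(&sleeper, min_vruntime);	/* still absolute, adjusted now */

	printf("migrated: %lld  sleeper: %lld\n", migrated.vruntime, sleeper.vruntime);
	return 0;
}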
|
@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void)
|
|||||||
|
|
||||||
n = sched_max_numa_distance;
|
n = sched_max_numa_distance;
|
||||||
|
|
||||||
if (sched_domains_numa_levels <= 1) {
|
if (sched_domains_numa_levels <= 2) {
|
||||||
sched_numa_topology_type = NUMA_DIRECT;
|
sched_numa_topology_type = NUMA_DIRECT;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -1380,9 +1380,6 @@ void sched_init_numa(void)
 			break;
 	}
 
-	if (!level)
-		return;
-
 	/*
 	 * 'level' contains the number of unique distances
 	 *
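init_numa_topology_type() classifies the machine from the number of distinct node distances. A table with just the local distance plus one remote distance already describes a directly connected system, so the NUMA_DIRECT cutoff moves from <= 1 to <= 2 levels; the second hunk drops an early return in sched_init_numa() that could never trigger, since the local distance is recorded before that check and level is therefore at least 1. The sketch below only shows the "count unique distances" idea on an invented two-node SLIT-style table; the kernel walks node_distance() over the online nodes and applies further classification beyond the direct case.

/*
 * Sketch: counting unique node distances in a SLIT-style table.  The
 * table and helper are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 2

/* Typical 2-node table: 10 on the diagonal (local), 20 for the remote hop. */
static const int distance[NR_NODES][NR_NODES] = {
	{ 10, 20 },
	{ 20, 10 },
};

static int count_unique_distances(void)
{
	int uniq[NR_NODES * NR_NODES];
	int levels = 0;

	for (int i = 0; i < NR_NODES; i++) {
		for (int j = 0; j < NR_NODES; j++) {
			bool seen = false;

			for (int k = 0; k < levels; k++)
				seen |= (uniq[k] == distance[i][j]);
			if (!seen)
				uniq[levels++] = distance[i][j];
		}
	}
	return levels;
}

int main(void)
{
	int levels = count_unique_distances();	/* 2: local + one remote distance */

	/* New cutoff: up to two distinct distances is still a direct topology. */
	printf("%d levels -> %s\n", levels,
	       levels <= 2 ? "NUMA_DIRECT" : "needs further classification");
	return 0;
}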