sched/fair: Rework throttle_count sync
Since we already take rq->lock when creating a cgroup, use it to also
sync the throttle_count and avoid the extra state and enqueue path
branch.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: linux-kernel@vger.kernel.org
[ Fixed build warning. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 55e16d30bd
parent 599b4840b0
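The idea, illustrated: each per-CPU cfs_rq carries a hierarchical throttle_count (how many of its ancestors are currently throttled), and a group created while an ancestor is throttled must inherit the parent's value rather than start from zero. The sketch below is a minimal userspace model of that invariant, not kernel code; struct toy_cfs_rq and toy_sync_throttle() are hypothetical stand-ins for the kernel's cfs_rq and the new sync_throttle(), and the rq->lock serialization is modelled away.

#include <assert.h>
#include <stddef.h>

/* Toy model of a per-CPU cfs_rq; only the fields relevant here. */
struct toy_cfs_rq {
	struct toy_cfs_rq *parent;
	int throttle_count;	/* number of throttled ancestors */
};

/*
 * Analogue of the new sync_throttle(): at group-creation time, while
 * the (modelled-away) rq->lock is held, copy the parent's count once.
 */
static void toy_sync_throttle(struct toy_cfs_rq *cfs_rq)
{
	if (cfs_rq->parent)
		cfs_rq->throttle_count = cfs_rq->parent->throttle_count;
}

int main(void)
{
	struct toy_cfs_rq root   = { .parent = NULL };
	struct toy_cfs_rq parent = { .parent = &root };

	parent.throttle_count = 1;	/* throttled before the child exists */

	struct toy_cfs_rq child = { .parent = &parent };
	toy_sync_throttle(&child);

	/* Without the sync the child would claim no throttled ancestors. */
	assert(child.throttle_count == 1);
	return 0;
}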
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4241,26 +4241,6 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
-	/* Synchronize hierarchical throttle counter: */
-	if (unlikely(!cfs_rq->throttle_uptodate)) {
-		struct rq *rq = rq_of(cfs_rq);
-		struct cfs_rq *pcfs_rq;
-		struct task_group *tg;
-
-		cfs_rq->throttle_uptodate = 1;
-
-		/* Get closest up-to-date node, because leaves go first: */
-		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
-			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
-			if (pcfs_rq->throttle_uptodate)
-				break;
-		}
-		if (tg) {
-			cfs_rq->throttle_count = pcfs_rq->throttle_count;
-			cfs_rq->throttled_clock_task = rq_clock_task(rq);
-		}
-	}
-
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
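For contrast, the branch removed above synced lazily on first enqueue: mark the leaf up to date, then walk toward the root for the closest ancestor whose count is already valid (leaves are reached first on this path). A self-contained toy rendering of that walk, with a hypothetical toy_lazy_sync() and an uptodate flag standing in for throttle_uptodate:

#include <assert.h>
#include <stddef.h>

/* Toy model of the removed lazy scheme; not kernel code. */
struct toy_cfs_rq {
	struct toy_cfs_rq *parent;
	int throttle_count;
	int uptodate;		/* stand-in for throttle_uptodate */
};

/* Analogue of the deleted check_enqueue_throttle() branch. */
static void toy_lazy_sync(struct toy_cfs_rq *cfs_rq)
{
	struct toy_cfs_rq *anc;

	if (cfs_rq->uptodate)
		return;
	cfs_rq->uptodate = 1;

	/* Closest up-to-date ancestor wins, mirroring the removed loop. */
	for (anc = cfs_rq->parent; anc; anc = anc->parent)
		if (anc->uptodate)
			break;
	if (anc)
		cfs_rq->throttle_count = anc->throttle_count;
}

int main(void)
{
	struct toy_cfs_rq root = { .parent = NULL, .uptodate = 1 };
	struct toy_cfs_rq mid  = { .parent = &root };
	struct toy_cfs_rq leaf = { .parent = &mid };

	root.throttle_count = 1;
	toy_lazy_sync(&leaf);	/* skips stale 'mid', copies from root */
	assert(leaf.throttle_count == 1);
	return 0;
}

The commit drops both the flag and this walk: a group's parent always exists before the group does and is kept current by the throttle/unthrottle paths, so copying from the immediate parent once at creation, under rq->lock, suffices.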
@@ -4275,6 +4255,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	throttle_cfs_rq(cfs_rq);
 }
 
+static void sync_throttle(struct task_group *tg, int cpu)
+{
+	struct cfs_rq *pcfs_rq, *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return;
+
+	if (!tg->parent)
+		return;
+
+	cfs_rq = tg->cfs_rq[cpu];
+	pcfs_rq = tg->parent->cfs_rq[cpu];
+
+	cfs_rq->throttle_count = pcfs_rq->throttle_count;
+	pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+}
+
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4414,6 +4411,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
+static inline void sync_throttle(struct task_group *tg, int cpu) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
@@ -8646,6 +8644,7 @@ void online_fair_sched_group(struct task_group *tg)
 
 		raw_spin_lock_irq(&rq->lock);
 		post_init_entity_util_avg(se);
+		sync_throttle(tg, i);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -438,7 +438,7 @@ struct cfs_rq {
 	u64 throttled_clock, throttled_clock_task;
 	u64 throttled_clock_task_time;
-	int throttled, throttle_count, throttle_uptodate;
+	int throttled, throttle_count;
 	struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */