Merge tag 'sched-urgent-2021-07-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Three fixes:

   - Fix load tracking bug/inconsistency

   - Fix a sporadic CFS bandwidth constraints enforcement bug

   - Fix a uclamp utilization tracking bug for newly woken tasks"

* tag 'sched-urgent-2021-07-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/uclamp: Ignore max aggregation if rq is idle
  sched/fair: Fix CFS bandwidth hrtimer expiry type
  sched/fair: Sync load_sum with load_avg after dequeue
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3037,8 +3037,9 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+        u32 divider = get_pelt_divider(&se->avg);
         sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-        sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+        cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 #else
 static inline void
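The merge message calls this first change a load tracking inconsistency fix: load_avg and load_sum are decayed and rounded independently, so subtracting an entity's contribution from each field separately can leave the pair out of sync. Recomputing load_sum from load_avg with the PELT divider makes them consistent by construction. A minimal userspace sketch of the effect, using illustrative numbers and a simplified stand-in for the kernel's sub_positive() (the real macro also goes through READ_ONCE()/WRITE_ONCE(), and the real subtrahend is se_weight(se) * se->avg.load_sum):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's sub_positive(): subtract,
 * clamping at zero instead of letting the unsigned value wrap. */
#define sub_positive(ptr, val) do {                       \
        __typeof__(*(ptr)) _v = (val);                    \
        *(ptr) = (*(ptr) > _v) ? *(ptr) - _v : 0;         \
} while (0)

int main(void)
{
        /* Illustrative values: rounding during decay has left the
         * entity's sum slightly smaller than its share of the rq sum. */
        unsigned long cfs_load_avg = 100, se_load_avg = 100;
        uint64_t cfs_load_sum = 4800000, se_load_sum = 4700000;
        uint32_t divider = 47742;  /* stand-in for get_pelt_divider() */

        sub_positive(&cfs_load_avg, se_load_avg);

        /* Old scheme: independent subtraction leaves avg == 0 while a
         * residue of 100000 survives in sum. */
        uint64_t old_sum = cfs_load_sum;
        sub_positive(&old_sum, se_load_sum);
        printf("old: avg=%lu sum=%llu\n", cfs_load_avg,
               (unsigned long long)old_sum);

        /* New scheme: derive sum from avg, so avg == 0 implies sum == 0. */
        cfs_load_sum = (uint64_t)cfs_load_avg * divider;
        printf("new: avg=%lu sum=%llu\n", cfs_load_avg,
               (unsigned long long)cfs_load_sum);
        return 0;
}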
@@ -5081,7 +5082,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
 {
         struct hrtimer *refresh_timer = &cfs_b->period_timer;
-        u64 remaining;
+        s64 remaining;
 
         /* if the call-back is running a quota refresh is already occurring */
         if (hrtimer_callback_running(refresh_timer))
@@ -5089,7 +5090,7 @@ static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
 
         /* is a quota refresh about to occur? */
         remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
-        if (remaining < min_expire)
+        if (remaining < (s64)min_expire)
                 return 1;
 
         return 0;
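The hrtimer expiry fix is a signedness bug: hrtimer_expires_remaining() can legitimately return a negative ktime once the period timer has already expired, and ktime_to_ns() hands that back as an s64. Stored in a u64, a small negative remainder becomes a huge unsigned number, so `remaining < min_expire` is never true and the slack path can wrongly conclude no refresh is imminent. A standalone sketch of the conversion trap, with made-up nanosecond values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Timer already expired 1 us ago: ktime_to_ns() returns -1000. */
        int64_t expires_remaining_ns = -1000;
        uint64_t min_expire = 2000000;  /* illustrative threshold */

        /* Old code: u64 remaining. The negative value wraps to ~1.8e19,
         * so the "refresh about to occur" test never fires. */
        uint64_t remaining_u64 = (uint64_t)expires_remaining_ns;
        printf("u64: remaining < min_expire -> %d\n",
               remaining_u64 < min_expire);              /* prints 0 */

        /* Fixed code: s64 remaining plus an explicit cast on min_expire
         * keeps the comparison signed. */
        int64_t remaining_s64 = expires_remaining_ns;
        printf("s64: remaining < (s64)min_expire -> %d\n",
               remaining_s64 < (int64_t)min_expire);     /* prints 1 */
        return 0;
}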
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2818,20 +2818,27 @@ static __always_inline
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
                                   struct task_struct *p)
 {
-        unsigned long min_util;
-        unsigned long max_util;
+        unsigned long min_util = 0;
+        unsigned long max_util = 0;
 
         if (!static_branch_likely(&sched_uclamp_used))
                 return util;
 
-        min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
-        max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
-
         if (p) {
-                min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
-                max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
+                min_util = uclamp_eff_value(p, UCLAMP_MIN);
+                max_util = uclamp_eff_value(p, UCLAMP_MAX);
+
+                /*
+                 * Ignore last runnable task's max clamp, as this task will
+                 * reset it. Similarly, no need to read the rq's min clamp.
+                 */
+                if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+                        goto out;
         }
 
+        min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
+        max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+out:
         /*
          * Since CPU's {min,max}_util clamps are MAX aggregated considering
          * RUNNABLE tasks with _different_ clamps, we can end up with an
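Per-CPU uclamp values are MAX-aggregated over the runnable tasks, and when an rq goes idle the last task's max clamp is deliberately left behind (marked by UCLAMP_FLAG_IDLE) so the next enqueue can reset it. Before this fix, uclamp_rq_util_with() folded that stale rq-level clamp into a newly woken task's effective value; the reordered logic starts from the task's own clamps and skips the rq aggregation entirely when the idle flag is set. A hedged userspace sketch of the aggregation order, with simplified stand-ins for struct rq and struct task_struct (field names here are illustrative, not the kernel's, and the real function also handles min/max inversion):

#include <stdio.h>

enum { UCLAMP_MIN, UCLAMP_MAX, UCLAMP_CNT };
#define UCLAMP_FLAG_IDLE 0x01

/* Simplified stand-ins for struct rq / struct task_struct. */
struct rq { unsigned long uclamp[UCLAMP_CNT]; unsigned int uclamp_flags; };
struct task { unsigned long uclamp_eff[UCLAMP_CNT]; };

static unsigned long clamp_util(unsigned long util, unsigned long lo,
                                unsigned long hi)
{
        return util < lo ? lo : (util > hi ? hi : util);
}

/* Mirrors the fixed flow: start from the task's own clamps and fold in
 * the rq-wide MAX-aggregated values only when the rq is not idle. */
static unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
                                         struct task *p)
{
        unsigned long min_util = 0, max_util = 0;

        if (p) {
                min_util = p->uclamp_eff[UCLAMP_MIN];
                max_util = p->uclamp_eff[UCLAMP_MAX];
                /* The rq values are leftovers from the last task; skip them. */
                if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
                        goto out;
        }
        if (rq->uclamp[UCLAMP_MIN] > min_util)
                min_util = rq->uclamp[UCLAMP_MIN];
        if (rq->uclamp[UCLAMP_MAX] > max_util)
                max_util = rq->uclamp[UCLAMP_MAX];
out:
        return clamp_util(util, min_util, max_util);
}

int main(void)
{
        /* Idle rq still carrying the previous task's max clamp of 1024;
         * a task capped at 512 wakes up with a raw util of 800. */
        struct rq rq = { .uclamp = { 0, 1024 }, .uclamp_flags = UCLAMP_FLAG_IDLE };
        struct task p = { .uclamp_eff = { 0, 512 } };

        printf("clamped util = %lu\n", uclamp_rq_util_with(&rq, 800, &p));
        return 0;
}

With the previous aggregation order the stale rq max of 1024 would let the raw 800 pass through unclamped; starting from the waking task's own clamps yields 512, as the task's cap intends.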