commit 328fefadd9

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two fixes: a guest-cputime accounting fix, and a cgroup bandwidth
  quota precision fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/vtime: Fix guest/system mis-accounting on task switch
  sched/fair: Scale bandwidth quota and period without losing quota/period ratio precision
kernel/sched/cputime.c
@@ -740,7 +740,7 @@ void vtime_account_system(struct task_struct *tsk)
 
 	write_seqcount_begin(&vtime->seqcount);
 	/* We might have scheduled out from guest path */
-	if (current->flags & PF_VCPU)
+	if (tsk->flags & PF_VCPU)
 		vtime_account_guest(tsk, vtime);
 	else
 		__vtime_account_system(tsk, vtime);
@@ -783,7 +783,7 @@ void vtime_guest_enter(struct task_struct *tsk)
 	 */
 	write_seqcount_begin(&vtime->seqcount);
 	__vtime_account_system(tsk, vtime);
-	current->flags |= PF_VCPU;
+	tsk->flags |= PF_VCPU;
 	write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
@@ -794,7 +794,7 @@ void vtime_guest_exit(struct task_struct *tsk)
 
 	write_seqcount_begin(&vtime->seqcount);
 	vtime_account_guest(tsk, vtime);
-	current->flags &= ~PF_VCPU;
+	tsk->flags &= ~PF_VCPU;
 	write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
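The three hunks above all make the same change: the flag test must look at the task being accounted (tsk), not at `current`. The accounting can run for a task that is scheduling out, by which point `current` already refers to the incoming task, so testing current->flags & PF_VCPU can book a vCPU thread's guest time as system time. Below is a minimal userspace sketch of that failure mode; the task struct, the PF_VCPU value, and the account_* helpers are all invented for illustration and are not kernel code:

#include <stdio.h>

#define PF_VCPU 0x1	/* illustrative value, not the kernel's */

struct task {
	unsigned int flags;
	unsigned long long guest_ns, system_ns;
};

static struct task *current_task;	/* stands in for the kernel's `current` */

/* Pre-fix behaviour: consults current_task instead of the task being accounted. */
static void account_buggy(struct task *tsk, unsigned long long delta)
{
	if (current_task->flags & PF_VCPU)
		tsk->guest_ns += delta;
	else
		tsk->system_ns += delta;
}

/* Post-fix behaviour: consults the task actually being accounted. */
static void account_fixed(struct task *tsk, unsigned long long delta)
{
	if (tsk->flags & PF_VCPU)
		tsk->guest_ns += delta;
	else
		tsk->system_ns += delta;
}

int main(void)
{
	struct task vcpu = { PF_VCPU, 0, 0 };	/* outgoing vCPU thread */
	struct task next = { 0, 0, 0 };		/* incoming ordinary task */

	/* Task switch: the vCPU thread is being scheduled out and
	 * `current` already points at the incoming task. */
	current_task = &next;

	account_buggy(&vcpu, 1000);
	printf("buggy: guest=%llu system=%llu\n", vcpu.guest_ns, vcpu.system_ns);

	vcpu.guest_ns = vcpu.system_ns = 0;
	account_fixed(&vcpu, 1000);
	printf("fixed: guest=%llu system=%llu\n", vcpu.guest_ns, vcpu.system_ns);
	return 0;
}

The buggy variant books the vCPU thread's 1000 ns as system time (guest=0 system=1000); the fixed variant books it as guest time (guest=1000 system=0).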
kernel/sched/fair.c
@@ -4926,20 +4926,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 		if (++count > 3) {
 			u64 new, old = ktime_to_ns(cfs_b->period);
 
-			new = (old * 147) / 128; /* ~115% */
-			new = min(new, max_cfs_quota_period);
-
-			cfs_b->period = ns_to_ktime(new);
-
-			/* since max is 1s, this is limited to 1e9^2, which fits in u64 */
-			cfs_b->quota *= new;
-			cfs_b->quota = div64_u64(cfs_b->quota, old);
-
-			pr_warn_ratelimited(
-	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
-				smp_processor_id(),
-				div_u64(new, NSEC_PER_USEC),
-				div_u64(cfs_b->quota, NSEC_PER_USEC));
+			/*
+			 * Grow period by a factor of 2 to avoid losing precision.
+			 * Precision loss in the quota/period ratio can cause __cfs_schedulable
+			 * to fail.
+			 */
+			new = old * 2;
+			if (new < max_cfs_quota_period) {
+				cfs_b->period = ns_to_ktime(new);
+				cfs_b->quota *= 2;
+
+				pr_warn_ratelimited(
+	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+					smp_processor_id(),
+					div_u64(new, NSEC_PER_USEC),
+					div_u64(cfs_b->quota, NSEC_PER_USEC));
+			} else {
+				pr_warn_ratelimited(
+	"cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+					smp_processor_id(),
+					div_u64(old, NSEC_PER_USEC),
+					div_u64(cfs_b->quota, NSEC_PER_USEC));
+			}
 
 			/* reset count so we don't come right back in here */
 			count = 0;
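Why doubling rather than the old ~15% growth: the removed path recomputed the quota as quota * new / old with an integer division, so each rescale could truncate and nudge the quota/period ratio a little lower, eventually making __cfs_schedulable fail; doubling both values keeps the ratio bit-exact. Below is a hypothetical standalone sketch of the drift; the starting values and iteration count are invented for illustration:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	/* illustrative starting values, in nanoseconds */
	uint64_t old_period = 100000, old_quota = 9999;
	uint64_t new_period = old_period, new_quota = old_quota;

	for (int i = 0; i < 5; i++) {
		/* pre-fix scaling: ~115%, quota recomputed with truncation */
		uint64_t p = (old_period * 147) / 128;
		old_quota = (old_quota * p) / old_period;	/* truncates */
		old_period = p;

		/* post-fix scaling: exact, the ratio is unchanged */
		new_period *= 2;
		new_quota *= 2;
	}

	printf("147/128 scaling: %" PRIu64 "/%" PRIu64 " = %.6f\n",
	       old_quota, old_period, (double)old_quota / old_period);
	printf("doubling:        %" PRIu64 "/%" PRIu64 " = %.6f\n",
	       new_quota, new_period, (double)new_quota / new_period);
	return 0;
}

Both ratios start at 0.099990; after five rescales the 147/128 path prints a slightly smaller ratio, while the doubling path still prints exactly 0.099990.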