sched/numa: Initialise numa_next_scan properly
Scan delay logic and resets are currently initialised to start scanning
immediately instead of delaying properly. Initialise them properly at
fork time and catch when a new mm has been allocated.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-17-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 7e8d16b6cb
parent b726b7dfb4
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1624,8 +1624,8 @@ static void __sched_fork(struct task_struct *p)
 
 #ifdef CONFIG_NUMA_BALANCING
 	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
-		p->mm->numa_next_scan = jiffies;
-		p->mm->numa_next_reset = jiffies;
+		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
+		p->mm->numa_next_reset = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
 		p->mm->numa_scan_seq = 0;
 	}
 
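The core.c change is plain deadline arithmetic in jiffies: instead of making the first scan due immediately, the deadline is pushed out by the scan-delay sysctl. A minimal userspace sketch of that arithmetic follows; the types, tick rate and comparison helper are stand-ins (not the kernel's headers), with the comparison written in the style of the kernel's time_before():

/* Userspace sketch only: stand-in types and helpers, not kernel headers. */
#include <stdio.h>

typedef unsigned long jiffies_t;

/* Pretend 1 jiffy == 1 ms (i.e. HZ == 1000) for the sketch. */
static jiffies_t msecs_to_jiffies_stub(unsigned long ms) { return ms; }

/* Wraparound-safe "is a earlier than b?", in the style of time_before(). */
static int time_before_stub(jiffies_t a, jiffies_t b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	jiffies_t jiffies = 1000;            /* current tick count */
	unsigned long scan_delay_ms = 1000;  /* stand-in sysctl value */

	jiffies_t old_init = jiffies;        /* old: scan due immediately */
	jiffies_t new_init = jiffies +       /* new: first scan deferred */
		msecs_to_jiffies_stub(scan_delay_ms);

	printf("old init already due? %d\n", !time_before_stub(jiffies, old_init));
	printf("new init already due? %d\n", !time_before_stub(jiffies, new_init));
	return 0;
}

With the old initialisation the deadline is already due on the first check; with the new one it only becomes due after the configured delay has elapsed.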
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -900,6 +900,13 @@ void task_numa_work(struct callback_head *work)
 	if (p->flags & PF_EXITING)
 		return;
 
+	if (!mm->numa_next_reset || !mm->numa_next_scan) {
+		mm->numa_next_scan = now +
+			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
+		mm->numa_next_reset = now +
+			msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
+	}
+
 	/*
 	 * Reset the scan period if enough time has gone by. Objective is that
 	 * scanning will be reduced if pages are properly placed. As tasks
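The fair.c hunk is a lazy-initialisation guard: an mm allocated without passing through the fork-time path reads back zero for both timestamps, and the worker fixes them up on first entry. A minimal sketch of that pattern, using hypothetical stand-in types and made-up constants rather than the real mm_struct and sysctls:

/* Userspace sketch of the lazy-init guard; all names here are stand-ins. */
#include <stdio.h>

#define SCAN_DELAY_MS    1000   /* made-up stand-in for the scan-delay sysctl */
#define PERIOD_RESET_MS 60000   /* made-up stand-in for the period-reset sysctl */

struct mm_stub {
	unsigned long numa_next_scan;   /* 0 => never initialised */
	unsigned long numa_next_reset;  /* 0 => never initialised */
};

/* Pretend 1 tick == 1 ms for the sketch. */
static unsigned long msecs_to_ticks(unsigned long ms) { return ms; }

static void numa_work_stub(struct mm_stub *mm, unsigned long now)
{
	/* Catch an mm that missed the fork-time initialisation. */
	if (!mm->numa_next_reset || !mm->numa_next_scan) {
		mm->numa_next_scan  = now + msecs_to_ticks(SCAN_DELAY_MS);
		mm->numa_next_reset = now + msecs_to_ticks(PERIOD_RESET_MS);
	}
	printf("next scan %lu, next reset %lu\n",
	       mm->numa_next_scan, mm->numa_next_reset);
}

int main(void)
{
	struct mm_stub mm = { 0, 0 };   /* freshly allocated, zeroed mm */
	numa_work_stub(&mm, 5);         /* first call initialises both */
	numa_work_stub(&mm, 6);         /* later calls leave them alone */
	return 0;
}

Treating a zero timestamp as "never set" is what lets the worker repair the state without any extra flag in the structure.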