forked from Minki/linux
x86, 64bit, mm: Make pgd next calculation consistent with pud/pmd
Just like the way we calculate next for pud and pmd, aka round down and add size. Also, do not do boundary-checking with 'next', and just pass 'end' down to phys_pud_init() instead. Because the loop in phys_pud_init() stops at PTRS_PER_PUD and thus can handle a possibly bigger 'end' properly. Signed-off-by: Yinghai Lu <yinghai@kernel.org> Link: http://lkml.kernel.org/r/1359058816-7615-6-git-send-email-yinghai@kernel.org Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
This commit is contained in:
parent
b422a30917
commit
c2bdee594e
@@ -530,9 +530,7 @@ kernel_physical_mapping_init(unsigned long start,
 		pgd_t *pgd = pgd_offset_k(start);
 		pud_t *pud;
 
-		next = (start + PGDIR_SIZE) & PGDIR_MASK;
-		if (next > end)
-			next = end;
+		next = (start & PGDIR_MASK) + PGDIR_SIZE;
 
 		if (pgd_val(*pgd)) {
 			pud = (pud_t *)pgd_page_vaddr(*pgd);
@@ -542,7 +540,7 @@ kernel_physical_mapping_init(unsigned long start,
 		}
 
 		pud = alloc_low_page();
-		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
+		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
 						 page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
Loading…
Reference in New Issue
Block a user