forked from Minki/linux
vmalloc: choose a better start address in vm_area_register_early()
The percpu embedded first chunk allocator is the first option, but it can fail on ARM64, e.g., percpu: max_distance=0x5fcfdc640000 too large for vmalloc space 0x781fefff0000 percpu: max_distance=0x600000540000 too large for vmalloc space 0x7dffb7ff0000 percpu: max_distance=0x5fff9adb0000 too large for vmalloc space 0x5dffb7ff0000 then we hit WARNING: CPU: 15 PID: 461 at vmalloc.c:3087 pcpu_get_vm_areas+0x488/0x838 and the system cannot boot successfully. Let's implement the page mapping percpu first chunk allocator as a fallback to the embedding allocator to increase the robustness of the system. Also fix a crash when both NEED_PER_CPU_PAGE_FIRST_CHUNK and KASAN_VMALLOC are enabled. Tested on ARM64 qemu with cmdline "percpu_alloc=page". This patch (of 3): There are some fixed locations in the vmalloc area that are reserved in ARM (see iotable_init()) and ARM64 (see map_kernel()), but pcpu_page_first_chunk() calls vm_area_register_early() and chooses VMALLOC_START as the start address of the vmap area, which could conflict with the above addresses and then trigger a BUG_ON in vm_area_add_early(). Let's choose a suitable start address by traversing the vmlist. Link: https://lkml.kernel.org/r/20210910053354.26721-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20210910053354.26721-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will@kernel.org> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Andrey Konovalov <andreyknvl@gmail.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Marco Elver <elver@google.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
dd544141b9
commit
0eb68437a7
18
mm/vmalloc.c
18
mm/vmalloc.c
@@ -2276,15 +2276,21 @@ void __init vm_area_add_early(struct vm_struct *vm)
|
||||
*/
|
||||
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	unsigned long addr = ALIGN(VMALLOC_START, align);
	struct vm_struct *cur, **p;

	/* Only valid during early boot, before the vmap subsystem is up. */
	BUG_ON(vmap_initialized);

	/*
	 * vmlist is kept sorted by address. Walk it looking for the first
	 * gap large enough to hold @vm, so we do not collide with areas
	 * already reserved via vm_area_add_early() (e.g. ARM iotable_init()
	 * or ARM64 map_kernel() fixed mappings).
	 *
	 * Note: (cur->addr - addr) relies on unsigned wraparound when
	 * cur->addr < addr, making the gap test fail and the search advance
	 * past @cur — intentional, but subtle.
	 */
	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
		if ((unsigned long)cur->addr - addr >= vm->size)
			break;
		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
	}

	/* The chosen slot must still fit inside the vmalloc range. */
	BUG_ON(addr > VMALLOC_END - vm->size);
	vm->addr = (void *)addr;

	/* Insert @vm at the found position, keeping vmlist address-sorted. */
	vm->next = *p;
	*p = vm;
}
|
||||
|
||||
static void vmap_init_free_space(void)
|
||||
|
Loading…
Reference in New Issue
Block a user