b10d6bca87

There are several occurrences of the following pattern:

	for_each_memblock(memory, reg) {
		start = __pfn_to_phys(memblock_region_memory_base_pfn(reg));
		end = __pfn_to_phys(memblock_region_memory_end_pfn(reg));

		/* do something with start and end */
	}

Using the for_each_mem_range() iterator is more appropriate in such cases
and allows simpler and cleaner code.

[akpm@linux-foundation.org: fix arch/arm/mm/pmsa-v7.c build]
[rppt@linux.ibm.com: mips: fix cavium-octeon build caused by memblock refactoring]
  Link: http://lkml.kernel.org/r/20200827124549.GD167163@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Emil Renner Berthing <kernel@esmil.dk>
Cc: Hari Bathini <hbathini@linux.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: https://lkml.kernel.org/r/20200818151634.14343-13-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
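For context, a minimal before/after sketch of the conversion this patch
applies (the declarations are assumed for illustration; the loop bodies are
the pattern quoted above, and for_each_mem_range() is used exactly as in
kasan_init() below):

	/* Before: each caller converted region PFNs to physical addresses. */
	struct memblock_region *reg;
	phys_addr_t start, end;

	for_each_memblock(memory, reg) {
		start = __pfn_to_phys(memblock_region_memory_base_pfn(reg));
		end = __pfn_to_phys(memblock_region_memory_end_pfn(reg));

		/* do something with start and end */
	}

	/* After: the iterator yields each range's physical bounds directly. */
	u64 i;
	phys_addr_t start, end;

	for_each_mem_range(i, &start, &end) {
		/* do something with start and end */
	}
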
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

extern pgd_t early_pg_dir[PTRS_PER_PGD];
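
/*
 * Early shadow setup, called before the final page tables are live (hence
 * asmlinkage): point every PGD slot covering the shadow region at one
 * shared PMD, every PMD slot at one shared PTE table, and every PTE slot
 * at one shared page, so all shadow accesses hit a single zero page until
 * kasan_init() builds the real mappings.
 */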
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
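
	/* Map every slot of the shared early PTE table to the shadow page. */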
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));
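
	/* Point every slot of the shared early PMD at that PTE table. */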
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t) kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));
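
	/* Plug the shared PMD into early_pg_dir across the shadow region. */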
	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	/* init for swapper_pg_dir */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}
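
/*
 * Map the shadow virtual range [start, end) with freshly allocated pages
 * and page tables from memblock, then zero it so the memory it shadows is
 * marked fully accessible.
 */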
static void __init populate(void *start, void *end)
{
	unsigned long i, offset;
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);
	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
	unsigned long n_ptes =
		((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
	unsigned long n_pmds =
		((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;

	pte_t *pte =
		memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	pmd_t *pmd =
		memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	pgd_t *pgd = pgd_offset_k(vaddr);
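
	/* Back each shadow page in the range with a new physical page. */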
	for (i = 0; i < n_pages; i++) {
		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
	}
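
	/* Hook the new PTE tables into the PMDs, and the PMDs into the PGD. */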
	for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
		set_pmd(&pmd[i],
			pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
				__pgprot(_PAGE_TABLE)));

	for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
		set_pgd(&pgd[i],
			pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
	memset(start, 0, end - start);
}
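
/*
 * Main KASAN initialisation: point the shadow of the vmalloc space at the
 * early zero shadow, populate real shadow for every physical memory range,
 * then make the early shadow page read-only and enable reporting.
 */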
void __init kasan_init(void)
{
	phys_addr_t _start, _end;
	u64 i;

	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)kasan_mem_to_shadow((void *)
								VMALLOC_END));

	for_each_mem_range(i, &_start, &_end) {
		void *start = (void *)_start;
		void *end = (void *)_end;

		if (start >= end)
			break;

		populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}
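
	/* Downgrade the early shadow page to a read-only mapping. */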
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));
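
	/*
	 * Wipe the shadow page and drop init_task's kasan_depth so KASAN
	 * starts emitting reports.
	 */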
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	init_task.kasan_depth = 0;
}