Merge branch 'akpm' (patches from Andrew)

Merge yet more updates from Andrew Morton:

 - More MM work. 100ish more to go. Mike Rapoport's "mm: remove
   __ARCH_HAS_5LEVEL_HACK" series should fix the current ppc issue

 - Various other little subsystems

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (127 commits)
  lib/ubsan.c: fix gcc-10 warnings
  tools/testing/selftests/vm: remove duplicate headers
  selftests: vm: pkeys: fix multilib builds for x86
  selftests: vm: pkeys: use the correct page size on powerpc
  selftests/vm/pkeys: override access right definitions on powerpc
  selftests/vm/pkeys: test correct behaviour of pkey-0
  selftests/vm/pkeys: introduce a sub-page allocator
  selftests/vm/pkeys: detect write violation on a mapped access-denied-key page
  selftests/vm/pkeys: associate key on a mapped page and detect write violation
  selftests/vm/pkeys: associate key on a mapped page and detect access violation
  selftests/vm/pkeys: improve checks to determine pkey support
  selftests/vm/pkeys: fix assertion in test_pkey_alloc_exhaust()
  selftests/vm/pkeys: fix number of reserved powerpc pkeys
  selftests/vm/pkeys: introduce powerpc support
  selftests/vm/pkeys: introduce generic pkey abstractions
  selftests: vm: pkeys: use the correct huge page size
  selftests/vm/pkeys: fix alloc_random_pkey() to make it really random
  selftests/vm/pkeys: fix assertion in pkey_disable_set/clear()
  selftests/vm/pkeys: fix pkey_disable_clear()
  selftests: vm: pkeys: add helpers for pkey bits
  ...

commit 886d7de631
@@ -217,14 +217,15 @@ This allows to collect coverage from two types of kernel background
threads: the global ones, that are spawned during kernel boot in a limited
number of instances (e.g. one USB hub_event() worker thread is spawned per
USB HCD); and the local ones, that are spawned when a user interacts with
some kernel interface (e.g. vhost workers).
some kernel interface (e.g. vhost workers); as well as from soft
interrupts.

To enable collecting coverage from a global background thread, a unique
global handle must be assigned and passed to the corresponding
kcov_remote_start() call. Then a userspace process can pass a list of such
handles to the KCOV_REMOTE_ENABLE ioctl in the handles array field of the
kcov_remote_arg struct. This will attach the used kcov device to the code
sections, that are referenced by those handles.
To enable collecting coverage from a global background thread or from a
softirq, a unique global handle must be assigned and passed to the
corresponding kcov_remote_start() call. Then a userspace process can pass
a list of such handles to the KCOV_REMOTE_ENABLE ioctl in the handles
array field of the kcov_remote_arg struct. This will attach the used kcov
device to the code sections, that are referenced by those handles.

Since there might be many local background threads spawned from different
userspace processes, we can't use a single global handle per annotation.
@@ -242,7 +243,7 @@ handles as they don't belong to a particular subsystem. The bytes 4-7 are
currently reserved and must be zero. In the future the number of bytes
used for the subsystem or handle ids might be increased.

When a particular userspace proccess collects coverage by via a common
When a particular userspace proccess collects coverage via a common
handle, kcov will collect coverage for each code section that is annotated
to use the common handle obtained as kcov_handle from the current
task_struct. However non common handles allow to collect coverage
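[ Illustration, not part of the commit: a condensed userspace sketch of the
  flow described above -- pass a list of global handles to KCOV_REMOTE_ENABLE,
  let the annotated kernel code run, then read the coverage area. It follows
  the example in the kcov documentation; the ioctl numbers and kcov_remote_arg
  layout are assumed from the uapi header of this era, the handle value is
  hypothetical, and error handling is omitted for brevity. ]

    /* gcc -o kcov_remote kcov_remote.c */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/types.h>

    struct kcov_remote_arg {
            __u32           trace_mode;
            __u32           area_size;
            __u32           num_handles;
            __aligned_u64   common_handle;
            __aligned_u64   handles[0];
    };

    #define KCOV_INIT_TRACE         _IOR('c', 1, unsigned long)
    #define KCOV_DISABLE            _IO('c', 101)
    #define KCOV_REMOTE_ENABLE      _IOW('c', 102, struct kcov_remote_arg)
    #define KCOV_TRACE_PC           0
    #define COVER_SIZE              (64 << 10)

    int main(void)
    {
            struct kcov_remote_arg *arg;
            unsigned long *cover, n, i;
            int fd = open("/sys/kernel/debug/kcov", O_RDWR);

            ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
            cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

            /* one global handle in the handles array field */
            arg = calloc(1, sizeof(*arg) + sizeof(__u64));
            arg->trace_mode = KCOV_TRACE_PC;
            arg->area_size = COVER_SIZE;
            arg->num_handles = 1;
            arg->handles[0] = 0x4242000000000000ull; /* hypothetical handle */
            ioctl(fd, KCOV_REMOTE_ENABLE, arg);

            sleep(2); /* let the annotated kernel thread / softirq run */

            n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
            for (i = 0; i < n; i++)
                    printf("0x%lx\n", cover[i + 1]);
            ioctl(fd, KCOV_DISABLE, 0);
            return 0;
    }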
@@ -0,0 +1,34 @@
#
# Feature name: debug-vm-pgtable
# Kconfig: ARCH_HAS_DEBUG_VM_PGTABLE
# description: arch supports pgtable tests for semantics compliance
#
    -----------------------
    |       arch |status|
    -----------------------
    |      alpha | TODO |
    |        arc |  ok  |
    |        arm | TODO |
    |      arm64 |  ok  |
    |        c6x | TODO |
    |       csky | TODO |
    |      h8300 | TODO |
    |    hexagon | TODO |
    |       ia64 | TODO |
    |       m68k | TODO |
    | microblaze | TODO |
    |       mips | TODO |
    |      nds32 | TODO |
    |      nios2 | TODO |
    |   openrisc | TODO |
    |     parisc | TODO |
    |    powerpc |  ok  |
    |      riscv | TODO |
    |       s390 |  ok  |
    |         sh | TODO |
    |      sparc | TODO |
    |         um | TODO |
    |  unicore32 | TODO |
    |        x86 |  ok  |
    |     xtensa | TODO |
    -----------------------
@@ -6,6 +6,7 @@
config ARC
        def_bool y
        select ARC_TIMERS
        select ARCH_HAS_DEBUG_VM_PGTABLE
        select ARCH_HAS_DMA_PREP_COHERENT
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SETUP_DMA_OPS
@@ -25,17 +25,8 @@
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)

#define kmap_prot PAGE_KERNEL

#include <asm/cacheflush.h>

extern void *kmap(struct page *page);
extern void *kmap_high(struct page *page);
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void kunmap_high(struct page *page);

extern void kmap_init(void);

static inline void flush_cache_kmaps(void)
@@ -43,15 +34,6 @@ static inline void flush_cache_kmaps(void)
        flush_cache_all();
}

static inline void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}

#endif

#endif
@@ -49,38 +49,23 @@
extern pte_t * pkmap_page_table;
static pte_t * fixmap_page_table;

void *kmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return page_address(page);

        return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void *kmap_atomic(struct page *page)
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
        int idx, cpu_idx;
        unsigned long vaddr;

        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        cpu_idx = kmap_atomic_idx_push();
        idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
        vaddr = FIXMAP_ADDR(idx);

        set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
                   mk_pte(page, kmap_prot));
                   mk_pte(page, prot));

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kmap_atomic_high_prot);

void __kunmap_atomic(void *kv)
void kunmap_atomic_high(void *kv)
{
        unsigned long kvaddr = (unsigned long)kv;

@@ -102,11 +87,8 @@ void __kunmap_atomic(void *kv)

                kmap_atomic_idx_pop();
        }

        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(kunmap_atomic_high);

static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
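[ Context for the two hunks above, not part of the commit: the kmap rework in
  this merge moves the PageHighMem() fast path and the preempt/pagefault
  bookkeeping into generic code, so each arch keeps only the part that touches
  its own fixmap. A rough sketch of the generic wrapper the renamed arch hooks
  plug into (simplified; the real one lives in the generic highmem header and
  has a few more cases):

    static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
    {
            preempt_disable();
            pagefault_disable();
            if (!PageHighMem(page))
                    return page_address(page);      /* lowmem: no fixmap slot */
            return kmap_atomic_high_prot(page, prot); /* arch hook, as above */
    }
]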
@@ -10,8 +10,6 @@
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define kmap_prot PAGE_KERNEL

#define flush_cache_kmaps() \
        do { \
                if (cache_is_vivt()) \
@@ -20,9 +18,6 @@

extern pte_t *pkmap_page_table;

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

/*
 * The reason for kmap_high_get() is to ensure that the currently kmap'd
 * page usage count does not decrease to zero while we're using its
@@ -63,10 +58,6 @@ static inline void *kmap_high_get(struct page *page)
 * when CONFIG_HIGHMEM is not set.
 */
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
#endif
@@ -17,7 +17,6 @@

#else

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -24,6 +24,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
        unsigned long addr = (unsigned long)_addr;
        pgd_t *pgd;
        p4d_t *p4d;
        pmd_t *pmd;
        pte_t *pte;
        pud_t *pud;
@@ -33,7 +34,11 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
        if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
                return 0;

        pud = pud_offset(pgd, addr);
        p4d = p4d_offset(pgd, addr);
        if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
                return 0;

        pud = pud_offset(p4d, addr);
        if (unlikely(pud_none(*pud) || pud_bad(*pud)))
                return 0;
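[ The same mechanical recipe repeats through the rest of this merge: declare a
  p4d, take p4d_offset() between the pgd and pud steps, and check the new
  level. The full descent now has this shape (a generic sketch, not code from
  the commit; on ARM the p4d is folded, so the extra step compiles away):

    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none_or_clear_bad(pgd))
                    return NULL;
            p4d = p4d_offset(pgd, addr);            /* the new intermediate level */
            if (p4d_none_or_clear_bad(p4d))
                    return NULL;
            pud = pud_offset(p4d, addr);            /* was pud_offset(pgd, addr) */
            if (pud_none_or_clear_bad(pud))
                    return NULL;
            pmd = pmd_offset(pud, addr);
            if (pmd_none_or_clear_bad(pmd))
                    return NULL;
            return pte_offset_map(pmd, addr);
    }
]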
@@ -633,7 +633,7 @@ static void __init map_sa1100_gpio_regs( void )
        int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
        pmd_t *pmd;

        pmd = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
        pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset_k(virt), virt), virt), virt);
        *pmd = __pmd(phys | prot);
        flush_pmd_entry(pmd);
}
@@ -207,6 +207,7 @@ struct pg_level {
static struct pg_level pg_level[] = {
        {
        }, { /* pgd */
        }, { /* p4d */
        }, { /* pud */
        }, { /* pmd */
                .bits = section_bits,
@@ -308,7 +309,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,

        for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
                addr = start + i * PAGE_SIZE;
                note_page(st, addr, 4, pte_val(*pte), domain);
                note_page(st, addr, 5, pte_val(*pte), domain);
        }
}

@@ -350,14 +351,14 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
                        addr += SECTION_SIZE;
                        pmd++;
                        domain = get_domain_name(pmd);
                        note_page(st, addr, 3, pmd_val(*pmd), domain);
                        note_page(st, addr, 4, pmd_val(*pmd), domain);
                }
        }
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
        pud_t *pud = pud_offset(pgd, 0);
        pud_t *pud = pud_offset(p4d, 0);
        unsigned long addr;
        unsigned i;

@@ -366,7 +367,23 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
                if (!pud_none(*pud)) {
                        walk_pmd(st, pud, addr);
                } else {
                        note_page(st, addr, 2, pud_val(*pud), NULL);
                        note_page(st, addr, 3, pud_val(*pud), NULL);
                }
        }
}

static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
        p4d_t *p4d = p4d_offset(pgd, 0);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
                addr = start + i * P4D_SIZE;
                if (!p4d_none(*p4d)) {
                        walk_pud(st, p4d, addr);
                } else {
                        note_page(st, addr, 2, p4d_val(*p4d), NULL);
                }
        }
}
@@ -381,7 +398,7 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
        for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
                addr = start + i * PGDIR_SIZE;
                if (!pgd_none(*pgd)) {
                        walk_pud(st, pgd, addr);
                        walk_p4d(st, pgd, addr);
                } else {
                        note_page(st, addr, 1, pgd_val(*pgd), NULL);
                }
@@ -91,6 +91,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
{
        spinlock_t *ptl;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
@@ -100,7 +101,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
        if (pgd_none_or_clear_bad(pgd))
                return 0;

        pud = pud_offset(pgd, address);
        p4d = p4d_offset(pgd, address);
        if (p4d_none_or_clear_bad(p4d))
                return 0;

        pud = pud_offset(p4d, address);
        if (pud_none_or_clear_bad(pud))
                return 0;
|
@ -43,19 +43,21 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
|
||||
printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));
|
||||
|
||||
do {
|
||||
p4d_t *p4d;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
|
||||
if (pgd_none(*pgd))
|
||||
p4d = p4d_offset(pgd, addr);
|
||||
if (p4d_none(*p4d))
|
||||
break;
|
||||
|
||||
if (pgd_bad(*pgd)) {
|
||||
if (p4d_bad(*p4d)) {
|
||||
pr_cont("(bad)");
|
||||
break;
|
||||
}
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
pud = pud_offset(p4d, addr);
|
||||
if (PTRS_PER_PUD != 1)
|
||||
pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
|
||||
|
||||
@ -405,6 +407,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
|
||||
{
|
||||
unsigned int index;
|
||||
pgd_t *pgd, *pgd_k;
|
||||
p4d_t *p4d, *p4d_k;
|
||||
pud_t *pud, *pud_k;
|
||||
pmd_t *pmd, *pmd_k;
|
||||
|
||||
@ -419,13 +422,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
|
||||
pgd = cpu_get_pgd() + index;
|
||||
pgd_k = init_mm.pgd + index;
|
||||
|
||||
if (pgd_none(*pgd_k))
|
||||
goto bad_area;
|
||||
if (!pgd_present(*pgd))
|
||||
set_pgd(pgd, *pgd_k);
|
||||
p4d = p4d_offset(pgd, addr);
|
||||
p4d_k = p4d_offset(pgd_k, addr);
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
pud_k = pud_offset(pgd_k, addr);
|
||||
if (p4d_none(*p4d_k))
|
||||
goto bad_area;
|
||||
if (!p4d_present(*p4d))
|
||||
set_p4d(p4d, *p4d_k);
|
||||
|
||||
pud = pud_offset(p4d, addr);
|
||||
pud_k = pud_offset(p4d_k, addr);
|
||||
|
||||
if (pud_none(*pud_k))
|
||||
goto bad_area;
|
||||
|
@@ -31,36 +31,13 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
        return *ptep;
}

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

void *kmap_atomic(struct page *page)
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;
        int type;

        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * There is no cache coherency issue when non VIVT, so force the
@@ -90,13 +67,13 @@ void *kmap_atomic(struct page *page)
         * in place, so the contained TLB flush ensures the TLB is updated
         * with the new mapping.
         */
        set_fixmap_pte(idx, mk_pte(page, kmap_prot));
        set_fixmap_pte(idx, mk_pte(page, prot));

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kmap_atomic_high_prot);

void __kunmap_atomic(void *kvaddr)
void kunmap_atomic_high(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int idx, type;
@@ -118,10 +95,8 @@ void __kunmap_atomic(void *kvaddr)
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(kunmap_atomic_high);

void *kmap_atomic_pfn(unsigned long pfn)
{
@@ -68,7 +68,8 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                          unsigned long prot)
{
        pud_t *pud = pud_offset(pgd, addr);
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        unsigned long next;

        do {
@@ -519,7 +519,7 @@ static inline void section_update(unsigned long addr, pmdval_t mask,
{
        pmd_t *pmd;

        pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
        pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
        pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
@@ -142,12 +142,14 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmdp;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        p4d = p4d_offset(pgd, addr);
        pud = pud_offset(p4d, addr);
        pmdp = pmd_offset(pud, addr);
        do {
                pmd_t pmd = *pmdp;
@@ -190,6 +192,7 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

@@ -200,7 +203,8 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        p4d = p4d_offset(pgd, addr);
        pud = pud_offset(p4d, addr);
        pmd = pmd_offset(pud, addr);
        do {
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
@@ -222,6 +226,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

@@ -232,7 +237,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        pud = pud_offset(pgd, addr);
        p4d = p4d_offset(pgd, addr);
        pud = pud_offset(p4d, addr);
        pmd = pmd_offset(pud, addr);
        do {
                unsigned long super_pmd_val, i;
@@ -38,7 +38,7 @@ static inline pte_t get_top_pte(unsigned long va)

static inline pmd_t *pmd_off_k(unsigned long virt)
{
        return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
        return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(virt), virt), virt), virt);
}

struct mem_type {
@@ -357,7 +357,8 @@ static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        pud_t *pud = pud_offset(pgd, addr);
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
@@ -801,12 +802,12 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
        } while (pmd++, addr = next, addr != end);
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr,
                                  unsigned long end, phys_addr_t phys,
                                  const struct mem_type *type,
                                  void *(*alloc)(unsigned long sz), bool ng)
{
        pud_t *pud = pud_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        unsigned long next;

        do {
@@ -816,6 +817,21 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
        } while (pud++, addr = next, addr != end);
}

static void __init alloc_init_p4d(pgd_t *pgd, unsigned long addr,
                                  unsigned long end, phys_addr_t phys,
                                  const struct mem_type *type,
                                  void *(*alloc)(unsigned long sz), bool ng)
{
        p4d_t *p4d = p4d_offset(pgd, addr);
        unsigned long next;

        do {
                next = p4d_addr_end(addr, end);
                alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
                phys += next - addr;
        } while (p4d++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct mm_struct *mm,
                                        struct map_desc *md,
@@ -863,7 +879,8 @@ static void __init create_36bit_mapping(struct mm_struct *mm,
        pgd = pgd_offset(mm, addr);
        end = addr + length;
        do {
                pud_t *pud = pud_offset(pgd, addr);
                p4d_t *p4d = p4d_offset(pgd, addr);
                pud_t *pud = pud_offset(p4d, addr);
                pmd_t *pmd = pmd_offset(pud, addr);
                int i;

@@ -914,7 +931,7 @@ static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
        do {
                unsigned long next = pgd_addr_end(addr, end);

                alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
                alloc_init_p4d(pgd, addr, next, phys, type, alloc, ng);

                phys += next - addr;
                addr = next;
@@ -950,7 +967,13 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
                                bool ng)
{
#ifdef CONFIG_ARM_LPAE
        pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
        p4d_t *p4d;
        pud_t *pud;

        p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
        if (!WARN_ON(!p4d))
                return;
        pud = pud_alloc(mm, p4d, md->virtual);
        if (WARN_ON(!pud))
                return;
        pmd_alloc(mm, pud, 0);
@@ -30,6 +30,7 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        p4d_t *new_p4d, *init_p4d;
        pud_t *new_pud, *init_pud;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;
@@ -53,8 +54,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
        /*
         * Allocate PMD table for modules and pkmap mappings.
         */
        new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
        new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
                            MODULES_VADDR);
        if (!new_p4d)
                goto no_p4d;

        new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
        if (!new_pud)
                goto no_pud;

@@ -69,7 +74,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
         * contains the machine vectors. The vectors are always high
         * with LPAE.
         */
        new_pud = pud_alloc(mm, new_pgd, 0);
        new_p4d = p4d_alloc(mm, new_pgd, 0);
        if (!new_p4d)
                goto no_p4d;

        new_pud = pud_alloc(mm, new_p4d, 0);
        if (!new_pud)
                goto no_pud;

@@ -91,7 +100,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
        pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

        init_pud = pud_offset(init_pgd, 0);
        init_p4d = p4d_offset(init_pgd, 0);
        init_pud = pud_offset(init_p4d, 0);
        init_pmd = pmd_offset(init_pud, 0);
        init_pte = pte_offset_map(init_pmd, 0);
        set_pte_ext(new_pte + 0, init_pte[0], 0);
@@ -108,6 +118,8 @@ no_pte:
no_pmd:
        pud_free(mm, new_pud);
no_pud:
        p4d_free(mm, new_p4d);
no_p4d:
        __pgd_free(new_pgd);
no_pgd:
        return NULL;
@@ -116,6 +128,7 @@ no_pgd:
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pgtable_t pte;
@@ -127,7 +140,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
        if (pgd_none_or_clear_bad(pgd))
                goto no_pgd;

        pud = pud_offset(pgd, 0);
        p4d = p4d_offset(pgd, 0);
        if (p4d_none_or_clear_bad(p4d))
                goto no_p4d;

        pud = pud_offset(p4d, 0);
        if (pud_none_or_clear_bad(pud))
                goto no_pud;

@@ -144,8 +161,11 @@ no_pmd:
        pmd_free(mm, pmd);
        mm_dec_nr_pmds(mm);
no_pud:
        pgd_clear(pgd);
        p4d_clear(p4d);
        pud_free(mm, pud);
no_p4d:
        pgd_clear(pgd);
        p4d_free(mm, p4d);
no_pgd:
#ifdef CONFIG_ARM_LPAE
        /*
@@ -156,15 +176,21 @@ no_pgd:
                        continue;
                if (pgd_val(*pgd) & L_PGD_SWAPPER)
                        continue;
                pud = pud_offset(pgd, 0);
                p4d = p4d_offset(pgd, 0);
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                pud = pud_offset(p4d, 0);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pmd = pmd_offset(pud, 0);
                pud_clear(pud);
                pmd_free(mm, pmd);
                mm_dec_nr_pmds(mm);
                pgd_clear(pgd);
                p4d_clear(p4d);
                pud_free(mm, pud);
                mm_dec_nr_puds(mm);
                pgd_clear(pgd);
                p4d_free(mm, p4d);
        }
#endif
        __pgd_free(pgd_base);
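[ Illustration, not part of the commit: adding a level also adds one rung to
  the error-unwind ladder, as the new no_p4d label above shows. The idiom,
  reduced to a schematic (generic names; the body of the real pgd_alloc() does
  considerably more than this):

    static pgd_t *alloc_levels_sketch(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_alloc(mm);     /* top-level table */
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (!pgd)
                    return NULL;
            p4d = p4d_alloc(mm, pgd, addr);
            if (!p4d)
                    goto no_p4d;
            pud = pud_alloc(mm, p4d, addr);
            if (!pud)
                    goto no_pud;
            pmd = pmd_alloc(mm, pud, addr);
            if (!pmd)
                    goto no_pmd;
            return pgd;

    no_pmd:
            pud_free(mm, pud);
    no_pud:
            p4d_free(mm, p4d);              /* the rung this series adds */
    no_p4d:
            pgd_free(mm, pgd);
            return NULL;
    }
]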
@@ -12,6 +12,7 @@ config ARM64
        select ARCH_HAS_DEBUG_WX
        select ARCH_BINFMT_ELF_STATE
        select ARCH_HAS_DEBUG_VIRTUAL
        select ARCH_HAS_DEBUG_VM_PGTABLE
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_DMA_PREP_COHERENT
        select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
@@ -172,8 +172,8 @@ void kvm_clear_hyp_idmap(void);
        __pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp) \
        __pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
#define kvm_mk_pgd(pudp) \
        __pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE)
#define kvm_mk_p4d(pmdp) \
        __p4d(__phys_to_p4d_val(__pa(pmdp)) | PUD_TYPE_TABLE)

#define kvm_set_pud(pudp, pud) set_pud(pudp, pud)

@@ -299,6 +299,12 @@ static inline bool kvm_s2pud_young(pud_t pud)
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

#ifdef __PAGETABLE_P4D_FOLDED
#define hyp_p4d_table_empty(p4dp) (0)
#else
#define hyp_p4d_table_empty(p4dp) kvm_page_empty(p4dp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
@@ -73,17 +73,17 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
        free_page((unsigned long)pudp);
}

static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
{
        set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot));
        set_p4d(p4dp, __p4d(__phys_to_p4d_val(pudp) | prot));
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
{
        __pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
        __p4d_populate(p4dp, __pa(pudp), PUD_TYPE_TABLE);
}
#else
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
{
        BUILD_BUG();
}
@@ -14,6 +14,7 @@
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 p4dval_t;
typedef u64 pgdval_t;

/*
@@ -44,13 +45,11 @@ typedef struct { pteval_t pgprot; } pgprot_t;
#define __pgprot(x) ((pgprot_t) { (x) } )

#if CONFIG_PGTABLE_LEVELS == 2
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 4
#include <asm-generic/5level-fixup.h>
#include <asm-generic/pgtable-nop4d.h>
#endif

#endif /* __ASM_PGTABLE_TYPES_H */
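[ For reference: with <asm-generic/pgtable-nop4d.h> the p4d level is folded at
  compile time -- a p4d is just a pgd in disguise, and the p4d predicates
  become constants the compiler deletes. Roughly (simplified from the
  asm-generic header):

    typedef struct { pgd_t pgd; } p4d_t;
    #define PTRS_PER_P4D 1

    static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
    {
            return (p4d_t *)pgd;                    /* same entry, new type */
    }

    static inline int p4d_none(p4d_t p4d)           { return 0; }
    static inline int p4d_bad(p4d_t p4d)            { return 0; }
    static inline int p4d_present(p4d_t p4d)        { return 1; }

  This is why the extra walk step added throughout this merge costs nothing on
  configurations without a real fifth level. ]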
@@ -298,6 +298,11 @@ static inline pte_t pgd_pte(pgd_t pgd)
        return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
        return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
        return __pte(pud_val(pud));
@@ -401,6 +406,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)

#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)

#define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys) __phys_to_pte_val(phys)

@@ -592,49 +600,50 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)

#define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd) (!pgd_val(pgd))
#define pgd_bad(pgd) (!(pgd_val(pgd) & 2))
#define pgd_present(pgd) (pgd_val(pgd))
#define p4d_none(p4d) (!p4d_val(p4d))
#define p4d_bad(p4d) (!(p4d_val(p4d) & 2))
#define p4d_present(p4d) (p4d_val(p4d))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
        if (in_swapper_pgdir(pgdp)) {
                set_swapper_pgd(pgdp, pgd);
        if (in_swapper_pgdir(p4dp)) {
                set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
                return;
        }

        WRITE_ONCE(*pgdp, pgd);
        WRITE_ONCE(*p4dp, p4d);
        dsb(ishst);
        isb();
}

static inline void pgd_clear(pgd_t *pgdp)
static inline void p4d_clear(p4d_t *p4dp)
{
        set_pgd(pgdp, __pgd(0));
        set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
        return __pgd_to_phys(pgd);
        return __p4d_to_phys(p4d);
}

/* Find an entry in the frst-level page table. */
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr) (pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset_phys(dir, addr) (p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_set_fixmap_offset(p4d, addr) pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap() clear_fixmap(FIX_PUD)

#define pgd_page(pgd) phys_to_page(__pgd_to_phys(pgd))
#define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
@@ -68,41 +68,67 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
#define S2_PUD_SIZE (1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK (~(S2_PUD_SIZE - 1))

static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
#define stage2_pgd_none(kvm, pgd) pgd_none(pgd)
#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd)
#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
#define stage2_pgd_populate(kvm, pgd, p4d) pgd_populate(NULL, pgd, p4d)

static inline p4d_t *stage2_p4d_offset(struct kvm *kvm,
                                       pgd_t *pgd, unsigned long address)
{
        return p4d_offset(pgd, address);
}

static inline void stage2_p4d_free(struct kvm *kvm, p4d_t *p4d)
{
}

static inline bool stage2_p4d_table_empty(struct kvm *kvm, p4d_t *p4dp)
{
        return false;
}

static inline phys_addr_t stage2_p4d_addr_end(struct kvm *kvm,
                                              phys_addr_t addr, phys_addr_t end)
{
        return end;
}

static inline bool stage2_p4d_none(struct kvm *kvm, p4d_t p4d)
{
        if (kvm_stage2_has_pud(kvm))
                return pgd_none(pgd);
                return p4d_none(p4d);
        else
                return 0;
}

static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
static inline void stage2_p4d_clear(struct kvm *kvm, p4d_t *p4dp)
{
        if (kvm_stage2_has_pud(kvm))
                pgd_clear(pgdp);
                p4d_clear(p4dp);
}

static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
static inline bool stage2_p4d_present(struct kvm *kvm, p4d_t p4d)
{
        if (kvm_stage2_has_pud(kvm))
                return pgd_present(pgd);
                return p4d_present(p4d);
        else
                return 1;
}

static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
static inline void stage2_p4d_populate(struct kvm *kvm, p4d_t *p4d, pud_t *pud)
{
        if (kvm_stage2_has_pud(kvm))
                pgd_populate(NULL, pgd, pud);
                p4d_populate(NULL, p4d, pud);
}

static inline pud_t *stage2_pud_offset(struct kvm *kvm,
                                       pgd_t *pgd, unsigned long address)
                                       p4d_t *p4d, unsigned long address)
{
        if (kvm_stage2_has_pud(kvm))
                return pud_offset(pgd, address);
                return pud_offset(p4d, address);
        else
                return (pud_t *)pgd;
                return (pud_t *)p4d;
}

static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
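[ Note the contrast with the compile-time folding earlier: KVM's stage2 tables
  fold a level at run time, depending on the guest's IPA size. The new
  stage2_p4d_* helpers are unconditional pass-throughs, while
  stage2_pud_offset() keeps the kvm_stage2_has_pud() check, so a caller can
  walk both levels without knowing the VM's geometry. A sketch of the
  resulting caller shape (illustration only, not code from the commit):

    static pud_t *stage2_walk_to_pud(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
    {
            p4d_t *p4d = stage2_p4d_offset(kvm, pgd, addr); /* always a pass-through */

            if (stage2_p4d_none(kvm, *p4d))
                    return NULL;
            /* degenerates to (pud_t *)p4d when the VM has no pud level */
            return stage2_pud_offset(kvm, p4d, addr);
    }
]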
@@ -184,6 +184,7 @@ static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
                              pgprot_t pgprot)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
@@ -196,7 +197,15 @@ static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
                pgd_populate(&init_mm, pgdp, pudp);
        }

        pudp = pud_offset(pgdp, dst_addr);
        p4dp = p4d_offset(pgdp, dst_addr);
        if (p4d_none(READ_ONCE(*p4dp))) {
                pudp = (void *)get_safe_page(GFP_ATOMIC);
                if (!pudp)
                        return -ENOMEM;
                p4d_populate(&init_mm, p4dp, pudp);
        }

        pudp = pud_offset(p4dp, dst_addr);
        if (pud_none(READ_ONCE(*pudp))) {
                pmdp = (void *)get_safe_page(GFP_ATOMIC);
                if (!pmdp)
@@ -419,7 +428,7 @@ static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
        return 0;
}

static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
                    unsigned long end)
{
        pud_t *dst_pudp;
@@ -427,15 +436,15 @@ static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
        unsigned long next;
        unsigned long addr = start;

        if (pgd_none(READ_ONCE(*dst_pgdp))) {
        if (p4d_none(READ_ONCE(*dst_p4dp))) {
                dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
                if (!dst_pudp)
                        return -ENOMEM;
                pgd_populate(&init_mm, dst_pgdp, dst_pudp);
                p4d_populate(&init_mm, dst_p4dp, dst_pudp);
        }
        dst_pudp = pud_offset(dst_pgdp, start);
        dst_pudp = pud_offset(dst_p4dp, start);

        src_pudp = pud_offset(src_pgdp, start);
        src_pudp = pud_offset(src_p4dp, start);
        do {
                pud_t pud = READ_ONCE(*src_pudp);

@@ -454,6 +463,27 @@ static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
        return 0;
}

static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
                    unsigned long end)
{
        p4d_t *dst_p4dp;
        p4d_t *src_p4dp;
        unsigned long next;
        unsigned long addr = start;

        dst_p4dp = p4d_offset(dst_pgdp, start);
        src_p4dp = p4d_offset(src_pgdp, start);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none(READ_ONCE(*src_p4dp)))
                        continue;
                if (copy_pud(dst_p4dp, src_p4dp, addr, next))
                        return -ENOMEM;
        } while (dst_p4dp++, src_p4dp++, addr = next, addr != end);

        return 0;
}

static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
                            unsigned long end)
{
@@ -466,7 +496,7 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
                next = pgd_addr_end(addr, end);
                if (pgd_none(READ_ONCE(*src_pgdp)))
                        continue;
                if (copy_pud(dst_pgdp, src_pgdp, addr, next))
                if (copy_p4d(dst_pgdp, src_pgdp, addr, next))
                        return -ENOMEM;
        } while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
@@ -158,13 +158,22 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)

static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
        pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
        p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
        stage2_pgd_clear(kvm, pgd);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        stage2_pud_free(kvm, pud_table);
        stage2_p4d_free(kvm, p4d_table);
        put_page(virt_to_page(pgd));
}

static void clear_stage2_p4d_entry(struct kvm *kvm, p4d_t *p4d, phys_addr_t addr)
{
        pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
        stage2_p4d_clear(kvm, p4d);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        stage2_pud_free(kvm, pud_table);
        put_page(virt_to_page(p4d));
}

static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
        pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
@@ -208,12 +217,20 @@ static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
        dsb(ishst);
}

static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
static inline void kvm_p4d_populate(p4d_t *p4dp, pud_t *pudp)
{
        WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
        WRITE_ONCE(*p4dp, kvm_mk_p4d(pudp));
        dsb(ishst);
}

static inline void kvm_pgd_populate(pgd_t *pgdp, p4d_t *p4dp)
{
#ifndef __PAGETABLE_P4D_FOLDED
        WRITE_ONCE(*pgdp, kvm_mk_pgd(p4dp));
        dsb(ishst);
#endif
}

/*
 * Unmapping vs dcache management:
 *
@@ -293,13 +310,13 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
                clear_stage2_pud_entry(kvm, pud, start_addr);
}

static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
static void unmap_stage2_puds(struct kvm *kvm, p4d_t *p4d,
                              phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t next, start_addr = addr;
        pud_t *pud, *start_pud;

        start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
        start_pud = pud = stage2_pud_offset(kvm, p4d, addr);
        do {
                next = stage2_pud_addr_end(kvm, addr, end);
                if (!stage2_pud_none(kvm, *pud)) {
@@ -317,6 +334,23 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
        } while (pud++, addr = next, addr != end);

        if (stage2_pud_table_empty(kvm, start_pud))
                clear_stage2_p4d_entry(kvm, p4d, start_addr);
}

static void unmap_stage2_p4ds(struct kvm *kvm, pgd_t *pgd,
                              phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t next, start_addr = addr;
        p4d_t *p4d, *start_p4d;

        start_p4d = p4d = stage2_p4d_offset(kvm, pgd, addr);
        do {
                next = stage2_p4d_addr_end(kvm, addr, end);
                if (!stage2_p4d_none(kvm, *p4d))
                        unmap_stage2_puds(kvm, p4d, addr, next);
        } while (p4d++, addr = next, addr != end);

        if (stage2_p4d_table_empty(kvm, start_p4d))
                clear_stage2_pgd_entry(kvm, pgd, start_addr);
}

@@ -351,7 +385,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
                        break;
                next = stage2_pgd_addr_end(kvm, addr, end);
                if (!stage2_pgd_none(kvm, *pgd))
                        unmap_stage2_puds(kvm, pgd, addr, next);
                        unmap_stage2_p4ds(kvm, pgd, addr, next);
                /*
                 * If the range is too large, release the kvm->mmu_lock
                 * to prevent starvation and lockup detector warnings.
@@ -391,13 +425,13 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
        } while (pmd++, addr = next, addr != end);
}

static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
static void stage2_flush_puds(struct kvm *kvm, p4d_t *p4d,
                              phys_addr_t addr, phys_addr_t end)
{
        pud_t *pud;
        phys_addr_t next;

        pud = stage2_pud_offset(kvm, pgd, addr);
        pud = stage2_pud_offset(kvm, p4d, addr);
        do {
                next = stage2_pud_addr_end(kvm, addr, end);
                if (!stage2_pud_none(kvm, *pud)) {
@@ -409,6 +443,20 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
        } while (pud++, addr = next, addr != end);
}

static void stage2_flush_p4ds(struct kvm *kvm, pgd_t *pgd,
                              phys_addr_t addr, phys_addr_t end)
{
        p4d_t *p4d;
        phys_addr_t next;

        p4d = stage2_p4d_offset(kvm, pgd, addr);
        do {
                next = stage2_p4d_addr_end(kvm, addr, end);
                if (!stage2_p4d_none(kvm, *p4d))
                        stage2_flush_puds(kvm, p4d, addr, next);
        } while (p4d++, addr = next, addr != end);
}

static void stage2_flush_memslot(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot)
{
@@ -421,7 +469,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
        do {
                next = stage2_pgd_addr_end(kvm, addr, end);
                if (!stage2_pgd_none(kvm, *pgd))
                        stage2_flush_puds(kvm, pgd, addr, next);
                        stage2_flush_p4ds(kvm, pgd, addr, next);

                if (next != end)
                        cond_resched_lock(&kvm->mmu_lock);
@@ -454,12 +502,21 @@ static void stage2_flush_vm(struct kvm *kvm)

static void clear_hyp_pgd_entry(pgd_t *pgd)
{
        pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
        p4d_t *p4d_table __maybe_unused = p4d_offset(pgd, 0UL);
        pgd_clear(pgd);
        pud_free(NULL, pud_table);
        p4d_free(NULL, p4d_table);
        put_page(virt_to_page(pgd));
}

static void clear_hyp_p4d_entry(p4d_t *p4d)
{
        pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL);
        VM_BUG_ON(p4d_huge(*p4d));
        p4d_clear(p4d);
        pud_free(NULL, pud_table);
        put_page(virt_to_page(p4d));
}

static void clear_hyp_pud_entry(pud_t *pud)
{
        pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
@@ -511,12 +568,12 @@ static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
                clear_hyp_pud_entry(pud);
}

static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
static void unmap_hyp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t next;
        pud_t *pud, *start_pud;

        start_pud = pud = pud_offset(pgd, addr);
        start_pud = pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                /* Hyp doesn't use huge puds */
@@ -525,6 +582,23 @@ static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
        } while (pud++, addr = next, addr != end);

        if (hyp_pud_table_empty(start_pud))
                clear_hyp_p4d_entry(p4d);
}

static void unmap_hyp_p4ds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t next;
        p4d_t *p4d, *start_p4d;

        start_p4d = p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                /* Hyp doesn't use huge p4ds */
                if (!p4d_none(*p4d))
                        unmap_hyp_puds(p4d, addr, next);
        } while (p4d++, addr = next, addr != end);

        if (hyp_p4d_table_empty(start_p4d))
                clear_hyp_pgd_entry(pgd);
}

@@ -548,7 +622,7 @@ static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
        do {
                next = pgd_addr_end(addr, end);
                if (!pgd_none(*pgd))
                        unmap_hyp_puds(pgd, addr, next);
                        unmap_hyp_p4ds(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
}
@@ -658,7 +732,7 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
        return 0;
}

static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
static int create_hyp_pud_mappings(p4d_t *p4d, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
@@ -669,7 +743,7 @@ static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,

        addr = start;
        do {
                pud = pud_offset(pgd, addr);
                pud = pud_offset(p4d, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
@@ -691,12 +765,45 @@ static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
        return 0;
}

static int create_hyp_p4d_mappings(pgd_t *pgd, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
        p4d_t *p4d;
        pud_t *pud;
        unsigned long addr, next;
        int ret;

        addr = start;
        do {
                p4d = p4d_offset(pgd, addr);

                if (p4d_none(*p4d)) {
                        pud = pud_alloc_one(NULL, addr);
                        if (!pud) {
                                kvm_err("Cannot allocate Hyp pud\n");
                                return -ENOMEM;
                        }
                        kvm_p4d_populate(p4d, pud);
                        get_page(virt_to_page(p4d));
                }

                next = p4d_addr_end(addr, end);
                ret = create_hyp_pud_mappings(p4d, addr, next, pfn, prot);
                if (ret)
                        return ret;
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);

        return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
                                 unsigned long start, unsigned long end,
                                 unsigned long pfn, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        p4d_t *p4d;
        unsigned long addr, next;
        int err = 0;

@@ -707,18 +814,18 @@ static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
                pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);

                if (pgd_none(*pgd)) {
                        pud = pud_alloc_one(NULL, addr);
                        if (!pud) {
                                kvm_err("Cannot allocate Hyp pud\n");
                        p4d = p4d_alloc_one(NULL, addr);
                        if (!p4d) {
                                kvm_err("Cannot allocate Hyp p4d\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        kvm_pgd_populate(pgd, pud);
                        kvm_pgd_populate(pgd, p4d);
                        get_page(virt_to_page(pgd));
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
                err = create_hyp_p4d_mappings(pgd, addr, next, pfn, prot);
                if (err)
                        goto out;
                pfn += (next - addr) >> PAGE_SHIFT;
@@ -1015,22 +1122,40 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
        free_pages_exact(pgd, stage2_pgd_size(kvm));
}

static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
static p4d_t *stage2_get_p4d(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                             phys_addr_t addr)
{
        pgd_t *pgd;
        pud_t *pud;
        p4d_t *p4d;

        pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
        if (stage2_pgd_none(kvm, *pgd)) {
                if (!cache)
                        return NULL;
                pud = mmu_memory_cache_alloc(cache);
                stage2_pgd_populate(kvm, pgd, pud);
                p4d = mmu_memory_cache_alloc(cache);
                stage2_pgd_populate(kvm, pgd, p4d);
                get_page(virt_to_page(pgd));
        }

        return stage2_pud_offset(kvm, pgd, addr);
        return stage2_p4d_offset(kvm, pgd, addr);
}

static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                             phys_addr_t addr)
{
        p4d_t *p4d;
        pud_t *pud;

        p4d = stage2_get_p4d(kvm, cache, addr);
        if (stage2_p4d_none(kvm, *p4d)) {
                if (!cache)
                        return NULL;
                pud = mmu_memory_cache_alloc(cache);
                stage2_p4d_populate(kvm, p4d, pud);
                get_page(virt_to_page(p4d));
        }

        return stage2_pud_offset(kvm, p4d, addr);
}

static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1423,18 +1548,18 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
}

/**
 * stage2_wp_puds - write protect PGD range
 * stage2_wp_puds - write protect P4D range
 * @pgd: pointer to pgd entry
 * @addr: range start address
 * @end: range end address
 */
static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
static void stage2_wp_puds(struct kvm *kvm, p4d_t *p4d,
                           phys_addr_t addr, phys_addr_t end)
{
        pud_t *pud;
        phys_addr_t next;

        pud = stage2_pud_offset(kvm, pgd, addr);
        pud = stage2_pud_offset(kvm, p4d, addr);
        do {
                next = stage2_pud_addr_end(kvm, addr, end);
                if (!stage2_pud_none(kvm, *pud)) {
@@ -1448,6 +1573,26 @@ static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
        } while (pud++, addr = next, addr != end);
}

/**
 * stage2_wp_p4ds - write protect PGD range
 * @pgd: pointer to pgd entry
 * @addr: range start address
 * @end: range end address
 */
static void stage2_wp_p4ds(struct kvm *kvm, pgd_t *pgd,
                           phys_addr_t addr, phys_addr_t end)
{
        p4d_t *p4d;
        phys_addr_t next;

        p4d = stage2_p4d_offset(kvm, pgd, addr);
        do {
                next = stage2_p4d_addr_end(kvm, addr, end);
                if (!stage2_p4d_none(kvm, *p4d))
                        stage2_wp_puds(kvm, p4d, addr, next);
        } while (p4d++, addr = next, addr != end);
}

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @kvm: The KVM pointer
@@ -1475,7 +1620,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
                        break;
                next = stage2_pgd_addr_end(kvm, addr, end);
                if (stage2_pgd_present(kvm, *pgd))
                        stage2_wp_puds(kvm, pgd, addr, next);
                        stage2_wp_p4ds(kvm, pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
}
@@ -145,6 +145,7 @@ static void show_pte(unsigned long addr)
        pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));

        do {
                p4d_t *p4dp, p4d;
                pud_t *pudp, pud;
                pmd_t *pmdp, pmd;
                pte_t *ptep, pte;
@@ -152,7 +153,13 @@ static void show_pte(unsigned long addr)
                if (pgd_none(pgd) || pgd_bad(pgd))
                        break;

                pudp = pud_offset(pgdp, addr);
                p4dp = p4d_offset(pgdp, addr);
                p4d = READ_ONCE(*p4dp);
                pr_cont(", p4d=%016llx", p4d_val(p4d));
                if (p4d_none(p4d) || p4d_bad(p4d))
                        break;

                pudp = pud_offset(p4dp, addr);
                pud = READ_ONCE(*pudp);
                pr_cont(", pud=%016llx", pud_val(pud));
                if (pud_none(pud) || pud_bad(pud))
@@ -67,11 +67,13 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, size_t *pgsize)
{
        pgd_t *pgdp = pgd_offset(mm, addr);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        *pgsize = PAGE_SIZE;
        pudp = pud_offset(pgdp, addr);
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_offset(p4dp, addr);
        pmdp = pmd_offset(pudp, addr);
        if ((pte_t *)pmdp == ptep) {
                *pgsize = PMD_SIZE;
@@ -217,12 +219,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep = NULL;

        pgdp = pgd_offset(mm, addr);
        pudp = pud_alloc(mm, pgdp, addr);
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_alloc(mm, p4dp, addr);
        if (!pudp)
                return NULL;

@@ -261,6 +265,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;

@@ -268,7 +273,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
        if (!pgd_present(READ_ONCE(*pgdp)))
                return NULL;

        pudp = pud_offset(pgdp, addr);
        p4dp = p4d_offset(pgdp, addr);
        if (!p4d_present(READ_ONCE(*p4dp)))
                return NULL;

        pudp = pud_offset(p4dp, addr);
        pud = READ_ONCE(*pudp);
        if (sz != PUD_SIZE && pud_none(pud))
                return NULL;
@@ -84,17 +84,17 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
        return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
                                      bool early)
{
        if (pgd_none(READ_ONCE(*pgdp))) {
        if (p4d_none(READ_ONCE(*p4dp))) {
                phys_addr_t pud_phys = early ?
                                __pa_symbol(kasan_early_shadow_pud)
                                        : kasan_alloc_zeroed_page(node);
                __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
                __p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
        }

        return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
        return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
@@ -126,11 +126,11 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
        } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
        pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

        do {
                next = pud_addr_end(addr, end);
@@ -138,6 +138,18 @@ static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
        } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        p4d_t *p4dp = p4d_offset(pgdp, addr);

        do {
                next = p4d_addr_end(addr, end);
                kasan_pud_populate(p4dp, addr, next, node, early);
        } while (p4dp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      int node, bool early)
{
@@ -147,7 +159,7 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
        pgdp = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_pud_populate(pgdp, addr, next, node, early);
                kasan_p4d_populate(pgdp, addr, next, node, early);
        } while (pgdp++, addr = next, addr != end);
}
|
@@ -290,18 +290,19 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
 {
 	unsigned long next;
 	pud_t *pudp;
-	pgd_t pgd = READ_ONCE(*pgdp);
+	p4d_t *p4dp = p4d_offset(pgdp, addr);
+	p4d_t p4d = READ_ONCE(*p4dp);

-	if (pgd_none(pgd)) {
+	if (p4d_none(p4d)) {
 		phys_addr_t pud_phys;
 		BUG_ON(!pgtable_alloc);
 		pud_phys = pgtable_alloc(PUD_SHIFT);
-		__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
-		pgd = READ_ONCE(*pgdp);
+		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
+		p4d = READ_ONCE(*p4dp);
 	}
-	BUG_ON(pgd_bad(pgd));
+	BUG_ON(p4d_bad(p4d));

-	pudp = pud_set_fixmap_offset(pgdp, addr);
+	pudp = pud_set_fixmap_offset(p4dp, addr);
 	do {
 		pud_t old_pud = READ_ONCE(*pudp);

@@ -672,6 +673,7 @@ static void __init map_kernel(pgd_t *pgdp)
 			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
 	} else if (CONFIG_PGTABLE_LEVELS > 3) {
 		pgd_t *bm_pgdp;
+		p4d_t *bm_p4dp;
 		pud_t *bm_pudp;
 		/*
 		 * The fixmap shares its top level pgd entry with the kernel

@@ -681,7 +683,8 @@ static void __init map_kernel(pgd_t *pgdp)
 		 */
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
 		bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
-		bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START);
+		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
+		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
 		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
 		pud_clear_fixmap();
 	} else {

@@ -715,6 +718,7 @@ void __init paging_init(void)
 int kern_addr_valid(unsigned long addr)
 {
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp, pud;
 	pmd_t *pmdp, pmd;
 	pte_t *ptep, pte;

@@ -726,7 +730,11 @@ int kern_addr_valid(unsigned long addr)
 	if (pgd_none(READ_ONCE(*pgdp)))
 		return 0;

-	pudp = pud_offset(pgdp, addr);
+	p4dp = p4d_offset(pgdp, addr);
+	if (p4d_none(READ_ONCE(*p4dp)))
+		return 0;
+
+	pudp = pud_offset(p4dp, addr);
 	pud = READ_ONCE(*pudp);
 	if (pud_none(pud))
 		return 0;

@@ -1069,6 +1077,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	unsigned long addr = start;
 	unsigned long next;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;

@@ -1079,7 +1088,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		if (!pgdp)
 			return -ENOMEM;

-		pudp = vmemmap_pud_populate(pgdp, addr, node);
+		p4dp = vmemmap_p4d_populate(pgdp, addr, node);
+		if (!p4dp)
+			return -ENOMEM;
+
+		pudp = vmemmap_pud_populate(p4dp, addr, node);
 		if (!pudp)
 			return -ENOMEM;

@@ -1114,11 +1127,12 @@ void vmemmap_free(unsigned long start, unsigned long end,
 static inline pud_t * fixmap_pud(unsigned long addr)
 {
 	pgd_t *pgdp = pgd_offset_k(addr);
-	pgd_t pgd = READ_ONCE(*pgdp);
+	p4d_t *p4dp = p4d_offset(pgdp, addr);
+	p4d_t p4d = READ_ONCE(*p4dp);

-	BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
+	BUG_ON(p4d_none(p4d) || p4d_bad(p4d));

-	return pud_offset_kimg(pgdp, addr);
+	return pud_offset_kimg(p4dp, addr);
 }

 static inline pmd_t * fixmap_pmd(unsigned long addr)

@@ -1144,25 +1158,27 @@ static inline pte_t * fixmap_pte(unsigned long addr)
  */
 void __init early_fixmap_init(void)
 {
-	pgd_t *pgdp, pgd;
+	pgd_t *pgdp;
+	p4d_t *p4dp, p4d;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	unsigned long addr = FIXADDR_START;

 	pgdp = pgd_offset_k(addr);
-	pgd = READ_ONCE(*pgdp);
+	p4dp = p4d_offset(pgdp, addr);
+	p4d = READ_ONCE(*p4dp);
 	if (CONFIG_PGTABLE_LEVELS > 3 &&
-	    !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
+	    !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
 		/*
 		 * We only end up here if the kernel mapping and the fixmap
 		 * share the top level pgd entry, which should only happen on
 		 * 16k/4 levels configurations.
 		 */
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-		pudp = pud_offset_kimg(pgdp, addr);
+		pudp = pud_offset_kimg(p4dp, addr);
 	} else {
-		if (pgd_none(pgd))
-			__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
+		if (p4d_none(p4d))
+			__p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
 		pudp = fixmap_pud(addr);
 	}
 	if (pud_none(READ_ONCE(*pudp)))
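Every read-side hunk in this series makes the same transformation: a p4d_offset() step is inserted between the pgd and pud lookups, and the pud lookup then takes the p4d entry instead of the pgd. On architectures where the p4d level is folded, p4d_offset() simply hands back its pgd argument, so the extra step costs nothing. A minimal sketch of the resulting five-level walk (the helper names are the generic kernel ones; the wrapping function itself is illustrative, not taken from the patch):

	/* Illustrative only: walk a kernel virtual address down to its pte,
	 * returning NULL if any intermediate level is not populated.
	 */
	static pte_t *walk_to_pte(unsigned long addr)
	{
		pgd_t *pgdp = pgd_offset_k(addr);
		p4d_t *p4dp;
		pud_t *pudp;
		pmd_t *pmdp;

		if (pgd_none(READ_ONCE(*pgdp)))
			return NULL;

		p4dp = p4d_offset(pgdp, addr);	/* the newly inserted level */
		if (p4d_none(READ_ONCE(*p4dp)))
			return NULL;

		pudp = pud_offset(p4dp, addr);	/* was pud_offset(pgdp, addr) */
		if (pud_none(READ_ONCE(*pudp)))
			return NULL;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp)))
			return NULL;

		return pte_offset_kernel(pmdp, addr);
	}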
@@ -198,6 +198,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 bool kernel_page_present(struct page *page)
 {
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp, pud;
 	pmd_t *pmdp, pmd;
 	pte_t *ptep;

@@ -210,7 +211,11 @@ bool kernel_page_present(struct page *page)
 	if (pgd_none(READ_ONCE(*pgdp)))
 		return false;

-	pudp = pud_offset(pgdp, addr);
+	p4dp = p4d_offset(pgdp, addr);
+	if (p4d_none(READ_ONCE(*p4dp)))
+		return false;
+
+	pudp = pud_offset(p4dp, addr);
 	pud = READ_ONCE(*pudp);
 	if (pud_none(pud))
 		return false;
@@ -30,22 +30,14 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))

-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
+#define ARCH_HAS_KMAP_FLUSH_TLB
+extern void kmap_flush_tlb(unsigned long addr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
-extern struct page *kmap_atomic_to_page(void *ptr);

 #define flush_cache_kmaps() do {} while (0)

 extern void kmap_init(void);

+#define kmap_prot PAGE_KERNEL
+
 #endif /* __KERNEL__ */

 #endif /* __ASM_CSKY_HIGHMEM_H */
@@ -13,59 +13,39 @@ static pte_t *kmap_pte;

 unsigned long highstart_pfn, highend_pfn;

-void *kmap(struct page *page)
+void kmap_flush_tlb(unsigned long addr)
 {
-	void *addr;
-
-	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
-	addr = kmap_high(page);
-	flush_tlb_one((unsigned long)addr);
-
-	return addr;
+	flush_tlb_one(addr);
 }
+EXPORT_SYMBOL(kmap_flush_tlb);

-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	int idx, type;

-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte - idx)));
 #endif
-	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
+	set_pte(kmap_pte-idx, mk_pte(page, prot));
 	flush_tlb_one((unsigned long)vaddr);

 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);

-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	int idx;

 	if (vaddr < FIXADDR_START)
-		goto out;
+		return;

 #ifdef CONFIG_DEBUG_HIGHMEM
 	idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();

@@ -78,11 +58,8 @@ void __kunmap_atomic(void *kvaddr)
 	(void) idx; /* to kill a warning */
 #endif
 	kmap_atomic_idx_pop();
-out:
-	pagefault_enable();
-	preempt_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);

 /*
  * This is the same as kmap_atomic() but can map memory that doesn't

@@ -104,19 +81,6 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	return (void *) vaddr;
 }

-struct page *kmap_atomic_to_page(void *ptr)
-{
-	unsigned long idx, vaddr = (unsigned long)ptr;
-	pte_t *pte;
-
-	if (vaddr < FIXADDR_START)
-		return virt_to_page(ptr);
-
-	idx = virt_to_fix(vaddr);
-	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-	return pte_page(*pte);
-}
-
 static void __init kmap_pages_init(void)
 {
 	unsigned long vaddr;
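This hunk and the matching microblaze, mips, nds32, parisc and powerpc hunks further down are all one refactor: the arch-independent bookkeeping (preempt/pagefault disabling and the lowmem fast path) moves into a common wrapper, and each architecture keeps only the code that actually installs or tears down the fixmap mapping, renamed kmap_atomic_high_prot()/kunmap_atomic_high(). Roughly, the generic side these arch hooks plug into has the following shape (a hedged sketch, not a verbatim copy of the patch):

	/* Sketch: common entry points; only real highmem pages reach the
	 * per-architecture kmap_atomic_high_prot()/kunmap_atomic_high().
	 */
	static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
	{
		preempt_disable();
		pagefault_disable();
		if (!PageHighMem(page))
			return page_address(page);
		return kmap_atomic_high_prot(page, prot);
	}

	static inline void kunmap_atomic(void *addr)
	{
		kunmap_atomic_high(addr);	/* arch hook; returns early for lowmem */
		pagefault_enable();
		preempt_enable();
	}

This is why each arch's kunmap path loses its pagefault_enable()/preempt_enable() calls and simply returns early for addresses below the fixmap area.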
@@ -1,7 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _H8300_PGTABLE_H
 #define _H8300_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm-generic/pgtable.h>
 extern void paging_init(void);
@@ -16,7 +16,7 @@
 #include <asm-generic/fixmap.h>

 #define kmap_get_fixmap_pte(vaddr) \
-	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
-			(vaddr)), (vaddr)), (vaddr))
+	pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
+			(vaddr)), (vaddr)), (vaddr)), (vaddr))

 #endif
@@ -12,7 +12,6 @@
  * Page table definitions for Qualcomm Hexagon processor.
  */
 #include <asm/page.h>
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>

 /* A handy thing to have if one has the RAM. Declared in head.S */
@@ -36,9 +36,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)

 #if CONFIG_PGTABLE_LEVELS == 4
 static inline void
-pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud)
 {
-	pgd_val(*pgd_entry) = __pa(pud);
+	p4d_val(*p4d_entry) = __pa(pud);
 }

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -283,12 +283,12 @@ extern unsigned long VMALLOC_END;
 #define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))

 #if CONFIG_PGTABLE_LEVELS == 4
-#define pgd_none(pgd)			(!pgd_val(pgd))
-#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
-#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
-#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
-#define pgd_page_vaddr(pgd)		((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
-#define pgd_page(pgd)			virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
+#define p4d_none(p4d)			(!p4d_val(p4d))
+#define p4d_bad(p4d)			(!ia64_phys_addr_valid(p4d_val(p4d)))
+#define p4d_present(p4d)		(p4d_val(p4d) != 0UL)
+#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)
+#define p4d_page_vaddr(p4d)		((unsigned long) __va(p4d_val(p4d) & _PFN_MASK))
+#define p4d_page(p4d)			virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
 #endif

 /*

@@ -386,7 +386,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
 #if CONFIG_PGTABLE_LEVELS == 4
 /* Find an entry in the second-level page table.. */
 #define pud_offset(dir,addr) \
-	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+	((pud_t *) p4d_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
 #endif

 /* Find an entry in the third-level page table.. */

@@ -580,10 +580,9 @@ extern struct page *zero_page_memmap_ptr;

 #if CONFIG_PGTABLE_LEVELS == 3
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #endif
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
 #include <asm-generic/pgtable.h>

 #endif /* _ASM_IA64_PGTABLE_H */
@@ -29,6 +29,7 @@ static int
 mapped_kernel_page_is_present (unsigned long address)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *ptep, pte;

@@ -37,7 +38,11 @@ mapped_kernel_page_is_present (unsigned long address)
 	if (pgd_none(*pgd) || pgd_bad(*pgd))
 		return 0;

-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none(*p4d) || p4d_bad(*p4d))
+		return 0;
+
+	pud = pud_offset(p4d, address);
 	if (pud_none(*pud) || pud_bad(*pud))
 		return 0;
@@ -30,12 +30,14 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;

 	pgd = pgd_offset(mm, taddr);
-	pud = pud_alloc(mm, pgd, taddr);
+	p4d = p4d_offset(pgd, taddr);
+	pud = pud_alloc(mm, p4d, taddr);
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, taddr);
 		if (pmd)

@@ -49,19 +51,23 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;

 	pgd = pgd_offset(mm, taddr);
 	if (pgd_present(*pgd)) {
-		pud = pud_offset(pgd, taddr);
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_present(*p4d)) {
+			pud = pud_offset(p4d, taddr);
 			if (pud_present(*pud)) {
 				pmd = pmd_offset(pud, taddr);
 				if (pmd_present(*pmd))
 					pte = pte_offset_map(pmd, taddr);
 			}
+		}
 	}

 	return pte;
 }
@@ -208,6 +208,7 @@ static struct page * __init
 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;

@@ -215,7 +216,10 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

 	{
-		pud = pud_alloc(&init_mm, pgd, address);
+		p4d = p4d_alloc(&init_mm, pgd, address);
+		if (!p4d)
+			goto out;
+		pud = pud_alloc(&init_mm, p4d, address);
 		if (!pud)
 			goto out;
 		pmd = pmd_alloc(&init_mm, pud, address);

@@ -382,6 +386,7 @@ int vmemmap_find_next_valid_pfn(int node, int i)

 	do {
 		pgd_t *pgd;
+		p4d_t *p4d;
 		pud_t *pud;
 		pmd_t *pmd;
 		pte_t *pte;

@@ -392,7 +397,13 @@ int vmemmap_find_next_valid_pfn(int node, int i)
 			continue;
 		}

-		pud = pud_offset(pgd, end_address);
+		p4d = p4d_offset(pgd, end_address);
+		if (p4d_none(*p4d)) {
+			end_address += P4D_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(p4d, end_address);
 		if (pud_none(*pud)) {
 			end_address += PUD_SIZE;
 			continue;

@@ -430,6 +441,7 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
 	struct page *map_start, *map_end;
 	int node;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;

@@ -444,12 +456,20 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
 	for (address = start_page; address < end_page; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
 		if (pgd_none(*pgd)) {
+			p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!p4d)
+				goto err_alloc;
+			pgd_populate(&init_mm, pgd, p4d);
+		}
+		p4d = p4d_offset(pgd, address);
+
+		if (p4d_none(*p4d)) {
 			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
 			if (!pud)
 				goto err_alloc;
-			pgd_populate(&init_mm, pgd, pud);
+			p4d_populate(&init_mm, p4d, pud);
 		}
-		pud = pud_offset(pgd, address);
+		pud = pud_offset(p4d, address);

 		if (pud_none(*pud)) {
 			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
@@ -26,7 +26,6 @@
 #include <asm/fixmap.h>

 extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
 extern pte_t *pkmap_page_table;

 /*

@@ -51,32 +50,6 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt)  ((virt - PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))

-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-extern void __kunmap_atomic(void *kvaddr);
-
-static inline void *kmap(struct page *page)
-{
-	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
-	return kmap_atomic_prot(page, kmap_prot);
-}
-
 #define flush_cache_kmaps()	{ flush_icache(); flush_dcache(); }

 #endif /* __KERNEL__ */
@@ -32,18 +32,12 @@
  */
 #include <asm/tlbflush.h>

-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
-
 	unsigned long vaddr;
 	int idx, type;

-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

@@ -55,19 +49,16 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)

 	return (void *) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);

-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	int type;
 	unsigned int idx;

-	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
-		pagefault_enable();
-		preempt_enable();
+	if (vaddr < __fix_to_virt(FIX_KMAP_END))
 		return;
-	}

 	type = kmap_atomic_idx();

@@ -83,7 +74,5 @@ void __kunmap_atomic(void *kvaddr)
 	local_flush_tlb_page(NULL, vaddr);

 	kmap_atomic_idx_pop();
-	pagefault_enable();
-	preempt_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
@@ -49,8 +49,6 @@ unsigned long lowmem_size;
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);

 static inline pte_t *virt_to_kpte(unsigned long vaddr)
 {

@@ -68,7 +66,6 @@ static void __init highmem_init(void)
 	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

 	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-	kmap_prot = PAGE_KERNEL;
 }

 static void highmem_setup(void)
@@ -46,21 +46,14 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))

-extern void * kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
+#define ARCH_HAS_KMAP_FLUSH_TLB
+extern void kmap_flush_tlb(unsigned long addr);
 extern void *kmap_atomic_pfn(unsigned long pfn);

 #define flush_cache_kmaps()	BUG_ON(cpu_has_dc_aliases)

 extern void kmap_init(void);

+#define kmap_prot PAGE_KERNEL
+
 #endif /* __KERNEL__ */

 #endif /* _ASM_HIGHMEM_H */
@@ -14,9 +14,9 @@
 #include <linux/sched.h>
 #include <linux/syscalls.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>

 #include <asm/cacheflush.h>
-#include <asm/highmem.h>
 #include <asm/processor.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>

@@ -103,7 +103,7 @@ void __flush_dcache_page(struct page *page)
 		flush_data_cache_page(addr);

 	if (PageHighMem(page))
-		__kunmap_atomic((void *)addr);
+		kunmap_atomic((void *)addr);
 }

 EXPORT_SYMBOL(__flush_dcache_page);

@@ -146,7 +146,7 @@ void __update_cache(unsigned long address, pte_t pte)
 		flush_data_cache_page(addr);

 	if (PageHighMem(page))
-		__kunmap_atomic((void *)addr);
+		kunmap_atomic((void *)addr);

 	ClearPageDcacheDirty(page);
 }
@@ -12,71 +12,37 @@ static pte_t *kmap_pte;

 unsigned long highstart_pfn, highend_pfn;

-void *kmap(struct page *page)
+void kmap_flush_tlb(unsigned long addr)
 {
-	void *addr;
-
-	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
-	addr = kmap_high(page);
-	flush_tlb_one((unsigned long)addr);
-
-	return addr;
+	flush_tlb_one(addr);
 }
-EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kmap_flush_tlb);

-void kunmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	int idx, type;

-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte - idx)));
 #endif
-	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
+	set_pte(kmap_pte-idx, mk_pte(page, prot));
 	local_flush_tlb_one((unsigned long)vaddr);

 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);

-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	int type __maybe_unused;

-	if (vaddr < FIXADDR_START) { // FIXME
-		pagefault_enable();
-		preempt_enable();
+	if (vaddr < FIXADDR_START)
 		return;
-	}

 	type = kmap_atomic_idx();
 #ifdef CONFIG_DEBUG_HIGHMEM

@@ -94,10 +60,8 @@ void __kunmap_atomic(void *kvaddr)
 	}
 #endif
 	kmap_atomic_idx_pop();
-	pagefault_enable();
-	preempt_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);

 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
@@ -32,7 +32,6 @@
 #define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
 #define PKMAP_NR(virt)		(((virt) - (PKMAP_BASE)) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
-#define kmap_prot		PAGE_KERNEL

 static inline void flush_cache_kmaps(void)
 {

@@ -44,9 +43,6 @@ extern unsigned long highstart_pfn, highend_pfn;

 extern pte_t *pkmap_page_table;

-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
 extern void kmap_init(void);

 /*

@@ -54,12 +50,7 @@ extern void kmap_init(void);
  * when CONFIG_HIGHMEM is not set.
  */
 #ifdef CONFIG_HIGHMEM
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
-extern struct page *kmap_atomic_to_page(void *ptr);
 #endif

 #endif
@@ -10,45 +10,18 @@
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>

-void *kmap(struct page *page)
-{
-	unsigned long vaddr;
-	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
-	vaddr = (unsigned long)kmap_high(page);
-	return (void *)vaddr;
-}
-
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-
-EXPORT_SYMBOL(kunmap);
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned int idx;
 	unsigned long vaddr, pte;
 	int type;
 	pte_t *ptep;

-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();

 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	pte = (page_to_pfn(page) << PAGE_SHIFT) | (PAGE_KERNEL);
+	pte = (page_to_pfn(page) << PAGE_SHIFT) | prot;
 	ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
 	set_pte(ptep, pte);

@@ -58,10 +31,9 @@ void *kmap_atomic(struct page *page)
 	__nds32__isb();
 	return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_high_prot);

-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
 {
 	if (kvaddr >= (void *)FIXADDR_START) {
 		unsigned long vaddr = (unsigned long)kvaddr;

@@ -72,8 +44,5 @@ void __kunmap_atomic(void *kvaddr)
 		ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
 		set_pte(ptep, 0);
 	}
-	pagefault_enable();
-	preempt_enable();
 }
-
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
@@ -22,7 +22,6 @@
 #include <asm/tlbflush.h>

 #include <asm/pgtable-bits.h>
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>

 #define FIRST_USER_ADDRESS	0UL

@@ -100,7 +99,7 @@ extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
  */
 static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
 {
-	pmdptr->pud.pgd.pgd = pmdval.pud.pgd.pgd;
+	*pmdptr = pmdval;
 }

 /* to find an entry in a page-table-directory */
@@ -242,6 +242,7 @@ vmalloc_fault:
 		 */
 		int offset = pgd_index(address);
 		pgd_t *pgd, *pgd_k;
+		p4d_t *p4d, *p4d_k;
 		pud_t *pud, *pud_k;
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;

@@ -253,8 +254,12 @@ vmalloc_fault:
 			goto no_context;
 		set_pgd(pgd, *pgd_k);

-		pud = pud_offset(pgd, address);
-		pud_k = pud_offset(pgd_k, address);
+		p4d = p4d_offset(pgd, address);
+		p4d_k = p4d_offset(pgd_k, address);
+		if (!p4d_present(*p4d_k))
+			goto no_context;
+		pud = pud_offset(p4d, address);
+		pud_k = pud_offset(p4d_k, address);
 		if (!pud_present(*pud_k))
 			goto no_context;
 		pmd = pmd_offset(pud, address);
@@ -86,11 +86,15 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	if (address >= end)
 		BUG();
 	do {
+		p4d_t *p4d;
 		pud_t *pud;
 		pmd_t *pmd;

 		error = -ENOMEM;
-		pud = pud_alloc(&init_mm, dir, address);
+		p4d = p4d_alloc(&init_mm, dir, address);
+		if (!p4d)
+			break;
+		pud = pud_alloc(&init_mm, p4d, address);
 		if (!pud)
 			break;
 		pmd = pmd_alloc(&init_mm, pud, address);
@@ -21,7 +21,6 @@
 #ifndef __ASM_OPENRISC_PGTABLE_H
 #define __ASM_OPENRISC_PGTABLE_H

-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>

 #ifndef __ASSEMBLY__
@@ -295,6 +295,7 @@ vmalloc_fault:

 		int offset = pgd_index(address);
 		pgd_t *pgd, *pgd_k;
+		p4d_t *p4d, *p4d_k;
 		pud_t *pud, *pud_k;
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;

@@ -321,8 +322,13 @@ vmalloc_fault:
 		 * it exists.
 		 */

-		pud = pud_offset(pgd, address);
-		pud_k = pud_offset(pgd_k, address);
+		p4d = p4d_offset(pgd, address);
+		p4d_k = p4d_offset(pgd_k, address);
+		if (!p4d_present(*p4d_k))
+			goto no_context;
+
+		pud = pud_offset(p4d, address);
+		pud_k = pud_offset(p4d_k, address);
 		if (!pud_present(*pud_k))
 			goto no_context;
@@ -68,6 +68,7 @@ static void __init map_ram(void)
 	unsigned long v, p, e;
 	pgprot_t prot;
 	pgd_t *pge;
+	p4d_t *p4e;
 	pud_t *pue;
 	pmd_t *pme;
 	pte_t *pte;

@@ -87,7 +88,8 @@ static void __init map_ram(void)

 	while (p < e) {
 		int j;
-		pue = pud_offset(pge, v);
+		p4e = p4d_offset(pge, v);
+		pue = pud_offset(p4e, v);
 		pme = pmd_offset(pue, v);

 		if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
@@ -100,37 +100,11 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
 	}
 }

-#include <asm/kmap_types.h>
-
-#define ARCH_HAS_KMAP
-
-static inline void *kmap(struct page *page)
-{
-	might_sleep();
-	return page_address(page);
-}
-
-static inline void kunmap(struct page *page)
-{
-	flush_kernel_dcache_page_addr(page_address(page));
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
-	preempt_disable();
-	pagefault_disable();
-	return page_address(page);
-}
-
-static inline void __kunmap_atomic(void *addr)
+#define ARCH_HAS_FLUSH_ON_KUNMAP
+static inline void kunmap_flush_on_unmap(void *addr)
 {
 	flush_kernel_dcache_page_addr(addr);
-	pagefault_enable();
-	preempt_enable();
 }

-#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
-#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
-
 #endif /* _PARISC_CACHEFLUSH_H */
@@ -116,6 +116,7 @@ config PPC
 	#
 	select ARCH_32BIT_OFF_T if PPC32
 	select ARCH_HAS_DEBUG_VIRTUAL
+	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
@@ -2,7 +2,6 @@
 #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
 #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>

 #include <asm/book3s/32/hash.h>
@@ -134,9 +134,9 @@ static inline int get_region_id(unsigned long ea)

 #define	hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
 #define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
-static inline int hash__pgd_bad(pgd_t pgd)
+static inline int hash__p4d_bad(p4d_t p4d)
 {
-	return (pgd_val(pgd) == 0);
+	return (p4d_val(p4d) == 0);
 }
 #ifdef CONFIG_STRICT_KERNEL_RWX
 extern void hash__mark_rodata_ro(void);
@@ -85,9 +85,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 }

-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
 {
-	*pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
+	*pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
 }

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -2,7 +2,7 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
 #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_

-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>

 #ifndef __ASSEMBLY__
 #include <linux/mmdebug.h>

@@ -251,7 +251,7 @@ extern unsigned long __pmd_frag_size_shift;
 /* Bits to mask out from a PUD to get to the PMD page */
 #define PUD_MASKED_BITS		0xc0000000000000ffUL
 /* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS		0xc0000000000000ffUL
+#define P4D_MASKED_BITS		0xc0000000000000ffUL

 /*
  * Used as an indicator for rcu callback functions

@@ -949,54 +949,60 @@ static inline bool pud_access_permitted(pud_t pud, bool write)
 	return pte_access_permitted(pud_pte(pud), write);
 }

-#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
+#define __p4d_raw(x)	((p4d_t) { __pgd_raw(x) })
+static inline __be64 p4d_raw(p4d_t x)
+{
+	return pgd_raw(x.pgd);
+}
+
+#define p4d_write(p4d)		pte_write(p4d_pte(p4d))

-static inline void pgd_clear(pgd_t *pgdp)
+static inline void p4d_clear(p4d_t *p4dp)
 {
-	*pgdp = __pgd(0);
+	*p4dp = __p4d(0);
 }

-static inline int pgd_none(pgd_t pgd)
+static inline int p4d_none(p4d_t p4d)
 {
-	return !pgd_raw(pgd);
+	return !p4d_raw(p4d);
 }

-static inline int pgd_present(pgd_t pgd)
+static inline int p4d_present(p4d_t p4d)
 {
-	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
+	return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT));
 }

-static inline pte_t pgd_pte(pgd_t pgd)
+static inline pte_t p4d_pte(p4d_t p4d)
 {
-	return __pte_raw(pgd_raw(pgd));
+	return __pte_raw(p4d_raw(p4d));
 }

-static inline pgd_t pte_pgd(pte_t pte)
+static inline p4d_t pte_p4d(pte_t pte)
 {
-	return __pgd_raw(pte_raw(pte));
+	return __p4d_raw(pte_raw(pte));
 }

-static inline int pgd_bad(pgd_t pgd)
+static inline int p4d_bad(p4d_t p4d)
 {
 	if (radix_enabled())
-		return radix__pgd_bad(pgd);
-	return hash__pgd_bad(pgd);
+		return radix__p4d_bad(p4d);
+	return hash__p4d_bad(p4d);
 }

-#define pgd_access_permitted pgd_access_permitted
-static inline bool pgd_access_permitted(pgd_t pgd, bool write)
+#define p4d_access_permitted p4d_access_permitted
+static inline bool p4d_access_permitted(p4d_t p4d, bool write)
 {
-	return pte_access_permitted(pgd_pte(pgd), write);
+	return pte_access_permitted(p4d_pte(p4d), write);
 }

-extern struct page *pgd_page(pgd_t pgd);
+extern struct page *p4d_page(p4d_t p4d);

 /* Pointers in the page table tree are physical addresses */
 #define __pgtable_ptr_val(ptr)	__pa(ptr)

 #define pmd_page_vaddr(pmd)	__va(pmd_val(pmd) & ~PMD_MASKED_BITS)
 #define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
-#define pgd_page_vaddr(pgd)	__va(pgd_val(pgd) & ~PGD_MASKED_BITS)
+#define p4d_page_vaddr(p4d)	__va(p4d_val(p4d) & ~P4D_MASKED_BITS)

 #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
 #define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))

@@ -1010,8 +1016,8 @@ extern struct page *pgd_page(pgd_t pgd);

 #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

-#define pud_offset(pgdp, addr)	\
-	(((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
+#define pud_offset(p4dp, addr)	\
+	(((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr))
 #define pmd_offset(pudp,addr) \
 	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
 #define pte_offset_kernel(dir,addr) \

@@ -1366,11 +1372,11 @@ static inline bool pud_is_leaf(pud_t pud)
 	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
 }

-#define pgd_is_leaf pgd_is_leaf
-#define pgd_leaf pgd_is_leaf
-static inline bool pgd_is_leaf(pgd_t pgd)
+#define p4d_is_leaf p4d_is_leaf
+#define p4d_leaf p4d_is_leaf
+static inline bool p4d_is_leaf(p4d_t p4d)
 {
-	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
+	return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE));
 }

 #endif /* __ASSEMBLY__ */
@@ -30,7 +30,7 @@
 /* Don't have anything in the reserved bits and leaf bits */
 #define RADIX_PMD_BAD_BITS		0x60000000000000e0UL
 #define RADIX_PUD_BAD_BITS		0x60000000000000e0UL
-#define RADIX_PGD_BAD_BITS		0x60000000000000e0UL
+#define RADIX_P4D_BAD_BITS		0x60000000000000e0UL

 #define RADIX_PMD_SHIFT		(PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
 #define RADIX_PUD_SHIFT		(RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)

@@ -227,9 +227,9 @@ static inline int radix__pud_bad(pud_t pud)
 }


-static inline int radix__pgd_bad(pgd_t pgd)
+static inline int radix__p4d_bad(p4d_t p4d)
 {
-	return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
+	return !!(p4d_val(p4d) & RADIX_P4D_BAD_BITS);
 }

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -30,7 +30,6 @@
 #include <asm/fixmap.h>

 extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
 extern pte_t *pkmap_page_table;

 /*

@@ -59,33 +58,6 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))

-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-extern void __kunmap_atomic(void *kvaddr);
-
-static inline void *kmap(struct page *page)
-{
-	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
-	return kmap_atomic_prot(page, kmap_prot);
-}
-
-
 #define flush_cache_kmaps()	flush_cache_all()

 #endif /* __KERNEL__ */
@@ -2,7 +2,6 @@
 #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
 #define _ASM_POWERPC_NOHASH_32_PGTABLE_H

-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>

 #ifndef __ASSEMBLY__
@@ -15,7 +15,7 @@ struct vmemmap_backing {
 };
 extern struct vmemmap_backing *vmemmap_list;

-#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, (unsigned long)PUD)
+#define p4d_populate(MM, P4D, PUD)	p4d_set(P4D, (unsigned long)PUD)

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -2,7 +2,7 @@
 #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
 #define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H

-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>

 /*
  * Entries per page directory level.  The PTE level must use a 64b record

@@ -45,41 +45,41 @@
 #define PMD_MASKED_BITS		0
 /* Bits to mask out from a PUD to get to the PMD page */
 #define PUD_MASKED_BITS		0
-/* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS		0
+/* Bits to mask out from a P4D to get to the PUD page */
+#define P4D_MASKED_BITS		0


 /*
  * 4-level page tables related bits
  */

-#define pgd_none(pgd)		(!pgd_val(pgd))
-#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
-#define pgd_present(pgd)	(pgd_val(pgd) != 0)
-#define pgd_page_vaddr(pgd)	(pgd_val(pgd) & ~PGD_MASKED_BITS)
+#define p4d_none(p4d)		(!p4d_val(p4d))
+#define p4d_bad(p4d)		(p4d_val(p4d) == 0)
+#define p4d_present(p4d)	(p4d_val(p4d) != 0)
+#define p4d_page_vaddr(p4d)	(p4d_val(p4d) & ~P4D_MASKED_BITS)

 #ifndef __ASSEMBLY__

-static inline void pgd_clear(pgd_t *pgdp)
+static inline void p4d_clear(p4d_t *p4dp)
 {
-	*pgdp = __pgd(0);
+	*p4dp = __p4d(0);
 }

-static inline pte_t pgd_pte(pgd_t pgd)
+static inline pte_t p4d_pte(p4d_t p4d)
 {
-	return __pte(pgd_val(pgd));
+	return __pte(p4d_val(p4d));
 }

-static inline pgd_t pte_pgd(pte_t pte)
+static inline p4d_t pte_p4d(pte_t pte)
 {
-	return __pgd(pte_val(pte));
+	return __p4d(pte_val(pte));
 }
-extern struct page *pgd_page(pgd_t pgd);
+extern struct page *p4d_page(p4d_t p4d);

 #endif /* !__ASSEMBLY__ */

-#define pud_offset(pgdp, addr)	\
-	(((pud_t *) pgd_page_vaddr(*(pgdp))) + \
+#define pud_offset(p4dp, addr)	\
+	(((pud_t *) p4d_page_vaddr(*(p4dp))) + \
 		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))

 #define pud_ERROR(e) \
@@ -175,11 +175,11 @@ static inline pud_t pte_pud(pte_t pte)
 	return __pud(pte_val(pte));
 }
 #define pud_write(pud)		pte_write(pud_pte(pud))
-#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
+#define p4d_write(pgd)		pte_write(p4d_pte(p4d))

-static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+static inline void p4d_set(p4d_t *p4dp, unsigned long val)
 {
-	*pgdp = __pgd(val);
+	*p4dp = __p4d(val);
 }

 /*
@@ -44,12 +44,12 @@ struct mm_struct;
 #ifdef CONFIG_PPC32
 static inline pmd_t *pmd_ptr(struct mm_struct *mm, unsigned long va)
 {
-	return pmd_offset(pud_offset(pgd_offset(mm, va), va), va);
+	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
 }

 static inline pmd_t *pmd_ptr_k(unsigned long va)
 {
-	return pmd_offset(pud_offset(pgd_offset_k(va), va), va);
+	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
 }

 static inline pte_t *virt_to_kpte(unsigned long vaddr)

@@ -158,9 +158,9 @@ static inline bool pud_is_leaf(pud_t pud)
 }
 #endif

-#ifndef pgd_is_leaf
-#define pgd_is_leaf pgd_is_leaf
-static inline bool pgd_is_leaf(pgd_t pgd)
+#ifndef p4d_is_leaf
+#define p4d_is_leaf p4d_is_leaf
+static inline bool p4d_is_leaf(p4d_t p4d)
 {
 	return false;
 }
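The allocation-side conversions in the hunks that follow are symmetric to the read-side ones: callers that used to hand a pgd to pud_alloc() now hand it the p4d entry, and a missing top-level table is installed with p4d_populate() instead of pgd_populate(). A hedged sketch of the post-conversion allocating walk, in the style of the map_kernel_page() hunks below (generic helper names; the wrapping function is illustrative, not from the patch):

	/* Illustrative only: ensure the intermediate tables for a kernel
	 * address exist, allocating them on demand.
	 */
	static int alloc_walk(unsigned long ea)
	{
		pgd_t *pgdp = pgd_offset_k(ea);
		p4d_t *p4dp = p4d_offset(pgdp, ea);	/* folded: aliases the pgd slot */
		pud_t *pudp = pud_alloc(&init_mm, p4dp, ea);	/* was pud_alloc(.., pgdp, ..) */
		pmd_t *pmdp;

		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		/* pte_alloc_kernel() and set_pte_at() would follow here */
		return 0;
	}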
@@ -499,13 +499,14 @@ void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
 	unsigned long ig;

 	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
+		p4d_t *p4d = p4d_offset(pgd, 0);
 		pud_t *pud;

-		if (!pgd_present(*pgd))
+		if (!p4d_present(*p4d))
 			continue;
-		pud = pud_offset(pgd, 0);
+		pud = pud_offset(p4d, 0);
 		kvmppc_unmap_free_pud(kvm, pud, lpid);
-		pgd_clear(pgd);
+		p4d_clear(p4d);
 	}
 }

@@ -566,6 +567,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 			 unsigned long *rmapp, struct rmap_nested **n_rmap)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud, *new_pud = NULL;
 	pmd_t *pmd, *new_pmd = NULL;
 	pte_t *ptep, *new_ptep = NULL;

@@ -573,9 +575,11 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,

 	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
 	pgd = pgtable + pgd_index(gpa);
+	p4d = p4d_offset(pgd, gpa);
+
 	pud = NULL;
-	if (pgd_present(*pgd))
-		pud = pud_offset(pgd, gpa);
+	if (p4d_present(*p4d))
+		pud = pud_offset(p4d, gpa);
 	else
 		new_pud = pud_alloc_one(kvm->mm, gpa);

@@ -596,13 +600,13 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,

 	/* Now traverse again under the lock and change the tree */
 	ret = -ENOMEM;
-	if (pgd_none(*pgd)) {
+	if (p4d_none(*p4d)) {
 		if (!new_pud)
 			goto out_unlock;
-		pgd_populate(kvm->mm, pgd, new_pud);
+		p4d_populate(kvm->mm, p4d, new_pud);
 		new_pud = NULL;
 	}
-	pud = pud_offset(pgd, gpa);
+	pud = pud_offset(p4d, gpa);
 	if (pud_is_leaf(*pud)) {
 		unsigned long hgpa = gpa & PUD_MASK;

@@ -1220,7 +1224,8 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
 	unsigned long gpa;
 	pgd_t *pgt;
 	struct kvm_nested_guest *nested;
-	pgd_t pgd, *pgdp;
+	pgd_t *pgdp;
+	p4d_t p4d, *p4dp;
 	pud_t pud, *pudp;
 	pmd_t pmd, *pmdp;
 	pte_t *ptep;

@@ -1293,13 +1298,14 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
 		}

 		pgdp = pgt + pgd_index(gpa);
-		pgd = READ_ONCE(*pgdp);
-		if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
-			gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
+		p4dp = p4d_offset(pgdp, gpa);
+		p4d = READ_ONCE(*p4dp);
+		if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
+			gpa = (gpa & P4D_MASK) + P4D_SIZE;
 			continue;
 		}

-		pudp = pud_offset(&pgd, gpa);
+		pudp = pud_offset(&p4d, gpa);
 		pud = READ_ONCE(*pudp);
 		if (!(pud_val(pud) & _PAGE_PRESENT)) {
 			gpa = (gpa & PUD_MASK) + PUD_SIZE;
@@ -107,13 +107,18 @@ static inline int unmap_patch_area(unsigned long addr)
 	pte_t *ptep;
 	pmd_t *pmdp;
 	pud_t *pudp;
+	p4d_t *p4dp;
 	pgd_t *pgdp;

 	pgdp = pgd_offset_k(addr);
 	if (unlikely(!pgdp))
 		return -EINVAL;

-	pudp = pud_offset(pgdp, addr);
+	p4dp = p4d_offset(pgdp, addr);
+	if (unlikely(!p4dp))
+		return -EINVAL;
+
+	pudp = pud_offset(p4dp, addr);
 	if (unlikely(!pudp))
 		return -EINVAL;
@@ -148,6 +148,7 @@ void hash__vmemmap_remove_mapping(unsigned long start,
 int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;

@@ -155,7 +156,8 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
 	if (slab_is_available()) {
 		pgdp = pgd_offset_k(ea);
-		pudp = pud_alloc(&init_mm, pgdp, ea);
+		p4dp = p4d_offset(pgdp, ea);
+		pudp = pud_alloc(&init_mm, p4dp, ea);
 		if (!pudp)
 			return -ENOMEM;
 		pmdp = pmd_alloc(&init_mm, pudp, ea);
@@ -65,17 +65,19 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
 {
 	unsigned long pfn = pa >> PAGE_SHIFT;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;

 	pgdp = pgd_offset_k(ea);
-	if (pgd_none(*pgdp)) {
+	p4dp = p4d_offset(pgdp, ea);
+	if (p4d_none(*p4dp)) {
 		pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
 					   region_start, region_end);
-		pgd_populate(&init_mm, pgdp, pudp);
+		p4d_populate(&init_mm, p4dp, pudp);
 	}
-	pudp = pud_offset(pgdp, ea);
+	pudp = pud_offset(p4dp, ea);
 	if (map_page_size == PUD_SIZE) {
 		ptep = (pte_t *)pudp;
 		goto set_the_pte;

@@ -115,6 +117,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
 {
 	unsigned long pfn = pa >> PAGE_SHIFT;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;

@@ -137,7 +140,8 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
 	 * boot.
 	 */
 	pgdp = pgd_offset_k(ea);
-	pudp = pud_alloc(&init_mm, pgdp, ea);
+	p4dp = p4d_offset(pgdp, ea);
+	pudp = pud_alloc(&init_mm, p4dp, ea);
 	if (!pudp)
 		return -ENOMEM;
 	if (map_page_size == PUD_SIZE) {

@@ -174,6 +178,7 @@ void radix__change_memory_range(unsigned long start, unsigned long end,
 {
 	unsigned long idx;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;

@@ -186,7 +191,8 @@ void radix__change_memory_range(unsigned long start, unsigned long end,

 	for (idx = start; idx < end; idx += PAGE_SIZE) {
 		pgdp = pgd_offset_k(idx);
-		pudp = pud_alloc(&init_mm, pgdp, idx);
+		p4dp = p4d_offset(pgdp, idx);
+		pudp = pud_alloc(&init_mm, p4dp, idx);
 		if (!pudp)
 			continue;
 		if (pud_is_leaf(*pudp)) {

@@ -850,6 +856,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
 	unsigned long addr, next;
 	pud_t *pud_base;
 	pgd_t *pgd;
+	p4d_t *p4d;

 	spin_lock(&init_mm.page_table_lock);

@@ -857,15 +864,16 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
 		next = pgd_addr_end(addr, end);

 		pgd = pgd_offset_k(addr);
-		if (!pgd_present(*pgd))
+		p4d = p4d_offset(pgd, addr);
+		if (!p4d_present(*p4d))
 			continue;

-		if (pgd_is_leaf(*pgd)) {
-			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
+		if (p4d_is_leaf(*p4d)) {
+			split_kernel_mapping(addr, end, P4D_SIZE, (pte_t *)p4d);
 			continue;
 		}

-		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
+		pud_base = (pud_t *)p4d_page_vaddr(*p4d);
 		remove_pud_table(pud_base, addr, next);
 	}
@@ -54,15 +54,17 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
 			     int npages)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	spinlock_t *ptl;

 	pgd = pgd_offset(mm, addr);
-	if (pgd_none(*pgd))
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d))
 		return;
-	pud = pud_offset(pgd, addr);
+	pud = pud_offset(p4d, addr);
 	if (pud_none(*pud))
 		return;
 	pmd = pmd_offset(pud, addr);
@@ -24,22 +24,11 @@
 #include <linux/highmem.h>
 #include <linux/module.h>

-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	int idx, type;

-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

@@ -49,17 +38,14 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)

 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);

-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

-	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
-		pagefault_enable();
-		preempt_enable();
+	if (vaddr < __fix_to_virt(FIX_KMAP_END))
 		return;
-	}

 	if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
 		int type = kmap_atomic_idx();

@@ -77,7 +63,5 @@ void __kunmap_atomic(void *kvaddr)
 	}

 	kmap_atomic_idx_pop();
-	pagefault_enable();
-	preempt_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
@@ -119,6 +119,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	pgd_t *pg;
+	p4d_t *p4;
 	pud_t *pu;
 	pmd_t *pm;
 	hugepd_t *hpdp = NULL;

@@ -128,20 +129,21 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz

 	addr &= ~(sz-1);
 	pg = pgd_offset(mm, addr);
+	p4 = p4d_offset(pg, addr);

 #ifdef CONFIG_PPC_BOOK3S_64
 	if (pshift == PGDIR_SHIFT)
 		/* 16GB huge page */
-		return (pte_t *) pg;
+		return (pte_t *) p4;
 	else if (pshift > PUD_SHIFT) {
 		/*
 		 * We need to use hugepd table
 		 */
 		ptl = &mm->page_table_lock;
-		hpdp = (hugepd_t *)pg;
+		hpdp = (hugepd_t *)p4;
 	} else {
 		pdshift = PUD_SHIFT;
-		pu = pud_alloc(mm, pg, addr);
+		pu = pud_alloc(mm, p4, addr);
 		if (!pu)
 			return NULL;
 		if (pshift == PUD_SHIFT)

@@ -166,10 +168,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 #else
 	if (pshift >= PGDIR_SHIFT) {
 		ptl = &mm->page_table_lock;
-		hpdp = (hugepd_t *)pg;
+		hpdp = (hugepd_t *)p4;
 	} else {
 		pdshift = PUD_SHIFT;
-		pu = pud_alloc(mm, pg, addr);
+		pu = pud_alloc(mm, p4, addr);
 		if (!pu)
 			return NULL;
 		if (pshift >= PUD_SHIFT) {

@@ -390,7 +392,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	mm_dec_nr_pmds(tlb->mm);
 }

-static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
 				   unsigned long addr, unsigned long end,
 				   unsigned long floor, unsigned long ceiling)
 {

@@ -400,7 +402,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,

 	start = addr;
 	do {
-		pud = pud_offset(pgd, addr);
+		pud = pud_offset(p4d, addr);
 		next = pud_addr_end(addr, end);
 		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
 			if (pud_none_or_clear_bad(pud))

@@ -435,8 +437,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	if (end - 1 > ceiling - 1)
 		return;

-	pud = pud_offset(pgd, start);
-	pgd_clear(pgd);
+	pud = pud_offset(p4d, start);
+	p4d_clear(p4d);
 	pud_free_tlb(tlb, pud, start);
 	mm_dec_nr_puds(tlb->mm);
 }

@@ -449,6 +451,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			    unsigned long floor, unsigned long ceiling)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	unsigned long next;

 	/*

@@ -471,10 +474,11 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	do {
 		next = pgd_addr_end(addr, end);
 		pgd = pgd_offset(tlb->mm, addr);
+		p4d = p4d_offset(pgd, addr);
 		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
-			if (pgd_none_or_clear_bad(pgd))
+			if (p4d_none_or_clear_bad(p4d))
 				continue;
-			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+			hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
 		} else {
 			unsigned long more;
 			/*

@@ -487,7 +491,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			if (more > next)
 				next = more;

-			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
+			free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
 					  addr, next, floor, ceiling);
 		}
 	} while (addr = next, addr != end);
@@ -121,7 +121,7 @@ static void __init kasan_unmap_early_shadow_vmalloc(void)
 	phys_addr_t pa = __pa(kasan_early_shadow_page);

 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+		pmd_t *pmd = pmd_ptr_k(k_cur);
 		pte_t *ptep = pte_offset_kernel(pmd, k_cur);

 		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
@ -64,8 +64,6 @@ bool init_mem_is_free;
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);
 #endif

 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,

@ -245,7 +243,6 @@ void __init paging_init(void)
 	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

 	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-	kmap_prot = PAGE_KERNEL;
 #endif /* CONFIG_HIGHMEM */

 	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",

@ -73,6 +73,7 @@ static void __init *early_alloc_pgtable(unsigned long size)
 int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;

@ -80,7 +81,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 	BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
 	if (slab_is_available()) {
 		pgdp = pgd_offset_k(ea);
-		pudp = pud_alloc(&init_mm, pgdp, ea);
+		p4dp = p4d_offset(pgdp, ea);
+		pudp = pud_alloc(&init_mm, p4dp, ea);
 		if (!pudp)
 			return -ENOMEM;
 		pmdp = pmd_alloc(&init_mm, pudp, ea);

@ -91,13 +93,12 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 			return -ENOMEM;
 	} else {
 		pgdp = pgd_offset_k(ea);
-#ifndef __PAGETABLE_PUD_FOLDED
-		if (pgd_none(*pgdp)) {
-			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
-			pgd_populate(&init_mm, pgdp, pudp);
+		p4dp = p4d_offset(pgdp, ea);
+		if (p4d_none(*p4dp)) {
+			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
+			p4d_populate(&init_mm, p4dp, pmdp);
 		}
-#endif /* !__PAGETABLE_PUD_FOLDED */
-		pudp = pud_offset(pgdp, ea);
+		pudp = pud_offset(p4dp, ea);
 		if (pud_none(*pudp)) {
 			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
 			pud_populate(&init_mm, pudp, pmdp);

@ -265,6 +265,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;

@ -272,7 +273,9 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 		return;
 	pgd = mm->pgd + pgd_index(addr);
 	BUG_ON(pgd_none(*pgd));
-	pud = pud_offset(pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	BUG_ON(p4d_none(*p4d));
+	pud = pud_offset(p4d, addr);
 	BUG_ON(pud_none(*pud));
 	pmd = pmd_offset(pud, addr);
 	/*

@ -312,12 +315,13 @@ EXPORT_SYMBOL_GPL(vmalloc_to_phys);
 pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 			bool *is_thp, unsigned *hpage_shift)
 {
-	pgd_t pgd, *pgdp;
+	pgd_t *pgdp;
+	p4d_t p4d, *p4dp;
 	pud_t pud, *pudp;
 	pmd_t pmd, *pmdp;
 	pte_t *ret_pte;
 	hugepd_t *hpdp = NULL;
-	unsigned pdshift = PGDIR_SHIFT;
+	unsigned pdshift;

 	if (hpage_shift)
 		*hpage_shift = 0;

@ -325,24 +329,28 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 	if (is_thp)
 		*is_thp = false;

-	pgdp = pgdir + pgd_index(ea);
-	pgd = READ_ONCE(*pgdp);
 	/*
 	 * Always operate on the local stack value. This make sure the
 	 * value don't get updated by a parallel THP split/collapse,
 	 * page fault or a page unmap. The return pte_t * is still not
 	 * stable. So should be checked there for above conditions.
+	 * Top level is an exception because it is folded into p4d.
 	 */
-	if (pgd_none(pgd))
+	pgdp = pgdir + pgd_index(ea);
+	p4dp = p4d_offset(pgdp, ea);
+	p4d = READ_ONCE(*p4dp);
+	pdshift = P4D_SHIFT;
+
+	if (p4d_none(p4d))
 		return NULL;

-	if (pgd_is_leaf(pgd)) {
-		ret_pte = (pte_t *)pgdp;
+	if (p4d_is_leaf(p4d)) {
+		ret_pte = (pte_t *)p4dp;
 		goto out;
 	}

-	if (is_hugepd(__hugepd(pgd_val(pgd)))) {
-		hpdp = (hugepd_t *)&pgd;
+	if (is_hugepd(__hugepd(p4d_val(p4d)))) {
+		hpdp = (hugepd_t *)&p4d;
 		goto out_huge;
 	}

@ -352,7 +360,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 	 * irq disabled
 	 */
 	pdshift = PUD_SHIFT;
-	pudp = pud_offset(&pgd, ea);
+	pudp = pud_offset(&p4d, ea);
 	pud = READ_ONCE(*pudp);

 	if (pud_none(pud))

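The __find_linux_pte() hunk above also preserves the lockless-walk idiom its comment describes: each level is copied to the stack with READ_ONCE() and only the local copy is tested, so a concurrent THP split/collapse or unmap cannot change the entry between the check and its use. Condensed to one level (hypothetical helper, not from the patch):

	/* Sketch of the snapshot idiom: test and keep walking via the local copy. */
	static pud_t *example_lockless_level(p4d_t *p4dp, unsigned long ea)
	{
		p4d_t p4d = READ_ONCE(*p4dp);	/* *p4dp may change under us */

		if (p4d_none(p4d))
			return NULL;
		return pud_offset(&p4d, ea);	/* continue from the snapshot */
	}
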
@ -101,13 +101,13 @@ EXPORT_SYMBOL(__pte_frag_size_shift);

 #ifndef __PAGETABLE_PUD_FOLDED
 /* 4 level page table */
-struct page *pgd_page(pgd_t pgd)
+struct page *p4d_page(p4d_t p4d)
 {
-	if (pgd_is_leaf(pgd)) {
-		VM_WARN_ON(!pgd_huge(pgd));
-		return pte_page(pgd_pte(pgd));
+	if (p4d_is_leaf(p4d)) {
+		VM_WARN_ON(!p4d_huge(p4d));
+		return pte_page(p4d_pte(p4d));
 	}
-	return virt_to_page(pgd_page_vaddr(pgd));
+	return virt_to_page(p4d_page_vaddr(p4d));
 }
 #endif

@ -417,9 +417,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 	}
 }

-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
 {
-	pud_t *pud = pud_offset(pgd, 0);
+	pud_t *pud = pud_offset(p4d, 0);
 	unsigned long addr;
 	unsigned int i;

@ -431,6 +431,20 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 	}
 }

+static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+	p4d_t *p4d = p4d_offset(pgd, 0);
+	unsigned long addr;
+	unsigned int i;
+
+	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
+		addr = start + i * P4D_SIZE;
+		if (!p4d_none(*p4d))
+			/* p4d exists */
+			walk_pud(st, p4d, addr);
+	}
+}
+
 static void walk_pagetables(struct pg_state *st)
 {
 	pgd_t *pgd = pgd_offset_k(0UL);

@ -445,7 +459,7 @@ static void walk_pagetables(struct pg_state *st)
 		addr = KERN_VIRT_START + i * PGDIR_SIZE;
 		if (!pgd_none(*pgd))
 			/* pgd exists */
-			walk_pud(st, pgd, addr);
+			walk_p4d(st, pgd, addr);
 	}
 }

@ -277,9 +277,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 	}
 }

-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
 {
-	pud_t *pud = pud_offset(pgd, 0);
+	pud_t *pud = pud_offset(p4d, 0);
 	unsigned long addr;
 	unsigned int i;

@ -304,11 +304,13 @@ static void walk_pagetables(struct pg_state *st)
 	 * the hash pagetable.
 	 */
 	for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
-		if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
+		p4d_t *p4d = p4d_offset(pgd, 0);
+
+		if (!p4d_none(*p4d) && !p4d_is_leaf(*p4d))
 			/* pgd exists */
-			walk_pud(st, pgd, addr);
+			walk_pud(st, p4d, addr);
 		else
-			note_page(st, addr, 1, pgd_val(*pgd));
+			note_page(st, addr, 1, p4d_val(*p4d));
 	}
 }

@ -337,39 +337,19 @@ static int pseries_remove_mem_node(struct device_node *np)

 static bool lmb_is_removable(struct drmem_lmb *lmb)
 {
-	int i, scns_per_block;
-	bool rc = true;
-	unsigned long pfn, block_sz;
-	u64 phys_addr;
-
 	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
 		return false;

-	block_sz = memory_block_size_bytes();
-	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
-	phys_addr = lmb->base_addr;
-
 #ifdef CONFIG_FA_DUMP
 	/*
 	 * Don't hot-remove memory that falls in fadump boot memory area
 	 * and memory that is reserved for capturing old kernel memory.
 	 */
-	if (is_fadump_memory_area(phys_addr, block_sz))
+	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
 		return false;
 #endif

-	for (i = 0; i < scns_per_block; i++) {
-		pfn = PFN_DOWN(phys_addr);
-		if (!pfn_in_present_section(pfn)) {
-			phys_addr += MIN_MEMORY_BLOCK_SIZE;
-			continue;
-		}
-
-		rc = rc && is_mem_section_removable(pfn, PAGES_PER_SECTION);
-		phys_addr += MIN_MEMORY_BLOCK_SIZE;
-	}
-
-	return rc;
+	/* device_offline() will determine if we can actually remove this lmb */
+	return true;
 }

 static int dlpar_add_lmb(struct drmem_lmb *);

@ -3135,7 +3135,8 @@ static void show_pte(unsigned long addr)
 	unsigned long tskv = 0;
 	struct task_struct *tsk = NULL;
 	struct mm_struct *mm;
-	pgd_t *pgdp, *pgdir;
+	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;

@ -3159,28 +3160,26 @@ static void show_pte(unsigned long addr)
 	catch_memory_errors = 1;
 	sync();

-	if (mm == &init_mm) {
+	if (mm == &init_mm)
 		pgdp = pgd_offset_k(addr);
-		pgdir = pgd_offset_k(0);
-	} else {
+	else
 		pgdp = pgd_offset(mm, addr);
-		pgdir = pgd_offset(mm, 0);
-	}
-
-	if (pgd_none(*pgdp)) {
-		printf("no linux page table for address\n");
+
+	p4dp = p4d_offset(pgdp, addr);
+
+	if (p4d_none(*p4dp)) {
+		printf("No valid P4D\n");
 		return;
 	}

-	printf("pgd @ 0x%px\n", pgdir);
-
-	if (pgd_is_leaf(*pgdp)) {
-		format_pte(pgdp, pgd_val(*pgdp));
+	if (p4d_is_leaf(*p4dp)) {
+		format_pte(p4dp, p4d_val(*p4dp));
 		return;
 	}
-	printf("pgdp @ 0x%px = 0x%016lx\n", pgdp, pgd_val(*pgdp));

-	pudp = pud_offset(pgdp, addr);
+	printf("p4dp @ 0x%px = 0x%016lx\n", p4dp, p4d_val(*p4dp));
+
+	pudp = pud_offset(p4dp, addr);

 	if (pud_none(*pudp)) {
 		printf("No valid PUD\n");

@ -59,6 +59,7 @@ config KASAN_SHADOW_OFFSET
 config S390
 	def_bool y
 	select ARCH_BINFMT_ELF_STATE
+	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE

@ -2,7 +2,6 @@
 #ifndef __ASM_SH_PGTABLE_2LEVEL_H
 #define __ASM_SH_PGTABLE_2LEVEL_H

-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>

 /*

@ -2,7 +2,6 @@
 #ifndef __ASM_SH_PGTABLE_3LEVEL_H
 #define __ASM_SH_PGTABLE_3LEVEL_H

-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>

 /*

@ -407,13 +407,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
-#define __pgd_offset(address)	pgd_index(address)

 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address)	pgd_offset(&init_mm, address)

-#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

 /* Find an entry in the third-level page table.. */
 #define pte_index(address)	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

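The __pXd_offset() macros removed above were exact duplicates of the pXd_index() ones: both just extract one level's index bits from a virtual address. A worked example with assumed 2-level sh values (PGDIR_SHIFT = 22, PTRS_PER_PGD = 1024; the real constants depend on the configured page size):

	#define EXAMPLE_PGDIR_SHIFT	22
	#define EXAMPLE_PTRS_PER_PGD	1024
	#define example_pgd_index(addr) \
		(((addr) >> EXAMPLE_PGDIR_SHIFT) & (EXAMPLE_PTRS_PER_PGD - 1))

	/* example_pgd_index(0x00c01000) == 0x00c01000 >> 22 == 3 */
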
@ -46,14 +46,13 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
 /* To find an entry in a generic PGD. */
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define __pgd_offset(address)	pgd_index(address)
 #define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

 /* To find an entry in a kernel PGD. */
 #define pgd_offset_k(address)	pgd_offset(&init_mm, address)

-#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+/* #define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) */

 /*
  * PMD level access routines. Same notes as above.

@ -136,6 +136,7 @@ EXPORT_SYMBOL_GPL(match_trapped_io_handler);
 static struct trapped_io *lookup_tiop(unsigned long address)
 {
 	pgd_t *pgd_k;
+	p4d_t *p4d_k;
 	pud_t *pud_k;
 	pmd_t *pmd_k;
 	pte_t *pte_k;

@ -145,7 +146,11 @@ static struct trapped_io *lookup_tiop(unsigned long address)
 	if (!pgd_present(*pgd_k))
 		return NULL;

-	pud_k = pud_offset(pgd_k, address);
+	p4d_k = p4d_offset(pgd_k, address);
+	if (!p4d_present(*p4d_k))
+		return NULL;
+
+	pud_k = pud_offset(p4d_k, address);
 	if (!pud_present(*pud_k))
 		return NULL;

@ -209,6 +209,7 @@ static void sh4_flush_cache_page(void *args)
 	unsigned long address, pfn, phys;
 	int map_coherent = 0;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;

@ -224,7 +225,8 @@ static void sh4_flush_cache_page(void *args)
 		return;

 	pgd = pgd_offset(vma->vm_mm, address);
-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	pud = pud_offset(p4d, address);
 	pmd = pmd_offset(pud, address);
 	pte = pte_offset_kernel(pmd, address);

@ -383,6 +383,7 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
 				unsigned long addr, unsigned long end)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;

@ -397,7 +398,11 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
 	if (pgd_bad(*pgd))
 		return;

-	pud = pud_offset(pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d) || p4d_bad(*p4d))
+		return;
+
+	pud = pud_offset(p4d, addr);
 	if (pud_none(*pud) || pud_bad(*pud))
 		return;

@ -47,12 +47,13 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 		pgd = swapper_pg_dir;
 	}

-	printk(KERN_ALERT "pgd = %p\n", pgd);
+	pr_alert("pgd = %p\n", pgd);
 	pgd += pgd_index(addr);
-	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
-	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));
+	pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
+		 (u64)pgd_val(*pgd));

 	do {
+		p4d_t *p4d;
 		pud_t *pud;
 		pmd_t *pmd;
 		pte_t *pte;

@ -61,33 +62,46 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;

 		if (pgd_bad(*pgd)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}

-		pud = pud_offset(pgd, addr);
+		p4d = p4d_offset(pgd, addr);
+		if (PTRS_PER_P4D != 1)
+			pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
+				(u64)p4d_val(*p4d));
+
+		if (p4d_none(*p4d))
+			break;
+
+		if (p4d_bad(*p4d)) {
+			pr_cont("(bad)");
+			break;
+		}
+
+		pud = pud_offset(p4d, addr);
 		if (PTRS_PER_PUD != 1)
-			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
+			pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
 			       (u64)pud_val(*pud));

 		if (pud_none(*pud))
 			break;

 		if (pud_bad(*pud)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}

 		pmd = pmd_offset(pud, addr);
 		if (PTRS_PER_PMD != 1)
-			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
+			pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
 			       (u64)pmd_val(*pmd));

 		if (pmd_none(*pmd))
 			break;

 		if (pmd_bad(*pmd)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}

@ -96,17 +110,18 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;

 		pte = pte_offset_kernel(pmd, addr);
-		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
+		pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
 		       (u64)pte_val(*pte));
 	} while (0);

-	printk("\n");
+	pr_cont("\n");
 }

 static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 {
 	unsigned index = pgd_index(address);
 	pgd_t *pgd_k;
+	p4d_t *p4d, *p4d_k;
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;

@ -116,8 +131,13 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 	if (!pgd_present(*pgd_k))
 		return NULL;

-	pud = pud_offset(pgd, address);
-	pud_k = pud_offset(pgd_k, address);
+	p4d = p4d_offset(pgd, address);
+	p4d_k = p4d_offset(pgd_k, address);
+	if (!p4d_present(*p4d_k))
+		return NULL;
+
+	pud = pud_offset(p4d, address);
+	pud_k = pud_offset(p4d_k, address);
 	if (!pud_present(*pud_k))
 		return NULL;

@ -188,14 +208,12 @@ show_fault_oops(struct pt_regs *regs, unsigned long address)
 	if (!oops_may_print())
 		return;

-	printk(KERN_ALERT "BUG: unable to handle kernel ");
-	if (address < PAGE_SIZE)
-		printk(KERN_CONT "NULL pointer dereference");
-	else
-		printk(KERN_CONT "paging request");
-
-	printk(KERN_CONT " at %08lx\n", address);
-	printk(KERN_ALERT "PC:");
+	pr_alert("BUG: unable to handle kernel %s at %08lx\n",
+		 address < PAGE_SIZE ? "NULL pointer dereference"
+				     : "paging request",
+		 address);
+	pr_alert("PC:");
 	printk_address(regs->pc, 1);

 	show_pte(NULL, address);

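Besides the p4d plumbing, the fault-handling hunks above convert raw printk(KERN_ALERT ...)/printk(KERN_CONT ...) calls to the pr_alert()/pr_cont() helpers, which prepend the log level for you. The two styles are equivalent; a side-by-side sketch (illustrative only, not from the patch):

	#include <linux/printk.h>

	static void example_report(unsigned long addr)
	{
		printk(KERN_ALERT "addr = %08lx", addr);	/* old style */
		printk(KERN_CONT " (bad)\n");

		pr_alert("addr = %08lx", addr);			/* new style */
		pr_cont(" (bad)\n");
	}
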
@ -26,19 +26,23 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;

 	pgd = pgd_offset(mm, addr);
 	if (pgd) {
-		pud = pud_alloc(mm, pgd, addr);
+		p4d = p4d_alloc(mm, pgd, addr);
+		if (p4d) {
+			pud = pud_alloc(mm, p4d, addr);
 			if (pud) {
 				pmd = pmd_alloc(mm, pud, addr);
 				if (pmd)
 					pte = pte_alloc_map(mm, pmd, addr);
 			}
+		}
 	}

 	return pte;
 }

@ -47,19 +51,23 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;

 	pgd = pgd_offset(mm, addr);
 	if (pgd) {
-		pud = pud_offset(pgd, addr);
+		p4d = p4d_offset(pgd, addr);
+		if (p4d) {
+			pud = pud_offset(p4d, addr);
 			if (pud) {
 				pmd = pmd_offset(pud, addr);
 				if (pmd)
 					pte = pte_offset_map(pmd, addr);
 			}
+		}
 	}

 	return pte;
 }

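The allocation side mirrors the lookup side: p4d_alloc() is slotted between pgd_offset() and pud_alloc(), and with a folded p4d it simply returns the pgd slot without allocating anything. The nested conditionals above flatten to this chain (hypothetical function name, error handling collapsed to early returns):

	static pte_t *example_alloc_walk(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		p4d_t *p4d = p4d_alloc(mm, pgd, addr);	/* new intermediate step */
		pud_t *pud;
		pmd_t *pmd;

		if (!p4d)
			return NULL;
		pud = pud_alloc(mm, p4d, addr);		/* was: pud_alloc(mm, pgd, addr) */
		if (!pud)
			return NULL;
		pmd = pmd_alloc(mm, pud, addr);
		if (!pmd)
			return NULL;
		return pte_alloc_map(mm, pmd, addr);
	}
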
@ -45,6 +45,7 @@ void __init __weak plat_mem_setup(void)
 static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;

@ -54,7 +55,13 @@ static pte_t *__get_pte_phys(unsigned long addr)
 		return NULL;
 	}

-	pud = pud_alloc(NULL, pgd, addr);
+	p4d = p4d_alloc(NULL, pgd, addr);
+	if (unlikely(!p4d)) {
+		p4d_ERROR(*p4d);
+		return NULL;
+	}
+
+	pud = pud_alloc(NULL, p4d, addr);
 	if (unlikely(!pud)) {
 		pud_ERROR(*pud);
 		return NULL;

@ -172,9 +179,9 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	unsigned long vaddr;

 	vaddr = start;
-	i = __pgd_offset(vaddr);
-	j = __pud_offset(vaddr);
-	k = __pmd_offset(vaddr);
+	i = pgd_index(vaddr);
+	j = pud_index(vaddr);
+	k = pmd_index(vaddr);
 	pgd = pgd_base + i;

 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {

@ -15,7 +15,7 @@
 #include <asm/cacheflush.h>

 #define kmap_get_fixmap_pte(vaddr) \
-	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+	pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)), vaddr)

 static pte_t *kmap_coherent_pte;

@ -23,6 +23,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;

@ -42,7 +43,10 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
 		pgd = pgd_offset(current->mm, address);
 	}

-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none_or_clear_bad(p4d))
+		return 1;
+	pud = pud_offset(p4d, address);
 	if (pud_none_or_clear_bad(pud))
 		return 1;
 	pmd = pmd_offset(pud, address);

@ -44,6 +44,7 @@ static int handle_tlbmiss(unsigned long long protection_flags,
 			  unsigned long address)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;

@ -58,7 +59,11 @@ static int handle_tlbmiss(unsigned long long protection_flags,
 		pgd = pgd_offset(current->mm, address);
 	}

-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none(*p4d) || !p4d_present(*p4d))
+		return 1;
+
+	pud = pud_offset(p4d, address);
 	if (pud_none(*pud) || !pud_present(*pud))
 		return 1;

@ -25,11 +25,12 @@
 #include <asm/vaddrs.h>
 #include <asm/kmap_types.h>
 #include <asm/pgtable.h>
+#include <asm/pgtsrmmu.h>

 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;

-extern pgprot_t kmap_prot;
+#define kmap_prot __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE)
 extern pte_t *pkmap_page_table;

 void kmap_init(void) __init;

@ -50,28 +51,6 @@ void kmap_init(void) __init;

 #define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))

-void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
-
-static inline void *kmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-
-void *kmap_atomic(struct page *page);
-void __kunmap_atomic(void *kvaddr);
-
 #define flush_cache_kmaps()	flush_cache_all()

 #endif /* __KERNEL__ */

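The kmap()/kunmap() bodies deleted from the sparc header above are the same logic every highmem architecture used to carry; the consolidation series this hunk belongs to keeps a single shared copy in the generic highmem header instead. Its shape is roughly the following (paraphrased from memory of the generic version, not taken from this diff):

	/* Generic kmap(): lowmem pages are already mapped; highmem pages
	 * get a temporary kernel virtual address from the pkmap area.
	 */
	static inline void *kmap(struct page *page)
	{
		might_sleep();
		if (!PageHighMem(page))
			return page_address(page);
		return kmap_high(page);
	}
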
Some files were not shown because too many files have changed in this diff.