x86/mm/32: implement arch_sync_kernel_mappings()
Implement the function to sync changes in vmalloc and ioremap ranges to all page-tables.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: http://lkml.kernel.org/r/20200515140023.25469-6-joro@8bytes.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8e19843c36
commit 86cf69f1d8
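The hunks below touch the 32-bit page-table type headers and arch/x86/mm/fault.c. For context, the generic side of this series is expected to accumulate a pgtbl_mod_mask of the page-table levels it modified while populating a vmalloc/ioremap range, and to call arch_sync_kernel_mappings() only when that mask intersects the architecture's ARCH_PAGE_TABLE_SYNC_MASK. On non-PAE 32-bit, kernel PMD-level entries live in every process's page directory, so PMD changes always need propagating; on PAE they only do when the kernel PMD is not shared (i.e. with PTI enabled). A minimal sketch of that caller-side contract, assuming a hypothetical populate helper (only ARCH_PAGE_TABLE_SYNC_MASK, PGTBL_PMD_MODIFIED, pgtbl_mod_mask and arch_sync_kernel_mappings() come from this series):

/*
 * Sketch only, not taken from this commit: how generic mm code is expected
 * to drive the hook implemented below.  The populate helper is hypothetical;
 * it stands for the vmalloc/ioremap mapping code that records which
 * page-table levels it had to allocate or change.
 */
static int example_map_kernel_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	pgtbl_mod_mask mask = 0;
	int ret;

	ret = example_populate_range(start, end, &mask);	/* hypothetical */

	/*
	 * Only bother the architecture when a level it cares about
	 * (PGTBL_PMD_MODIFIED on 32-bit x86) was actually modified.
	 */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return ret;
}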
@@ -20,6 +20,8 @@ typedef union {
 
 #define SHARED_KERNEL_PMD	0
 
+#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
+
 /*
  * traditional i386 two-level paging structure:
  */
@@ -27,6 +27,8 @@ typedef union {
 #define SHARED_KERNEL_PMD	(!static_cpu_has(X86_FEATURE_PTI))
 #endif
 
+#define ARCH_PAGE_TABLE_SYNC_MASK	(SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED)
+
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
@@ -190,16 +190,13 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 	return pmd_k;
 }
 
-static void vmalloc_sync(void)
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 {
-	unsigned long address;
+	unsigned long addr;
 
-	if (SHARED_KERNEL_PMD)
-		return;
-
-	for (address = VMALLOC_START & PMD_MASK;
-	     address >= TASK_SIZE_MAX && address < VMALLOC_END;
-	     address += PMD_SIZE) {
+	for (addr = start & PMD_MASK;
+	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
+	     addr += PMD_SIZE) {
 		struct page *page;
 
 		spin_lock(&pgd_lock);
@@ -210,13 +207,23 @@ static void vmalloc_sync(void)
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
-			vmalloc_sync_one(page_address(page), address);
+			vmalloc_sync_one(page_address(page), addr);
 			spin_unlock(pgt_lock);
 		}
 		spin_unlock(&pgd_lock);
 	}
 }
 
+static void vmalloc_sync(void)
+{
+	unsigned long address;
+
+	if (SHARED_KERNEL_PMD)
+		return;
+
+	arch_sync_kernel_mappings(VMALLOC_START, VMALLOC_END);
+}
+
 void vmalloc_sync_mappings(void)
 {
 	vmalloc_sync();
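The exported wrapper keeps its existing semantics, so current callers are unaffected. A hedged usage sketch of the kind of caller vmalloc_sync_mappings() serves (the buffer helper and its motivation are hypothetical; only vmalloc() and vmalloc_sync_mappings() are real kernel APIs here):

#include <linux/vmalloc.h>

/*
 * Hypothetical example: allocate a buffer that will later be touched from
 * a context where taking a vmalloc fault is not acceptable.  Syncing the
 * kernel mappings up front ensures every page table already contains the
 * PMD entries covering the new range.
 */
static void *alloc_prefaulted_buffer(unsigned long size)
{
	void *buf = vmalloc(size);

	if (buf)
		vmalloc_sync_mappings();

	return buf;
}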