Mirror of https://github.com/torvalds/linux.git (synced 2024-11-25 13:41:51 +00:00)
commit 935f583982

Instead of punting and doing tlb_flush_all(), do the same as
flush_tlb_kernel_range() does and use single page invalidations.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.430001980@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
25 lines · 611 B · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_MM_INTERNAL_H
#define __X86_MM_INTERNAL_H

void *alloc_low_pages(unsigned int num);
static inline void *alloc_low_page(void)
{
	return alloc_low_pages(1);
}

void early_ioremap_page_table_range_init(void);

unsigned long kernel_physical_mapping_init(unsigned long start,
					   unsigned long end,
					   unsigned long page_size_mask);
void zone_sizes_init(void);

extern int after_bootmem;

void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);

extern unsigned long tlb_single_page_flush_ceiling;

#endif /* __X86_MM_INTERNAL_H */