forked from Minki/linux
ce0fa3e56a
Speculative processor accesses may reference any memory that has a valid page table entry. While a speculative access won't generate a machine check, it will log the error in a machine check bank. That could cause escalation of a subsequent error, since the overflow bit will then be set in the machine check bank status register.

Code has to be double-plus-tricky to avoid mentioning the 1:1 virtual address of the page we want to map out, otherwise we may trigger the very problem we are trying to avoid. We use a non-canonical address that passes through the usual Linux table-walking code to get to the same "pte".

Thanks to Dave Hansen for reviewing several iterations of this.

Also see: http://marc.info/?l=linux-mm&m=149860136413338&w=2

Signed-off-by: Tony Luck <tony.luck@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Elliott, Robert (Persistent Memory) <elliott@hpe.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/20170816171803.28342-1-tony.luck@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
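For context, the machine-check side of this commit (in the x86 MCE code, not in the header below) implements arch_unmap_kpfn() roughly as follows. This is a sketch reconstructed from the commit description; treat the exact decoy construction and the warning message as illustrative:

	void arch_unmap_kpfn(unsigned long pfn)
	{
		unsigned long decoy_addr;

		/*
		 * The obvious
		 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
		 * would put the 1:1 virtual address of the poison page
		 * into a register, inviting exactly the speculative
		 * access we are trying to prevent.  Instead, build a
		 * non-canonical alias (bit 63 flipped) that walks the
		 * page tables to the same PTE.
		 */
		decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

		if (set_memory_np(decoy_addr, 1))
			pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
	}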
65 lines · 1.5 KiB · C
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H

#include <asm/page_64_types.h>

#ifndef __ASSEMBLY__
#include <asm/alternative.h>

/* duplicate of the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

static inline unsigned long __phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

	return x;
}
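/*
 * Worked example of the carry-flag trick above, with illustrative
 * values for the non-randomized 4-level layout of this era
 * (__START_KERNEL_map == 0xffffffff80000000,
 *  PAGE_OFFSET == 0xffff880000000000):
 *
 *  - Kernel-text address x = 0xffffffff81234000:
 *    y = x - __START_KERNEL_map = 0x1234000.  No underflow, so x > y
 *    and the result is y + phys_base: the kernel image maps at
 *    phys_base.
 *
 *  - Direct-map address x = 0xffff880012345000:
 *    the subtraction underflows, so y > x and the other arm adds
 *    (__START_KERNEL_map - PAGE_OFFSET); modulo 2^64 that works out
 *    to x - PAGE_OFFSET, the direct-map rule.
 */
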
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#define __phys_addr_symbol(x) \
	((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif
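/*
 * With CONFIG_DEBUG_VIRTUAL the out-of-line versions live in
 * arch/x86/mm/physaddr.c and do the same arithmetic with added
 * sanity checks.  A sketch from memory, not the verbatim source:
 *
 *	unsigned long __phys_addr(unsigned long x)
 *	{
 *		unsigned long y = x - __START_KERNEL_map;
 *
 *		if (unlikely(x > y)) {
 *			x = y + phys_base;
 *			VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
 *		} else {
 *			x = y + (__START_KERNEL_map - PAGE_OFFSET);
 *			VIRTUAL_BUG_ON((x > y) || !phys_addr_valid(x));
 *		}
 *		return x;
 *	}
 */
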
#define __phys_reloc_hide(x)	(x)

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) < max_pfn)
#endif

void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);

static inline void clear_page(void *page)
{
	alternative_call_2(clear_page_orig,
			   clear_page_rep, X86_FEATURE_REP_GOOD,
			   clear_page_erms, X86_FEATURE_ERMS,
			   "=D" (page),
			   "0" (page)
			   : "memory", "rax", "rcx");
}
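/*
 * alternative_call_2() patches the call target once at boot, based on
 * CPU feature bits.  Its logical effect matches this runtime dispatch
 * (a sketch of the selection order only; the later alternative wins,
 * so ERMS takes precedence over REP_GOOD):
 *
 *	if (static_cpu_has(X86_FEATURE_ERMS))
 *		clear_page_erms(page);
 *	else if (static_cpu_has(X86_FEATURE_REP_GOOD))
 *		clear_page_rep(page);
 *	else
 *		clear_page_orig(page);
 */
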
void copy_page(void *to, void *from);

#ifdef CONFIG_X86_MCE
#define arch_unmap_kpfn arch_unmap_kpfn
#endif
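/*
 * Defining arch_unmap_kpfn to itself lets generic code detect that
 * this architecture provides an override via the usual #ifndef idiom,
 * roughly (a sketch of the pattern; the real fallback sits in the
 * generic memory-failure code touched by this series):
 *
 *	#ifndef arch_unmap_kpfn
 *	static inline void arch_unmap_kpfn(unsigned long pfn) {}
 *	#endif
 */
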
#endif	/* !__ASSEMBLY__ */

#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif

#endif	/* _ASM_X86_PAGE_64_H */