forked from Minki/linux
fd0e786d9d
In the following commit: ce0fa3e56a
("x86/mm, mm/hwpoison: Clear PRESENT bit for kernel 1:1 mappings of poison pages") ... we added code to memory_failure() to unmap the page from the kernel 1:1 virtual address space to avoid speculative access to the page logging additional errors. But memory_failure() may not always succeed in taking the page offline, especially if the page belongs to the kernel. This can happen if there are too many corrected errors on a page and either mcelog(8) or drivers/ras/cec.c asks to take a page offline. Since we remove the 1:1 mapping early in memory_failure(), we can end up with the page unmapped, but still in use. On the next access the kernel crashes :-( There are also various debug paths that call memory_failure() to simulate occurrence of an error. Since there is no actual error in memory, we don't need to map out the page for those cases. Revert most of the previous attempt and keep the solution local to arch/x86/kernel/cpu/mcheck/mce.c. Unmap the page only when: 1) there is a real error 2) memory_failure() succeeds. All of this only applies to 64-bit systems. 32-bit kernel doesn't map all of memory into kernel space. It isn't worth adding the code to unmap the piece that is mapped because nobody would run a 32-bit kernel on a machine that has recoverable machine checks. Signed-off-by: Tony Luck <tony.luck@intel.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Borislav Petkov <bp@suse.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Dave <dave.hansen@intel.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Robert (Persistent Memory) <elliott@hpe.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-mm@kvack.org Cc: stable@vger.kernel.org #v4.14 Fixes:ce0fa3e56a
("x86/mm, mm/hwpoison: Clear PRESENT bit for kernel 1:1 mappings of poison pages") Signed-off-by: Ingo Molnar <mingo@kernel.org>
62 lines
1.4 KiB
C
62 lines
1.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _ASM_X86_PAGE_64_H
|
|
#define _ASM_X86_PAGE_64_H
|
|
|
|
#include <asm/page_64_types.h>
|
|
|
|
#ifndef __ASSEMBLY__
|
|
#include <asm/alternative.h>
|
|
|
|
/* duplicated to the one in bootmem.h */
/* Highest page frame number present — presumably set up at boot; see bootmem.h. */
extern unsigned long max_pfn;
/* Physical address the kernel image was loaded at (relocation offset used below). */
extern unsigned long phys_base;
|
static inline unsigned long __phys_addr_nodebug(unsigned long x)
|
|
{
|
|
unsigned long y = x - __START_KERNEL_map;
|
|
|
|
/* use the carry flag to determine if x was < __START_KERNEL_map */
|
|
x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
|
|
|
|
return x;
|
|
}
|
|
|
|
#ifdef CONFIG_DEBUG_VIRTUAL
/* Debug builds: out-of-line versions that can sanity-check the address. */
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
/* Fast path: inline translation, no checking. */
#define __phys_addr(x) __phys_addr_nodebug(x)
/* Symbol addresses always live in the kernel text mapping, so the
 * translation is unconditional: offset from __START_KERNEL_map plus
 * the load-time relocation in phys_base. */
#define __phys_addr_symbol(x) \
((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif

/* Identity here; NOTE(review): presumably the 32-bit header does real work — verify. */
#define __phys_reloc_hide(x) (x)
|
#ifdef CONFIG_FLATMEM
/* Flat memory model: every pfn below max_pfn is valid. */
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif
|
|
/*
 * Page-clearing implementations — presumably in assembly elsewhere in
 * the tree (TODO confirm).  One of them is patched into clear_page()
 * below depending on CPU features.
 */
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
|
/*
 * Zero a page of memory.
 *
 * alternative_call_2() selects the implementation at boot via
 * alternatives patching:
 *   - clear_page_orig              : baseline fallback
 *   - clear_page_rep  if X86_FEATURE_REP_GOOD
 *   - clear_page_erms if X86_FEATURE_ERMS
 * The "=D"/"0" constraints pass the page address in %rdi; the callees
 * clobber memory, %rax and %rcx per the clobber list.
 */
static inline void clear_page(void *page)
{
	alternative_call_2(clear_page_orig,
			   clear_page_rep, X86_FEATURE_REP_GOOD,
			   clear_page_erms, X86_FEATURE_ERMS,
			   "=D" (page),
			   "0" (page)
			   : "memory", "rax", "rcx");
}
|
|
|
/* Copy one page from 'from' to 'to'; out-of-line implementation (not visible here). */
void copy_page(void *to, void *from);
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
#ifdef CONFIG_X86_VSYSCALL_EMULATION
|
|
# define __HAVE_ARCH_GATE_AREA 1
|
|
#endif
|
|
|
|
#endif /* _ASM_X86_PAGE_64_H */
|