From d61172b4b695b821388cdb6088a41d431bcbb93b Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 12 Feb 2016 13:02:24 -0800
Subject: [PATCH] mm/core, x86/mm/pkeys: Differentiate instruction fetches

As discussed earlier, we attempt to enforce protection keys in
software.

However, the code checks all faults to ensure that they are not
violating protection key permissions.  It was assumed that all faults
are either write faults, where we check PKRU[key].WD (write disable),
or read faults, where we check the AD (access disable) bit.

But there is a third category of faults for protection keys:
instruction faults.  Instruction faults never run afoul of protection
keys because protection keys do not affect instruction fetches.

So, plumb the PF_INSTR bit down into the arch_vma_access_permitted()
function where we do the protection key checks.

We also add a new FAULT_FLAG_INSTRUCTION.  This is because
handle_mm_fault() is not passed the architecture-specific error_code
where we keep PF_INSTR, so we need to encode the instruction fetch
information into the arch-generic fault flags.

Signed-off-by: Dave Hansen
Reviewed-by: Thomas Gleixner
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Dave Hansen
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210224.96928009@viggo.jf.intel.com
Signed-off-by: Ingo Molnar
---
 arch/powerpc/include/asm/mmu_context.h |  2 +-
 arch/s390/include/asm/mmu_context.h    |  2 +-
 arch/x86/include/asm/mmu_context.h     |  5 ++++-
 arch/x86/mm/fault.c                    |  8 ++++++--
 include/asm-generic/mm_hooks.h         |  2 +-
 include/linux/mm.h                     |  1 +
 mm/gup.c                               | 11 +++++++++--
 mm/memory.c                            |  1 +
 8 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index df9bf3ed025b..4eaab40e3ade 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -149,7 +149,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool foreign)
+		bool write, bool execute, bool foreign)
 {
 	/* by default, allow everything */
 	return true;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8906600922ce..fa66b6dfa97a 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -131,7 +131,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool foreign)
+		bool write, bool execute, bool foreign)
 {
 	/* by default, allow everything */
 	return true;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index b4d939a17e60..6572b949cbca 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -323,8 +323,11 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool foreign)
+		bool write, bool execute, bool foreign)
 {
+	/* pkeys never affect instruction fetches */
+	if (execute)
+		return true;
 	/* allow access if the VMA is not one from this process */
 	if (foreign || vma_is_foreign(vma))
 		return true;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 68ecdffe284e..d81744e6f39f 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -908,7 +908,8 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 	if (error_code & PF_PK)
 		return true;
 	/* this checks permission keys on the VMA: */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
+				(error_code & PF_INSTR), foreign))
 		return true;
 	return false;
 }
@@ -1112,7 +1113,8 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 	 * faults just to hit a PF_PK as soon as we fill in a
 	 * page.
 	 */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
+				(error_code & PF_INSTR), foreign))
 		return 1;
 
 	if (error_code & PF_WRITE) {
@@ -1267,6 +1269,8 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
 	if (error_code & PF_WRITE)
 		flags |= FAULT_FLAG_WRITE;
+	if (error_code & PF_INSTR)
+		flags |= FAULT_FLAG_INSTRUCTION;
 
 	/*
 	 * When running in the kernel we expect faults to occur only to
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index d5c9633bd955..cc5d9a1405df 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -27,7 +27,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool foreign)
+		bool write, bool execute, bool foreign)
 {
 	/* by default, allow everything */
 	return true;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2aaa0f0d67ea..7955c3eb83db 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -252,6 +252,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_TRIED	0x20	/* Second try */
 #define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
 #define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
+#define FAULT_FLAG_INSTRUCTION  0x100	/* The fault was during an instruction fetch */
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
diff --git a/mm/gup.c b/mm/gup.c
index d276760163b3..7f1c4fb77cfa 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -449,7 +449,11 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 		if (!(vm_flags & VM_MAYREAD))
 			return -EFAULT;
 	}
-	if (!arch_vma_access_permitted(vma, write, foreign))
+	/*
+	 * gups are always data accesses, not instruction
+	 * fetches, so execute=false here
+	 */
+	if (!arch_vma_access_permitted(vma, write, false, foreign))
 		return -EFAULT;
 	return 0;
 }
@@ -629,8 +633,11 @@ bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
 	/*
 	 * The architecture might have a hardware protection
 	 * mechanism other than read/write that can deny access.
+	 *
+	 * gup always represents data access, not instruction
+	 * fetches, so execute=false here:
 	 */
-	if (!arch_vma_access_permitted(vma, write, foreign))
+	if (!arch_vma_access_permitted(vma, write, false, foreign))
 		return false;
 
 	return true;
diff --git a/mm/memory.c b/mm/memory.c
index 76c44e5dffa2..99e9f928264a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3380,6 +3380,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t *pte;
 
 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
+					    flags & FAULT_FLAG_INSTRUCTION,
 					    flags & FAULT_FLAG_REMOTE))
 		return VM_FAULT_SIGSEGV;
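Note (illustration only, not part of the patch): the following minimal
user-space sketch shows the behavior the changelog relies on, namely
that a protection key with access disabled still allows instruction
fetches from a tagged page while data accesses would fault.  It
assumes PKU-capable hardware and the later pkey_alloc()/pkey_mprotect()
syscalls with their glibc (>= 2.27) wrappers, which this patch does not
itself provide.

/*
 * Illustrative only: a pkey with access disabled still permits
 * instruction fetches from a page tagged with that key, while a
 * data read of the same page would raise SIGSEGV (SEGV_PKUERR).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	unsigned char *page;
	void (*func)(void);
	int pkey;

	page = mmap(NULL, page_size, PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Write the code *before* restricting access: x86 'ret'. */
	page[0] = 0xc3;

	pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);
	if (pkey < 0) {
		perror("pkey_alloc");	/* no PKU hardware/kernel support */
		return 1;
	}
	if (pkey_mprotect(page, page_size,
			  PROT_READ | PROT_WRITE | PROT_EXEC, pkey) < 0) {
		perror("pkey_mprotect");
		return 1;
	}

	/*
	 * Instruction fetch: not subject to pkeys, so this call works
	 * even though the key has "access disable" set for this thread.
	 */
	func = (void (*)(void))page;
	func();
	printf("executed from pkey-protected page\n");

	/*
	 * A data access such as 'unsigned char c = page[0];' here would
	 * instead fault with si_code == SEGV_PKUERR.
	 */
	pkey_free(pkey);
	return 0;
}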