x86/fault: Fold mm_fault_error() into do_user_addr_fault()

mm_fault_error() is logically just the end of do_user_addr_fault().
Combine the functions.  This makes the code easier to read.
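
As a minimal sketch of the pattern (hypothetical names, not the kernel
code itself): folding a single-use noinline tail helper into its caller
turns a call on the unlikely path into an inverted test with an early
return, so the whole path reads top to bottom in one function:

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
#define noinline	__attribute__((noinline))

/* Before: the unlikely error tail lives in a separate helper. */
static noinline void handle_error(int fault)
{
	/* ...error handling... */
}

static void handler_before(int fault)
{
	/* ...fast-path work... */
	if (unlikely(fault))
		handle_error(fault);
}

/* After: the helper body is folded in behind an early return. */
static void handler_after(int fault)
{
	/* ...fast-path work... */
	if (likely(!fault))
		return;
	/* ...error handling, formerly handle_error()... */
}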

Most of the churn here is from renaming hw_error_code to error_code in
do_user_addr_fault().

This makes no difference at all to the generated code (objdump -dr) as
compared to changing noinline to __always_inline in the definition of
mm_fault_error().

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/dedc4d9c9b047e51ce38b991bd23971a28af4e7b.1612924255.git.luto@kernel.org
Author:     Andy Lutomirski
AuthorDate: 2021-02-09 18:33:35 -08:00
Committer:  Borislav Petkov
Parent:     d24df8ecf9
Commit:     ec352711ce

--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -981,40 +981,6 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
 }
 
-static noinline void
-mm_fault_error(struct pt_regs *regs, unsigned long error_code,
-	       unsigned long address, vm_fault_t fault)
-{
-	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
-		no_context(regs, error_code, address, 0, 0);
-		return;
-	}
-
-	if (fault & VM_FAULT_OOM) {
-		/* Kernel mode? Handle exceptions or die: */
-		if (!(error_code & X86_PF_USER)) {
-			no_context(regs, error_code, address,
-				   SIGSEGV, SEGV_MAPERR);
-			return;
-		}
-
-		/*
-		 * We ran out of memory, call the OOM killer, and return the
-		 * userspace (which will retry the fault, or kill us if we got
-		 * oom-killed):
-		 */
-		pagefault_out_of_memory();
-	} else {
-		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
-			     VM_FAULT_HWPOISON_LARGE))
-			do_sigbus(regs, error_code, address, fault);
-		else if (fault & VM_FAULT_SIGSEGV)
-			bad_area_nosemaphore(regs, error_code, address);
-		else
-			BUG();
-	}
-}
-
 static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
 {
 	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
@@ -1252,7 +1218,7 @@ NOKPROBE_SYMBOL(do_kern_addr_fault);
 /* Handle faults in the user portion of the address space */
 static inline
 void do_user_addr_fault(struct pt_regs *regs,
-			unsigned long hw_error_code,
+			unsigned long error_code,
 			unsigned long address)
 {
 	struct vm_area_struct *vma;
@@ -1272,8 +1238,8 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 * Reserved bits are never expected to be set on
 	 * entries in the user portion of the page tables.
 	 */
-	if (unlikely(hw_error_code & X86_PF_RSVD))
-		pgtable_bad(regs, hw_error_code, address);
+	if (unlikely(error_code & X86_PF_RSVD))
+		pgtable_bad(regs, error_code, address);
 
 	/*
 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
@@ -1283,10 +1249,10 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 * enforcement appears to be consistent with the USER bit.
 	 */
 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
-		     !(hw_error_code & X86_PF_USER) &&
+		     !(error_code & X86_PF_USER) &&
 		     !(regs->flags & X86_EFLAGS_AC)))
 	{
-		bad_area_nosemaphore(regs, hw_error_code, address);
+		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
 
@@ -1295,7 +1261,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 * in a region with pagefaults disabled then we must not take the fault
 	 */
 	if (unlikely(faulthandler_disabled() || !mm)) {
-		bad_area_nosemaphore(regs, hw_error_code, address);
+		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
 
@@ -1316,9 +1282,9 @@ void do_user_addr_fault(struct pt_regs *regs,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (hw_error_code & X86_PF_WRITE)
+	if (error_code & X86_PF_WRITE)
 		flags |= FAULT_FLAG_WRITE;
-	if (hw_error_code & X86_PF_INSTR)
+	if (error_code & X86_PF_INSTR)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
 #ifdef CONFIG_X86_64
@@ -1334,7 +1300,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 * to consider the PF_PK bit.
 	 */
 	if (is_vsyscall_vaddr(address)) {
-		if (emulate_vsyscall(hw_error_code, regs, address))
+		if (emulate_vsyscall(error_code, regs, address))
 			return;
 	}
 #endif
@@ -1357,7 +1323,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 			 * Fault from code in kernel from
 			 * which we do not expect faults.
 			 */
-			bad_area_nosemaphore(regs, hw_error_code, address);
+			bad_area_nosemaphore(regs, error_code, address);
 			return;
 		}
 retry:
@@ -1373,17 +1339,17 @@ retry:
 
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
-		bad_area(regs, hw_error_code, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 	if (likely(vma->vm_start <= address))
 		goto good_area;
 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, hw_error_code, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 	if (unlikely(expand_stack(vma, address))) {
-		bad_area(regs, hw_error_code, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 
@@ -1392,8 +1358,8 @@ retry:
 	 * we can handle it..
 	 */
good_area:
-	if (unlikely(access_error(hw_error_code, vma))) {
-		bad_area_access_error(regs, hw_error_code, address, vma);
+	if (unlikely(access_error(error_code, vma))) {
+		bad_area_access_error(regs, error_code, address, vma);
 		return;
 	}
 
@@ -1415,7 +1381,7 @@ good_area:
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
-			no_context(regs, hw_error_code, address, SIGBUS,
+			no_context(regs, error_code, address, SIGBUS,
 				   BUS_ADRERR);
 		return;
 	}
@@ -1432,9 +1398,36 @@ good_area:
 	}
 
 	mmap_read_unlock(mm);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		mm_fault_error(regs, hw_error_code, address, fault);
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return;
+
+	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
+		no_context(regs, error_code, address, 0, 0);
+		return;
+	}
+
+	if (fault & VM_FAULT_OOM) {
+		/* Kernel mode? Handle exceptions or die: */
+		if (!(error_code & X86_PF_USER)) {
+			no_context(regs, error_code, address,
+				   SIGSEGV, SEGV_MAPERR);
+			return;
+		}
+
+		/*
+		 * We ran out of memory, call the OOM killer, and return the
+		 * userspace (which will retry the fault, or kill us if we got
+		 * oom-killed):
+		 */
+		pagefault_out_of_memory();
+	} else {
+		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+			     VM_FAULT_HWPOISON_LARGE))
+			do_sigbus(regs, error_code, address, fault);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area_nosemaphore(regs, error_code, address);
+		else
+			BUG();
 	}
 
 	check_v8086_mode(regs, address, tsk);
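
For readability, here is how the folded error tail of do_user_addr_fault()
reads after this patch, reassembled from the added lines above (same code,
diff markers dropped):

	mmap_read_unlock(mm);
	if (likely(!(fault & VM_FAULT_ERROR)))
		return;

	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & X86_PF_USER)) {
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return the
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area_nosemaphore(regs, error_code, address);
		else
			BUG();
	}

The fatal-signal check runs first so a dying task never reaches the OOM or
SIGBUS handling; kernel-mode faults are routed through no_context(), while
user-mode OOM defers to the OOM killer via pagefault_out_of_memory().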