c922228efe
The 3.19 merge window saw some TLB modifications merged which caused a
performance regression. They were fixed in commit 045bbb9fa.

Once that fix was applied, I also noticed that there was a small but
intermittent regression still present. It was not present consistently
enough to bisect reliably, but I'm fairly confident that it came from
(my own) MPX patches. The source was reading a relatively unused field
in the mm_struct via arch_unmap.

I also noted that this code was in the main instruction flow of
do_munmap() and probably had more icache impact than we want.

This patch does two things:

1. Adds a static (via Kconfig) and dynamic (via cpuid) check for MPX
   with cpu_feature_enabled(). This keeps us from reading that
   cacheline in the mm and trades it for a check of the global CPUID
   variables at least on CPUs without MPX.
2. Adds an unlikely() to ensure that the MPX call ends up out of the
   main instruction flow in do_munmap(). I've added a detailed comment
   about why this was done and why we want it even on systems where
   MPX is present.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: luto@amacapital.net
Cc: Dave Hansen <dave@sr71.net>
Link: http://lkml.kernel.org/r/20150108223021.AEEAB987@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
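The heart of the change is the guarded arch_unmap() hook; the full version, with its longer comment, appears in the file below. Stripped down, the pattern the commit describes looks like this (a condensed excerpt of that hook, with the comment paraphrased from the commit message):

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * cpu_feature_enabled() resolves to a constant at build time when
	 * MPX is disabled in Kconfig, and otherwise checks the global CPUID
	 * capability bits, so the rarely-hot mm_struct cacheline is never
	 * read on MPX-less systems.  The unlikely() keeps the call off
	 * do_munmap()'s main instruction flow.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}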
155 lines · 4.1 KiB · C
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);


static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible leave_mm(prev) has been called. If so,
		 * then prev->context.ldt could be out of sync with the
		 * LDT descriptor or the LDT register. This can only happen
		 * if prev->context.ldt is non-null, since we never free
		 * an LDT. But LDTs can't be shared across mms, so
		 * prev->context.ldt won't be equal to next->context.ldt.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here. Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct. That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process. Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */