mirror of https://github.com/torvalds/linux.git
72b252aed5
An IPI is sent to flush remote TLBs when a page is unmapped that was potentially accessed by other CPUs. There are many circumstances where this happens but the obvious one is kswapd reclaiming pages belonging to a running process, as kswapd and the task are likely running on separate CPUs.

On small machines this is not a significant problem, but as machines get larger with more cores and more memory, the cost of these IPIs can be high. This patch uses a simple structure that tracks CPUs that potentially have TLB entries for pages being unmapped. When the unmapping is complete, the full TLB is flushed on the assumption that a refill cost is lower than flushing individual entries.

Architectures wishing to do this must give the following guarantee:

        If a clean page is unmapped and not immediately flushed, the
        architecture must guarantee that a write to that linear address
        from a CPU with a cached TLB entry will trap a page fault.

This is essentially what the kernel already depends on, but the window is much larger with this patch applied and is worth highlighting. The architecture should consider whether the cost of the full TLB flush is higher than sending an IPI to flush each individual entry. An additional architecture helper called flush_tlb_local is required. It's a trivial wrapper with some accounting in the x86 case.

The impact of this patch depends on the workload, as measuring any benefit requires both mapped pages co-located on the LRU and memory pressure. The case with the biggest impact is multiple processes reading mapped pages taken from the vm-scalability test suite. The test case uses NR_CPU readers of mapped files that consume 10*RAM.

Linear mapped reader on a 4-node machine with 64G RAM and 48 CPUs:

                                           4.2.0-rc1          4.2.0-rc1
                                             vanilla       flushfull-v7
Ops lru-file-mmap-read-elapsed      159.62 (  0.00%)   120.68 ( 24.40%)
Ops lru-file-mmap-read-time_range    30.59 (  0.00%)     2.80 ( 90.85%)
Ops lru-file-mmap-read-time_stddv     6.70 (  0.00%)     0.64 ( 90.38%)

           4.2.0-rc1    4.2.0-rc1
             vanilla flushfull-v7
User          581.00       611.43
System       5804.93      4111.76
Elapsed       161.03       122.12

This shows that the readers completed 24.40% faster with 29% less system CPU time. From vmstats, it is known that the vanilla kernel was interrupted roughly 900K times per second during the steady phase of the test and the patched kernel was interrupted roughly 180K times per second.

The impact is lower on a single-socket machine:

                                           4.2.0-rc1          4.2.0-rc1
                                             vanilla       flushfull-v7
Ops lru-file-mmap-read-elapsed       25.33 (  0.00%)    20.38 ( 19.54%)
Ops lru-file-mmap-read-time_range     0.91 (  0.00%)     1.44 (-58.24%)
Ops lru-file-mmap-read-time_stddv     0.28 (  0.00%)     0.47 (-65.34%)

           4.2.0-rc1    4.2.0-rc1
             vanilla flushfull-v7
User           58.09        57.64
System        111.82        76.56
Elapsed        27.29        22.55

It's still a noticeable improvement, with vmstat showing that interrupts went from roughly 500K per second to 45K per second.

The patch will have no impact on workloads with no memory pressure or with relatively few mapped pages. It will have an unpredictable impact on the workload running on the CPU being flushed, as it will depend on how many TLB entries need to be refilled and how long that takes. Worst case, the TLB will be completely cleared of active entries when the target PFNs were not resident at all.
[sasha.levin@oracle.com: trace tlb flush after disabling preemption in try_to_unmap_flush]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
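For orientation, below is a minimal sketch of the batching scheme the commit message describes: record the CPUs that may hold stale TLB entries while pages are being unmapped, then send one IPI per tracked CPU that performs a single full flush. The names unmap_batch, note_tlb_flush_pending, flush_local_tlb_ipi and flush_pending_tlbs are illustrative placeholders, not the identifiers the patch actually introduces; only flush_tlb_local() comes from the header shown below.

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>

/* Illustrative only: CPUs that may still cache TLB entries for pages
 * that were unmapped but whose flush has been deferred. */
struct unmap_batch {
	struct cpumask cpumask;
	bool flush_required;
};

/* Called while unmapping: remember the CPUs the mm has run on instead
 * of sending an IPI for every individual page. */
static void note_tlb_flush_pending(struct unmap_batch *batch,
				   struct mm_struct *mm)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
	batch->flush_required = true;
}

/* IPI handler: flush the whole local TLB on the assumption that a
 * refill is cheaper than flushing entries one by one. */
static void flush_local_tlb_ipi(void *info)
{
	flush_tlb_local();
}

/* Called once the batch of unmaps is complete. */
static void flush_pending_tlbs(struct unmap_batch *batch)
{
	int cpu;

	if (!batch->flush_required)
		return;

	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, &batch->cpumask))
		flush_tlb_local();	/* the local CPU is handled directly */
	smp_call_function_many(&batch->cpumask, flush_local_tlb_ipi,
			       NULL, true);
	put_cpu();

	cpumask_clear(&batch->cpumask);
	batch->flush_required = false;
}

The actual patch wires this state into the reclaim path and adds tracing (see the bracketed note above); the sketch only shows the deferred-flush control flow implied by the message.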
276 lines · 6.3 KiB · C
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/special_insns.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
#ifdef CONFIG_SMP
	struct mm_struct *active_mm;
	int state;
#endif

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPU's that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

#ifndef CONFIG_SMP

/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions. *Not* intended to be called
 * directly. All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}

static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}

static inline void flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb()	flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm,
				unsigned long start, unsigned long end);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif	/* SMP */

/* Not inlined due to inc_irq_stat not being defined yet */
#define flush_tlb_local() {		\
	inc_irq_stat(irq_tlb_count);	\
	local_flush_tlb();		\
}

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */
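The comment block in the header documents the flush_tlb_* entry points. As an illustration only (the function below is hypothetical and not part of this file), a page-table change is paired with the matching flush like this:

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical example of the flush_tlb_* API documented above. */
static void example_after_pte_update(struct vm_area_struct *vma,
				     unsigned long user_addr,
				     unsigned long kstart, unsigned long kend)
{
	/* ... a PTE backing user_addr in vma->vm_mm was just changed ... */
	flush_tlb_page(vma, user_addr);		/* drop the one stale user-space entry */

	/* ... kernel page tables covering [kstart, kend) were just changed ... */
	flush_tlb_kernel_range(kstart, kend);	/* flush that kernel range on all CPUs */
}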