commit ffaa8bd6c9
I believe that, at least for seccomp, it's worth turning off the TSC: not just because of HT, but because of the L2 cache too. So it's up to you: either you turn it off completely (which isn't very nice IMHO) or you apply the patch below, which is what I recommend. This has been tested successfully on x86-64 against the current cogito repository (i686 compiles, so I didn't bother testing it ;). People selling CPU time through cpushare may appreciate this bit for peace of mind. With this applied there's no way to get any timing info anymore (gettimeofday is forbidden, of course).

The seccomp environment is completely deterministic, so it can't be allowed to obtain timing info: it has to stay deterministic so that in the future I can enable a computing mode that runs each task in parallel, with server-side transparent checkpointing and verification that the output is the same from all of the 2/3 seller computers for each task, without the buyer even noticing. (For now the verification is left to the buyer's client side and there's no checkpointing, since that would require more kernel changes to track the dirty bits, but it will be easy to extend once the basic mode is finished.)

Eliminating a cold-cache read of the cr4 global variable saves one cacheline during the TLB flush, while making the code per-CPU-safe at the same time. Thanks to Mikael Pettersson for noticing that the TLB flush wasn't per-CPU-safe.

The global TLB flush can run from irq context (an IPI calling do_flush_tlb_all), but it is transparent to the switch_to code, since the IPI makes no change to the cr4 contents from the point of view of the interrupted code, and since it is now all per-CPU state it cannot race. So there's no need to disable irqs in the switch_to slow path.

Signed-off-by: Andrea Arcangeli <andrea@cpushare.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
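For context, the pre-patch i386 global flush looked roughly like the sketch below (reconstructed from memory, so treat the details as illustrative rather than an exact quote of the old source). It derived the PGE toggle from the shared mmu_cr4_features variable: a cold cacheline on every global flush, and, worse, restoring cr4 from a boot-time global would wipe out any bits this CPU had changed locally, such as the TSD bit the seccomp work toggles per task.

	#define __flush_tlb_global()						\
		do {								\
			unsigned int tmpreg;					\
										\
			__asm__ __volatile__(					\
				"movl %1, %%cr4;  # turn off PGE     \n"	\
				"movl %%cr3, %0;                     \n"	\
				"movl %0, %%cr3;  # flush TLB        \n"	\
				"movl %2, %%cr4;  # turn PGE back on \n"	\
				: "=&r" (tmpreg)				\
				: "r" (mmu_cr4_features & ~X86_CR4_PGE),	\
				  "r" (mmu_cr4_features)			\
				: "memory");					\
		} while (0)

The patched version in the file below reads %cr4 itself, so the mask is always derived from this CPU's live value and no shared memory is touched.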
150 lines
3.7 KiB
C
#ifndef _I386_TLBFLUSH_H
#define _I386_TLBFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>

#define __flush_tlb()						\
	do {							\
		unsigned int tmpreg;				\
								\
		__asm__ __volatile__(				\
			"movl %%cr3, %0;              \n"	\
			"movl %0, %%cr3;  # flush TLB \n"	\
			: "=r" (tmpreg)				\
			:: "memory");				\
	} while (0)

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
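/*
 * Reloading cr3 (as __flush_tlb() above does) leaves TLB entries marked
 * _PAGE_GLOBAL in place; clearing CR4.PGE below invalidates those too,
 * and writing the saved value back re-enables global pages.  Note that
 * cr4 is read from the register itself, not from a shared kernel
 * variable, which is what keeps the sequence per-CPU-safe.
 */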
#define __flush_tlb_global()						\
	do {								\
		unsigned int tmpreg, cr4, cr4_orig;			\
									\
		__asm__ __volatile__(					\
			"movl %%cr4, %2;  # turn off PGE     \n"	\
			"movl %2, %1;                        \n"	\
			"andl %3, %1;                        \n"	\
			"movl %1, %%cr4;                     \n"	\
			"movl %%cr3, %0;                     \n"	\
			"movl %0, %%cr3;  # flush TLB        \n"	\
			"movl %2, %%cr4;  # turn PGE back on \n"	\
			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
			: "i" (~X86_CR4_PGE)				\
			: "memory");					\
	} while (0)
extern unsigned long pgkern_mask;

# define __flush_tlb_all()						\
	do {								\
		if (cpu_has_pge)					\
			__flush_tlb_global();				\
		else							\
			__flush_tlb();					\
	} while (0)
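/*
 * On SMP, flush_tlb_all() fans this out to every CPU with an IPI; the
 * handler lives in arch/i386/kernel/smp.c and looks roughly like the
 * sketch below (quoted from memory, so treat the details as
 * illustrative):
 *
 *	static void do_flush_tlb_all(void *info)
 *	{
 *		unsigned long cpu = smp_processor_id();
 *
 *		__flush_tlb_all();
 *		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
 *			leave_mm(cpu);
 *	}
 *
 *	void flush_tlb_all(void)
 *	{
 *		on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
 *	}
 *
 * This is the irq-context caller mentioned in the commit message: the
 * IPI leaves cr4 exactly as it found it, so the interrupted code never
 * observes the PGE toggle.
 */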
#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)

#define __flush_tlb_single(addr) \
	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))

#ifdef CONFIG_X86_INVLPG
# define __flush_tlb_one(addr) __flush_tlb_single(addr)
#else
# define __flush_tlb_one(addr)						\
	do {								\
		if (cpu_has_invlpg)					\
			__flush_tlb_single(addr);			\
		else							\
			__flush_tlb();					\
	} while (0)
#endif

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
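/*
 * Typical call pattern from generic mm code (an illustrative sketch,
 * not a quote of any particular caller): after the PTEs covering a
 * range are modified, the stale translations must be dropped before
 * the new mappings are relied upon.
 *
 *	... update PTEs for [start, end) under the page table lock ...
 *	flush_tlb_range(vma, start, end);
 *
 * On UP the inlines below flush directly; on SMP the extern versions
 * also IPI any other CPUs that may be caching entries for the mm.
 */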
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

#else

#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state
{
	struct mm_struct *active_mm;
	int state;
	char __cacheline_padding[L1_CACHE_BYTES-8];
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
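/*
 * TLBSTATE_OK/TLBSTATE_LAZY drive the lazy-TLB optimisation in the
 * flush IPI handler (sketch after arch/i386/kernel/smp.c, from memory,
 * so treat the details as illustrative):
 *
 *	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
 *		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
 *			if (flush_va == FLUSH_ALL)
 *				local_flush_tlb();
 *			else
 *				__flush_tlb_one(flush_va);
 *		} else
 *			leave_mm(cpu);
 *	}
 *
 * A CPU in TLBSTATE_LAZY (e.g. running a kernel thread on a borrowed
 * mm) drops the mm instead of flushing, so later flushes of that mm can
 * skip it entirely.  The padding sizes the struct to a full cache line
 * so that each CPU's entry avoids false sharing with its neighbours.
 */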
#endif

#define flush_tlb_kernel_range(start, end) flush_tlb_all()

static inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	/* i386 does not keep any page table caches in TLB */
}

#endif /* _I386_TLBFLUSH_H */