#ifndef _ASM_X86_MMU_H
#define _ASM_X86_MMU_H

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/atomic.h>

/*
 * The x86 doesn't have an MMU context, but
 * we put the segment information here.
 */
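/* One of these is embedded in every struct mm_struct as mm->context. */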
typedef struct {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
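	/*
	 * Optional per-mm LDT, installed through the modify_ldt() syscall;
	 * a live ldt_struct is replaced rather than modified in place.
	 */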
	struct ldt_struct *ldt;
#endif

#ifdef CONFIG_X86_64
	/* True if mm supports a task running in 32 bit compatibility mode. */
	unsigned short ia32_compat;
#endif

	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
} mm_context_t;
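
/*
 * leave_mm() detaches a CPU running in lazy TLB mode from its current
 * user mm and switches it to the kernel page tables (called e.g. from
 * the TLB flush IPI path).  On !SMP there is no lazy TLB state to drop.
 */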
#ifdef CONFIG_SMP
void leave_mm(int cpu);
#else
static inline void leave_mm(int cpu)
{
}
#endif

#endif /* _ASM_X86_MMU_H */