/* x86-64 per-CPU data area (PDA) definitions */
|
|
|
#ifndef X86_64_PDA_H
|
|
|
|
#define X86_64_PDA_H
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
#include <linux/stddef.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/cache.h>
|
|
|
|
#include <asm/page.h>
|
|
|
|
|
|
|
|
/* Per processor datastructure. %gs points to it while the kernel runs */
|
|
|
|
struct x8664_pda {
|
2006-09-26 08:52:38 +00:00
|
|
|
struct task_struct *pcurrent; /* 0 Current process */
|
|
|
|
unsigned long data_offset; /* 8 Per cpu data offset from linker
|
|
|
|
address */
|
|
|
|
unsigned long kernelstack; /* 16 top of kernel stack for current */
|
|
|
|
unsigned long oldrsp; /* 24 user rsp for system call */
|
|
|
|
int irqcount; /* 32 Irq nesting counter. Starts with -1 */
|
|
|
|
int cpunumber; /* 36 Logical CPU number */
|
2006-09-26 08:52:38 +00:00
|
|
|
#ifdef CONFIG_CC_STACKPROTECTOR
|
|
|
|
unsigned long stack_canary; /* 40 stack canary value */
|
|
|
|
/* gcc-ABI: this canary MUST be at
|
|
|
|
offset 40!!! */
|
|
|
|
#endif
|
|
|
|
char *irqstackptr;
|
2005-11-05 16:25:53 +00:00
|
|
|
int nodenumber; /* number of current node */
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned int __softirq_pending;
|
|
|
|
unsigned int __nmi_count; /* number of NMI on this CPUs */
|
2006-09-26 08:52:40 +00:00
|
|
|
short mmu_state;
|
|
|
|
short isidle;
|
2006-03-25 15:31:01 +00:00
|
|
|
struct mm_struct *active_mm;
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned apic_timer_irqs;
|
2005-09-12 16:49:24 +00:00
|
|
|
} ____cacheline_aligned_in_smp;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-01-11 21:45:42 +00:00
|
|
|
extern struct x8664_pda *_cpu_pda[];
|
|
|
|
extern struct x8664_pda boot_cpu_pda[];
|
2006-01-11 21:45:39 +00:00
|
|
|
|
2006-01-11 21:45:42 +00:00
|
|
|
#define cpu_pda(i) (_cpu_pda[i])
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* There is no fast way to get the base address of the PDA, all the accesses
|
|
|
|
* have to mention %fs/%gs. So it needs to be done this Torvaldian way.
|
|
|
|
*/
|
2006-09-26 08:52:40 +00:00
|
|
|
extern void __bad_pda_field(void) __attribute__((noreturn));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-09-26 08:52:40 +00:00
|
|
|
/*
|
|
|
|
* proxy_pda doesn't actually exist, but tell gcc it is accessed for
|
|
|
|
* all PDA accesses so it gets read/write dependencies right.
|
|
|
|
*/
|
2006-09-26 08:52:38 +00:00
|
|
|
extern struct x8664_pda _proxy_pda;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#define pda_offset(field) offsetof(struct x8664_pda, field)
|
|
|
|
|
2006-09-26 08:52:40 +00:00
|
|
|
#define pda_to_op(op,field,val) do { \
|
|
|
|
typedef typeof(_proxy_pda.field) T__; \
|
|
|
|
if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
|
|
|
|
switch (sizeof(_proxy_pda.field)) { \
|
|
|
|
case 2: \
|
|
|
|
asm(op "w %1,%%gs:%c2" : \
|
|
|
|
"+m" (_proxy_pda.field) : \
|
|
|
|
"ri" ((T__)val), \
|
|
|
|
"i"(pda_offset(field))); \
|
|
|
|
break; \
|
|
|
|
case 4: \
|
|
|
|
asm(op "l %1,%%gs:%c2" : \
|
|
|
|
"+m" (_proxy_pda.field) : \
|
|
|
|
"ri" ((T__)val), \
|
|
|
|
"i" (pda_offset(field))); \
|
|
|
|
break; \
|
|
|
|
case 8: \
|
|
|
|
asm(op "q %1,%%gs:%c2": \
|
|
|
|
"+m" (_proxy_pda.field) : \
|
|
|
|
"ri" ((T__)val), \
|
|
|
|
"i"(pda_offset(field))); \
|
|
|
|
break; \
|
|
|
|
default: \
|
|
|
|
__bad_pda_field(); \
|
|
|
|
} \
|
2005-04-16 22:20:36 +00:00
|
|
|
} while (0)
|
|
|
|
|
2006-09-26 08:52:40 +00:00
|
|
|
#define pda_from_op(op,field) ({ \
|
|
|
|
typeof(_proxy_pda.field) ret__; \
|
|
|
|
switch (sizeof(_proxy_pda.field)) { \
|
|
|
|
case 2: \
|
|
|
|
asm(op "w %%gs:%c1,%0" : \
|
|
|
|
"=r" (ret__) : \
|
|
|
|
"i" (pda_offset(field)), \
|
|
|
|
"m" (_proxy_pda.field)); \
|
|
|
|
break; \
|
|
|
|
case 4: \
|
|
|
|
asm(op "l %%gs:%c1,%0": \
|
|
|
|
"=r" (ret__): \
|
|
|
|
"i" (pda_offset(field)), \
|
|
|
|
"m" (_proxy_pda.field)); \
|
|
|
|
break; \
|
|
|
|
case 8: \
|
|
|
|
asm(op "q %%gs:%c1,%0": \
|
|
|
|
"=r" (ret__) : \
|
|
|
|
"i" (pda_offset(field)), \
|
|
|
|
"m" (_proxy_pda.field)); \
|
|
|
|
break; \
|
|
|
|
default: \
|
|
|
|
__bad_pda_field(); \
|
|
|
|
} \
|
2005-04-16 22:20:36 +00:00
|
|
|
ret__; })
|
|
|
|
|
|
|
|
#define read_pda(field) pda_from_op("mov",field)
|
|
|
|
#define write_pda(field,val) pda_to_op("mov",field,val)
|
|
|
|
#define add_pda(field,val) pda_to_op("add",field,val)
|
|
|
|
#define sub_pda(field,val) pda_to_op("sub",field,val)
|
2005-09-12 16:49:24 +00:00
|
|
|
#define or_pda(field,val) pda_to_op("or",field,val)
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-11-14 15:57:46 +00:00
|
|
|
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
|
|
|
|
#define test_and_clear_bit_pda(bit,field) ({ \
|
|
|
|
int old__; \
|
|
|
|
asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
|
|
|
|
: "=r" (old__), "+m" (_proxy_pda.field) \
|
|
|
|
: "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
|
|
|
|
old__; \
|
|
|
|
})
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#define PDA_STACKOFFSET (5*8)
|
|
|
|
|
|
|
|
#endif
|