x86, xsave: context switch support using xsave/xrstor
Uses xsave/xrstor (instead of the traditional fxsave/fxrstor) in the context switch when available. Introduces a TS_XSAVE flag, which determines whether to use the xsave/xrstor instructions during a context switch instead of the legacy fxsave/fxrstor instructions. The thread-synchronous status word is already in the L1 cache along this code path, which minimizes the performance penalty compared to (cpu_has_xsave) checks.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent dc1e35c6e9
commit b359e8a434
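For orientation, here is a minimal user-space sketch of the dispatch pattern the patch introduces: a per-thread status flag is set once at FPU-init time from a one-time CPU feature check, and the hot save path tests only that flag. The flag name TS_XSAVE mirrors the patch; cpu_has_xsave_stub(), do_xsave(), and do_fxsave() are stand-ins invented for illustration, not the kernel implementation.

#include <stdio.h>

#define TS_XSAVE 0x0010			/* mirrors the new thread-status flag */

struct thread_info { unsigned int status; };

/* Stand-in for the kernel's cpu_has_xsave feature test (assumed here). */
static int cpu_has_xsave_stub(void) { return 1; }

/* Stand-ins for the xsave/fxsave paths; the real patch issues the insns. */
static void do_xsave(void)  { puts("state saved with xsave"); }
static void do_fxsave(void) { puts("state saved with fxsave"); }

/* One-time init, as in cpu_init()/fpu_init(): record the choice per thread. */
static void fpu_init_status(struct thread_info *ti)
{
	ti->status = cpu_has_xsave_stub() ? TS_XSAVE : 0;
}

/* Hot context-switch path: test the cached flag, not the CPU feature bits. */
static void save_init_fpu(struct thread_info *ti)
{
	if (ti->status & TS_XSAVE)
		do_xsave();
	else
		do_fxsave();
}

int main(void)
{
	struct thread_info ti;

	fpu_init_status(&ti);
	save_init_fpu(&ti);
	return 0;
}

The point, per the commit message, is that the flag lives in the thread-synchronous status word, which is already hot in L1 on the context-switch path, so this branch is cheaper than re-checking the CPU feature every switch.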
@@ -709,7 +709,10 @@ void __cpuinit cpu_init(void)
 	/*
	 * Force FPU initialization:
	 */
-	current_thread_info()->status = 0;
+	if (cpu_has_xsave)
+		current_thread_info()->status = TS_XSAVE;
+	else
+		current_thread_info()->status = 0;
 	clear_used_math();
 	mxcsr_feature_mask_init();

@@ -97,7 +97,10 @@ void __cpuinit fpu_init(void)
 	mxcsr_feature_mask_init();
 	/* clean state in init */
-	current_thread_info()->status = 0;
+	if (cpu_has_xsave)
+		current_thread_info()->status = TS_XSAVE;
+	else
+		current_thread_info()->status = 0;
 	clear_used_math();
 }

 #endif	/* CONFIG_X86_64 */
@@ -1134,7 +1134,7 @@ asmlinkage void math_state_restore(void)
 	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
-	if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
+	if (unlikely(restore_fpu_checking(me))) {
 		stts();
 		force_sig(SIGSEGV, me);
 		return;
@@ -36,6 +36,8 @@ extern int save_i387_ia32(struct _fpstate_ia32 __user *buf);
 extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf);
 #endif

+#define X87_FSW_ES (1 << 7)	/* Exception Summary */
+
 #ifdef CONFIG_X86_64

 /* Ignore delayed exceptions from user space */
@@ -46,7 +48,7 @@ static inline void tolerant_fwait(void)
 		     _ASM_EXTABLE(1b, 2b));
 }

-static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
 	int err;

@@ -66,15 +68,31 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
 	return err;
 }

-#define X87_FSW_ES (1 << 7)	/* Exception Summary */
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+	if (task_thread_info(tsk)->status & TS_XSAVE)
+		return xrstor_checking(&tsk->thread.xstate->xsave);
+	else
+		return fxrstor_checking(&tsk->thread.xstate->fxsave);
+}

 /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
    is pending. Clear the x87 state here by setting it to fixed
    values. The kernel data segment can be sometimes 0 and sometimes
    new user value. Both should be ok.
    Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
+static inline void clear_fpu_state(struct task_struct *tsk)
 {
+	struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
+	struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+	/*
+	 * xsave header may indicate the init state of the FP.
+	 */
+	if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+		return;
+
 	if (unlikely(fx->swd & X87_FSW_ES))
 		asm volatile("fnclex");
 	alternative_input(ASM_NOP8 ASM_NOP2,
@@ -107,7 +125,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
 	return err;
 }

-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fxsave(struct task_struct *tsk)
 {
 	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
@@ -132,7 +150,16 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 			     : "=m" (tsk->thread.xstate->fxsave)
 			     : "cdaSDb" (&tsk->thread.xstate->fxsave));
 #endif
-	clear_fpu_state(&tsk->thread.xstate->fxsave);
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+	if (task_thread_info(tsk)->status & TS_XSAVE)
+		xsave(tsk);
+	else
+		fxsave(tsk);
+
+	clear_fpu_state(tsk);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }

@@ -147,6 +174,10 @@ static inline void tolerant_fwait(void)

 static inline void restore_fpu(struct task_struct *tsk)
 {
+	if (task_thread_info(tsk)->status & TS_XSAVE) {
+		xrstor_checking(&tsk->thread.xstate->xsave);
+		return;
+	}
 	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
@@ -172,6 +203,27 @@ static inline void restore_fpu(struct task_struct *tsk)
  */
 static inline void __save_init_fpu(struct task_struct *tsk)
 {
+	if (task_thread_info(tsk)->status & TS_XSAVE) {
+		struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
+		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+		xsave(tsk);
+
+		/*
+		 * xsave header may indicate the init state of the FP.
+		 */
+		if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+			goto end;
+
+		if (unlikely(fx->swd & X87_FSW_ES))
+			asm volatile("fnclex");
+
+		/*
+		 * we can do a simple return here or be paranoid :)
+		 */
+		goto clear_state;
+	}
+
 	/* Use more nops than strictly needed in case the compiler
	   varies code */
 	alternative_input(
@@ -181,6 +233,7 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 			X86_FEATURE_FXSR,
 			[fx] "m" (tsk->thread.xstate->fxsave),
 			[fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+clear_state:
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending. Clear the x87 state here by setting it to fixed
	   values. safe_address is a random variable that should be in L1 */
@@ -190,6 +243,7 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 		"fildl %[addr]",	/* set F?P to defined value */
 		X86_FEATURE_FXSAVE_LEAK,
 		[addr] "m" (safe_address));
+end:
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }

@@ -362,6 +362,7 @@ union thread_xstate {
 	struct i387_fsave_struct	fsave;
 	struct i387_fxsave_struct	fxsave;
 	struct i387_soft_struct		soft;
+	struct xsave_struct		xsave;
 };

 #ifdef CONFIG_X86_64
@@ -239,6 +239,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define TS_POLLING		0x0004	/* true if in idle loop
					   and not sleeping */
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
+#define TS_XSAVE		0x0010	/* Use xsave/xrstor */

 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)

@@ -17,10 +17,43 @@
 #define XCNTXT_LMASK	(XSTATE_FP | XSTATE_SSE)
 #define XCNTXT_HMASK	0x0

+#ifdef CONFIG_X86_64
+#define REX_PREFIX	"0x48, "
+#else
+#define REX_PREFIX
+#endif
+
 extern unsigned int xstate_size, pcntxt_hmask, pcntxt_lmask;
 extern struct xsave_struct *init_xstate_buf;

 extern void xsave_cntxt_init(void);
 extern void xsave_init(void);
 extern int init_fpu(struct task_struct *child);
+
+static inline int xrstor_checking(struct xsave_struct *fx)
+{
+	int err;
+
+	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3: movl $-1,%[err]\n"
+		     "   jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err)
+		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
+		     : "memory");
+
+	return err;
+}
+
+static inline void xsave(struct task_struct *tsk)
+{
+	/* This, however, we can work around by forcing the compiler to select
+	   an addressing mode that doesn't require extended registers. */
+	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
+			     : : "D" (&(tsk->thread.xstate->xsave)),
+				 "a" (-1), "d"(-1) : "memory");
+}
 #endif