mirror of https://github.com/torvalds/linux.git
x86: split out core __math_state_restore
Split the core fpu state restoration out into __math_state_restore(), which assumes that cr0.TS is clear and that the fpu context has been initialized. This will be used during context switch. There are two reasons this is desirable:

- There's a small clarification. When __switch_to() calls math_state_restore(), it relies on the fact that tsk_used_math() returns true, and so will never do a blocking init_fpu(). __math_state_restore() does not have (or need) that logic, so the question never arises.

- It allows the clts() to be moved earlier in __switch_to() so it can be performed while cpu context updates are batched (this will be done in a later patch).

[ Impact: refactor code to make reuse cleaner; no functional change ]

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
commit e6e9cac8c3 (parent 3fe0344faf)
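As context for the second point above, here is a rough sketch of how a later patch in this series might use the new helper from __switch_to(). The preload_fpu flag, the fpu_counter > 5 heuristic and the exact placement are assumptions made for illustration; they are not part of this commit.

	/* Illustrative fragment of __switch_to() only -- not this patch's code. */

	/* tsk_used_math(next_p) is checked up front, so the blocking
	 * init_fpu() path in math_state_restore() can never be needed here. */
	bool preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* clts() can be issued early, batched with the other per-cpu
	 * context updates, because nothing in between touches FPU state. */
	if (preload_fpu)
		clts();

	/* ... segment, TLS, debug register and I/O bitmap updates ... */

	/* cr0.TS is already clear and the fpu context is initialized,
	 * so the lightweight __math_state_restore() is sufficient. */
	if (preload_fpu)
		__math_state_restore();

The point of the split is visible here: the caller already knows the fpu context is valid, so only the actual restore and bookkeeping are needed, and clts() is free to move into the batched cpu state updates.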
@@ -26,6 +26,7 @@ extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
 extern asmlinkage void math_state_restore(void);
+extern void __math_state_restore(void);
 extern void init_thread_xstate(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
@@ -813,6 +813,28 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 {
 }
 
+/*
+ * __math_state_restore assumes that cr0.TS is already clear and the
+ * fpu state is all ready for use.  Used during context switch.
+ */
+void __math_state_restore(void)
+{
+	struct thread_info *thread = current_thread_info();
+	struct task_struct *tsk = thread->task;
+
+	/*
+	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+	 */
+	if (unlikely(restore_fpu_checking(tsk))) {
+		stts();
+		force_sig(SIGSEGV, tsk);
+		return;
+	}
+
+	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
+	tsk->fpu_counter++;
+}
+
 /*
  * 'math_state_restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
@@ -844,17 +866,8 @@ asmlinkage void math_state_restore(void)
 	}
 
 	clts();				/* Allow maths ops (or we recurse) */
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
-	if (unlikely(restore_fpu_checking(tsk))) {
-		stts();
-		force_sig(SIGSEGV, tsk);
-		return;
-	}
-
-	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
-	tsk->fpu_counter++;
+
+	__math_state_restore();
 }
 EXPORT_SYMBOL_GPL(math_state_restore);