sched/core, sched/x86: Kill thread_info::saved_preempt_count
With the introduction of the context switch preempt_count invariant, and the demise of PREEMPT_ACTIVE, it's pointless to save/restore the per-cpu preemption count; it must always be 2.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
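For reference, the invariant being relied on: every task leaves the CPU through the same code path, so the preemption count at switch time is a constant rather than per-task state. A sketch of the reasoning, paraphrasing the comment and sanity check that the companion invariant commit put in finish_task_switch() in kernel/sched/core.c (quoted from memory, not part of this diff):

	/*
	 * The previous task left the CPU via:
	 *
	 *	schedule()
	 *	  preempt_disable();			// preempt_count == 1
	 *	  __schedule()
	 *	    raw_spin_lock_irq(&rq->lock);	// preempt_count == 2
	 *
	 * so on entry to finish_task_switch() the preempt_count is 2 for
	 * every task. Nothing task-specific needs to be carried across
	 * switch_to(), which is what makes saved_preempt_count redundant.
	 * Also see FORK_PREEMPT_COUNT.
	 */
	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
		      "corrupted preempt_count: %s/%d/0x%x\n",
		      current->comm, current->pid, preempt_count()))
		preempt_count_set(FORK_PREEMPT_COUNT);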
This commit is contained in:
parent da7142e2ed
commit d87b7a3379
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -30,12 +30,9 @@ static __always_inline void preempt_count_set(int pc)
 /*
  * must be macros to avoid header recursion hell
  */
-#define init_task_preempt_count(p) do { \
-	task_thread_info(p)->saved_preempt_count = FORK_PREEMPT_COUNT; \
-} while (0)
+#define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
 	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
 } while (0)
 
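The asymmetry in this hunk is worth noting: init_idle_preempt_count() still has work to do, since each CPU's per-cpu counter must be seeded once for its idle task, while init_task_preempt_count() becomes a no-op because the per-cpu count a freshly forked task observes at switch-in is already the invariant value. For contrast, the generic (!x86) implementations in include/asm-generic/preempt.h, which keep the count in thread_info rather than per-cpu and therefore still need per-task initialization, looked roughly like this at the time (quoted from memory):

	#define init_task_preempt_count(p) do { \
		task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
	} while (0)

	#define init_idle_preempt_count(p, cpu) do { \
		task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
	} while (0)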
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -57,7 +57,6 @@ struct thread_info {
 	__u32			flags;		/* low level flags */
 	__u32			status;		/* thread synchronous flags */
 	__u32			cpu;		/* current CPU */
-	int			saved_preempt_count;
 	mm_segment_t		addr_limit;
 	void __user		*sysenter_return;
 	unsigned int		sig_on_uaccess_error:1;
@@ -69,7 +68,6 @@ struct thread_info {
 	.task		= &tsk,			\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.saved_preempt_count = INIT_PREEMPT_COUNT,	\
 	.addr_limit	= KERNEL_DS,		\
 }
 
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -279,14 +279,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
 		set_iopl_mask(next->iopl);
 
-	/*
-	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
-	 * preempt_count of all tasks was equal here and this would not be
-	 * needed.
-	 */
-	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
-	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
-
 	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -401,14 +401,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	this_cpu_write(current_task, next_p);
 
-	/*
-	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
-	 * preempt_count of all tasks was equal here and this would not be
-	 * needed.
-	 */
-	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
-	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
-
 	/* Reload esp0 and ss1. This changes current_thread_info(). */
 	load_sp0(tss, next);
 