Mirror of https://github.com/torvalds/linux.git
Commit 7baa7aecdd: Fold finish_arch_switch() into switch_to().

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: linux@arm.linux.org.uk
[ Fixed up the SOB chain. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
32 lines, 1016 B, C
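For context, a hedged sketch of what the fold removes: before this commit, the inner-shareable dsb was issued from a finish_arch_switch() hook that the scheduler core invoked after every context switch. The exact prior definition below is an assumption reconstructed from the commit subject and the resulting header, not quoted from the patch; the commit deletes that hook and issues the barrier from switch_to() itself via __complete_pending_tlbi(), as the file below shows.

/* Assumed pre-commit shape of the hook this commit removes: */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define finish_arch_switch(prev)	dsb(ish)
#endif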
#ifndef __ASM_ARM_SWITCH_TO_H
#define __ASM_ARM_SWITCH_TO_H

#include <linux/thread_info.h>

/*
 * For v7 SMP cores running a preemptible kernel we may be pre-empted
 * during a TLB maintenance operation, so execute an inner-shareable dsb
 * to ensure that the maintenance completes in case we migrate to another
 * CPU.
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi()	dsb(ish)
#else
#define __complete_pending_tlbi()
#endif

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	__complete_pending_tlbi();					\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#endif /* __ASM_ARM_SWITCH_TO_H */
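To see how the third argument (`last') is used, here is a hedged sketch of the call site in the scheduler core. It is a simplified, assumed shape of context_switch() in kernel/sched/core.c (the wrapper name example_context_switch is hypothetical, and details vary by kernel version); it is not part of this header.

/*
 * Sketch of the caller (simplified, assumed shape of kernel/sched/core.c).
 * prev is the task leaving the CPU, next is the task being resumed.
 * Because the stack is switched inside __switch_to(), the value written
 * to `last' is only observed when the switched-out task later resumes
 * here; it then names the task that was running just before it.
 */
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	switch_to(prev, next, prev);	/* prev now refers to the task we came from */
	barrier();			/* tell GCC not to cache `current' across the switch */
	finish_task_switch(prev);	/* scheduler cleanup after the switch */
}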