Mirror of https://github.com/torvalds/linux.git, synced 2024-12-26 04:42:12 +00:00
ba1f14fbe7
While hunting a preemption issue with Alexander, Ben noticed that the currently generic PREEMPT_NEED_RESCHED stuff is horribly broken for load-store architectures.

We currently rely on the IPI to fold TIF_NEED_RESCHED into PREEMPT_NEED_RESCHED, but when this IPI lands while we already have a load for the preempt-count but before the store, the store will erase the PREEMPT_NEED_RESCHED change.

The current preempt-count only works on load-store archs because interrupts are assumed to be completely balanced wrt their preempt_count fiddling; the previous preempt_count load will match the preempt_count state after the interrupt and therefore nothing gets lost.

This patch removes the PREEMPT_NEED_RESCHED usage from generic code and pushes it into x86 arch code; the generic code goes back to relying on TIF_NEED_RESCHED.

Boot tested on x86_64 and compile tested on ppc64.

Reported-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reported-and-Tested-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20131128132641.GP10022@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
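The lost update is easy to visualise: on a load-store architecture the preempt-count decrement is a separate load, modify and store, and if the fold-IPI fires between the load and the store, the store overwrites whatever the IPI set. Below is a minimal user-space sketch of that interleaving; the names and flag value (preempt_count_demo, ipi_fold_need_resched(), 0x80000000u) are demo-only stand-ins, not kernel code.

#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* hypothetical flag bit for this demo */

static unsigned int preempt_count_demo = 1;	/* preemption disabled once */

/* Stand-in for what the resched IPI would do: fold the flag into the count. */
static void ipi_fold_need_resched(void)
{
	preempt_count_demo |= PREEMPT_NEED_RESCHED;
}

int main(void)
{
	/*
	 * On a load-store arch, the preempt_enable() decrement is three
	 * steps: load, modify, store.  If the IPI lands between the load
	 * and the store, its update is silently overwritten (lost update).
	 */
	unsigned int tmp = preempt_count_demo;	/* 1. load   */
	tmp -= 1;				/* 2. modify */

	ipi_fold_need_resched();		/*    IPI fires here */

	preempt_count_demo = tmp;		/* 3. store erases the folded flag */

	printf("PREEMPT_NEED_RESCHED was %s\n",
	       (preempt_count_demo & PREEMPT_NEED_RESCHED) ? "kept" : "lost");
	return 0;
}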
93 lines
2.0 KiB
C
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)

static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count;
}

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define task_preempt_count(p) \
	(task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)

#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

/*
 * The generic implementation keeps the need_resched state solely in
 * TIF_NEED_RESCHED, so the PREEMPT_NEED_RESCHED accessors below are
 * no-ops; x86 overrides them with its per-cpu preempt_count variant.
 */
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Load-store architectures cannot do per-cpu atomic operations, so
	 * we cannot use PREEMPT_NEED_RESCHED here; it might get lost.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
	return unlikely(!preempt_count() && tif_need_resched());
}

#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()

#ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void preempt_schedule_context(void);
#define __preempt_schedule_context() preempt_schedule_context()
#endif
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */
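For context on how these primitives are consumed, the generic preempt_enable() path in <linux/preempt.h> is the main caller of __preempt_count_dec_and_test(). A simplified sketch of that wiring under CONFIG_PREEMPT (not the verbatim kernel macros) looks like this:

/* Simplified sketch of the <linux/preempt.h> consumer, for illustration only. */
#define preempt_count_dec_and_test()	__preempt_count_dec_and_test()

#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)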