Mirror of https://github.com/torvalds/linux.git, synced 2024-12-27 05:11:48 +00:00
3962446922
The asm-generic/preempt.h implementation doesn't make use of the PREEMPT_NEED_RESCHED flag, since this can interact badly with load/store architectures which rely on the preempt_count word being unchanged across an interrupt.

However, since we're a 64-bit architecture and the preempt count is only 32 bits wide, we can simply pack it next to the resched flag and load the whole thing in one go, so that a dec-and-test operation doesn't need to load twice.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
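To make the packing trick concrete, here is a minimal user-space sketch. It is not kernel code: the demo_thread_info/demo_dec_and_test names and the little-endian field order are assumptions for illustration, and it omits the interrupt-race reload that the real __preempt_count_dec_and_test in the file below performs.

/*
 * Sketch of the 64-bit packing: the low half of one u64 is the preempt
 * count, the high half is an *inverted* need-resched flag (1 = no resched
 * pending, 0 = resched pending).  The whole word reads as zero exactly
 * when the count is zero and a reschedule is pending, so a dec-and-test
 * needs only a single load.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_thread_info {
	union {
		uint64_t preempt_count;
		struct {
			uint32_t count;		/* assumes little-endian layout */
			uint32_t need_resched;	/* 1 = no resched, 0 = resched */
		} preempt;
	};
};

#define DEMO_NEED_RESCHED	(1ULL << 32)
#define DEMO_ENABLED		(DEMO_NEED_RESCHED)

static bool demo_dec_and_test(struct demo_thread_info *ti)
{
	uint64_t pc = ti->preempt_count;	/* single 64-bit load */

	/* Write back only the decremented count; the flag half is untouched. */
	ti->preempt.count = (uint32_t)--pc;

	/* Zero means: count hit zero *and* a reschedule is pending. */
	return !pc;
}

int main(void)
{
	struct demo_thread_info ti = { .preempt_count = DEMO_ENABLED };

	ti.preempt.count = 1;		/* one level of preempt_disable() */
	ti.preempt.need_resched = 0;	/* an "interrupt" requests a resched */

	printf("resched on enable: %s\n",
	       demo_dec_and_test(&ti) ? "yes" : "no");	/* prints "yes" */
	return 0;
}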
90 lines
2.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_NEED_RESCHED	BIT(32)
#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)

static inline int preempt_count(void)
{
	return READ_ONCE(current_thread_info()->preempt.count);
}

static inline void preempt_count_set(u64 pc)
{
	/* Preserve existing value of PREEMPT_NEED_RESCHED */
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

static inline void set_preempt_need_resched(void)
{
	current_thread_info()->preempt.need_resched = 0;
}

static inline void clear_preempt_need_resched(void)
{
	current_thread_info()->preempt.need_resched = 1;
}

static inline bool test_preempt_need_resched(void)
{
	return !current_thread_info()->preempt.need_resched;
}

static inline void __preempt_count_add(int val)
{
	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
	pc += val;
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline void __preempt_count_sub(int val)
{
	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
	pc -= val;
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline bool __preempt_count_dec_and_test(void)
{
	struct thread_info *ti = current_thread_info();
	u64 pc = READ_ONCE(ti->preempt_count);

	/* Update only the count field, leaving need_resched unchanged */
	WRITE_ONCE(ti->preempt.count, --pc);

	/*
	 * If we wrote back all zeroes, then we're preemptible and in
	 * need of a reschedule. Otherwise, we need to reload the
	 * preempt_count in case the need_resched flag was cleared by an
	 * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
	 * pair.
	 */
	return !pc || !READ_ONCE(ti->preempt_count);
}

static inline bool should_resched(int preempt_offset)
{
	u64 pc = READ_ONCE(current_thread_info()->preempt_count);
	return pc == preempt_offset;
}

#ifdef CONFIG_PREEMPT
void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */