entry: Wire up syscall_work in common entry code

Prepare the common entry code to use the SYSCALL_WORK flags. They will
be defined in subsequent patches for each type of syscall
work. SYSCALL_WORK_ENTER/EXIT are defined as empty placeholders for the
transition, as they will replace the TIF_ equivalent defines.
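
Illustration, not part of this patch: the placeholder masks are
expected to accumulate real bits as each work type is converted later
in the series. A rough sketch, with the seccomp flag standing in as an
example of such a later conversion:

	enum syscall_work_bit {
		SYSCALL_WORK_BIT_SECCOMP,
	};

	#define SYSCALL_WORK_SECCOMP	BIT(SYSCALL_WORK_BIT_SECCOMP)

	/* The (0) placeholders then grow the converted flags: */
	#define SYSCALL_WORK_ENTER	(SYSCALL_WORK_SECCOMP)
	#define SYSCALL_WORK_EXIT	(0)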

Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20201116174206.2639648-4-krisman@collabora.com
commit b86678cf0f (parent 3136b93c3f)
Author:     Gabriel Krisman Bertazi
AuthorDate: 2020-11-16 12:41:59 -05:00
Commit:     Thomas Gleixner
2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h

@@ -64,6 +64,9 @@
 	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |			\
 	 _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK)
 
+#define SYSCALL_WORK_ENTER	(0)
+#define SYSCALL_WORK_EXIT	(0)
+
 /*
  * TIF flags handled in exit_to_user_mode_loop()
  */
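
Editorial note, not in the commit: with both masks defined as (0), the
new "work & ..." tests are compile-time zero, so this patch is a
functional no-op until real SYSCALL_WORK bits arrive. A minimal sketch
of why (enter_work_pending() is a hypothetical helper, not kernel API):

	/*
	 * "work & SYSCALL_WORK_ENTER" folds to 0 while the mask is (0),
	 * so the combined test degenerates to the old TIF-only check.
	 */
	static inline bool enter_work_pending(unsigned long work,
					      unsigned long ti_work)
	{
		return (work & SYSCALL_WORK_ENTER) ||
		       (ti_work & SYSCALL_ENTER_WORK);
	}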

diff --git a/kernel/entry/common.c b/kernel/entry/common.c
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c

@@ -42,7 +42,7 @@ static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
 }
 
 static long syscall_trace_enter(struct pt_regs *regs, long syscall,
-				unsigned long ti_work)
+				unsigned long ti_work, unsigned long work)
 {
 	long ret = 0;
 
@@ -74,11 +74,12 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
 static __always_inline long
 __syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
 {
+	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
 	unsigned long ti_work;
 
 	ti_work = READ_ONCE(current_thread_info()->flags);
-	if (ti_work & SYSCALL_ENTER_WORK)
-		syscall = syscall_trace_enter(regs, syscall, ti_work);
+	if (work & SYSCALL_WORK_ENTER || ti_work & SYSCALL_ENTER_WORK)
+		syscall = syscall_trace_enter(regs, syscall, ti_work, work);
 
 	return syscall;
 }
 
@@ -225,7 +226,8 @@ static inline bool report_single_step(unsigned long ti_work)
 }
 #endif
 
-static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
+static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work,
+			      unsigned long work)
 {
 	bool step;
 
@@ -245,6 +247,7 @@ static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
  */
 static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
 {
+	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
 	u32 cached_flags = READ_ONCE(current_thread_info()->flags);
 	unsigned long nr = syscall_get_nr(current, regs);
 
@@ -262,8 +265,8 @@ static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
 	 * enabled, we want to run them exactly once per syscall exit with
 	 * interrupts enabled.
 	 */
-	if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
-		syscall_exit_work(regs, cached_flags);
+	if (unlikely(work & SYSCALL_WORK_EXIT || cached_flags & SYSCALL_EXIT_WORK))
+		syscall_exit_work(regs, cached_flags, work);
 }
 
 __visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
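
Editorial sketch for context: once real bits exist, a conversion patch
sets the new flag instead of the TIF one, using the
set_task_syscall_work() helper assumed from the preceding patch in this
series; enable_seccomp_work() below is hypothetical:

	#include <linux/thread_info.h>

	static inline void enable_seccomp_work(struct task_struct *task)
	{
		/* Replaces set_tsk_thread_flag(task, TIF_SECCOMP). */
		set_task_syscall_work(task, SECCOMP);
	}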