System call entry and particularly exit code is beyond the limit of what is reasonable to implement in asm.

This conversion moves all conditional branches out of the asm code, except for the case that all GPRs should be restored at exit.

The null syscall test is about 5% faster after this patch, because the exit work is handled under local_irq_disable, and the hard mask and pending interrupt replay is handled after that, which avoids games with MSR.

mpe: Includes subsequent fixes from Nick:

This fixes 4 issues caught by the TM selftests. First was a tm-syscall bug that hit due to tabort_syscall being called after interrupts were reconciled (in a subsequent patch), which led to interrupts being enabled before tabort_syscall was called. Rather than going through un-reconciling interrupts for the return, I just go back to putting the test early in asm; the C-ification of that wasn't a big win anyway.

Second, the syscall return _TIF_USER_WORK_MASK check would go into an infinite loop if _TIF_RESTORE_TM became set. The asm code uses _TIF_USER_WORK_MASK to branch to the slow path, which includes restore_tm_state.

Third, system call return was not calling restore_tm_state; I missed this completely (although it's in the return-from-interrupt C conversion) because when the asm syscall code encountered problems it would branch to the interrupt return code.

Fourth, MSR_VEC was missing from restore_math, which was caught by the tm-unavailable selftest taking an unexpected facility unavailable interrupt when testing the VSX unavailable exception with MSR.FP=1 MSR.VEC=1. The fourth case also has a fixup in a subsequent patch.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200225173541.1549955-26-npiggin@gmail.com
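As a rough illustration of the exit ordering described in the third paragraph above, the sketch below shows the shape of a C syscall exit path that does its TIF work under local_irq_disable() and only hard-disables and replays pending interrupts at the end. This is a hedged sketch, not the code added by this patch: do_syscall_exit_work() and pending_irq_needs_replay() are hypothetical placeholder names used purely to show the ordering.

/*
 * Hedged sketch of the exit ordering only; do_syscall_exit_work() and
 * pending_irq_needs_replay() are hypothetical placeholders, not kernel APIs.
 */
static long syscall_exit_sketch(struct pt_regs *regs, long ret)
{
again:
	local_irq_disable();		/* soft-disable only, no MSR write */

	/*
	 * The _TIF_USER_WORK_MASK slow path (signals, tracing,
	 * restore_tm_state) runs while interrupts are soft-disabled.
	 */
	do_syscall_exit_work(regs, ret);

	/*
	 * Only now hard-disable; if an interrupt arrived while we were
	 * soft-disabled, let it be taken and redo the exit work, instead
	 * of playing games with MSR in the asm tail.
	 */
	hard_irq_disable();
	if (pending_irq_needs_replay()) {
		local_irq_enable();	/* pending interrupt runs here */
		goto again;
	}

	return ret;
}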
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Definitions for measuring cputime on powerpc machines.
 *
 * Copyright (C) 2006 Paul Mackerras, IBM Corp.
 *
 * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in
 * the same units as the timebase. Otherwise we measure cpu time
 * in jiffies using the generic definitions.
 */

#ifndef __POWERPC_CPUTIME_H
#define __POWERPC_CPUTIME_H

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

#include <linux/types.h>
#include <linux/time.h>
#include <asm/div64.h>
#include <asm/time.h>
#include <asm/param.h>

typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;

#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)

#ifdef __KERNEL__
/*
 * Convert cputime <-> microseconds
 */
extern u64 __cputime_usec_factor;

static inline unsigned long cputime_to_usecs(const cputime_t ct)
{
	return mulhdu((__force u64) ct, __cputime_usec_factor);
}

/*
 * PPC64 uses PACA which is task independent for storing accounting data while
 * PPC32 uses struct thread_info, therefore at task switch the accounting data
 * has to be populated in the new task
 */
#ifdef CONFIG_PPC64
#define get_accounting(tsk)	(&get_paca()->accounting)
#define raw_get_accounting(tsk)	(&local_paca->accounting)
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }

#else
#define get_accounting(tsk)	(&task_thread_info(tsk)->accounting)
#define raw_get_accounting(tsk)	get_accounting(tsk)
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
static inline void arch_vtime_task_switch(struct task_struct *prev)
{
	struct cpu_accounting_data *acct = get_accounting(current);
	struct cpu_accounting_data *acct0 = get_accounting(prev);

	acct->starttime = acct0->starttime;
}
#endif

/*
 * account_cpu_user_entry/exit runs "unreconciled", so can't trace,
 * can't use get_paca()
 */
static notrace inline void account_cpu_user_entry(void)
{
	unsigned long tb = mftb();
	struct cpu_accounting_data *acct = raw_get_accounting(current);

	acct->utime += (tb - acct->starttime_user);
	acct->starttime = tb;
}

static notrace inline void account_cpu_user_exit(void)
{
	unsigned long tb = mftb();
	struct cpu_accounting_data *acct = raw_get_accounting(current);

	acct->stime += (tb - acct->starttime);
	acct->starttime_user = tb;
}

#endif /* __KERNEL__ */
#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void account_cpu_user_entry(void)
{
}
static inline void account_cpu_user_exit(void)
{
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __POWERPC_CPUTIME_H */
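For context on the "unreconciled" comment above: these helpers are notrace and use raw_get_accounting() because, in the new C entry path, they run before the kernel has reconciled its irq state, so tracing hooks and the usual get_paca() path are not yet safe to use. Below is a hedged sketch of such a caller; system_call_sketch() is a hypothetical name for illustration, not the function added by this series.

/* Hedged sketch only; system_call_sketch() is a hypothetical caller. */
notrace long system_call_sketch(struct pt_regs *regs)
{
	/* Runs "unreconciled": no tracing, no get_paca(), per the comment above. */
	account_cpu_user_entry();

	/* ... reconcile irq state, enable interrupts, dispatch the syscall ... */

	/* Exit accounting is likewise done in the unreconciled tail. */
	account_cpu_user_exit();
	return regs->gpr[3];
}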