linux/arch/arm64/include/asm/arch_timer.h
Julien Thierry ec5c8e429d arm_arch_timer: Expose event stream status
The arch timer configuration for a CPU might get reset after suspending
said CPU.

In order to reliably use the event stream in the kernel (e.g. for delays), we
keep track, per CPU, of whether the event stream can safely be considered
properly configured. After writing to cntkctl, we issue an ISB to ensure that
subsequent delay loops can rely on the event stream being enabled.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2017-10-13 18:55:05 +01:00
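A minimal sketch of the tracking described above, assuming a cpumask that is set once the event stream has been configured on a CPU and cleared when that CPU's timer state may have been lost (e.g. across suspend). The names evtstrm_available, mark_evtstrm_configured() and mark_evtstrm_lost() are illustrative here, not the exact implementation.

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/types.h>

/* CPUs on which the event stream is known to be properly configured. */
static cpumask_t evtstrm_available = CPU_MASK_NONE;

/* Call after programming CNTKCTL for the current CPU. */
static void mark_evtstrm_configured(void)
{
        cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

/* Call from a CPU PM notifier when the configuration may have been reset. */
static void mark_evtstrm_lost(void)
{
        cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
}

/* Delay loops check this before relying on event-stream wakeups. */
bool arch_timer_evtstrm_available(void)
{
        return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}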

171 lines
4.1 KiB
C

/*
 * arch/arm64/include/asm/arch_timer.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ARCH_TIMER_H
#define __ASM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/sysreg.h>

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>
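/*
 * Some timer implementations need their counter reads routed through an
 * out-of-line erratum workaround. The static key keeps the common case
 * (no workaround) a plain sysreg read.
 */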
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
extern struct static_key_false arch_timer_read_ool_enabled;
#define needs_unstable_timer_counter_workaround() \
        static_branch_unlikely(&arch_timer_read_ool_enabled)
#else
#define needs_unstable_timer_counter_workaround()  false
#endif
enum arch_timer_erratum_match_type {
        ate_match_dt,
        ate_match_local_cap_id,
        ate_match_acpi_oem_info,
};

struct clock_event_device;

struct arch_timer_erratum_workaround {
        enum arch_timer_erratum_match_type match_type;
        const void *id;
        const char *desc;
        u32 (*read_cntp_tval_el0)(void);
        u32 (*read_cntv_tval_el0)(void);
        u64 (*read_cntvct_el0)(void);
        int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
        int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
};
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
                timer_unstable_counter_workaround);
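/*
 * Read a timer/counter register, going through this CPU's registered
 * erratum workaround accessor when one is present and falling back to a
 * direct sysreg read otherwise. Preemption is disabled around the
 * per-CPU lookup so the workaround used matches the CPU doing the read.
 */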
#define arch_timer_reg_read_stable(reg) \
({ \
        u64 _val; \
        if (needs_unstable_timer_counter_workaround()) { \
                const struct arch_timer_erratum_workaround *wa; \
                preempt_disable_notrace(); \
                wa = __this_cpu_read(timer_unstable_counter_workaround); \
                if (wa && wa->read_##reg) \
                        _val = wa->read_##reg(); \
                else \
                        _val = read_sysreg(reg); \
                preempt_enable_notrace(); \
        } else { \
                _val = read_sysreg(reg); \
        } \
        _val; \
})
/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        write_sysreg(val, cntp_ctl_el0);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        write_sysreg(val, cntp_tval_el0);
                        break;
                }
        } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        write_sysreg(val, cntv_ctl_el0);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        write_sysreg(val, cntv_tval_el0);
                        break;
                }
        }

        isb();
}
static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        return read_sysreg(cntp_ctl_el0);
                case ARCH_TIMER_REG_TVAL:
                        return arch_timer_reg_read_stable(cntp_tval_el0);
                }
        } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        return read_sysreg(cntv_ctl_el0);
                case ARCH_TIMER_REG_TVAL:
                        return arch_timer_reg_read_stable(cntv_tval_el0);
                }
        }

        BUG();
}
static inline u32 arch_timer_get_cntfrq(void)
{
        return read_sysreg(cntfrq_el0);
}

static inline u32 arch_timer_get_cntkctl(void)
{
        return read_sysreg(cntkctl_el1);
}
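/*
 * Write CNTKCTL_EL1 and issue an ISB so that, by the time this returns,
 * delay loops can rely on the event stream configuration having taken
 * effect.
 */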
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
        write_sysreg(cntkctl, cntkctl_el1);
        isb();
}

static inline u64 arch_counter_get_cntpct(void)
{
        /*
         * AArch64 kernel and user space mandate the use of CNTVCT.
         */
        BUG();
        return 0;
}
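/*
 * The ISB keeps the CNTVCT_EL0 read from being speculated ahead of
 * preceding instructions, so the value read reflects program order.
 */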
static inline u64 arch_counter_get_cntvct(void)
{
        isb();
        return arch_timer_reg_read_stable(cntvct_el0);
}

static inline int arch_timer_arch_init(void)
{
        return 0;
}

#endif