mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 04:02:20 +00:00
timekeeping: Introduce fast accessor to clock tai
Introduce fast/NMI safe accessor to clock tai for tracing. The Linux kernel tracing infrastructure has support for using different clocks to generate timestamps for trace events. Especially in TSN networks it's useful to have TAI as trace clock, because the application scheduling is done in accordance to the network time, which is based on TAI. With a tai trace_clock in place, it becomes very convenient to correlate network activity with Linux kernel application traces. Use the same implementation as ktime_get_boot_fast_ns() does by reading the monotonic time and adding the TAI offset. The same limitations as for the fast boot implementation apply. The TAI offset may change at run time e.g., by setting the time or using adjtimex() with an offset. However, these kinds of offset changes are rare events. Nevertheless, the user has to be aware and deal with it in post processing. An alternative approach would be to use the same implementation as ktime_get_real_fast_ns() does. However, this requires to add an additional u64 member to the tk_read_base struct. This struct together with a seqcount is designed to fit into a single cache line on 64 bit architectures. Adding a new member would violate this constraint. Signed-off-by: Kurt Kanzenbach <kurt@linutronix.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Steven Rostedt <rostedt@goodmis.org> Link: https://lore.kernel.org/r/20220414091805.89667-2-kurt@linutronix.de
This commit is contained in:
parent
ce522ba9ef
commit
3dc6ffae2d
@ -132,6 +132,7 @@ Some additional variants exist for more specialized cases:
|
||||
.. c:function:: u64 ktime_get_mono_fast_ns( void )
|
||||
u64 ktime_get_raw_fast_ns( void )
|
||||
u64 ktime_get_boot_fast_ns( void )
|
||||
u64 ktime_get_tai_fast_ns( void )
|
||||
u64 ktime_get_real_fast_ns( void )
|
||||
|
||||
These variants are safe to call from any context, including from
|
||||
|
@ -177,6 +177,7 @@ static inline u64 ktime_get_raw_ns(void)
|
||||
extern u64 ktime_get_mono_fast_ns(void);
|
||||
extern u64 ktime_get_raw_fast_ns(void);
|
||||
extern u64 ktime_get_boot_fast_ns(void);
|
||||
extern u64 ktime_get_tai_fast_ns(void);
|
||||
extern u64 ktime_get_real_fast_ns(void);
|
||||
|
||||
/*
|
||||
|
@ -532,6 +532,23 @@ u64 notrace ktime_get_boot_fast_ns(void)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
|
||||
|
||||
/**
 * ktime_get_tai_fast_ns - NMI safe and fast access to tai clock.
 *
 * Returns the current TAI time in nanoseconds, computed as the fast/NMI-safe
 * monotonic clock plus the timekeeper's TAI offset.
 *
 * The same limitations as described for ktime_get_boot_fast_ns() apply. The
 * mono time and the TAI offset are not read atomically which may yield wrong
 * readouts. However, an update of the TAI offset is a rare event e.g., caused
 * by settime or adjtimex with an offset. The user of this function has to deal
 * with the possibility of wrong timestamps in post processing.
 */
u64 notrace ktime_get_tai_fast_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	/*
	 * data_race() marks the unsynchronized read of offs_tai as an
	 * intentional, tolerated race: a stale offset only produces the
	 * wrong-readout case documented above.
	 */
	return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));
}
EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);
|
||||
|
||||
static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
|
||||
{
|
||||
struct tk_read_base *tkr;
|
||||
|
Loading…
Reference in New Issue
Block a user