sched/headers: Move scheduler clock interfaces to <linux/sched/clock.h>
Move the sched_clock interfaces into a separate header file, to reduce the size of sched.h.

Include <linux/sched/clock.h> in all files that made use of one of the scheduler clock interfaces.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent eb61baf698
commit 5689810360
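For illustration only, a minimal sketch (not part of this commit) of what a file that uses one of the clock interfaces looks like after the split: it can no longer rely on <linux/sched.h> alone and must include the new header explicitly. The function name example_measure_ns() is hypothetical.

/* Hypothetical caller, shown only to illustrate the include change. */
#include <linux/sched.h>
#include <linux/sched/clock.h>	/* now required for local_clock() et al. */

static u64 example_measure_ns(void)
{
	u64 start, end;

	/*
	 * local_clock() readings are only comparable on the same CPU;
	 * this sketch assumes the caller stays on one CPU (e.g. with
	 * preemption disabled) for the duration of the measurement.
	 */
	start = local_clock();
	/* ... work being timed ... */
	end = local_clock();

	return end - start;
}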
@@ -2190,103 +2190,6 @@ static inline void calc_load_exit_idle(void) { }
#define cpu_relax_yield() cpu_relax()
#endif

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, use (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_init_late(void)
{
}

static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern void sched_clock_init_late(void);
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

/*
 * As outlined in clock.c, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

@@ -2297,9 +2200,6 @@ extern void sched_exec(void);
#define sched_exec() {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
@@ -3,4 +3,102 @@

#include <linux/sched.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, use (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_init_late(void)
{
}

static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern void sched_clock_init_late(void);
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

/*
 * As outlined in clock.c, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

#endif /* _LINUX_SCHED_CLOCK_H */
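As a usage note (not part of the diff), the BIG FAT WARNING above means per-CPU readings should only be differenced against readings taken on the same CPU. A hedged sketch, with the hypothetical helper example_delta_same_cpu():

/*
 * Hypothetical sketch, shown only to illustrate the warning: only
 * difference cpu_clock() readings taken for the same cpu argument.
 */
#include <linux/sched/clock.h>

static u64 example_delta_same_cpu(int cpu, u64 earlier_stamp)
{
	/*
	 * Safe: both readings come from the same per-CPU clock, which is
	 * monotonic for a fixed cpu argument.
	 *
	 * Unsafe: cpu_clock(i) - cpu_clock(j) for i != j, since the clocks
	 * only have bounded drift and the result can go backwards.
	 */
	return cpu_clock(cpu) - earlier_stamp;
}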