sched/fair: Remove sched_trace_*() helper functions
We no longer need them: DWARF debug info, or BTF plus pahole, can be used to
re-generate the required structs to compile against for a given kernel. This
moves the burden of maintaining these helper functions to the module.

https://github.com/qais-yousef/sched_tp

Note that pahole v1.15 or later is required when using DWARF. For BTF, pahole
v1.23 (not yet released) will be required; earlier versions have an alignment
problem that leads to crashes when used with BTF.

We now have enough infrastructure to make these helper functions obsolete, so
remove them.

[Rewrote commit message to reflect the new alternative]
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220428144338.479094-2-qais.yousef@arm.com
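For illustration only (not part of this patch), a minimal sketch of the module
side after the helpers are gone: a probe attached to the bare pelt_cfs_tp
tracepoint reads cfs_rq->avg directly, relying on a struct cfs_rq definition
regenerated for the target kernel with pahole (e.g. pahole -C cfs_rq vmlinux,
from DWARF or BTF debug info). The header name "sched_tp_structs.h" and the
probe/module names are placeholders; see the sched_tp repository above for a
complete implementation.

/*
 * Minimal sketch of an out-of-tree consumer replacing sched_trace_cfs_rq_avg().
 * Assumes "sched_tp_structs.h" (hypothetical) provides a struct cfs_rq
 * definition regenerated for the running kernel via DWARF/BTF + pahole,
 * so &cfs_rq->avg can be read directly in the tracepoint probe.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

#include "sched_tp_structs.h"	/* hypothetical, regenerated struct cfs_rq */

static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
{
	const struct sched_avg *avg = cfs_rq ? &cfs_rq->avg : NULL;

	if (avg)
		trace_printk("cfs util_avg=%lu\n", avg->util_avg);
}

static int __init sched_tp_example_init(void)
{
	/* pelt_cfs_tp is a bare tracepoint; modules register probes directly. */
	return register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
}

static void __exit sched_tp_example_exit(void)
{
	unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
	tracepoint_synchronize_unregister();
}

module_init(sched_tp_example_init);
module_exit(sched_tp_example_exit);
MODULE_LICENSE("GPL");

The point of the change is that the struct layout now comes from the kernel's
own debug info rather than from exported helpers, so the module (or its
generated header) is refreshed per kernel instead of the kernel carrying
EXPORT_SYMBOL_GPL wrappers.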
commit 50e7b416d2
parent 4e3c7d338a
@@ -2378,20 +2378,6 @@ static inline void rseq_syscall(struct pt_regs *regs)
 
 #endif
 
-const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
-char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
-int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
-
-const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
-
-int sched_trace_rq_cpu(struct rq *rq);
-int sched_trace_rq_cpu_capacity(struct rq *rq);
-int sched_trace_rq_nr_running(struct rq *rq);
-
-const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
-
 #ifdef CONFIG_SCHED_CORE
 extern void sched_core_free(struct task_struct *tsk);
 extern void sched_core_fork(struct task_struct *p);
@@ -11839,101 +11839,3 @@ __init void init_sched_fair_class(void)
 #endif /* SMP */
 
 }
-
-/*
- * Helper functions to facilitate extracting info from tracepoints.
- */
-
-const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq)
-{
-#ifdef CONFIG_SMP
-	return cfs_rq ? &cfs_rq->avg : NULL;
-#else
-	return NULL;
-#endif
-}
-EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg);
-
-char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)
-{
-	if (!cfs_rq) {
-		if (str)
-			strlcpy(str, "(null)", len);
-		else
-			return NULL;
-	}
-
-	cfs_rq_tg_path(cfs_rq, str, len);
-	return str;
-}
-EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path);
-
-int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq)
-{
-	return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
-}
-EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu);
-
-const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
-{
-#ifdef CONFIG_SMP
-	return rq ? &rq->avg_rt : NULL;
-#else
-	return NULL;
-#endif
-}
-EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt);
-
-const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
-{
-#ifdef CONFIG_SMP
-	return rq ? &rq->avg_dl : NULL;
-#else
-	return NULL;
-#endif
-}
-EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl);
-
-const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ)
-	return rq ? &rq->avg_irq : NULL;
-#else
-	return NULL;
-#endif
-}
-EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq);
-
-int sched_trace_rq_cpu(struct rq *rq)
-{
-	return rq ? cpu_of(rq) : -1;
-}
-EXPORT_SYMBOL_GPL(sched_trace_rq_cpu);
-
-int sched_trace_rq_cpu_capacity(struct rq *rq)
-{
-	return rq ?
-#ifdef CONFIG_SMP
-		rq->cpu_capacity
-#else
-		SCHED_CAPACITY_SCALE
-#endif
-		: -1;
-}
-EXPORT_SYMBOL_GPL(sched_trace_rq_cpu_capacity);
-
-const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
-{
-#ifdef CONFIG_SMP
-	return rd ? rd->span : NULL;
-#else
-	return NULL;
-#endif
-}
-EXPORT_SYMBOL_GPL(sched_trace_rd_span);
-
-int sched_trace_rq_nr_running(struct rq *rq)
-{
-	return rq ? rq->nr_running : -1;
-}
-EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);