perf/x86/intel: Factor out intel_pmu_check_num_counters
Each Hybrid PMU has to check its own number of counters and mask fixed counters before registration. The intel_pmu_check_num_counters() helper will be reused later to check the number of counters for each hybrid PMU.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-12-git-send-email-kan.liang@linux.intel.com
commit b8c4d1a876
parent 183af7366b
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5064,6 +5064,26 @@ static const struct attribute_group *attr_update[] = {
 
 static struct attribute *empty_attrs;
 
+static void intel_pmu_check_num_counters(int *num_counters,
+					 int *num_counters_fixed,
+					 u64 *intel_ctrl, u64 fixed_mask)
+{
+	if (*num_counters > INTEL_PMC_MAX_GENERIC) {
+		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+		     *num_counters, INTEL_PMC_MAX_GENERIC);
+		*num_counters = INTEL_PMC_MAX_GENERIC;
+	}
+	*intel_ctrl = (1ULL << *num_counters) - 1;
+
+	if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+		     *num_counters_fixed, INTEL_PMC_MAX_FIXED);
+		*num_counters_fixed = INTEL_PMC_MAX_FIXED;
+	}
+
+	*intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
+}
+
 __init int intel_pmu_init(void)
 {
 	struct attribute **extra_skl_attr = &empty_attrs;
@@ -5703,20 +5723,10 @@ __init int intel_pmu_init(void)
 
 	x86_pmu.attr_update = attr_update;
 
-	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
-		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
-		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
-	}
-	x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
-
-	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
-		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
-		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
-	}
-
-	x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;
+	intel_pmu_check_num_counters(&x86_pmu.num_counters,
+				     &x86_pmu.num_counters_fixed,
+				     &x86_pmu.intel_ctrl,
+				     (u64)fixed_mask);
 
 	/* AnyThread may be deprecated on arch perfmon v5 or later */
 	if (x86_pmu.intel_cap.anythread_deprecated)
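Worth noting: the helper builds the global-control mask in two steps, setting one enable bit per general-purpose counter in the low bits, then OR-ing the fixed-counter enable bits shifted up to INTEL_PMC_IDX_FIXED (bit 32, the architectural position of the fixed-counter enables in GLOBAL_CTRL). A standalone userspace sketch of that arithmetic, using illustrative values (8 GP counters, a 0x7 fixed-counter mask) rather than anything read from hardware:

/*
 * Standalone sketch (not kernel code) of the intel_ctrl mask math in
 * intel_pmu_check_num_counters(). The counter counts are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define IDX_FIXED 32	/* stands in for INTEL_PMC_IDX_FIXED */

int main(void)
{
	int num_counters = 8;		/* e.g. 8 general-purpose counters */
	uint64_t fixed_mask = 0x7;	/* e.g. 3 fixed counters */

	/* One enable bit per GP counter: bits 0..7 -> 0xff */
	uint64_t intel_ctrl = (1ULL << num_counters) - 1;

	/* Fixed-counter enable bits live at bit 32 and up */
	intel_ctrl |= fixed_mask << IDX_FIXED;

	/* Prints: intel_ctrl = 0x7000000ff */
	printf("intel_ctrl = 0x%llx\n", (unsigned long long)intel_ctrl);
	return 0;
}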
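And a rough sketch of the per-hybrid-PMU reuse the commit message promises. This patch adds only the helper; struct x86_hybrid_pmu, the x86_pmu.hybrid_pmu[] array, x86_pmu.num_hybrid_pmus, and the intel_pmu_check_hybrid_pmus() loop below are all assumptions about the follow-up patches in the series, not code from this commit:

/*
 * Hypothetical sketch of the later per-hybrid-PMU reuse. The struct
 * layout and the loop are assumed, kernel-style pseudocode; this
 * patch only introduces the helper itself.
 */
struct x86_hybrid_pmu {
	int	num_counters;
	int	num_counters_fixed;
	u64	intel_ctrl;
};

static void intel_pmu_check_hybrid_pmus(u64 fixed_mask)
{
	int i;

	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
		struct x86_hybrid_pmu *pmu = &x86_pmu.hybrid_pmu[i];

		/*
		 * Each hybrid PMU validates its own counter counts and
		 * builds its own global-control mask, exactly as the
		 * boot-time path does for x86_pmu above.
		 */
		intel_pmu_check_num_counters(&pmu->num_counters,
					     &pmu->num_counters_fixed,
					     &pmu->intel_ctrl,
					     fixed_mask);
	}
}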