perf/x86/intel/rapl: Utilize event->pmu_private

Store the PMU pointer in event->pmu_private and use it instead of the per-CPU
data. This is a preparatory step for getting rid of the per-CPU allocations.
The usage sites are in the perf fast path, so we keep this scheme even after
the conversion to per-package storage: a CPU-to-package lookup involves 3
loads versus 1 with the pmu_private pointer.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Harish Chegondi <harish.chegondi@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20160222221012.748151799@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
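
The changelog's load-count argument can be illustrated with a small
standalone sketch. This is not kernel code; the names (cpu_to_pkg,
rapl_pmus, lookup_slow, lookup_fast) are hypothetical stand-ins:

#include <stdio.h>

struct rapl_pmu { int id; };

struct perf_event {
	int	cpu;
	void	*pmu_private;		/* cached once at event init time */
};

static int cpu_to_pkg[4] = { 0, 0, 1, 1 };		/* topology map */
static struct rapl_pmu pkg_pmu[2] = { { 0 }, { 1 } };
static struct rapl_pmu *rapl_pmus[2] = { &pkg_pmu[0], &pkg_pmu[1] };

/* Per-package lookup: three dependent loads on the fast path. */
static struct rapl_pmu *lookup_slow(struct perf_event *event)
{
	int pkg = cpu_to_pkg[event->cpu];	/* 1: event->cpu, 2: map */
	return rapl_pmus[pkg];			/* 3: pmu pointer array */
}

/* pmu_private: a single load. */
static struct rapl_pmu *lookup_fast(struct perf_event *event)
{
	return event->pmu_private;
}

int main(void)
{
	struct perf_event ev = { .cpu = 2, .pmu_private = &pkg_pmu[1] };

	printf("slow: pmu %d, fast: pmu %d\n",
	       lookup_slow(&ev)->id, lookup_fast(&ev)->id);
	return 0;
}
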
@@ -122,6 +122,7 @@ static struct perf_pmu_events_attr event_attr_##v = { \
 struct rapl_pmu {
 	raw_spinlock_t	 lock;
 	int		 n_active;
+	int		 cpu;
 	struct list_head active_list;
 	struct pmu	 *pmu;
 	ktime_t		 timer_interval;
@@ -203,7 +204,7 @@ static void rapl_start_hrtimer(struct rapl_pmu *pmu)
 
 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 {
-	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
 	struct perf_event *event;
 	unsigned long flags;
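
The hunk above switches the handler to the container_of() pattern: the
hrtimer is embedded in struct rapl_pmu, so the pmu can be recovered from
the timer pointer alone, without any per-CPU access. A minimal userspace
approximation, with simplified, hypothetical struct layouts:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hrtimer { int dummy; };

struct rapl_pmu {
	int		cpu;
	struct hrtimer	hrtimer;	/* embedded, as in the kernel struct */
};

static void handler(struct hrtimer *t)
{
	struct rapl_pmu *pmu = container_of(t, struct rapl_pmu, hrtimer);

	printf("timer fired for pmu on cpu %d\n", pmu->cpu);
}

int main(void)
{
	struct rapl_pmu pmu = { .cpu = 3 };

	handler(&pmu.hrtimer);	/* only the embedded timer is passed */
	return 0;
}
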
@@ -249,7 +250,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
 static void rapl_pmu_event_start(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+	struct rapl_pmu *pmu = event->pmu_private;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&pmu->lock, flags);
@@ -259,7 +260,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+	struct rapl_pmu *pmu = event->pmu_private;
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
@@ -293,7 +294,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+	struct rapl_pmu *pmu = event->pmu_private;
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
@@ -316,6 +317,7 @@ static void rapl_pmu_event_del(struct perf_event *event, int flags)
 
 static int rapl_pmu_event_init(struct perf_event *event)
 {
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
 	int bit, msr, ret = 0;
@@ -327,6 +329,9 @@ static int rapl_pmu_event_init(struct perf_event *event)
 	if (event->attr.config & ~RAPL_EVENT_MASK)
 		return -EINVAL;
 
+	if (event->cpu < 0)
+		return -EINVAL;
+
 	/*
 	 * check event is known (determines counter)
 	 */
@@ -365,6 +370,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 		return -EINVAL;
 
 	/* must be done before validate_group */
+	event->cpu = pmu->cpu;
+	event->pmu_private = pmu;
 	event->hw.event_base = msr;
 	event->hw.config = cfg;
 	event->hw.idx = bit;
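
The event_init() hunks above are the slow path, run once per event:
RAPL is a system-wide, per-package style PMU, so per-task events
(cpu < 0) are rejected, and the event is bound to the PMU and its CPU
before any fast-path callback runs. A simplified standalone sketch,
using hypothetical types rather than the kernel structures:

#include <stdio.h>

struct rapl_pmu { int cpu; };

struct perf_event {
	int	cpu;
	void	*pmu_private;
};

static int rapl_event_init(struct perf_event *event, struct rapl_pmu *pmu)
{
	if (event->cpu < 0)		/* no per-task RAPL events */
		return -1;

	/* must be done before group validation, as in the hunk above */
	event->cpu = pmu->cpu;
	event->pmu_private = pmu;
	return 0;
}

int main(void)
{
	struct rapl_pmu pmu = { .cpu = 0 };
	struct perf_event ev = { .cpu = 2, .pmu_private = NULL };

	if (!rapl_event_init(&ev, &pmu))
		printf("event bound to cpu %d\n", ev.cpu);
	return 0;
}
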
@@ -572,6 +579,7 @@ static int rapl_cpu_prepare(int cpu)
 	INIT_LIST_HEAD(&pmu->active_list);
 	pmu->pmu = &rapl_pmu_class;
+	pmu->cpu = cpu;
 	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);