perf counters: add prctl interface to disable/enable counters
Add a way for self-monitoring tasks to disable/enable counters summarily, via a prctl:

    PR_TASK_PERF_COUNTERS_DISABLE	31
    PR_TASK_PERF_COUNTERS_ENABLE	32

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent bae43c9945
commit 1d1c7ddbfa
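For context, a minimal userspace sketch (not part of this commit) of how a self-monitoring task might use the new prctl pair to exclude a code region from measurement. The fallback defines are only needed where the installed headers predate this patch:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_TASK_PERF_COUNTERS_DISABLE
# define PR_TASK_PERF_COUNTERS_DISABLE	31
# define PR_TASK_PERF_COUNTERS_ENABLE	32
#endif

int main(void)
{
	/* ... counters already opened via sys_perf_counter_open() ... */

	/* Stop all of this task's counters around an uninteresting region: */
	if (prctl(PR_TASK_PERF_COUNTERS_DISABLE) != 0)
		perror("PR_TASK_PERF_COUNTERS_DISABLE");

	/* ... work we do not want measured ... */

	if (prctl(PR_TASK_PERF_COUNTERS_ENABLE) != 0)
		perror("PR_TASK_PERF_COUNTERS_ENABLE");

	return 0;
}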
include/linux/perf_counter.h
@@ -213,6 +213,8 @@ extern u64 hw_perf_save_disable(void);
 extern void hw_perf_restore(u64 ctrl);
 extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
 extern u64 atomic64_counter_read(struct perf_counter *counter);
+extern int perf_counter_task_disable(void);
+extern int perf_counter_task_enable(void);
 
 #else
 static inline void
@@ -226,6 +228,8 @@ static inline void perf_counter_notify(struct pt_regs *regs)		{ }
 static inline void perf_counter_print_debug(void)			{ }
 static inline void hw_perf_restore(u64 ctrl)			{ }
 static inline u64 hw_perf_save_disable(void)		      { return 0; }
+static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
+static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */

include/linux/prctl.h
@@ -85,4 +85,7 @@
 #define PR_SET_TIMERSLACK 29
 #define PR_GET_TIMERSLACK 30
 
+#define PR_TASK_PERF_COUNTERS_DISABLE		31
+#define PR_TASK_PERF_COUNTERS_ENABLE		32
+
 #endif /* _LINUX_PRCTL_H */
kernel/perf_counter.c
@@ -415,6 +415,9 @@ counter_sched_in(struct perf_counter *counter,
 		 struct perf_counter_context *ctx,
 		 int cpu)
 {
+	if (counter->active == -1)
+		return;
+
 	counter->hw_ops->hw_perf_counter_enable(counter);
 	counter->active = 1;
 	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
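A note on the counter->active convention this hunk introduces: the field becomes a tri-state. The named constants below are purely illustrative (the patch keeps 'active' as a plain int); they summarize the values as this commit uses them:

/* Illustrative only -- the patch uses a bare int, not an enum: */
enum {
	/* set by perf_counter_task_disable(); counter_sched_in() skips it */
	COUNTER_PRCTL_DISABLED	= -1,
	/* set by perf_counter_task_enable(); eligible to be scheduled in */
	COUNTER_INACTIVE	=  0,
	/* set by counter_sched_in(); currently counting on a CPU */
	COUNTER_ACTIVE		=  1,
};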
@@ -479,6 +482,79 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 	cpuctx->task_ctx = ctx;
 }
 
+int perf_counter_task_disable(void)
+{
+	struct task_struct *curr = current;
+	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
+	struct perf_counter *counter;
+	u64 perf_flags;
+	int cpu;
+
+	if (likely(!ctx->nr_counters))
+		return 0;
+
+	local_irq_disable();
+	cpu = smp_processor_id();
+
+	perf_counter_task_sched_out(curr, cpu);
+
+	spin_lock(&ctx->lock);
+
+	/*
+	 * Disable all the counters:
+	 */
+	perf_flags = hw_perf_save_disable();
+
+	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+		WARN_ON_ONCE(counter->active == 1);
+		counter->active = -1;
+	}
+	hw_perf_restore(perf_flags);
+
+	spin_unlock(&ctx->lock);
+
+	local_irq_enable();
+
+	return 0;
+}
+
+int perf_counter_task_enable(void)
+{
+	struct task_struct *curr = current;
+	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
+	struct perf_counter *counter;
+	u64 perf_flags;
+	int cpu;
+
+	if (likely(!ctx->nr_counters))
+		return 0;
+
+	local_irq_disable();
+	cpu = smp_processor_id();
+
+	spin_lock(&ctx->lock);
+
+	/*
+	 * Disable all the counters:
+	 */
+	perf_flags = hw_perf_save_disable();
+
+	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+		if (counter->active != -1)
+			continue;
+		counter->active = 0;
+	}
+	hw_perf_restore(perf_flags);
+
+	spin_unlock(&ctx->lock);
+
+	perf_counter_task_sched_in(curr, cpu);
+
+	local_irq_enable();
+
+	return 0;
+}
+
 void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
 	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
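Both new functions share one bracketing pattern: interrupts off, ctx->lock held, and the PMU globally quiesced via hw_perf_save_disable()/hw_perf_restore() while counter state is flipped. The ordering differs deliberately: disable schedules the task's counters out before marking them -1, while enable marks them 0 first and only then schedules them back in. A condensed sketch of the shared locking shape, using a hypothetical helper that is not part of the patch:

/* Hypothetical condensation of the pattern above; not in the patch. */
static void task_counters_for_each_locked(struct perf_counter_context *ctx,
					  void (*fn)(struct perf_counter *))
{
	struct perf_counter *counter;
	u64 perf_flags;

	spin_lock(&ctx->lock);

	/* Keep the PMU quiescent while counter state changes: */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry)
		fn(counter);

	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
}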
@@ -951,13 +1027,9 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
  * @cpu:		target cpu
  * @group_fd:		group leader counter fd
  */
-asmlinkage int sys_perf_counter_open(
-
-	struct perf_counter_hw_event	*hw_event_uptr		__user,
-	pid_t				pid,
-	int				cpu,
-	int				group_fd)
-
+asmlinkage int
+sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user,
+		      pid_t pid, int cpu, int group_fd)
 {
 	struct perf_counter *counter, *group_leader;
 	struct perf_counter_hw_event hw_event;
kernel/sys.c
@@ -14,6 +14,7 @@
 #include <linux/prctl.h>
 #include <linux/highuid.h>
 #include <linux/fs.h>
+#include <linux/perf_counter.h>
 #include <linux/resource.h>
 #include <linux/kernel.h>
 #include <linux/kexec.h>
@@ -1716,6 +1717,12 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 		case PR_SET_TSC:
 			error = SET_TSC_CTL(arg2);
 			break;
+		case PR_TASK_PERF_COUNTERS_DISABLE:
+			error = perf_counter_task_disable();
+			break;
+		case PR_TASK_PERF_COUNTERS_ENABLE:
+			error = perf_counter_task_enable();
+			break;
 		case PR_GET_TIMERSLACK:
 			error = current->timer_slack_ns;
 			break;
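Because the !CONFIG_PERF_COUNTERS stubs return -EINVAL (and older kernels reject unknown prctl options the same way), user space can probe for the feature at runtime. A minimal sketch, assuming the side effect is acceptable because enabling is a no-op when the task has no counters:

#include <sys/prctl.h>

#ifndef PR_TASK_PERF_COUNTERS_ENABLE
# define PR_TASK_PERF_COUNTERS_ENABLE	32
#endif

/* Returns nonzero if the running kernel supports the new prctl pair. */
static int task_perf_counters_supported(void)
{
	/*
	 * Harmless when no counters are attached; note it would also
	 * re-enable any counters previously disabled via the prctl.
	 */
	return prctl(PR_TASK_PERF_COUNTERS_ENABLE) == 0;
}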