sched/headers, cgroups: Remove the threadgroup_change_*() wrappery
threadgroup_change_begin()/end() is a pointless wrapper around
cgroup_threadgroup_change_begin()/end(), minus a might_sleep()
in the !CONFIG_CGROUPS case.

Remove the wrappery and move the might_sleep() into the !CONFIG_CGROUPS
stub of cgroup_threadgroup_change_begin(); in the CONFIG_CGROUPS case
the down_read() already performs a might_sleep() check.

This debloats <linux/sched.h> a bit and simplifies this API.

Update all call sites.

No change in functionality.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
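Editor's note (not part of the original commit message): the reason the explicit might_sleep() only has to survive in the !CONFIG_CGROUPS stub is that, with CONFIG_CGROUPS=y, cgroup_threadgroup_change_begin() takes cgroup_threadgroup_rwsem for read via percpu_down_read(), which itself begins with a might_sleep() check. A simplified, paraphrased sketch of that path (not verbatim kernel source) follows:

	/* <linux/percpu-rwsem.h>, simplified: the reader-side lock that
	 * cgroup_threadgroup_change_begin() takes with CONFIG_CGROUPS=y
	 * already starts with the debug check ... */
	static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
	{
		might_sleep();
		/* ... acquire the per-CPU reader side of the semaphore ... */
	}

	/* ... which is why the explicit might_sleep() only needs to be kept
	 * in the !CONFIG_CGROUPS stub, exactly as the cgroup-defs.h hunk
	 * below adds it. */
	static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
	{
		might_sleep();
	}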
commit 780de9dd27
parent f9411ebe3d
@@ -1088,7 +1088,7 @@ static int de_thread(struct task_struct *tsk)
 		struct task_struct *leader = tsk->group_leader;
 
 		for (;;) {
-			threadgroup_change_begin(tsk);
+			cgroup_threadgroup_change_begin(tsk);
 			write_lock_irq(&tasklist_lock);
 			/*
 			 * Do this under tasklist_lock to ensure that
@@ -1099,7 +1099,7 @@ static int de_thread(struct task_struct *tsk)
 				break;
 			__set_current_state(TASK_KILLABLE);
 			write_unlock_irq(&tasklist_lock);
-			threadgroup_change_end(tsk);
+			cgroup_threadgroup_change_end(tsk);
 			schedule();
 			if (unlikely(__fatal_signal_pending(tsk)))
 				goto killed;
@@ -1157,7 +1157,7 @@ static int de_thread(struct task_struct *tsk)
 		if (unlikely(leader->ptrace))
 			__wake_up_parent(leader, leader->parent);
 		write_unlock_irq(&tasklist_lock);
-		threadgroup_change_end(tsk);
+		cgroup_threadgroup_change_end(tsk);
 
 		release_task(leader);
 	}
@@ -531,8 +531,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
  * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
  * @tsk: target task
  *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ * Allows cgroup operations to synchronize against threadgroup changes
+ * using a percpu_rw_semaphore.
  */
 static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
 {
@@ -543,8 +543,7 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
  * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
  * @tsk: target task
  *
- * Called from threadgroup_change_end().  Counterpart of
- * cgroup_threadcgroup_change_begin().
+ * Counterpart of cgroup_threadcgroup_change_begin().
  */
 static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
 {
@@ -555,7 +554,11 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
 
 #define CGROUP_SUBSYS_COUNT 0
 
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+	might_sleep();
+}
+
 static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
 
 #endif	/* CONFIG_CGROUPS */
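Editor's sketch (assumed bodies, not part of the diff): the kerneldoc above mentions "a percpu_rw_semaphore" because, with CONFIG_CGROUPS=y, the two helpers are assumed to be thin wrappers around the reader side of cgroup_threadgroup_rwsem (declared in the hunk header context above), while the cgroup migration code takes the writer side to get a stable view of every threadgroup. Roughly (the writer-side function name is made up for this sketch):

	/* Reader side: what the helpers are assumed to expand to when
	 * CONFIG_CGROUPS=y. */
	static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
	{
		percpu_down_read(&cgroup_threadgroup_rwsem);
	}

	static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
	{
		percpu_up_read(&cgroup_threadgroup_rwsem);
	}

	/* Writer side: hypothetical illustration of the cgroup migration
	 * path, which excludes all of the readers above at once. */
	static void cgroup_migration_example(void)
	{
		percpu_down_write(&cgroup_threadgroup_rwsem);
		/* ... move tasks between cgroups; no thread can fork, exec
		 * (de-thread) or set PF_EXITING while this is held ... */
		percpu_up_write(&cgroup_threadgroup_rwsem);
	}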
@@ -3162,34 +3162,6 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
-/**
- * threadgroup_change_begin - mark the beginning of changes to a threadgroup
- * @tsk: task causing the changes
- *
- * All operations which modify a threadgroup - a new thread joining the
- * group, death of a member thread (the assertion of PF_EXITING) and
- * exec(2) dethreading the process and replacing the leader - are wrapped
- * by threadgroup_change_{begin|end}().  This is to provide a place which
- * subsystems needing threadgroup stability can hook into for
- * synchronization.
- */
-static inline void threadgroup_change_begin(struct task_struct *tsk)
-{
-	might_sleep();
-	cgroup_threadgroup_change_begin(tsk);
-}
-
-/**
- * threadgroup_change_end - mark the end of changes to a threadgroup
- * @tsk: task causing the changes
- *
- * See threadgroup_change_begin().
- */
-static inline void threadgroup_change_end(struct task_struct *tsk)
-{
-	cgroup_threadgroup_change_end(tsk);
-}
-
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 
 static inline struct thread_info *task_thread_info(struct task_struct *task)
@@ -214,7 +214,7 @@ static void pids_cancel_attach(struct cgroup_taskset *tset)
 
 /*
  * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
- * on threadgroup_change_begin() held by the copy_process().
+ * on cgroup_threadgroup_change_begin() held by the copy_process().
  */
 static int pids_can_fork(struct task_struct *task)
 {
@@ -1746,7 +1746,7 @@ static __latent_entropy struct task_struct *copy_process(
 	INIT_LIST_HEAD(&p->thread_group);
 	p->task_works = NULL;
 
-	threadgroup_change_begin(current);
+	cgroup_threadgroup_change_begin(current);
 	/*
 	 * Ensure that the cgroup subsystem policies allow the new process to be
 	 * forked. It should be noted the the new process's css_set can be changed
@@ -1843,7 +1843,7 @@ static __latent_entropy struct task_struct *copy_process(
 
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
-	threadgroup_change_end(current);
+	cgroup_threadgroup_change_end(current);
 	perf_event_fork(p);
 
 	trace_task_newtask(p, clone_flags);
@@ -1854,7 +1854,7 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_cancel_cgroup:
 	cgroup_cancel_fork(p);
 bad_fork_free_pid:
-	threadgroup_change_end(current);
+	cgroup_threadgroup_change_end(current);
 	if (pid != &init_struct_pid)
 		free_pid(pid);
 bad_fork_cleanup_thread:
@@ -2395,11 +2395,11 @@ void exit_signals(struct task_struct *tsk)
 	 * @tsk is about to have PF_EXITING set - lock out users which
 	 * expect stable threadgroup.
 	 */
-	threadgroup_change_begin(tsk);
+	cgroup_threadgroup_change_begin(tsk);
 
 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
 		tsk->flags |= PF_EXITING;
-		threadgroup_change_end(tsk);
+		cgroup_threadgroup_change_end(tsk);
 		return;
 	}
 
@@ -2410,7 +2410,7 @@ void exit_signals(struct task_struct *tsk)
 	 */
 	tsk->flags |= PF_EXITING;
 
-	threadgroup_change_end(tsk);
+	cgroup_threadgroup_change_end(tsk);
 
 	if (!signal_pending(tsk))
 		goto out;
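Editor's usage sketch (hypothetical caller, not from the kernel tree): after this commit, threadgroup-modifying paths bracket their update with the cgroup_ prefixed pair directly and must call _end() on every exit path, including early returns, as exit_signals() above and the copy_process() error path do:

	static void example_threadgroup_update(struct task_struct *tsk)
	{
		cgroup_threadgroup_change_begin(tsk);

		if (nothing_to_do(tsk)) {	/* hypothetical predicate */
			cgroup_threadgroup_change_end(tsk);
			return;
		}

		/* ... modify the threadgroup: spawn a thread, de-thread on
		 * exec, or mark a member PF_EXITING ... */

		cgroup_threadgroup_change_end(tsk);
	}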