Mirror of https://github.com/torvalds/linux.git
Commit 800d4d30c8
When autogroup is disabled from the beginning,

  sched_autogroup_create_attach()
    autogroup_move_group()          <== 1
      sched_move_task()             <== 2
        task_move_group_fair()
          set_task_rq()
            task_group()
              autogroup_task_group()

we go the whole path without doing anything useful. So stop going any further if autogroup is disabled.

There will, however, be a race window between 1 and 2 in which sysctl_sched_autogroup_enabled becomes enabled; that issue will be taken care of by a following patch.

Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <1298185696-4403-4-git-send-email-yong.zhang0@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
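The fix described above amounts to bailing out of autogroup_move_group() before the thread-group walk when the sysctl is off. The snippet below is only an approximate reconstruction of that function as the commit message implies it (the exact hunk is the one in this commit). Only autogroup_move_group(), sched_move_task() and sysctl_sched_autogroup_enabled come from the message itself; lock_task_sighand()/unlock_task_sighand(), autogroup_kref_get()/autogroup_kref_put(), ACCESS_ONCE() and while_each_thread() are assumptions based on the kernel of that era, not verbatim source.

static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
        struct autogroup *prev;
        struct task_struct *t;
        unsigned long flags;

        BUG_ON(!lock_task_sighand(p, &flags));

        prev = p->signal->autogroup;
        if (prev == ag) {
                unlock_task_sighand(p, &flags);
                return;
        }

        p->signal->autogroup = autogroup_kref_get(ag);

        /*
         * The point of the patch: when autogroup is disabled there is
         * nothing useful for sched_move_task() to do, so skip the walk.
         */
        if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
                goto out;

        t = p;
        do {
                sched_move_task(t);     /* step 2 in the call chain above */
        } while_each_thread(p, t);
out:
        unlock_task_sighand(p, &flags);
        autogroup_kref_put(prev);
}

The race the message mentions is exactly the window between the check in 1 and the walk in 2: if sysctl_sched_autogroup_enabled flips to enabled in between, the move is skipped; the commit defers that to a following patch.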
42 lines · 947 B · C
#ifdef CONFIG_SCHED_AUTOGROUP

struct autogroup {
        /*
         * The reference count doesn't mean how many threads are attached
         * to this autogroup now; it just stands for the number of tasks
         * that could use this autogroup.
         */
        struct kref             kref;
        struct task_group       *tg;
        struct rw_semaphore     lock;
        unsigned long           id;
        int                     nice;
};

static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg);

#else /* !CONFIG_SCHED_AUTOGROUP */

static inline void autogroup_init(struct task_struct *init_task) { }
static inline void autogroup_free(struct task_group *tg) { }

static inline bool task_group_is_autogroup(struct task_group *tg)
{
        return 0;
}

static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
        return tg;
}

#ifdef CONFIG_SCHED_DEBUG
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
        return 0;
}
#endif

#endif /* CONFIG_SCHED_AUTOGROUP */
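The stub half above makes the set_task_rq() → task_group() → autogroup_task_group() chain from the commit message an identity operation when CONFIG_SCHED_AUTOGROUP is off. For contrast, the enabled-side definition lives in kernel/sched_autogroup.c; the sketch below is an approximation of how it looked in that era, assuming a task_wants_autogroup() eligibility helper and an ACCESS_ONCE() read of the sysctl, and is not the verbatim source.

static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
        int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);

        /*
         * Redirect the task into its session's autogroup only when the
         * feature is enabled and the task qualifies; everyone else keeps
         * the task_group it was handed.
         */
        if (enabled && task_wants_autogroup(p, tg))
                return p->signal->autogroup->tg;

        return tg;
}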