Commit 316c1608d1:

ACCESS_ONCE() doesn't work reliably on non-scalar types. This patch removes the rest of the existing usages of ACCESS_ONCE() in the scheduler and uses the new READ_ONCE() and WRITE_ONCE() APIs as appropriate.

Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Waiman Long <Waiman.Long@hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1430251224-5764-2-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
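The conversion the commit describes is mechanical: each ACCESS_ONCE() load becomes READ_ONCE() and each ACCESS_ONCE() store becomes WRITE_ONCE(). A minimal sketch of the pattern is below; it is not taken from the patch itself. The functions sketch_before()/sketch_after() and the example_flag field are made up for illustration, while sysctl_sched_autogroup_enabled is the knob read in the header shown after it, and READ_ONCE()/WRITE_ONCE() come from <linux/compiler.h>.

/* Before: ACCESS_ONCE() used for both the load and the store. */
static void sketch_before(struct task_struct *p)
{
        int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);

        ACCESS_ONCE(p->example_flag) = enabled;     /* example_flag is hypothetical */
}

/* After: READ_ONCE() for the load, WRITE_ONCE() for the store. Unlike
 * ACCESS_ONCE(), these also behave correctly for non-scalar types.
 */
static void sketch_after(struct task_struct *p)
{
        int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);

        WRITE_ONCE(p->example_flag, enabled);       /* example_flag is hypothetical */
}

The READ_ONCE() load in autogroup_task_group() in the header below is one of the scheduler call sites converted in this way.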
65 lines · 1.5 KiB · C
#ifdef CONFIG_SCHED_AUTOGROUP

#include <linux/kref.h>
#include <linux/rwsem.h>

struct autogroup {
        /*
         * The reference count does not track how many threads are
         * attached to this autogroup right now; it only counts the
         * number of tasks that could use this autogroup.
         */
        struct kref             kref;
        struct task_group       *tg;
        struct rw_semaphore     lock;
        unsigned long           id;
        int                     nice;
};

extern void autogroup_init(struct task_struct *init_task);
extern void autogroup_free(struct task_group *tg);

static inline bool task_group_is_autogroup(struct task_group *tg)
{
        return !!tg->autogroup;
}

extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);

/*
 * Return the task's autogroup task_group when autogrouping is enabled and
 * wanted for @p; otherwise fall back to @tg. The sysctl is loaded with
 * READ_ONCE() so the enable flag is read exactly once.
 */
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
        int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);

        if (enabled && task_wants_autogroup(p, tg))
                return p->signal->autogroup->tg;

        return tg;
}

extern int autogroup_path(struct task_group *tg, char *buf, int buflen);

#else /* !CONFIG_SCHED_AUTOGROUP */

static inline void autogroup_init(struct task_struct *init_task) { }
static inline void autogroup_free(struct task_group *tg) { }
static inline bool task_group_is_autogroup(struct task_group *tg)
{
        return false;
}

static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
        return tg;
}

#ifdef CONFIG_SCHED_DEBUG
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
        return 0;
}
#endif

#endif /* CONFIG_SCHED_AUTOGROUP */