146aa1bd05
put_css_set_taskexit may be called while find_css_set runs on another cpu, and the following race can occur:

put_css_set_taskexit side                 find_css_set side
                                        |
atomic_dec_and_test(&kref->refcount)    |
    /* kref->refcount = 0 */            |
....................................................................
                                        |  read_lock(&css_set_lock)
                                        |  find_existing_css_set
                                        |  get_css_set
                                        |  read_unlock(&css_set_lock);
....................................................................
__release_css_set                       |
....................................................................
                                        |  /* use a released css_set */
                                        |

[put_css_set has the same problem, but in the current code all put_css_set calls already sit inside the cgroup mutex critical region, just like find_css_set.]

[akpm@linux-foundation.org: repair comments]
[menage@google.com: eliminate race in css_set refcounting]
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
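Below is a minimal sketch (not the actual patch) of the refcounting pattern the fix moves toward: the final reference drop is re-checked under the same css_set_lock that find_existing_css_set()/get_css_set() take, so a concurrent finder can never be handed a css_set whose count has already reached zero. The helper name unlink_css_set() and the exact free sequence are illustrative assumptions, not a quote of the commit.

/*
 * Sketch only: drop a css_set reference without racing against finders.
 * Assumes css_set has an atomic_t refcount and css_set_lock is the
 * rwlock protecting the css_set hash table.
 */
static void __put_css_set(struct css_set *cg)
{
	/* Fast path: drop a reference as long as it is not the last one. */
	if (atomic_add_unless(&cg->refcount, -1, 1))
		return;

	write_lock(&css_set_lock);
	/* Re-check under the lock; a finder may have taken a new reference. */
	if (!atomic_dec_and_test(&cg->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}

	/* Last reference: unhash while still holding the lock ... */
	unlink_css_set(cg);
	write_unlock(&css_set_lock);

	/* ... so no finder can see the set once it is freed. */
	kfree(cg);
}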
108 lines · 2.0 KiB · C
/*
 * kernel/cgroup_debug.c - Example cgroup subsystem that
 * exposes debug info
 *
 * Copyright (C) Google Inc, 2007
 *
 * Developed by Paul Menage (menage@google.com)
 *
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <asm/atomic.h>

static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
						struct cgroup *cont)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	kfree(cont->subsys[debug_subsys_id]);
}

static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
{
	return atomic_read(&cont->count);
}

static u64 taskcount_read(struct cgroup *cont, struct cftype *cft)
{
	u64 count;

	cgroup_lock();
	count = cgroup_task_count(cont);
	cgroup_unlock();
	return count;
}

static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
{
	return (u64)(long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup *cont,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&current->cgroups->refcount);
	rcu_read_unlock();
	return count;
}

static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &cgrp->flags);
}

static struct cftype files[] = {
	{
		.name = "cgroup_refcount",
		.read_u64 = cgroup_refcount_read,
	},
	{
		.name = "taskcount",
		.read_u64 = taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},
};

static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
}

struct cgroup_subsys debug_subsys = {
	.name = "debug",
	.create = debug_create,
	.destroy = debug_destroy,
	.populate = debug_populate,
	.subsys_id = debug_subsys_id,
};