cgroup: bpf: use cgroup_lock()/cgroup_unlock() wrappers
Replace mutex_[un]lock() with cgroup_[un]lock() wrappers to stay consistent across cgroup core and other subsystem code, while operating on the cgroup_mutex.

Signed-off-by: Kamalesh Babulal <kamalesh.babulal@oracle.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 4cdb91b0de
parent 8d3c682a5e
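For context, cgroup_lock() and cgroup_unlock() are thin wrappers around the same global cgroup_mutex, so this change alters no locking behaviour; it only routes every caller through the common helpers. A minimal sketch of what the wrappers look like, assuming the usual static-inline form in the cgroup headers (illustrative, not copied from the tree):

    /* Illustrative sketch only: wrappers that take and drop cgroup_mutex. */
    #include <linux/mutex.h>

    extern struct mutex cgroup_mutex;

    static inline void cgroup_lock(void)
    {
            mutex_lock(&cgroup_mutex);      /* same lock as before, via the wrapper */
    }

    static inline void cgroup_unlock(void)
    {
            mutex_unlock(&cgroup_mutex);    /* releases the identical mutex */
    }

With that in place, every mutex_lock(&cgroup_mutex)/mutex_unlock(&cgroup_mutex) pair in the hunks below becomes a cgroup_lock()/cgroup_unlock() pair, with no change in lock ordering or scope.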
@@ -173,11 +173,11 @@ void bpf_cgroup_atype_put(int cgroup_atype)
 {
         int i = cgroup_atype - CGROUP_LSM_START;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         if (--cgroup_lsm_atype[i].refcnt <= 0)
                 cgroup_lsm_atype[i].attach_btf_id = 0;
         WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 }
 #else
 static enum cgroup_bpf_attach_type
@@ -282,7 +282,7 @@ static void cgroup_bpf_release(struct work_struct *work)
 
         unsigned int atype;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
                 struct hlist_head *progs = &cgrp->bpf.progs[atype];
@@ -315,7 +315,7 @@ static void cgroup_bpf_release(struct work_struct *work)
                 bpf_cgroup_storage_free(storage);
         }
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                 cgroup_bpf_put(p);
@@ -729,9 +729,9 @@ static int cgroup_bpf_attach(struct cgroup *cgrp,
 {
         int ret;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         return ret;
 }
 
@@ -831,7 +831,7 @@ static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
 
         cg_link = container_of(link, struct bpf_cgroup_link, link);
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         /* link might have been auto-released by dying cgroup, so fail */
         if (!cg_link->cgroup) {
                 ret = -ENOLINK;
@@ -843,7 +843,7 @@ static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
         }
         ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
 out_unlock:
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         return ret;
 }
 
@@ -1009,9 +1009,9 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 {
         int ret;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         return ret;
 }
 
@@ -1120,9 +1120,9 @@ static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 {
         int ret;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         ret = __cgroup_bpf_query(cgrp, attr, uattr);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         return ret;
 }
 
@@ -1189,11 +1189,11 @@ static void bpf_cgroup_link_release(struct bpf_link *link)
         if (!cg_link->cgroup)
                 return;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         /* re-check cgroup under lock again */
         if (!cg_link->cgroup) {
-                mutex_unlock(&cgroup_mutex);
+                cgroup_unlock();
                 return;
         }
 
@@ -1205,7 +1205,7 @@ static void bpf_cgroup_link_release(struct bpf_link *link)
         cg = cg_link->cgroup;
         cg_link->cgroup = NULL;
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         cgroup_put(cg);
 }
@@ -1232,10 +1232,10 @@ static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
                 container_of(link, struct bpf_cgroup_link, link);
         u64 cg_id = 0;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         if (cg_link->cgroup)
                 cg_id = cgroup_id(cg_link->cgroup);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         seq_printf(seq,
                    "cgroup_id:\t%llu\n"
@@ -1251,10 +1251,10 @@ static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
                 container_of(link, struct bpf_cgroup_link, link);
         u64 cg_id = 0;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         if (cg_link->cgroup)
                 cg_id = cgroup_id(cg_link->cgroup);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         info->cgroup.cgroup_id = cg_id;
         info->cgroup.attach_type = cg_link->type;
@@ -58,7 +58,7 @@ static void *cgroup_iter_seq_start(struct seq_file *seq, loff_t *pos)
 {
         struct cgroup_iter_priv *p = seq->private;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         /* cgroup_iter doesn't support read across multiple sessions. */
         if (*pos > 0) {
@@ -89,7 +89,7 @@ static void cgroup_iter_seq_stop(struct seq_file *seq, void *v)
 {
         struct cgroup_iter_priv *p = seq->private;
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         /* pass NULL to the prog for post-processing */
         if (!v) {
@@ -333,14 +333,14 @@ static void cgroup_storage_map_free(struct bpf_map *_map)
         struct list_head *storages = &map->list;
         struct bpf_cgroup_storage *storage, *stmp;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         list_for_each_entry_safe(storage, stmp, storages, list_map) {
                 bpf_cgroup_storage_unlink(storage);
                 bpf_cgroup_storage_free(storage);
         }
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         WARN_ON(!RB_EMPTY_ROOT(&map->root));
         WARN_ON(!list_empty(&map->list));
@@ -58,7 +58,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
         struct cgroup_root *root;
         int retval = 0;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         cgroup_attach_lock(true);
         for_each_root(root) {
                 struct cgroup *from_cgrp;
@@ -72,7 +72,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
                         break;
         }
         cgroup_attach_unlock(true);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         return retval;
 }
@@ -106,7 +106,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
         if (ret)
                 return ret;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         percpu_down_write(&cgroup_threadgroup_rwsem);
 
@@ -145,7 +145,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 out_err:
         cgroup_migrate_finish(&mgctx);
         percpu_up_write(&cgroup_threadgroup_rwsem);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         return ret;
 }
 
@@ -847,13 +847,13 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
         kernfs_break_active_protection(new_parent);
         kernfs_break_active_protection(kn);
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         ret = kernfs_rename(kn, new_parent, new_name_str);
         if (!ret)
                 TRACE_CGROUP_PATH(rename, cgrp);
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         kernfs_unbreak_active_protection(kn);
         kernfs_unbreak_active_protection(new_parent);
@@ -1119,7 +1119,7 @@ int cgroup1_reconfigure(struct fs_context *fc)
                 trace_cgroup_remount(root);
 
 out_unlock:
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         return ret;
 }
 
@@ -1246,7 +1246,7 @@ int cgroup1_get_tree(struct fs_context *fc)
         if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
                 ret = 1;        /* restart */
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         if (!ret)
                 ret = cgroup_do_get_tree(fc);
@@ -1391,7 +1391,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
         cgroup_favor_dynmods(root, false);
         cgroup_exit_root_id(root);
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         cgroup_rstat_exit(cgrp);
         kernfs_destroy_root(root->kf_root);
@@ -1625,7 +1625,7 @@ void cgroup_kn_unlock(struct kernfs_node *kn)
         else
                 cgrp = kn->parent->priv;
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         kernfs_unbreak_active_protection(kn);
         cgroup_put(cgrp);
@@ -1670,7 +1670,7 @@ struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
         if (drain_offline)
                 cgroup_lock_and_drain_offline(cgrp);
         else
-                mutex_lock(&cgroup_mutex);
+                cgroup_lock();
 
         if (!cgroup_is_dead(cgrp))
                 return cgrp;
@@ -2167,13 +2167,13 @@ int cgroup_do_get_tree(struct fs_context *fc)
                 struct super_block *sb = fc->root->d_sb;
                 struct cgroup *cgrp;
 
-                mutex_lock(&cgroup_mutex);
+                cgroup_lock();
                 spin_lock_irq(&css_set_lock);
 
                 cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);
 
                 spin_unlock_irq(&css_set_lock);
-                mutex_unlock(&cgroup_mutex);
+                cgroup_unlock();
 
                 nsdentry = kernfs_node_dentry(cgrp->kn, sb);
                 dput(fc->root);
@@ -2356,13 +2356,13 @@ int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
 {
         int ret;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         spin_lock_irq(&css_set_lock);
 
         ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
 
         spin_unlock_irq(&css_set_lock);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         return ret;
 }
@@ -2388,7 +2388,7 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
         int hierarchy_id = 1;
         int ret;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         spin_lock_irq(&css_set_lock);
 
         root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
@@ -2402,7 +2402,7 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
         }
 
         spin_unlock_irq(&css_set_lock);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         return ret;
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
@@ -3111,7 +3111,7 @@ void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
         int ssid;
 
 restart:
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
                 for_each_subsys(ss, ssid) {
@@ -3125,7 +3125,7 @@ restart:
                         prepare_to_wait(&dsct->offline_waitq, &wait,
                                         TASK_UNINTERRUPTIBLE);
 
-                        mutex_unlock(&cgroup_mutex);
+                        cgroup_unlock();
                         schedule();
                         finish_wait(&dsct->offline_waitq, &wait);
 
@@ -4374,9 +4374,9 @@ int cgroup_rm_cftypes(struct cftype *cfts)
         if (!(cfts[0].flags & __CFTYPE_ADDED))
                 return -ENOENT;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         ret = cgroup_rm_cftypes_locked(cfts);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         return ret;
 }
 
@@ -4408,14 +4408,14 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
         if (ret)
                 return ret;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         list_add_tail(&cfts->node, &ss->cfts);
         ret = cgroup_apply_cftypes(cfts, true);
         if (ret)
                 cgroup_rm_cftypes_locked(cfts);
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         return ret;
 }
 
@@ -5385,7 +5385,7 @@ static void css_release_work_fn(struct work_struct *work)
         struct cgroup_subsys *ss = css->ss;
         struct cgroup *cgrp = css->cgroup;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         css->flags |= CSS_RELEASED;
         list_del_rcu(&css->sibling);
@@ -5426,7 +5426,7 @@ static void css_release_work_fn(struct work_struct *work)
                                          NULL);
         }
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
         queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
@@ -5774,7 +5774,7 @@ static void css_killed_work_fn(struct work_struct *work)
         struct cgroup_subsys_state *css =
                 container_of(work, struct cgroup_subsys_state, destroy_work);
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         do {
                 offline_css(css);
@@ -5783,7 +5783,7 @@ static void css_killed_work_fn(struct work_struct *work)
                 css = css->parent;
         } while (css && atomic_dec_and_test(&css->online_cnt));
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 }
 
 /* css kill confirmation processing requires process context, bounce */
@@ -5967,7 +5967,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 
         pr_debug("Initializing cgroup subsys %s\n", ss->name);
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         idr_init(&ss->css_idr);
         INIT_LIST_HEAD(&ss->cfts);
@@ -6011,7 +6011,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 
         BUG_ON(online_css(css));
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 }
 
 /**
@@ -6071,7 +6071,7 @@ int __init cgroup_init(void)
 
         get_user_ns(init_cgroup_ns.user_ns);
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
 
         /*
          * Add init_css_set to the hash table so that dfl_root can link to
@@ -6082,7 +6082,7 @@ int __init cgroup_init(void)
 
         BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         for_each_subsys(ss, ssid) {
                 if (ss->early_init) {
@@ -6134,9 +6134,9 @@ int __init cgroup_init(void)
                 if (ss->bind)
                         ss->bind(init_css_set.subsys[ssid]);
 
-                mutex_lock(&cgroup_mutex);
+                cgroup_lock();
                 css_populate_dir(init_css_set.subsys[ssid]);
-                mutex_unlock(&cgroup_mutex);
+                cgroup_unlock();
         }
 
         /* init_css_set.subsys[] has been updated, re-hash */
@@ -6241,7 +6241,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
         if (!buf)
                 goto out;
 
-        mutex_lock(&cgroup_mutex);
+        cgroup_lock();
         spin_lock_irq(&css_set_lock);
 
         for_each_root(root) {
@@ -6296,7 +6296,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
         retval = 0;
 out_unlock:
         spin_unlock_irq(&css_set_lock);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         kfree(buf);
 out:
         return retval;
@@ -6380,7 +6380,7 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
         struct file *f;
 
         if (kargs->flags & CLONE_INTO_CGROUP)
-                mutex_lock(&cgroup_mutex);
+                cgroup_lock();
 
         cgroup_threadgroup_change_begin(current);
 
@@ -6455,7 +6455,7 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
 
 err:
         cgroup_threadgroup_change_end(current);
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
         if (f)
                 fput(f);
         if (dst_cgrp)
@@ -6482,7 +6482,7 @@ static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
         struct cgroup *cgrp = kargs->cgrp;
         struct css_set *cset = kargs->cset;
 
-        mutex_unlock(&cgroup_mutex);
+        cgroup_unlock();
 
         if (cset) {
                 put_css_set(cset);