blkcg: remove additional reference to the css
The previous patch in this series removed carrying around a pointer to the
css in blkg. However, the blkg association logic still relied on taking a
reference on the css to ensure we wouldn't fail in getting a reference for
the blkg.

Here the implicit dependency on the css is removed. The association continues
to rely on the tryget logic walking up the blkg tree. This streamlines the
three ways that association can happen: normal, swap, and writeback.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
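With the css reference gone, correctness comes from the blkg refcount alone:
when taking a reference on a blkg fails because that blkg is being torn down,
the code walks up to the parent blkg, and the root blkg is always live. The
following is a minimal userspace sketch of that fallback pattern; the names
(node, node_tryget, node_tryget_closest) are invented for illustration and
only mirror the blkg_try_get()-style helpers used in the diff, they are not
kernel API.

#include <stdbool.h>
#include <stdlib.h>

/* Toy stand-in for a refcounted, hierarchical object such as a blkg. */
struct node {
	int refcnt;		/* 0 means the node is being torn down */
	struct node *parent;	/* NULL only above the root */
};

/* Stand-in for a refcount_inc_not_zero()-style tryget. */
static bool node_tryget(struct node *n)
{
	if (n->refcnt == 0)
		return false;
	n->refcnt++;
	return true;
}

/*
 * Walk towards the root until a reference can be taken.  The root is
 * assumed to stay pinned for the device's lifetime, so the loop terminates.
 */
static struct node *node_tryget_closest(struct node *n)
{
	while (!node_tryget(n))
		n = n->parent;
	return n;
}

int main(void)
{
	struct node root  = { .refcnt = 1, .parent = NULL };
	struct node child = { .refcnt = 0, .parent = &root };	/* dying */

	/* Association lands on the nearest live ancestor, here the root. */
	return node_tryget_closest(&child) == &root ? EXIT_SUCCESS : EXIT_FAILURE;
}

In the patch below this pattern shows up as __bio_associate_blkg_from_css()
falling back to q->root_blkg and blk_get_rl() moving from blkg_get() to
blkg_try_get() with a root_rl fallback.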
commit f0fcb3ec89 (parent c839e7a03f)
block/bio.c (62 changed lines)
@@ -1978,18 +1978,30 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
 	return 0;
 }
 
+/**
+ * __bio_associate_blkg_from_css - internal blkg association function
+ *
+ * This is the core association function that all association paths rely on.
+ * A blkg reference is taken which is released upon freeing of the bio.
+ */
 static int __bio_associate_blkg_from_css(struct bio *bio,
 					 struct cgroup_subsys_state *css)
 {
+	struct request_queue *q = bio->bi_disk->queue;
 	struct blkcg_gq *blkg;
+	int ret;
 
 	rcu_read_lock();
 
-	blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);
+	if (!css || !css->parent)
+		blkg = q->root_blkg;
+	else
+		blkg = blkg_lookup_create(css_to_blkcg(css), q);
+
+	ret = bio_associate_blkg(bio, blkg);
 
 	rcu_read_unlock();
-
-	return bio_associate_blkg(bio, blkg);
+	return ret;
 }
 
 /**
@@ -1998,13 +2010,14 @@ static int __bio_associate_blkg_from_css(struct bio *bio,
  * @css: target css
  *
  * Associate @bio with the blkg found by combining the css's blkg and the
- * request_queue of the @bio. This takes a reference on the css that will
- * be put upon freeing of @bio.
+ * request_queue of the @bio. This falls back to the queue's root_blkg if
+ * the association fails with the css.
  */
 int bio_associate_blkg_from_css(struct bio *bio,
 				struct cgroup_subsys_state *css)
 {
-	css_get(css);
+	if (unlikely(bio->bi_blkg))
+		return -EBUSY;
 	return __bio_associate_blkg_from_css(bio, css);
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
@@ -2016,22 +2029,29 @@ EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
  * @page: the page to lookup the blkcg from
  *
  * Associate @bio with the blkg from @page's owning memcg and the respective
- * request_queue. This works like every other associate function wrt
- * references.
+ * request_queue. If cgroup_e_css returns NULL, fall back to the queue's
+ * root_blkg.
  *
  * Note: this must be called after bio has an associated device.
  */
 int bio_associate_blkg_from_page(struct bio *bio, struct page *page)
 {
 	struct cgroup_subsys_state *css;
+	int ret;
 
 	if (unlikely(bio->bi_blkg))
 		return -EBUSY;
 	if (!page->mem_cgroup)
 		return 0;
-	css = cgroup_get_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
 
-	return __bio_associate_blkg_from_css(bio, css);
+	rcu_read_lock();
+
+	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
+
+	ret = __bio_associate_blkg_from_css(bio, css);
+
+	rcu_read_unlock();
+	return ret;
 }
 
 #endif /* CONFIG_MEMCG */
@@ -2041,12 +2061,12 @@ int bio_associate_blkg_from_page(struct bio *bio, struct page *page)
  * @bio: target bio
  *
  * Associate @bio with the blkg found from the bio's css and the request_queue.
- * If one is not found, bio_lookup_blkg creates the blkg.
+ * If one is not found, bio_lookup_blkg creates the blkg. This falls back to
+ * the queue's root_blkg if association fails.
  */
 int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
 {
-	struct blkcg *blkcg;
-	struct blkcg_gq *blkg;
+	struct cgroup_subsys_state *css;
 	int ret = 0;
 
 	/* someone has already associated this bio with a blkg */
@@ -2055,15 +2075,9 @@ int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
 
 	rcu_read_lock();
 
-	blkcg = css_to_blkcg(blkcg_get_css());
+	css = blkcg_css();
 
-	if (!blkcg->css.parent) {
-		ret = bio_associate_blkg(bio, q->root_blkg);
-	} else {
-		blkg = blkg_lookup_create(blkcg, q);
-
-		ret = bio_associate_blkg(bio, blkg);
-	}
+	ret = __bio_associate_blkg_from_css(bio, css);
 
 	rcu_read_unlock();
 	return ret;
@@ -2080,8 +2094,6 @@ void bio_disassociate_task(struct bio *bio)
 		bio->bi_ioc = NULL;
 	}
 	if (bio->bi_blkg) {
-		/* a ref is always taken on css */
-		css_put(&bio_blkcg(bio)->css);
 		blkg_put(bio->bi_blkg);
 		bio->bi_blkg = NULL;
 	}
@@ -2094,10 +2106,8 @@ void bio_disassociate_task(struct bio *bio)
  */
 void bio_clone_blkg_association(struct bio *dst, struct bio *src)
 {
-	if (src->bi_blkg) {
-		css_get(&bio_blkcg(src)->css);
+	if (src->bi_blkg)
 		bio_associate_blkg(dst, src->bi_blkg);
-	}
 }
 EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
 #endif /* CONFIG_BLK_CGROUP */
include/linux/blk-cgroup.h

@@ -249,47 +249,6 @@ static inline struct cgroup_subsys_state *blkcg_css(void)
 	return task_css(current, io_cgrp_id);
 }
 
-/**
- * blkcg_get_css - find and get a reference to the css
- *
- * Find the css associated with either the kthread or the current task.
- * This takes a reference on the blkcg which will need to be managed by the
- * caller.
- */
-static inline struct cgroup_subsys_state *blkcg_get_css(void)
-{
-	struct cgroup_subsys_state *css;
-
-	rcu_read_lock();
-
-	css = kthread_blkcg();
-	if (css) {
-		css_get(css);
-	} else {
-		/*
-		 * This is a bit complicated. It is possible task_css is seeing
-		 * an old css pointer here. This is caused by the current
-		 * thread migrating away from this cgroup and this cgroup dying.
-		 * css_tryget() will fail when trying to take a ref on a cgroup
-		 * that's ref count has hit 0.
-		 *
-		 * Therefore, if it does fail, this means current must have
-		 * been swapped away already and this is waiting for it to
-		 * propagate on the polling cpu. Hence the use of cpu_relax().
-		 */
-		while (true) {
-			css = task_css(current, io_cgrp_id);
-			if (likely(css_tryget(css)))
-				break;
-			cpu_relax();
-		}
-	}
-
-	rcu_read_unlock();
-
-	return css;
-}
-
 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
 {
 	return css ? container_of(css, struct blkcg, css) : NULL;
@@ -628,10 +587,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
 	rcu_read_lock();
 
 	blkcg = bio_blkcg(bio);
-	if (blkcg)
-		css_get(&blkcg->css);
-	else
-		blkcg = css_to_blkcg(blkcg_get_css());
+	if (!blkcg)
+		blkcg = css_to_blkcg(blkcg_css());
 
 	/* bypass blkg lookup and use @q->root_rl directly for root */
 	if (blkcg == &blkcg_root)
@@ -646,7 +603,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
 	if (unlikely(!blkg))
 		goto root_rl;
 
-	blkg_get(blkg);
+	if (!blkg_try_get(blkg))
+		goto root_rl;
 	rcu_read_unlock();
 	return &blkg->rl;
 root_rl:
@@ -663,8 +621,6 @@ root_rl:
  */
 static inline void blk_put_rl(struct request_list *rl)
 {
-	/* an additional ref is always taken for rl */
-	css_put(&rl->blkg->blkcg->css);
 	if (rl->blkg->blkcg != &blkcg_root)
 		blkg_put(rl->blkg);
 }
include/linux/cgroup.h

@@ -93,6 +93,8 @@ extern struct css_set init_css_set;
 
 bool css_has_online_children(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+					 struct cgroup_subsys *ss);
 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
 					     struct cgroup_subsys *ss);
 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
kernel/cgroup/cgroup.c

@@ -492,7 +492,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
 }
 
 /**
- * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
  * @cgrp: the cgroup of interest
  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
  *
@@ -501,8 +501,8 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
  * enabled. If @ss is associated with the hierarchy @cgrp is on, this
  * function is guaranteed to return non-NULL css.
  */
-static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
-						struct cgroup_subsys *ss)
+static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
+							 struct cgroup_subsys *ss)
 {
 	lockdep_assert_held(&cgroup_mutex);
 
@@ -522,6 +522,35 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
 	return cgroup_css(cgrp, ss);
 }
 
+/**
+ * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get the effective css of @cgrp for @ss. The effective css is
+ * defined as the matching css of the nearest ancestor including self which
+ * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
+ * the root css is returned, so this function always returns a valid css.
+ *
+ * The returned css is not guaranteed to be online, and therefore it is the
+ * caller's responsibility to tryget a reference for it.
+ */
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
+					 struct cgroup_subsys *ss)
+{
+	struct cgroup_subsys_state *css;
+
+	do {
+		css = cgroup_css(cgrp, ss);
+
+		if (css)
+			return css;
+		cgrp = cgroup_parent(cgrp);
+	} while (cgrp);
+
+	return init_css_set.subsys[ss->id];
+}
+
 /**
  * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
  * @cgrp: the cgroup of interest
@@ -604,10 +633,11 @@ EXPORT_SYMBOL_GPL(of_css);
  *
  * Should be called under cgroup_[tree_]mutex.
  */
-#define for_each_e_css(css, ssid, cgrp) \
-	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
-		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
-			; \
+#define for_each_e_css(css, ssid, cgrp) \
+	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
+		if (!((css) = cgroup_e_css_by_mask(cgrp, \
+						   cgroup_subsys[(ssid)]))) \
+			; \
 		else
 
 /**
@@ -1006,7 +1036,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
 			 * @ss is in this hierarchy, so we want the
 			 * effective css from @cgrp.
 			 */
-			template[i] = cgroup_e_css(cgrp, ss);
+			template[i] = cgroup_e_css_by_mask(cgrp, ss);
 		} else {
 			/*
 			 * @ss is not in this hierarchy, so we don't want
@@ -3019,7 +3049,7 @@ static int cgroup_apply_control(struct cgroup *cgrp)
 		return ret;
 
 	/*
-	 * At this point, cgroup_e_css() results reflect the new csses
+	 * At this point, cgroup_e_css_by_mask() results reflect the new csses
 	 * making the following cgroup_update_dfl_csses() properly update
 	 * css associations of all tasks in the subtree.
 	 */
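An aside on the cgroup_e_css() helper added above: it finds the effective css
by walking from the cgroup towards the root and falling back to the init css
set, and it deliberately takes no reference, so callers such as
bio_associate_blkg_from_page() in this patch hold only RCU while the blkg
tryget provides the longer-lived guarantee. The ancestor walk itself can be
modelled in isolation; toy_cgroup, io_state, and toy_e_css below are invented
names in a standalone sketch, not kernel code.

#include <stdio.h>

/* Toy model of a cgroup with an optional per-subsystem state. */
struct toy_cgroup {
	struct toy_cgroup *parent;	/* NULL for the root */
	const char *io_state;		/* non-NULL when the subsystem is enabled here */
};

/* Default state, playing the role of init_css_set.subsys[ss->id]. */
static const char *toy_default_state = "default io state";

/* Nearest ancestor (including self) with the subsystem enabled, else the default. */
static const char *toy_e_css(const struct toy_cgroup *cgrp)
{
	do {
		if (cgrp->io_state)
			return cgrp->io_state;
		cgrp = cgrp->parent;
	} while (cgrp);

	return toy_default_state;
}

int main(void)
{
	struct toy_cgroup root = { .parent = NULL, .io_state = "root io state" };
	struct toy_cgroup mid  = { .parent = &root, .io_state = NULL };
	struct toy_cgroup leaf = { .parent = &mid, .io_state = NULL };

	/* Neither leaf nor mid enables io, so the walk resolves to root's state. */
	printf("%s\n", toy_e_css(&leaf));
	return 0;
}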