mm: memcg/slab: remove memcg_kmem_get_cache()
The memcg_kmem_get_cache() function became really trivial, so let's just
inline it into its single call point: memcg_slab_pre_alloc_hook(). It
makes the code less bulky and can also help the compiler generate
better code.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-15-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
d797b7d054
commit
272911a4ad
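In outline, the patch folds the helper's short lookup into its only caller. A simplified before/after sketch (the wrapper names pre_alloc_hook_before/after are hypothetical, kernel context is omitted; the real code is in the diffs below):

/* Before: the allocation hook goes through a separate helper. */
static inline struct kmem_cache *pre_alloc_hook_before(struct kmem_cache *s)
{
	return memcg_kmem_get_cache(s);
}

/* After: the helper's body sits at its single call point, so the
 * compiler sees the whole fast path in one place. */
static inline struct kmem_cache *pre_alloc_hook_after(struct kmem_cache *s)
{
	struct kmem_cache *cachep;

	cachep = READ_ONCE(s->memcg_params.memcg_cache);
	if (unlikely(!cachep)) {
		/* Schedule async creation; fall back to the root cache. */
		queue_work(system_wq, &s->memcg_params.work);
		return s;
	}
	return cachep;
}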
include/linux/memcontrol.h
@@ -1403,8 +1403,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
 }
 #endif
 
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
-
 #ifdef CONFIG_MEMCG_KMEM
 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
 			unsigned int nr_pages);
mm/memcontrol.c
@@ -393,7 +393,7 @@ void memcg_put_cache_ids(void)
 
 /*
  * A lot of the calls to the cache allocation functions are expected to be
- * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
+ * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
  * conditional to this static branch, we'll have to allow modules that does
  * kmem_cache_alloc and the such to see this symbol as well
  */
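For context, the "static branch" this comment refers to is a jump-label key (memcg_kmem_enabled_key in this file) that is exported precisely because the fast path is inlined into modules. A minimal sketch of that pattern, with a hypothetical key name:

#include <linux/jump_label.h>
#include <linux/export.h>

DEFINE_STATIC_KEY_FALSE(example_enabled_key);
EXPORT_SYMBOL(example_enabled_key);

/* Lives in a header and gets inlined into module code (e.g. callers of
 * kmem_cache_alloc()), so the key symbol itself must be visible to
 * modules. */
static inline bool example_path_enabled(void)
{
	return static_branch_likely(&example_enabled_key);
}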
mm/memcontrol.c
@@ -2900,29 +2900,6 @@ static void memcg_free_cache_id(int id)
 	ida_simple_remove(&memcg_cache_ida, id);
 }
 
-/**
- * memcg_kmem_get_cache: select memcg or root cache for allocation
- * @cachep: the original global kmem cache
- *
- * Return the kmem_cache we're supposed to use for a slab allocation.
- *
- * If the cache does not exist yet, if we are the first user of it, we
- * create it asynchronously in a workqueue and let the current allocation
- * go through with the original cache.
- */
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
-{
-	struct kmem_cache *memcg_cachep;
-
-	memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
-	if (unlikely(!memcg_cachep)) {
-		queue_work(system_wq, &cachep->memcg_params.work);
-		return cachep;
-	}
-
-	return memcg_cachep;
-}
-
 /**
  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
  * @memcg: memory cgroup to charge
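The function removed above is an instance of a common deferred-initialization pattern: a lockless read of a maybe-not-yet-created object, with creation punted to a workqueue. A self-contained sketch of the pattern under hypothetical names (example_params, example_create_workfn); note that queue_work() returns without queuing when the work item is already pending, so racing allocations schedule creation only once:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_params {
	struct kmem_cache *cache;	/* NULL until the worker publishes it */
	struct work_struct work;
};

static void example_create_workfn(struct work_struct *work)
{
	struct example_params *p = container_of(work, struct example_params, work);

	/* Create the cache and publish it (see the barrier note below). */
	p->cache = kmem_cache_create("example", 64, 0, 0, NULL);
}

static void example_init(struct example_params *p)
{
	p->cache = NULL;
	INIT_WORK(&p->work, example_create_workfn);
}

static struct kmem_cache *example_get_cache(struct example_params *p,
					    struct kmem_cache *root)
{
	struct kmem_cache *c = READ_ONCE(p->cache);

	if (unlikely(!c)) {
		/* Not created yet: kick the worker, use the root cache. */
		queue_work(system_wq, &p->work);
		return root;
	}
	return c;
}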
mm/slab.h
@@ -365,9 +365,16 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 	if (memcg_kmem_bypass())
 		return s;
 
-	cachep = memcg_kmem_get_cache(s);
-	if (is_root_cache(cachep))
+	cachep = READ_ONCE(s->memcg_params.memcg_cache);
+	if (unlikely(!cachep)) {
+		/*
+		 * If memcg cache does not exist yet, we schedule it's
+		 * asynchronous creation and let the current allocation
+		 * go through with the root cache.
+		 */
+		queue_work(system_wq, &s->memcg_params.work);
 		return s;
+	}
 
 	objcg = get_obj_cgroup_from_current();
 	if (!objcg)
mm/slab_common.c
@@ -570,7 +570,7 @@ void memcg_create_kmem_cache(struct kmem_cache *root_cache)
 	}
 
 	/*
-	 * Since readers won't lock (see memcg_kmem_get_cache()), we need a
+	 * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a
 	 * barrier here to ensure nobody will see the kmem_cache partially
 	 * initialized.
 	 */
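The comment updated above describes the writer side of the lockless pairing: the creator must order its initialization stores before publishing the pointer that memcg_slab_pre_alloc_hook() reads with READ_ONCE(). A sketch of both sides, reusing the hypothetical example_params from the earlier sketch:

/* Writer (cache-creation path): publish only after full init. */
static void example_publish(struct example_params *p, struct kmem_cache *new)
{
	/*
	 * Order all initialization of *new before the pointer store;
	 * pairs with the dependency ordering of the READ_ONCE() pointer
	 * load on the reader side.
	 */
	smp_wmb();
	WRITE_ONCE(p->cache, new);
}

/* Reader (allocation fast path): no locks taken. */
static struct kmem_cache *example_lookup(struct example_params *p)
{
	return READ_ONCE(p->cache);
}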