mirror of
https://github.com/torvalds/linux.git
synced 2024-11-26 06:02:05 +00:00
memcg: remove memcg_kmem_skip_account
The flag memcg_kmem_skip_account was added during the era of opt-out kmem accounting. There is no need for such flag in the opt-in world as there aren't any __GFP_ACCOUNT allocations within memcg_create_cache_enqueue(). Link: http://lkml.kernel.org/r/20180919004501.178023-1-shakeelb@google.com Signed-off-by: Shakeel Butt <shakeelb@google.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@kernel.org> Cc: Vladimir Davydov <vdavydov.dev@gmail.com> Cc: Greg Thelen <gthelen@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
86b27beae5
commit
85cfb24506
@@ -724,9 +724,6 @@ struct task_struct {
|
||||
#endif
|
||||
#ifdef CONFIG_MEMCG
|
||||
unsigned in_user_fault:1;
|
||||
#ifdef CONFIG_MEMCG_KMEM
|
||||
unsigned memcg_kmem_skip_account:1;
|
||||
#endif
|
||||
#endif
|
||||
#ifdef CONFIG_COMPAT_BRK
|
||||
unsigned brk_randomized:1;
|
||||
|
@@ -2460,7 +2460,7 @@ static void memcg_kmem_cache_create_func(struct work_struct *w)
|
||||
/*
|
||||
* Enqueue the creation of a per-memcg kmem_cache.
|
||||
*/
|
||||
static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
|
||||
static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
|
||||
struct kmem_cache *cachep)
|
||||
{
|
||||
struct memcg_kmem_cache_create_work *cw;
|
||||
@@ -2478,25 +2478,6 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
|
||||
queue_work(memcg_kmem_cache_wq, &cw->work);
|
||||
}
|
||||
|
||||
static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
|
||||
struct kmem_cache *cachep)
|
||||
{
|
||||
/*
|
||||
* We need to stop accounting when we kmalloc, because if the
|
||||
* corresponding kmalloc cache is not yet created, the first allocation
|
||||
* in __memcg_schedule_kmem_cache_create will recurse.
|
||||
*
|
||||
* However, it is better to enclose the whole function. Depending on
|
||||
* the debugging options enabled, INIT_WORK(), for instance, can
|
||||
* trigger an allocation. This too, will make us recurse. Because at
|
||||
* this point we can't allow ourselves back into memcg_kmem_get_cache,
|
||||
* the safest choice is to do it like this, wrapping the whole function.
|
||||
*/
|
||||
current->memcg_kmem_skip_account = 1;
|
||||
__memcg_schedule_kmem_cache_create(memcg, cachep);
|
||||
current->memcg_kmem_skip_account = 0;
|
||||
}
|
||||
|
||||
static inline bool memcg_kmem_bypass(void)
|
||||
{
|
||||
if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
|
||||
@@ -2531,9 +2512,6 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
|
||||
if (memcg_kmem_bypass())
|
||||
return cachep;
|
||||
|
||||
if (current->memcg_kmem_skip_account)
|
||||
return cachep;
|
||||
|
||||
memcg = get_mem_cgroup_from_current();
|
||||
kmemcg_id = READ_ONCE(memcg->kmemcg_id);
|
||||
if (kmemcg_id < 0)
|
||||
|
Loading…
Reference in New Issue
Block a user