mirror of https://github.com/torvalds/linux.git
memcg: don't call memcg_update_all_caches if new cache id fits
memcg_update_all_caches grows arrays of per-memcg caches, so we only
need to call it when memcg_limited_groups_array_size is increased.
However, currently we invoke it each time a new kmem-active memory
cgroup is created. Then it just iterates over all slab_caches and does
nothing (memcg_update_cache_size returns immediately).

This patch fixes this insanity. In the meantime it moves the code
dealing with id allocations to separate functions, memcg_alloc_cache_id
and memcg_free_cache_id.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 33a690c45b
commit f3bb3043a0

mm/memcontrol.c | 136 (72 insertions(+), 64 deletions(-), per the hunk headers below)
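In essence, the new memcg_alloc_cache_id() takes an id from the IDA first and grows the per-memcg cache arrays only when that id falls outside memcg_limited_groups_array_size. Below is a minimal userspace sketch of that flow, not the kernel code itself: the allocator and the resize step are stubs, update_all_caches()/alloc_cache_id() are placeholder names, and the MIN/MAX constants are assumed to match memcontrol.c of that era.

        /*
         * Simplified sketch of the allocation path this patch introduces.
         * The expensive "grow every root cache's memcg_caches array" step
         * runs only when the new id does not fit the current arrays.
         */
        #include <stdio.h>

        #define MEMCG_CACHES_MIN_SIZE 4
        #define MEMCG_CACHES_MAX_SIZE 65535

        static int array_size = MEMCG_CACHES_MIN_SIZE; /* memcg_limited_groups_array_size */
        static int next_id;                            /* stands in for the kmem_limited_groups IDA */

        /* stands in for memcg_update_all_caches(); pretend resizing always succeeds */
        static int update_all_caches(int num)
        {
                printf("growing all memcg cache arrays to %d entries\n", num);
                array_size = num;
                return 0;
        }

        static int alloc_cache_id(void)
        {
                int id = next_id++;     /* ida_simple_get() in the kernel */
                int size;

                if (id < array_size)    /* fast path: id fits, nothing to grow */
                        return id;

                /* slow path: double the arrays, clamped to [MIN, MAX] */
                size = 2 * (id + 1);
                if (size < MEMCG_CACHES_MIN_SIZE)
                        size = MEMCG_CACHES_MIN_SIZE;
                else if (size > MEMCG_CACHES_MAX_SIZE)
                        size = MEMCG_CACHES_MAX_SIZE;

                if (update_all_caches(size))
                        return -1;      /* the kernel frees the id and returns the error */
                return id;
        }

        int main(void)
        {
                for (int i = 0; i < 6; i++) {
                        int id = alloc_cache_id();
                        printf("got id %d (array size now %d)\n", id, array_size);
                }
                return 0;
        }

On the fast path, a newly created kmem-active cgroup whose id already fits triggers no walk over slab_caches at all, which is exactly the redundant work the patch removes.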
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -649,11 +649,13 @@ int memcg_limited_groups_array_size;
 struct static_key memcg_kmem_enabled_key;
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
 
+static void memcg_free_cache_id(int id);
+
 static void disarm_kmem_keys(struct mem_cgroup *memcg)
 {
         if (memcg_kmem_is_active(memcg)) {
                 static_key_slow_dec(&memcg_kmem_enabled_key);
-                ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
+                memcg_free_cache_id(memcg->kmemcg_id);
         }
         /*
          * This check can't live in kmem destruction function,
@@ -2906,19 +2908,44 @@ int memcg_cache_id(struct mem_cgroup *memcg)
         return memcg ? memcg->kmemcg_id : -1;
 }
 
-static size_t memcg_caches_array_size(int num_groups)
+static int memcg_alloc_cache_id(void)
 {
-        ssize_t size;
-        if (num_groups <= 0)
-                return 0;
+        int id, size;
+        int err;
+
+        id = ida_simple_get(&kmem_limited_groups,
+                            0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
+        if (id < 0)
+                return id;
+
+        if (id < memcg_limited_groups_array_size)
+                return id;
+
+        /*
+         * There's no space for the new id in memcg_caches arrays,
+         * so we have to grow them.
+         */
 
-        size = 2 * num_groups;
+        size = 2 * (id + 1);
         if (size < MEMCG_CACHES_MIN_SIZE)
                 size = MEMCG_CACHES_MIN_SIZE;
         else if (size > MEMCG_CACHES_MAX_SIZE)
                 size = MEMCG_CACHES_MAX_SIZE;
 
-        return size;
+        mutex_lock(&memcg_slab_mutex);
+        err = memcg_update_all_caches(size);
+        mutex_unlock(&memcg_slab_mutex);
+
+        if (err) {
+                ida_simple_remove(&kmem_limited_groups, id);
+                return err;
+        }
+        return id;
+}
+
+static void memcg_free_cache_id(int id)
+{
+        ida_simple_remove(&kmem_limited_groups, id);
 }
 
 /*
@@ -2928,59 +2955,55 @@ static size_t memcg_caches_array_size(int num_groups)
  */
 void memcg_update_array_size(int num)
 {
-        if (num > memcg_limited_groups_array_size)
-                memcg_limited_groups_array_size = memcg_caches_array_size(num);
+        memcg_limited_groups_array_size = num;
 }
 
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
         struct memcg_cache_params *cur_params = s->memcg_params;
+        struct memcg_cache_params *new_params;
+        size_t size;
+        int i;
 
         VM_BUG_ON(!is_root_cache(s));
 
-        if (num_groups > memcg_limited_groups_array_size) {
-                int i;
-                struct memcg_cache_params *new_params;
-                ssize_t size = memcg_caches_array_size(num_groups);
+        size = num_groups * sizeof(void *);
+        size += offsetof(struct memcg_cache_params, memcg_caches);
 
-                size *= sizeof(void *);
-                size += offsetof(struct memcg_cache_params, memcg_caches);
+        new_params = kzalloc(size, GFP_KERNEL);
+        if (!new_params)
+                return -ENOMEM;
 
-                new_params = kzalloc(size, GFP_KERNEL);
-                if (!new_params)
-                        return -ENOMEM;
+        new_params->is_root_cache = true;
 
-                new_params->is_root_cache = true;
-
-                /*
-                 * There is the chance it will be bigger than
-                 * memcg_limited_groups_array_size, if we failed an allocation
-                 * in a cache, in which case all caches updated before it, will
-                 * have a bigger array.
-                 *
-                 * But if that is the case, the data after
-                 * memcg_limited_groups_array_size is certainly unused
-                 */
-                for (i = 0; i < memcg_limited_groups_array_size; i++) {
-                        if (!cur_params->memcg_caches[i])
-                                continue;
-                        new_params->memcg_caches[i] =
-                                                cur_params->memcg_caches[i];
-                }
-
-                /*
-                 * Ideally, we would wait until all caches succeed, and only
-                 * then free the old one. But this is not worth the extra
-                 * pointer per-cache we'd have to have for this.
-                 *
-                 * It is not a big deal if some caches are left with a size
-                 * bigger than the others. And all updates will reset this
-                 * anyway.
-                 */
-                rcu_assign_pointer(s->memcg_params, new_params);
-                if (cur_params)
-                        kfree_rcu(cur_params, rcu_head);
+        /*
+         * There is the chance it will be bigger than
+         * memcg_limited_groups_array_size, if we failed an allocation
+         * in a cache, in which case all caches updated before it, will
+         * have a bigger array.
+         *
+         * But if that is the case, the data after
+         * memcg_limited_groups_array_size is certainly unused
+         */
+        for (i = 0; i < memcg_limited_groups_array_size; i++) {
+                if (!cur_params->memcg_caches[i])
+                        continue;
+                new_params->memcg_caches[i] =
+                                        cur_params->memcg_caches[i];
         }
+
+        /*
+         * Ideally, we would wait until all caches succeed, and only
+         * then free the old one. But this is not worth the extra
+         * pointer per-cache we'd have to have for this.
+         *
+         * It is not a big deal if some caches are left with a size
+         * bigger than the others. And all updates will reset this
+         * anyway.
+         */
+        rcu_assign_pointer(s->memcg_params, new_params);
+        if (cur_params)
+                kfree_rcu(cur_params, rcu_head);
         return 0;
 }
 
@@ -4181,23 +4204,12 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg,
         if (err)
                 goto out;
 
-        memcg_id = ida_simple_get(&kmem_limited_groups,
-                                  0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
+        memcg_id = memcg_alloc_cache_id();
         if (memcg_id < 0) {
                 err = memcg_id;
                 goto out;
         }
 
-        /*
-         * Make sure we have enough space for this cgroup in each root cache's
-         * memcg_params.
-         */
-        mutex_lock(&memcg_slab_mutex);
-        err = memcg_update_all_caches(memcg_id + 1);
-        mutex_unlock(&memcg_slab_mutex);
-        if (err)
-                goto out_rmid;
-
         memcg->kmemcg_id = memcg_id;
         INIT_LIST_HEAD(&memcg->memcg_slab_caches);
 
@@ -4218,10 +4230,6 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg,
 out:
         memcg_resume_kmem_account();
         return err;
-
-out_rmid:
-        ida_simple_remove(&kmem_limited_groups, memcg_id);
-        goto out;
 }
 
 static int memcg_activate_kmem(struct mem_cgroup *memcg,