Mirror of https://github.com/torvalds/linux.git
mm: memcg/slab: rename *_lruvec_slab_state to *_lruvec_kmem_state
The *_lruvec_slab_state helpers are also suitable for pages allocated from the buddy allocator, not just for slab objects, but the function names suggest that only slab objects are applicable. So rename the "slab" keyword to "kmem".

Link: https://lkml.kernel.org/r/20201117085249.24319-1-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
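As a quick illustration of why "kmem" fits better than "slab", here is a minimal, hypothetical caller sketch (not part of this patch): the helper only needs a kernel virtual address, so a buffer taken straight from the buddy allocator can be accounted the same way as a slab object. The names example_charge()/example_uncharge(), the example_buf buffer, the NR_KERNEL_MISC_RECLAIMABLE stat choice and the page delta are illustrative assumptions, not anything this commit adds.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>

static void *example_buf;

static int example_charge(void)
{
	/* Order-1 allocation straight from the buddy allocator, memcg-accounted. */
	example_buf = (void *)__get_free_pages(GFP_KERNEL_ACCOUNT, 1);
	if (!example_buf)
		return -ENOMEM;

	/* Same helper as for slab objects: it resolves the node and memcg from the address. */
	mod_lruvec_kmem_state(example_buf, NR_KERNEL_MISC_RECLAIMABLE, 2);
	return 0;
}

static void example_uncharge(void)
{
	mod_lruvec_kmem_state(example_buf, NR_KERNEL_MISC_RECLAIMABLE, -2);
	free_pages((unsigned long)example_buf, 1);
}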
parent fe6960cb38
commit da3ceeff92
@@ -788,15 +788,15 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			      int val);
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			int val);
-void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
+void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
 
-static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
 					 int val)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__mod_lruvec_slab_state(p, idx, val);
+	__mod_lruvec_kmem_state(p, idx, val);
 	local_irq_restore(flags);
 }
 
@@ -1229,7 +1229,7 @@ static inline void mod_lruvec_page_state(struct page *page,
 	mod_node_page_state(page_pgdat(page), idx, val);
 }
 
-static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
 					   int val)
 {
 	struct page *page = virt_to_head_page(p);
@@ -1237,7 +1237,7 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
 	__mod_node_page_state(page_pgdat(page), idx, val);
 }
 
-static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
 					 int val)
 {
 	struct page *page = virt_to_head_page(p);
@@ -1332,14 +1332,14 @@ static inline void __dec_lruvec_page_state(struct page *page,
 	__mod_lruvec_page_state(page, idx, -1);
 }
 
-static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
 {
-	__mod_lruvec_slab_state(p, idx, 1);
+	__mod_lruvec_kmem_state(p, idx, 1);
 }
 
-static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
 {
-	__mod_lruvec_slab_state(p, idx, -1);
+	__mod_lruvec_kmem_state(p, idx, -1);
 }
 
 /* idx can be of type enum memcg_stat_item or node_stat_item */
@@ -385,7 +385,7 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
 		mod_lruvec_page_state(vm->pages[0], NR_KERNEL_STACK_KB,
 				      account * (THREAD_SIZE / 1024));
 	else
-		mod_lruvec_slab_state(stack, NR_KERNEL_STACK_KB,
+		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
 				      account * (THREAD_SIZE / 1024));
 }
 
@@ -853,7 +853,7 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 		__mod_memcg_lruvec_state(lruvec, idx, val);
 }
 
-void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
+void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 {
 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
 	struct mem_cgroup *memcg;
@@ -445,12 +445,12 @@ void workingset_update_node(struct xa_node *node)
 	if (node->count && node->count == node->nr_values) {
 		if (list_empty(&node->private_list)) {
 			list_lru_add(&shadow_nodes, &node->private_list);
-			__inc_lruvec_slab_state(node, WORKINGSET_NODES);
+			__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
 		}
 	} else {
 		if (!list_empty(&node->private_list)) {
 			list_lru_del(&shadow_nodes, &node->private_list);
-			__dec_lruvec_slab_state(node, WORKINGSET_NODES);
+			__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
 		}
 	}
 }
@@ -544,7 +544,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	}
 
 	list_lru_isolate(lru, item);
-	__dec_lruvec_slab_state(node, WORKINGSET_NODES);
+	__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
 
 	spin_unlock(lru_lock);
 
@@ -559,7 +559,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 		goto out_invalid;
 	mapping->nrexceptional -= node->nr_values;
 	xa_delete_node(node, workingset_update_node);
-	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
+	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);
 
 out_invalid:
 	xa_unlock_irq(&mapping->i_pages);