mm/ksm.c: use new hashtable implementation
Switch ksm to use the new hashtable implementation. This reduces the
amount of generic unrelated code in the ksm module.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 4ca3a69bcb
parent 43b5fbbd28

 mm/ksm.c | 30 lines changed
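The diff below replaces KSM's open-coded bucket array with the generic helpers from <linux/hashtable.h>. As context, here is a minimal sketch of that API pattern under the same 10-bit table size; the struct, function names, and key type are hypothetical and only illustrate how DEFINE_HASHTABLE(), hash_add(), hash_for_each_possible(), and hash_del() fit together. The extra hlist_node iterator argument to hash_for_each_possible() matches the API as used in this commit; later kernels dropped it.

#include <linux/hashtable.h>

/* Hypothetical entry type; mirrors how struct mm_slot carries its hash linkage. */
struct demo_slot {
        struct hlist_node link;         /* chains the entry into one bucket */
        unsigned long key;
};

/* 2^10 buckets, statically allocated and initialized; no hash_init() needed. */
#define DEMO_HASH_BITS 10
static DEFINE_HASHTABLE(demo_hash, DEMO_HASH_BITS);

/* Insert: the table hashes the key internally to pick a bucket. */
static void demo_insert(struct demo_slot *slot, unsigned long key)
{
        slot->key = key;
        hash_add(demo_hash, &slot->link, key);
}

/* Lookup: walk only the bucket the key hashes to, then compare keys. */
static struct demo_slot *demo_lookup(unsigned long key)
{
        struct hlist_node *node;
        struct demo_slot *slot;

        hash_for_each_possible(demo_hash, slot, node, link, key)
                if (slot->key == key)
                        return slot;

        return NULL;
}

/* Remove: unlink the entry from whichever bucket it is in. */
static void demo_remove(struct demo_slot *slot)
{
        hash_del(&slot->link);
}

In the commit itself the key is the mm_struct pointer cast to an unsigned long, which is why get_mm_slot() and insert_to_mm_slots_hash() below need no explicit bucket computation any more.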
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -33,7 +33,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <linux/ksm.h>
-#include <linux/hash.h>
+#include <linux/hashtable.h>
 #include <linux/freezer.h>
 #include <linux/oom.h>
 
@@ -156,9 +156,8 @@ struct rmap_item {
 static struct rb_root root_stable_tree = RB_ROOT;
 static struct rb_root root_unstable_tree = RB_ROOT;
 
-#define MM_SLOTS_HASH_SHIFT 10
-#define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
-static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];
+#define MM_SLOTS_HASH_BITS 10
+static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
 
 static struct mm_slot ksm_mm_head = {
         .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
@@ -275,26 +274,21 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
-        struct mm_slot *mm_slot;
-        struct hlist_head *bucket;
         struct hlist_node *node;
+        struct mm_slot *slot;
+
+        hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm)
+                if (slot->mm == mm)
+                        return slot;
 
-        bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
-        hlist_for_each_entry(mm_slot, node, bucket, link) {
-                if (mm == mm_slot->mm)
-                        return mm_slot;
-        }
         return NULL;
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                     struct mm_slot *mm_slot)
 {
-        struct hlist_head *bucket;
-
-        bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
         mm_slot->mm = mm;
-        hlist_add_head(&mm_slot->link, bucket);
+        hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
 }
 
 static inline int in_stable_tree(struct rmap_item *rmap_item)
@@ -647,7 +641,7 @@ static int unmerge_and_remove_all_rmap_items(void)
                 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
                                                 struct mm_slot, mm_list);
                 if (ksm_test_exit(mm)) {
-                        hlist_del(&mm_slot->link);
+                        hash_del(&mm_slot->link);
                         list_del(&mm_slot->mm_list);
                         spin_unlock(&ksm_mmlist_lock);
 
@@ -1392,7 +1386,7 @@ next_mm:
                  * or when all VM_MERGEABLE areas have been unmapped (and
                  * mmap_sem then protects against race with MADV_MERGEABLE).
                  */
-                hlist_del(&slot->link);
+                hash_del(&slot->link);
                 list_del(&slot->mm_list);
                 spin_unlock(&ksm_mmlist_lock);
 
@@ -1559,7 +1553,7 @@ void __ksm_exit(struct mm_struct *mm)
         mm_slot = get_mm_slot(mm);
         if (mm_slot && ksm_scan.mm_slot != mm_slot) {
                 if (!mm_slot->rmap_list) {
-                        hlist_del(&mm_slot->link);
+                        hash_del(&mm_slot->link);
                         list_del(&mm_slot->mm_list);
                         easy_to_free = 1;
                 } else {