mm, highmem: remove useless pool_lock
The pool_lock protects the page_address_pool from concurrent access. But access to the page_address_pool is already protected by kmap_lock, so remove it.

Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit cc33a303f1
parent 4de22c0584
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -328,7 +328,6 @@ struct page_address_map {
  * page_address_map freelist, allocated from page_address_maps.
  */
 static struct list_head page_address_pool;	/* freelist */
-static spinlock_t pool_lock;			/* protects page_address_pool */
 
 /*
  * Hash table bucket
@@ -395,11 +394,9 @@ void set_page_address(struct page *page, void *virtual)
 	if (virtual) {		/* Add */
 		BUG_ON(list_empty(&page_address_pool));
 
-		spin_lock_irqsave(&pool_lock, flags);
 		pam = list_entry(page_address_pool.next,
 				struct page_address_map, list);
 		list_del(&pam->list);
-		spin_unlock_irqrestore(&pool_lock, flags);
 
 		pam->page = page;
 		pam->virtual = virtual;
@@ -413,9 +410,7 @@ void set_page_address(struct page *page, void *virtual)
 			if (pam->page == page) {
 				list_del(&pam->list);
 				spin_unlock_irqrestore(&pas->lock, flags);
-				spin_lock_irqsave(&pool_lock, flags);
 				list_add_tail(&pam->list, &page_address_pool);
-				spin_unlock_irqrestore(&pool_lock, flags);
 				goto done;
 			}
 		}
@@ -438,7 +433,6 @@ void __init page_address_init(void)
 		INIT_LIST_HEAD(&page_address_htable[i].lh);
 		spin_lock_init(&page_address_htable[i].lock);
 	}
-	spin_lock_init(&pool_lock);
 }
 
 #endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
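As the changelog notes, every path that reaches page_address_pool already runs under kmap_lock, so the nested pool_lock only adds lock/unlock overhead. Below is a minimal userspace sketch of that pattern in plain C with pthreads, not kernel code; the names outer_lock, free_pool, pool_get, and pool_put are illustrative stand-ins for kmap_lock, page_address_pool, and the list operations removed above.

/*
 * Userspace sketch (not kernel code): every access to the free list goes
 * through a path that already holds an outer lock, so a dedicated inner
 * lock around the list would be redundant, mirroring the reasoning in
 * the changelog.  Build with: cc -std=c99 -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of kmap_lock */

struct node { struct node *next; };

static struct node pool_entries[4];
static struct node *free_pool;	/* plays the role of page_address_pool */

/* Called only with outer_lock held, as set_page_address() is called under kmap_lock. */
static struct node *pool_get(void)
{
	struct node *n = free_pool;
	if (n)
		free_pool = n->next;
	return n;
}

/* Also called only with outer_lock held. */
static void pool_put(struct node *n)
{
	n->next = free_pool;
	free_pool = n;
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&outer_lock);	/* outer lock taken first... */
		struct node *n = pool_get();		/* ...so the pool needs no lock of its own */
		if (n)
			pool_put(n);
		pthread_mutex_unlock(&outer_lock);
	}
	return NULL;
}

int main(void)
{
	/* Build the free list. */
	for (int i = 0; i < 3; i++)
		pool_entries[i].next = &pool_entries[i + 1];
	pool_entries[3].next = NULL;
	free_pool = &pool_entries[0];

	pthread_t t[2];
	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);

	puts("pool accesses were serialized by the outer lock alone");
	return 0;
}

In the patched kernel code the same reasoning applies: set_page_address() manipulates the freelist only while its callers hold kmap_lock, so dropping pool_lock removes two lock/unlock pairs per add and per remove without changing the synchronization guarantees.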