[PATCH] mm: never ClearPageLRU released pages

If vmscan finds a zero refcount page on the lru list, never ClearPageLRU
it.  This means the release code need not hold ->lru_lock to stabilise
PageLRU, so that lock may be skipped entirely when releasing !PageLRU pages
(because we know PageLRU won't have been temporarily cleared by vmscan,
which was previously guaranteed by holding the lock to synchronise against
vmscan).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author: Nick Piggin, 2006-03-22 00:07:58 -08:00 (committed by Linus Torvalds)
commit 46453a6e19, parent 2492ecc1a1
2 changed files with 39 additions and 33 deletions
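
To make the new locking rule concrete before the diffs, here is a minimal user-space sketch of the release side as the changelog describes it. Everything in it is a mock (mock_page, mock_page_release, a pthread mutex standing in for zone->lru_lock); it is not kernel code, and only illustrates that, after this patch, a page that is not on the LRU can be freed without touching the lock at all.

/* Mock of the post-patch release rule: the lru_lock is only taken for
 * pages that are still flagged as being on the LRU. */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct mock_page {			/* stand-in for struct page */
	bool lru;			/* stand-in for the PG_lru flag */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;	/* "zone->lru_lock" */

static void mock_page_release(struct mock_page *page)
{
	if (page->lru) {		/* PageLRU(page) */
		pthread_mutex_lock(&lru_lock);
		/* vmscan never clears the flag on a zero-ref page, so it
		 * must still be set here; clearing it stands in for
		 * del_page_from_lru() in the real code. */
		page->lru = false;
		pthread_mutex_unlock(&lru_lock);
	}
	free(page);			/* free_hot_page() stand-in */
}

int main(void)
{
	struct mock_page *on_lru = calloc(1, sizeof(*on_lru));
	struct mock_page *off_lru = calloc(1, sizeof(*off_lru));

	if (!on_lru || !off_lru)
		return 1;
	on_lru->lru = true;
	mock_page_release(on_lru);	/* takes and drops lru_lock */
	mock_page_release(off_lru);	/* skips the lock entirely */
	return 0;
}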

mm/swap.c

@@ -209,19 +209,18 @@ int lru_add_drain_all(void)
  */
 void fastcall __page_cache_release(struct page *page)
 {
-	unsigned long flags;
-	struct zone *zone = page_zone(page);
-
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (TestClearPageLRU(page))
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		if (!TestClearPageLRU(page))
+			BUG();
 		del_page_from_lru(zone, page);
-	if (page_count(page) != 0)
-		page = NULL;
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	if (page)
-		free_hot_page(page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
 }
 
 EXPORT_SYMBOL(__page_cache_release);
 
 /*
@@ -245,7 +244,6 @@ void release_pages(struct page **pages, int nr, int cold)
 	pagevec_init(&pages_to_free, cold);
 	for (i = 0; i < nr; i++) {
 		struct page *page = pages[i];
-		struct zone *pagezone;
 
 		if (unlikely(PageCompound(page))) {
 			if (zone) {
@@ -259,23 +257,27 @@ void release_pages(struct page **pages, int nr, int cold)
 		if (!put_page_testzero(page))
 			continue;
 
-		pagezone = page_zone(page);
-		if (pagezone != zone) {
-			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
-		}
-		if (TestClearPageLRU(page))
-			del_page_from_lru(zone, page);
-		if (page_count(page) == 0) {
-			if (!pagevec_add(&pages_to_free, page)) {
-				spin_unlock_irq(&zone->lru_lock);
-				__pagevec_free(&pages_to_free);
-				pagevec_reinit(&pages_to_free);
-				zone = NULL;	/* No lock is held */
-			}
+		if (PageLRU(page)) {
+			struct zone *pagezone = page_zone(page);
+			if (pagezone != zone) {
+				if (zone)
+					spin_unlock_irq(&zone->lru_lock);
+				zone = pagezone;
+				spin_lock_irq(&zone->lru_lock);
+			}
+			if (!TestClearPageLRU(page))
+				BUG();
+			del_page_from_lru(zone, page);
+		}
+
+		if (!pagevec_add(&pages_to_free, page)) {
+			if (zone) {
+				spin_unlock_irq(&zone->lru_lock);
+				zone = NULL;
+			}
+			__pagevec_free(&pages_to_free);
+			pagevec_reinit(&pages_to_free);
 		}
 	}
 	if (zone)
 		spin_unlock_irq(&zone->lru_lock);
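
One consequence in the release_pages() hunk above is that the pagevec flush path no longer assumes the zone lock is held: it unlocks (and forgets the zone) only if a lock was actually taken, because !PageLRU pages never take it, and every zero-ref page now goes into pages_to_free unconditionally. A rough user-space analogue of that batch-and-conditionally-unlock pattern, with made-up names and a mutex in place of the spinlock:

#include <pthread.h>
#include <stdlib.h>

#define BATCH 14			/* PAGEVEC_SIZE in kernels of this era */

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;	/* one zone's lru_lock */

/* Flush the batch of objects waiting to be freed.  Only drop the lock
 * if this caller actually took it -- objects that never needed LRU
 * bookkeeping never took the lock at all. */
static void flush_batch(void **batch, int *n, int *locked)
{
	if (*locked) {
		pthread_mutex_unlock(&lru_lock);
		*locked = 0;		/* mirrors "zone = NULL" */
	}
	while (*n)
		free(batch[--(*n)]);
}

int main(void)
{
	void *batch[BATCH];
	int n = 0, locked = 0;

	for (int i = 0; i < 100; i++) {
		void *obj = malloc(32);

		if (i % 3 == 0) {	/* pretend every third object is "on the LRU" */
			if (!locked) {
				pthread_mutex_lock(&lru_lock);
				locked = 1;
			}
			/* ... del_page_from_lru()-style bookkeeping here ... */
		}
		batch[n++] = obj;	/* every zero-ref object is freed, LRU or not */
		if (n == BATCH)
			flush_batch(batch, &n, &locked);
	}
	flush_batch(batch, &n, &locked);
	return 0;
}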

mm/vmscan.c

@@ -1085,21 +1085,25 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		if (!TestClearPageLRU(page))
-			BUG();
 		list_del(&page->lru);
-		if (get_page_testone(page)) {
+		if (unlikely(get_page_testone(page))) {
 			/*
 			 * It is being freed elsewhere
 			 */
 			__put_page(page);
-			SetPageLRU(page);
 			list_add(&page->lru, src);
 			continue;
-		} else {
-			list_add(&page->lru, dst);
-			nr_taken++;
 		}
+
+		/*
+		 * Be careful not to clear PageLRU until after we're sure
+		 * the page is not being freed elsewhere -- the page release
+		 * code relies on it.
+		 */
+		if (!TestClearPageLRU(page))
+			BUG();
+		list_add(&page->lru, dst);
+		nr_taken++;
 	}
 
 	*scanned = scan;
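
The comment added to isolate_lru_pages() is the heart of the patch: the scanner must take its reference first (get_page_testone(), an atomic increment that also reports whether the page had already reached a zero count) and only then clear PageLRU, so the release path can rely on a zero-ref page never having PageLRU cleared under it. Below is a small user-space model of that test; the names and the plain atomic counter are stand-ins, not the kernel's biased page reference count.

#include <stdatomic.h>
#include <stdio.h>

/* Model of get_page_testone(): take a reference and report whether the
 * count was zero beforehand, i.e. whether the page is already on its
 * way to being freed elsewhere. */
static int get_ref_test_was_zero(atomic_int *refcount)
{
	return atomic_fetch_add(refcount, 1) == 0;
}

int main(void)
{
	atomic_int live_page = 1;	/* still referenced: safe to isolate */
	atomic_int dying_page = 0;	/* last reference already dropped */

	if (!get_ref_test_was_zero(&live_page))
		printf("live page: reference taken, now safe to TestClearPageLRU\n");

	if (get_ref_test_was_zero(&dying_page)) {
		/* Being freed elsewhere: undo the speculative reference and
		 * leave PageLRU alone, much as the new code does with
		 * __put_page() and putting the page back on the src list. */
		atomic_fetch_sub(&dying_page, 1);
		printf("dying page: backed off without touching PageLRU\n");
	}
	return 0;
}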