mirror of https://github.com/torvalds/linux.git
mm: meminit: reduce number of times pageblocks are set during struct page init
During parallel struct page initialisation, ranges are checked for every PFN unnecessarily, which increases boot times. This patch alters when the ranges are checked.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Nate Zimmer <nzimmer@sgi.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a4de83dd33
commit ac5d2539b2
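What the patch is doing, as a rough standalone sketch (the struct, the helpers init_one_page()/mark_block_movable() and the example pageblock size are stand-ins invented for illustration, not the kernel's real API): before the change, __init_single_page() looked up the zone and ran a range-plus-alignment check for every single PFN just to decide whether to mark a pageblock MIGRATE_MOVABLE; after it, the caller's loop already stays inside the zone, so a single cheap alignment test per PFN is enough and the pageblock work still happens only once per block.

#include <stdio.h>

/* Illustrative stand-ins only -- not the kernel's types or helpers. */
#define EXAMPLE_PAGEBLOCK_NR_PAGES 512UL	/* e.g. 2MB pageblocks of 4KB pages */

struct example_zone {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static void init_one_page(unsigned long pfn)
{
	(void)pfn;	/* stands in for the per-page struct page setup */
}

static void mark_block_movable(unsigned long pfn)
{
	printf("pageblock starting at pfn %lu marked movable\n", pfn);
}

/* Before: every PFN pays for the zone lookup and the three-part check. */
static void init_zone_old(const struct example_zone *z)
{
	unsigned long pfn;

	for (pfn = z->start_pfn; pfn < z->end_pfn; pfn++) {
		init_one_page(pfn);
		if (z->start_pfn <= pfn && pfn < z->end_pfn &&
		    !(pfn & (EXAMPLE_PAGEBLOCK_NR_PAGES - 1)))
			mark_block_movable(pfn);
	}
}

/* After: the loop already stays inside the zone, so only the cheap
 * alignment test remains, and it is true once per pageblock. */
static void init_zone_new(const struct example_zone *z)
{
	unsigned long pfn;

	for (pfn = z->start_pfn; pfn < z->end_pfn; pfn++) {
		init_one_page(pfn);
		if (!(pfn & (EXAMPLE_PAGEBLOCK_NR_PAGES - 1)))
			mark_block_movable(pfn);
	}
}

int main(void)
{
	struct example_zone z = { .start_pfn = 0, .end_pfn = 2048 };

	init_zone_old(&z);	/* marks pfn 0, 512, 1024, 1536 */
	init_zone_new(&z);	/* marks the same pageblocks */
	return 0;
}

Both variants mark the same pageblocks; the difference is how much work the non-boundary PFNs pay, which is what the actual diff below changes.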
mm/page_alloc.c
@@ -838,33 +838,12 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 				unsigned long zone, int nid)
 {
-	struct zone *z = &NODE_DATA(nid)->node_zones[zone];
-
 	set_page_links(page, zone, nid, pfn);
 	mminit_verify_page_links(page, zone, nid, pfn);
 	init_page_count(page);
 	page_mapcount_reset(page);
 	page_cpupid_reset_last(page);
 
-	/*
-	 * Mark the block movable so that blocks are reserved for
-	 * movable at startup. This will force kernel allocations
-	 * to reserve their blocks rather than leaking throughout
-	 * the address space during boot when many long-lived
-	 * kernel allocations are made. Later some blocks near
-	 * the start are marked MIGRATE_RESERVE by
-	 * setup_zone_migrate_reserve()
-	 *
-	 * bitmap is created for zone's valid pfn range. but memmap
-	 * can be created for invalid pages (for alignment)
-	 * check here not to call set_pageblock_migratetype() against
-	 * pfn out of zone.
-	 */
-	if ((z->zone_start_pfn <= pfn)
-	    && (pfn < zone_end_pfn(z))
-	    && !(pfn & (pageblock_nr_pages - 1)))
-		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
 	INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
@@ -1073,6 +1052,7 @@ static void __defermem_init deferred_free_range(struct page *page,
 	/* Free a large naturally-aligned chunk if possible */
 	if (nr_pages == MAX_ORDER_NR_PAGES &&
 	    (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
+		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 		__free_pages_boot_core(page, pfn, MAX_ORDER-1);
 		return;
 	}
@@ -4593,8 +4573,30 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 				&nr_initialised))
 				break;
 		}
-		__init_single_pfn(pfn, zone, nid);
+
+		/*
+		 * Mark the block movable so that blocks are reserved for
+		 * movable at startup. This will force kernel allocations
+		 * to reserve their blocks rather than leaking throughout
+		 * the address space during boot when many long-lived
+		 * kernel allocations are made. Later some blocks near
+		 * the start are marked MIGRATE_RESERVE by
+		 * setup_zone_migrate_reserve()
+		 *
+		 * bitmap is created for zone's valid pfn range. but memmap
+		 * can be created for invalid pages (for alignment)
+		 * check here not to call set_pageblock_migratetype() against
+		 * pfn out of zone.
+		 */
+		if (!(pfn & (pageblock_nr_pages - 1))) {
+			struct page *page = pfn_to_page(pfn);
+
+			__init_single_page(page, pfn, zone, nid);
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		} else {
+			__init_single_pfn(pfn, zone, nid);
+		}
 	}
 }
 
 static void __meminit zone_init_free_lists(struct zone *zone)
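For a rough sense of how much per-PFN work the relocation removes (the node and page sizes below are assumed example figures, not measurements from the patch): with the old placement every initialised PFN performed the zone lookup and range comparisons inside __init_single_page(), while with the new placement that per-PFN cost is gone and set_pageblock_migratetype() still runs only for the one PFN that starts each pageblock, exactly as before.

#include <stdio.h>

int main(void)
{
	/* Assumed example figures, not values taken from the commit. */
	const unsigned long long node_bytes = 16ULL << 30;	/* a 16GB node */
	const unsigned long long page_size = 4096;		/* 4KB pages */
	const unsigned long long pageblock_nr_pages = 512;	/* 2MB pageblocks */
	const unsigned long long pfns = node_bytes / page_size;

	/* Old placement: zone lookup + range comparisons on every PFN. */
	printf("per-PFN zone range checks before the patch: %llu\n", pfns);
	/* New placement: the caller's loop already stays inside the zone. */
	printf("per-PFN zone range checks after the patch:  0\n");
	/* Unchanged either way: one migratetype write per pageblock. */
	printf("pageblock migratetype writes:               %llu\n",
	       pfns / pageblock_nr_pages);
	return 0;
}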