mm, compaction: embed migration mode in compact_control
We're going to want to manipulate the migration mode for compaction in the
page allocator, but compact_control's sync field is only a bool: we currently
do either MIGRATE_ASYNC or MIGRATE_SYNC_LIGHT compaction depending on its
value. Convert the bool to enum migrate_mode and pass the migration mode in
directly. Later, in the page fault patch of this series, we'll want to avoid
MIGRATE_SYNC_LIGHT for thp allocations to avoid unnecessary latency.

This also alters compaction triggered from sysfs, either for the entire
system or for a node, to force MIGRATE_SYNC.

[akpm@linux-foundation.org: fix build]
[iamjoonsoo.kim@lge.com: use MIGRATE_SYNC in alloc_contig_range()]
Signed-off-by: David Rientjes <rientjes@google.com>
Suggested-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e0b9daeb45
parent 35979ef339
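For reference (not part of this diff), the three modes the new field can take
come from enum migrate_mode in include/linux/migrate_mode.h; the definition
below is reproduced as it stood in kernels of this era, so treat it as
context for the change rather than as part of it:

/*
 * MIGRATE_ASYNC means never block
 * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
 *	on most operations but not ->writepage as the potential stall time
 *	is too significant
 * MIGRATE_SYNC will block when migrating pages
 */
enum migrate_mode {
	MIGRATE_ASYNC,
	MIGRATE_SYNC_LIGHT,
	MIGRATE_SYNC,
};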
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *mask,
-			bool sync, bool *contended);
+			enum migrate_mode mode, bool *contended);
 extern void compact_pgdat(pg_data_t *pgdat, int order);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -91,7 +91,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
-			bool sync, bool *contended)
+			enum migrate_mode mode, bool *contended)
 {
 	return COMPACT_CONTINUE;
 }
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -161,7 +161,8 @@ static void update_pageblock_skip(struct compact_control *cc,
 			return;
 		if (pfn > zone->compact_cached_migrate_pfn[0])
 			zone->compact_cached_migrate_pfn[0] = pfn;
-		if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1])
+		if (cc->mode != MIGRATE_ASYNC &&
+		    pfn > zone->compact_cached_migrate_pfn[1])
 			zone->compact_cached_migrate_pfn[1] = pfn;
 	} else {
 		if (cc->finished_update_free)
@@ -208,7 +209,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
 		}
 
 		/* async aborts if taking too long or contended */
-		if (!cc->sync) {
+		if (cc->mode == MIGRATE_ASYNC) {
 			cc->contended = true;
 			return false;
 		}
@@ -473,7 +474,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	bool locked = false;
 	struct page *page = NULL, *valid_page = NULL;
 	bool set_unsuitable = true;
-	const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
+	const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
+					ISOLATE_ASYNC_MIGRATE : 0) |
 				    (unevictable ? ISOLATE_UNEVICTABLE : 0);
 
 	/*
@@ -483,7 +485,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	 */
 	while (unlikely(too_many_isolated(zone))) {
 		/* async migration should just abort */
-		if (!cc->sync)
+		if (cc->mode == MIGRATE_ASYNC)
 			return 0;
 
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -548,7 +550,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			 * the minimum amount of work satisfies the allocation
 			 */
 			mt = get_pageblock_migratetype(page);
-			if (!cc->sync && !migrate_async_suitable(mt)) {
+			if (cc->mode == MIGRATE_ASYNC &&
+			    !migrate_async_suitable(mt)) {
 				set_unsuitable = false;
 				goto next_pageblock;
 			}
@@ -981,6 +984,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	int ret;
 	unsigned long start_pfn = zone->zone_start_pfn;
 	unsigned long end_pfn = zone_end_pfn(zone);
+	const bool sync = cc->mode != MIGRATE_ASYNC;
 
 	ret = compaction_suitable(zone, cc->order);
 	switch (ret) {
@@ -1006,7 +1010,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	 * information on where the scanners should start but check that it
 	 * is initialised by ensuring the values are within zone boundaries.
 	 */
-	cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync];
+	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
 	cc->free_pfn = zone->compact_cached_free_pfn;
 	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
 		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
@@ -1040,8 +1044,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 		nr_migrate = cc->nr_migratepages;
 		err = migrate_pages(&cc->migratepages, compaction_alloc,
-				compaction_free, (unsigned long)cc,
-				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
+				compaction_free, (unsigned long)cc, cc->mode,
 				MR_COMPACTION);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
@@ -1074,9 +1077,8 @@ out:
 	return ret;
 }
 
-static unsigned long compact_zone_order(struct zone *zone,
-				 int order, gfp_t gfp_mask,
-				 bool sync, bool *contended)
+static unsigned long compact_zone_order(struct zone *zone, int order,
+		gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
 {
 	unsigned long ret;
 	struct compact_control cc = {
@@ -1085,7 +1087,7 @@ static unsigned long compact_zone_order(struct zone *zone,
 		.order = order,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
-		.sync = sync,
+		.mode = mode,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -1107,7 +1109,7 @@ int sysctl_extfrag_threshold = 500;
  * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
  * @nodemask: The allowed nodes to allocate from
- * @sync: Whether migration is synchronous or not
+ * @mode: The migration mode for async, sync light, or sync migration
  * @contended: Return value that is true if compaction was aborted due to lock contention
  * @page: Optionally capture a free page of the requested order during compaction
  *
@@ -1115,7 +1117,7 @@ int sysctl_extfrag_threshold = 500;
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
-			bool sync, bool *contended)
+			enum migrate_mode mode, bool *contended)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
@@ -1140,7 +1142,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 								nodemask) {
 		int status;
 
-		status = compact_zone_order(zone, order, gfp_mask, sync,
+		status = compact_zone_order(zone, order, gfp_mask, mode,
 						contended);
 		rc = max(status, rc);
 
@@ -1190,7 +1192,7 @@ void compact_pgdat(pg_data_t *pgdat, int order)
 {
 	struct compact_control cc = {
 		.order = order,
-		.sync = false,
+		.mode = MIGRATE_ASYNC,
 	};
 
 	if (!order)
@@ -1203,7 +1205,7 @@ static void compact_node(int nid)
 {
 	struct compact_control cc = {
 		.order = -1,
-		.sync = true,
+		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
 	};
 
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -134,7 +134,7 @@ struct compact_control {
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
-	bool sync;			/* Synchronous migration */
+	enum migrate_mode mode;		/* Async or sync migration mode */
 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
 	bool finished_update_free;	/* True when the zone cached pfns are
 					 * no longer being updated
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2217,7 +2217,7 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, bool sync_migration,
+	int migratetype, enum migrate_mode mode,
 	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
@@ -2231,7 +2231,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-						nodemask, sync_migration,
+						nodemask, mode,
 						contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 
@@ -2264,7 +2264,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		 * As async compaction considers a subset of pageblocks, only
 		 * defer if the failure was a sync compaction failure.
 		 */
-		if (sync_migration)
+		if (mode != MIGRATE_ASYNC)
 			defer_compaction(preferred_zone, order);
 
 		cond_resched();
@@ -2277,9 +2277,8 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, bool sync_migration,
-	bool *contended_compaction, bool *deferred_compaction,
-	unsigned long *did_some_progress)
+	int migratetype, enum migrate_mode mode, bool *contended_compaction,
+	bool *deferred_compaction, unsigned long *did_some_progress)
 {
 	return NULL;
 }
@@ -2474,7 +2473,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int alloc_flags;
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
-	bool sync_migration = false;
+	enum migrate_mode migration_mode = MIGRATE_ASYNC;
 	bool deferred_compaction = false;
 	bool contended_compaction = false;
 
@@ -2568,17 +2567,15 @@ rebalance:
 	 * Try direct compaction. The first pass is asynchronous. Subsequent
 	 * attempts after direct reclaim are synchronous
 	 */
-	page = __alloc_pages_direct_compact(gfp_mask, order,
-					zonelist, high_zoneidx,
-					nodemask,
-					alloc_flags, preferred_zone,
-					migratetype, sync_migration,
-					&contended_compaction,
+	page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
+					high_zoneidx, nodemask, alloc_flags,
+					preferred_zone, migratetype,
+					migration_mode, &contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 	if (page)
 		goto got_pg;
-	sync_migration = true;
+	migration_mode = MIGRATE_SYNC_LIGHT;
 
 	/*
 	 * If compaction is deferred for high-order allocations, it is because
@@ -2653,12 +2650,10 @@ rebalance:
 		 * direct reclaim and reclaim/compaction depends on compaction
 		 * being called after reclaim so call directly if necessary
 		 */
-		page = __alloc_pages_direct_compact(gfp_mask, order,
-					zonelist, high_zoneidx,
-					nodemask,
-					alloc_flags, preferred_zone,
-					migratetype, sync_migration,
-					&contended_compaction,
+		page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
+					high_zoneidx, nodemask, alloc_flags,
+					preferred_zone, migratetype,
+					migration_mode, &contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 		if (page)
@@ -6218,7 +6213,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 		cc->nr_migratepages -= nr_reclaimed;
 
 		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
-				    NULL, 0, MIGRATE_SYNC, MR_CMA);
+				    NULL, 0, cc->mode, MR_CMA);
 	}
 	if (ret < 0) {
 		putback_movable_pages(&cc->migratepages);
@@ -6257,7 +6252,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		.nr_migratepages = 0,
 		.order = -1,
 		.zone = page_zone(pfn_to_page(start)),
-		.sync = true,
+		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
 	};
 	INIT_LIST_HEAD(&cc.migratepages);
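To summarize the policy the converted callers now implement, here is a
minimal, self-contained userspace C sketch (hypothetical names, not kernel
code): the allocator slow path starts with MIGRATE_ASYNC and escalates to
MIGRATE_SYNC_LIGHT for attempts after direct reclaim, while sysfs-triggered
compaction and alloc_contig_range() force MIGRATE_SYNC.

#include <stdio.h>

/* Mirrors include/linux/migrate_mode.h */
enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

/* Hypothetical stand-in for a direct-compaction attempt; always "fails"
 * here so the escalation path is exercised. */
static int try_compact(enum migrate_mode mode)
{
	static const char * const names[] = {
		"MIGRATE_ASYNC", "MIGRATE_SYNC_LIGHT", "MIGRATE_SYNC",
	};
	printf("compacting with %s\n", names[mode]);
	return 0;	/* 0 = no page obtained */
}

int main(void)
{
	/* Allocator slow path after this patch: the first pass is async... */
	enum migrate_mode migration_mode = MIGRATE_ASYNC;

	if (!try_compact(migration_mode)) {
		/* ...and later passes escalate to sync-light migration. */
		migration_mode = MIGRATE_SYNC_LIGHT;
		try_compact(migration_mode);
	}

	/* Sysfs-triggered compaction and alloc_contig_range() now force
	 * full sync mode. */
	try_compact(MIGRATE_SYNC);
	return 0;
}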