mm, vmscan: by default have direct reclaim only shrink once per node
Direct reclaim iterates over all zones in the zonelist and shrinks each of them, but this is in conflict with node-based reclaim. In the default case, only shrink once per node.

Link: http://lkml.kernel.org/r/1467970510-21195-11-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 79dafcdca3
parent 38087d9b03
 mm/vmscan.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2552,14 +2552,6 @@ static inline bool compaction_ready(struct zone *zone, int order, int classzone_idx)
  * try to reclaim pages from zones which will satisfy the caller's allocation
  * request.
  *
- * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
- * Because:
- * a) The caller may be trying to free *extra* pages to satisfy a higher-order
- *    allocation or
- * b) The target zone may be at high_wmark_pages(zone) but the lower zones
- *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
- *    zone defense algorithm.
- *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
@@ -2571,6 +2563,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	unsigned long nr_soft_scanned;
 	gfp_t orig_mask;
 	enum zone_type classzone_idx;
+	pg_data_t *last_pgdat = NULL;
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2629,6 +2622,15 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 				continue;
 			}
 
+			/*
+			 * Shrink each node in the zonelist once. If the
+			 * zonelist is ordered by zone (not the default) then a
+			 * node may be shrunk multiple times but in that case
+			 * the user prefers lower zones being preserved.
+			 */
+			if (zone->zone_pgdat == last_pgdat)
+				continue;
+
 			/*
 			 * This steals pages from memory cgroups over softlimit
 			 * and returns the number of reclaimed pages and
@@ -2644,6 +2646,10 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			/* need some check for avoid more shrink_zone() */
 		}
 
+		/* See comment about same check for global reclaim above */
+		if (zone->zone_pgdat == last_pgdat)
+			continue;
+		last_pgdat = zone->zone_pgdat;
 		shrink_node(zone->zone_pgdat, sc, classzone_idx);
 	}
 
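For readers outside the kernel tree, here is a minimal standalone sketch of the pattern the patch adds: remember the pg_data_t of the last zone that was shrunk and skip any immediately following zone on the same node. The struct pgdat and struct zone types and the shrink() and shrink_zones() helpers below are simplified stand-ins invented for illustration, not the kernel's real definitions; in the kernel the loop walks the zonelist with for_each_zone_zonelist_nodemask() and calls shrink_node().

#include <stdio.h>

/* Simplified stand-ins for the kernel's pg_data_t and struct zone;
 * these are illustrative types, not the real kernel definitions. */
struct pgdat { int node_id; };
struct zone { struct pgdat *zone_pgdat; };

/* Hypothetical stand-in for shrink_node(). */
static void shrink(struct pgdat *pgdat)
{
	printf("  shrink node %d\n", pgdat->node_id);
}

/* The pattern the patch adds: remember the node of the last zone we
 * shrank and skip any immediately following zone on the same node. */
static void shrink_zones(struct zone **zonelist, int nr)
{
	struct pgdat *last_pgdat = NULL;
	int i;

	for (i = 0; i < nr; i++) {
		if (zonelist[i]->zone_pgdat == last_pgdat)
			continue;
		last_pgdat = zonelist[i]->zone_pgdat;
		shrink(zonelist[i]->zone_pgdat);
	}
}

int main(void)
{
	struct pgdat n0 = { 0 }, n1 = { 1 };
	struct zone n0_normal = { &n0 }, n0_dma32 = { &n0 };
	struct zone n1_normal = { &n1 }, n1_dma32 = { &n1 };

	/* Node-ordered zonelist (the default): a node's zones are
	 * adjacent, so each node is shrunk exactly once. */
	struct zone *node_order[] = { &n0_normal, &n0_dma32,
				      &n1_normal, &n1_dma32 };
	/* Zone-ordered zonelist: zones interleave across nodes, so the
	 * adjacency check cannot dedup and nodes are shrunk repeatedly. */
	struct zone *zone_order[] = { &n0_normal, &n1_normal,
				      &n0_dma32, &n1_dma32 };

	puts("node-ordered zonelist:");
	shrink_zones(node_order, 4);
	puts("zone-ordered zonelist:");
	shrink_zones(zone_order, 4);
	return 0;
}

Run, this shrinks each node once for the node-ordered list but four times in total for the zone-ordered one, which is exactly the trade-off the new comment in shrink_zones() describes: with a zone-ordered zonelist a node may be shrunk more than once, in exchange for preserving lower zones.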