mm: vmscan: use per memcg nr_deferred of shrinker

Use per memcg's nr_deferred for memcg aware shrinkers.  The shrinker's
nr_deferred will be used in the following cases:
    1. Non memcg aware shrinkers
    2. !CONFIG_MEMCG
    3. memcg is disabled by boot parameter

Link: https://lkml.kernel.org/r/20210311190845.9708-11-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3c6f17e6c5
commit 8675083046
mm/vmscan.c | 78
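The core of the change below is a take/return protocol over an atomic deferred-work counter: a reclaimer claims the whole backlog with an exchange, scans what it can, and adds the unscanned remainder back for the next reclaimer. A minimal standalone userspace model of that protocol (illustrative only; take_deferred()/return_deferred() and the numbers are invented for this sketch, not kernel API):

/*
 * Standalone model of the deferred-count handoff (illustrative only,
 * not kernel code).
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_deferred;         /* stands in for one nr_deferred slot */

/* like xchg_nr_deferred(): claim the backlog and zero it so that
 * concurrent reclaimers do not scan the same deferred work twice */
static long take_deferred(void)
{
        return atomic_exchange(&nr_deferred, 0);
}

/* like add_nr_deferred(): push the unused count back in a way that
 * tolerates concurrent updates, returning the new total */
static long return_deferred(long unscanned)
{
        return atomic_fetch_add(&nr_deferred, unscanned) + unscanned;
}

int main(void)
{
        atomic_store(&nr_deferred, 128);        /* pretend 128 objects were deferred */
        long backlog = take_deferred();         /* this reclaimer now owns all 128 */
        long scanned = 100;                     /* ...but manages to scan only 100 */
        long new_total = return_deferred(backlog - scanned);

        printf("backlog=%ld remaining=%ld\n", backlog, new_total);  /* 128, 28 */
        return 0;
}

The same exchange/add-return pair is what the patch moves behind xchg_nr_deferred()/add_nr_deferred(), so the one do_shrink_slab() call site can target either the per-shrinker or the per-memcg counter.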
@@ -376,6 +376,24 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
 	idr_remove(&shrinker_idr, id);
 }
 
+static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+				   struct mem_cgroup *memcg)
+{
+	struct shrinker_info *info;
+
+	info = shrinker_info_protected(memcg, nid);
+	return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
+}
+
+static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+				  struct mem_cgroup *memcg)
+{
+	struct shrinker_info *info;
+
+	info = shrinker_info_protected(memcg, nid);
+	return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
 	return sc->target_mem_cgroup;
@@ -414,6 +432,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
 {
 }
 
+static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+				   struct mem_cgroup *memcg)
+{
+	return 0;
+}
+
+static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+				  struct mem_cgroup *memcg)
+{
+	return 0;
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
 	return false;
@@ -425,6 +455,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 }
 #endif
 
+static long xchg_nr_deferred(struct shrinker *shrinker,
+			     struct shrink_control *sc)
+{
+	int nid = sc->nid;
+
+	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+		nid = 0;
+
+	if (sc->memcg &&
+	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
+		return xchg_nr_deferred_memcg(nid, shrinker,
+					      sc->memcg);
+
+	return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+}
+
+
+static long add_nr_deferred(long nr, struct shrinker *shrinker,
+			    struct shrink_control *sc)
+{
+	int nid = sc->nid;
+
+	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+		nid = 0;
+
+	if (sc->memcg &&
+	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
+		return add_nr_deferred_memcg(nr, nid, shrinker,
+					     sc->memcg);
+
+	return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
+}
+
 /*
  * This misses isolated pages which are not accounted for to save counters.
  * As the data only determines if reclaim or compaction continues, it is
@@ -561,14 +624,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	long freeable;
 	long nr;
 	long new_nr;
-	int nid = shrinkctl->nid;
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;
 
-	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
-		nid = 0;
-
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
 	if (freeable == 0 || freeable == SHRINK_EMPTY)
 		return freeable;
@@ -578,7 +637,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	 * and zero it so that other concurrent shrinker invocations
 	 * don't also do this scanning work.
 	 */
-	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+	nr = xchg_nr_deferred(shrinker, shrinkctl);
 
 	total_scan = nr;
 	if (shrinker->seeks) {
@@ -669,14 +728,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 		next_deferred = 0;
 	/*
 	 * move the unused scan count back into the shrinker in a
-	 * manner that handles concurrent updates. If we exhausted the
-	 * scan, there is no need to do an update.
+	 * manner that handles concurrent updates.
 	 */
-	if (next_deferred > 0)
-		new_nr = atomic_long_add_return(next_deferred,
-						&shrinker->nr_deferred[nid]);
-	else
-		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+	new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);
 
 	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
 	return freed;
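Taken together, the helpers above encode one routing rule, matching the three fallback cases in the changelog: the per-memcg nr_deferred is used only when the shrinker is SHRINKER_MEMCG_AWARE and the shrink_control carries a memcg; everything else falls back to the per-shrinker array, with non-NUMA-aware shrinkers collapsed to node 0. A small userspace model of that rule (hypothetical names, for illustration only; not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define SHRINKER_NUMA_AWARE	(1 << 0)
#define SHRINKER_MEMCG_AWARE	(1 << 1)

struct shrink_ctl { int nid; void *memcg; };

/* Mirrors the checks in xchg_nr_deferred()/add_nr_deferred():
 * non-NUMA-aware shrinkers collapse to node 0, and the per-memcg
 * counter is chosen only for memcg-aware shrinkers reclaiming on
 * behalf of a memcg. */
static bool routes_to_memcg(unsigned int flags, const struct shrink_ctl *sc,
			    int *nid)
{
	*nid = (flags & SHRINKER_NUMA_AWARE) ? sc->nid : 0;
	return sc->memcg && (flags & SHRINKER_MEMCG_AWARE);
}

int main(void)
{
	struct shrink_ctl sc = { .nid = 1, .memcg = (void *)1 };
	int nid;
	bool memcg_route;

	/* case 1 from the changelog: shrinker not memcg aware ->
	 * per-shrinker counter; NUMA aware, so nid is kept */
	memcg_route = routes_to_memcg(SHRINKER_NUMA_AWARE, &sc, &nid);
	printf("memcg=%d nid=%d\n", memcg_route, nid);	/* memcg=0 nid=1 */

	/* memcg aware and reclaiming for a memcg -> per-memcg counter;
	 * not NUMA aware, so nid collapses to 0 */
	memcg_route = routes_to_memcg(SHRINKER_MEMCG_AWARE, &sc, &nid);
	printf("memcg=%d nid=%d\n", memcg_route, nid);	/* memcg=1 nid=0 */
	return 0;
}

The !CONFIG_MEMCG stubs and a memcg disabled by boot parameter (sc->memcg stays NULL) both fall out of the same check, which is why do_shrink_slab() no longer needs its own nid fixup.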