mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 12:11:40 +00:00
mm/zsmalloc: fix migrate_write_lock() when !CONFIG_COMPACTION
Patch series "mm/zsmalloc: fix and optimize objects/page migration". This series is to fix and optimize the zsmalloc objects/page migration. This patch (of 3): migrate_write_lock() is an empty function when !CONFIG_COMPACTION, in which case zs_compact() can be triggered from shrinker reclaim context. (Maybe it's better to rename it to zs_shrink()?) And zspage map object users rely on this migrate_read_lock() so object won't be migrated elsewhere. Fix it by always implementing the migrate_write_lock() related functions. Link: https://lkml.kernel.org/r/20240219-b4-szmalloc-migrate-v1-0-34cd49c6545b@bytedance.com Link: https://lkml.kernel.org/r/20240219-b4-szmalloc-migrate-v1-1-34cd49c6545b@bytedance.com Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com> Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Nhat Pham <nphamcs@gmail.com> Cc: Yosry Ahmed <yosryahmed@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
75c40c2509
commit
568b567f78
@ -278,18 +278,15 @@ static bool ZsHugePage(struct zspage *zspage)
|
||||
static void migrate_lock_init(struct zspage *zspage);
|
||||
static void migrate_read_lock(struct zspage *zspage);
|
||||
static void migrate_read_unlock(struct zspage *zspage);
|
||||
|
||||
#ifdef CONFIG_COMPACTION
|
||||
static void migrate_write_lock(struct zspage *zspage);
|
||||
static void migrate_write_lock_nested(struct zspage *zspage);
|
||||
static void migrate_write_unlock(struct zspage *zspage);
|
||||
|
||||
#ifdef CONFIG_COMPACTION
|
||||
static void kick_deferred_free(struct zs_pool *pool);
|
||||
static void init_deferred_free(struct zs_pool *pool);
|
||||
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
|
||||
#else
|
||||
static void migrate_write_lock(struct zspage *zspage) {}
|
||||
static void migrate_write_lock_nested(struct zspage *zspage) {}
|
||||
static void migrate_write_unlock(struct zspage *zspage) {}
|
||||
static void kick_deferred_free(struct zs_pool *pool) {}
|
||||
static void init_deferred_free(struct zs_pool *pool) {}
|
||||
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
|
||||
@ -1725,7 +1722,6 @@ static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
|
||||
read_unlock(&zspage->lock);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPACTION
|
||||
static void migrate_write_lock(struct zspage *zspage)
|
||||
{
|
||||
write_lock(&zspage->lock);
|
||||
@ -1741,6 +1737,7 @@ static void migrate_write_unlock(struct zspage *zspage)
|
||||
write_unlock(&zspage->lock);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPACTION
|
||||
/* Number of isolated subpage for *page migration* in this zspage */
|
||||
static void inc_zspage_isolation(struct zspage *zspage)
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user