zsmalloc: separate free_zspage from putback_zspage
Currently, putback_zspage frees the zspage under class->lock when its fullness group becomes ZS_EMPTY, but that gets in the way of the locking scheme needed for the upcoming zspage migration. So this patch separates free_zspage from putback_zspage and frees the zspage outside class->lock, as preparation for zspage migration.

Link: http://lkml.kernel.org/r/1464736881-24886-10-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3783689a1a
commit 4aa409cab7
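To see the shape of the change before reading the diff, here is a minimal userspace C sketch of the pattern, not zsmalloc's code: fake_zspage, putback_and_free, and putback_only are names invented for this illustration; in the kernel the corresponding functions are putback_zspage and free_zspage, shown in the diff below.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for a zspage; not the kernel's type. */
    struct fake_zspage {
            bool empty;
    };

    static pthread_mutex_t class_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Old shape: reinsertion and freeing are fused, so the free
     * always runs with class_lock held and cannot safely take
     * locks of its own. */
    static void putback_and_free(struct fake_zspage *z)
    {
            pthread_mutex_lock(&class_lock);
            /* ... reinsert z into its fullness list ... */
            if (z->empty)
                    free(z);        /* destructor runs under the lock */
            pthread_mutex_unlock(&class_lock);
    }

    /* New shape: putback only classifies; the caller decides when,
     * and under what locking, to free. */
    static bool putback_only(struct fake_zspage *z)
    {
            /* caller must hold class_lock */
            /* ... reinsert z into its fullness list ... */
            return z->empty;
    }

    int main(void)
    {
            struct fake_zspage *a = calloc(1, sizeof(*a));
            struct fake_zspage *b = calloc(1, sizeof(*b));
            bool empty;

            if (!a || !b)
                    return 1;
            a->empty = true;
            b->empty = true;

            putback_and_free(a);            /* old, fused shape */

            pthread_mutex_lock(&class_lock);
            empty = putback_only(b);        /* new, separated shape */
            pthread_mutex_unlock(&class_lock);
            if (empty)
                    free(b);                /* free is now the caller's job */

            puts("done");
            return 0;
    }

Once reinsertion and freeing are decoupled like this, the caller controls where the destructor runs relative to the lock, which is exactly the freedom the migration work needs.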
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1687,14 +1687,12 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source)
 
 /*
  * putback_zspage - add @zspage into right class's fullness list
- * @pool: target pool
  * @class: destination class
  * @zspage: target page
  *
  * Return @zspage's fullness_group
  */
-static enum fullness_group putback_zspage(struct zs_pool *pool,
-				struct size_class *class,
+static enum fullness_group putback_zspage(struct size_class *class,
 				struct zspage *zspage)
 {
 	enum fullness_group fullness;
@@ -1703,15 +1701,6 @@ static enum fullness_group putback_zspage(struct zs_pool *pool,
 	insert_zspage(class, zspage, fullness);
 	set_zspage_mapping(zspage, class->index, fullness);
 
-	if (fullness == ZS_EMPTY) {
-		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-			class->size, class->pages_per_zspage));
-		atomic_long_sub(class->pages_per_zspage,
-				&pool->pages_allocated);
-
-		free_zspage(pool, zspage);
-	}
-
 	return fullness;
 }
 
@@ -1760,23 +1749,29 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 			if (!migrate_zspage(pool, class, &cc))
 				break;
 
-			putback_zspage(pool, class, dst_zspage);
+			putback_zspage(class, dst_zspage);
 		}
 
 		/* Stop if we couldn't find slot */
 		if (dst_zspage == NULL)
 			break;
 
-		putback_zspage(pool, class, dst_zspage);
-		if (putback_zspage(pool, class, src_zspage) == ZS_EMPTY)
+		putback_zspage(class, dst_zspage);
+		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+			zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+					class->size, class->pages_per_zspage));
+			atomic_long_sub(class->pages_per_zspage,
+					&pool->pages_allocated);
+			free_zspage(pool, src_zspage);
 			pool->stats.pages_compacted += class->pages_per_zspage;
+		}
 		spin_unlock(&class->lock);
 		cond_resched();
 		spin_lock(&class->lock);
 	}
 
 	if (src_zspage)
-		putback_zspage(pool, class, src_zspage);
+		putback_zspage(class, src_zspage);
 
 	spin_unlock(&class->lock);
 }