mm/z3fold: fix z3fold_page_migrate races with z3fold_map
Think about the below scenario:

CPU1                                CPU2
 z3fold_page_migrate                 z3fold_map
  z3fold_page_trylock
  ...
  z3fold_page_unlock
                                     /* slots still points to old zhdr */
                                     get_z3fold_header
                                      get slots from handle
                                      get old zhdr from slots
                                      z3fold_page_trylock
                                      return *old* zhdr
  encode_handle(new_zhdr, FIRST|LAST|MIDDLE)
  put_page(page) /* zhdr is freed! */
                                     but zhdr is still used by caller!
z3fold_map can thus map a freed z3fold page and lead to a use-after-free bug. To
fix it, we add PAGE_MIGRATED to indicate that a z3fold page has been migrated and
is soon to be released, so that get_z3fold_header won't return such a page.
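In effect, the handle-lookup loop in get_z3fold_header now rechecks the flag
after winning the trylock and retries if the page is on its way out. A minimal
sketch of the fixed retry path, condensed from the second hunk below (the
re-read of zhdr from the handle's slots under slots->lock is elided):

	do {
		/* ... re-read zhdr for the handle under slots->lock ... */
		locked = z3fold_page_trylock(zhdr);
		read_unlock(&slots->lock);
		if (locked) {
			struct page *page = virt_to_page(zhdr);

			/*
			 * The old page has been migrated and will be freed
			 * shortly; drop the lock and retry rather than
			 * returning a stale zhdr.
			 */
			if (!test_bit(PAGE_MIGRATED, &page->private))
				break;
			z3fold_page_unlock(zhdr);
		}
		cpu_relax();
	} while (true);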
Link: https://lkml.kernel.org/r/20220429064051.61552-10-linmiaohe@huawei.com
Fixes: 1f862989b0 ("mm/z3fold.c: support page migration")
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 943fb61dd6
parent 04094226d6
 mm/z3fold.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -181,6 +181,7 @@ enum z3fold_page_flags {
 	NEEDS_COMPACTING,
 	PAGE_STALE,
 	PAGE_CLAIMED, /* by either reclaim or free */
+	PAGE_MIGRATED, /* page is migrated and soon to be released */
 };
 
 /*
@@ -270,8 +271,13 @@ static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
 			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
 			locked = z3fold_page_trylock(zhdr);
 			read_unlock(&slots->lock);
-			if (locked)
-				break;
+			if (locked) {
+				struct page *page = virt_to_page(zhdr);
+
+				if (!test_bit(PAGE_MIGRATED, &page->private))
+					break;
+				z3fold_page_unlock(zhdr);
+			}
 			cpu_relax();
 		} while (true);
 	} else {
@@ -389,6 +395,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
 	clear_bit(NEEDS_COMPACTING, &page->private);
 	clear_bit(PAGE_STALE, &page->private);
 	clear_bit(PAGE_CLAIMED, &page->private);
+	clear_bit(PAGE_MIGRATED, &page->private);
 	if (headless)
 		return zhdr;
 
@@ -1576,7 +1583,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
 	new_zhdr = page_address(newpage);
 	memcpy(new_zhdr, zhdr, PAGE_SIZE);
 	newpage->private = page->private;
-	page->private = 0;
+	set_bit(PAGE_MIGRATED, &page->private);
 	z3fold_page_unlock(zhdr);
 	spin_lock_init(&new_zhdr->page_lock);
 	INIT_WORK(&new_zhdr->work, compact_page_work);
@@ -1606,7 +1613,8 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
 
 	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
 
-	clear_bit(PAGE_CLAIMED, &page->private);
+	/* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
+	page->private = 0;
 	put_page(page);
 	return 0;
 }
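A note on ordering, as a reading of the hunks above rather than new code:
PAGE_MIGRATED is set on the old page while z3fold_page_migrate still holds the
page lock, so any get_z3fold_header that wins the subsequent trylock is
guaranteed to observe the flag and retry. Condensed:

	newpage->private = page->private;        /* new page inherits the flag bits */
	set_bit(PAGE_MIGRATED, &page->private);  /* mark old page before unlocking */
	z3fold_page_unlock(zhdr);
	...
	page->private = 0;                       /* clears PAGE_CLAIMED and PAGE_MIGRATED */
	put_page(page);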