mm: fix races between swapoff and flush dcache
Thanks to commit 4b3ef9daa4 ("mm/swap: split swap cache into 64MB
trunks"), after swapoff the address_space associated with the swap
device is freed.  So page_mapping() users which may touch that
address_space need some mechanism to prevent it from being freed
while it is being accessed.
The dcache flushing functions (flush_dcache_page(), etc.) in
architecture-specific code may access the address_space of the swap
device for anonymous pages in the swap cache via the page_mapping()
function.  But in some cases there is no mechanism to prevent the swap
device from being swapped off, for example:
  CPU1                                  CPU2
  __get_user_pages()                    swapoff()
    flush_dcache_page()
      mapping = page_mapping()
      ...                                 exit_swap_address_space()
      ...                                   kvfree(spaces)
      mapping_mapped(mapping)
The address space may be accessed after being freed.
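For reference, the teardown path named above looked roughly like this
at the time (a simplified paraphrase of mm/swap_state.c from that era,
not a verbatim quote):

	/* Sketch: swapoff tears down the per-64MB swap cache
	 * address_spaces.  Once kvfree() runs, any pointer previously
	 * obtained through page_mapping() on a swap cache page is
	 * dangling. */
	void exit_swap_address_space(unsigned int type)
	{
		struct address_space *spaces;

		spaces = swapper_spaces[type];
		nr_swapper_spaces[type] = 0;
		swapper_spaces[type] = NULL;
		kvfree(spaces);
	}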
But per cachetlb.txt and Russell King, flush_dcache_page() only cares
about file cache pages; for anonymous pages, flush_anon_page() should
be used.  The implementation of flush_dcache_page() in all
architectures follows this too: they check whether page_mapping() is
NULL and whether mapping_mapped() is true to determine whether to
flush the dcache immediately, and they use the interval tree
(mapping->i_mmap) to find all user space mappings.  But
mapping_mapped() and mapping->i_mmap aren't used for anonymous pages
in the swap cache at all.
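As a composite sketch of that pattern (not any single architecture's
exact code; __flush_dcache_aliases() stands in for the per-arch flush
helper):

	void flush_dcache_page(struct page *page)
	{
		struct address_space *mapping = page_mapping(page);	/* pre-fix */

		/* File page with no user mappings: defer the flush. */
		if (mapping && !mapping_mapped(mapping)) {
			clear_bit(PG_dcache_clean, &page->flags);
			return;
		}

		/* Otherwise flush now; user space aliases are found by
		 * walking the interval tree at mapping->i_mmap.  Neither
		 * mapping_mapped() nor mapping->i_mmap is maintained for
		 * anonymous pages in the swap cache, so the swap
		 * address_space is never actually needed here -- it must
		 * only not be dereferenced after swapoff. */
		__flush_dcache_aliases(mapping, page);
	}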
So, to fix the race between swapoff and dcache flushing,
page_mapping_file() is added to return the address_space for file
cache pages and NULL otherwise.  All page_mapping() calls in the
dcache flushing functions are replaced with page_mapping_file().
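In caller terms, the intended contract is (a sketch; the actual helper
is in the mm/util.c hunk at the end of the patch):

	/* A NULL return now covers both plain anonymous pages and
	 * anonymous pages sitting in the swap cache, so the (freeable)
	 * swap address_space is never dereferenced. */
	static void arch_flush_example(struct page *page)
	{
		struct address_space *mapping = page_mapping_file(page);

		if (!mapping)
			return;	/* anonymous (possibly in swap cache) */
		/* mapping belongs to a file and remains valid here, so
		 * mapping_mapped()/mapping->i_mmap are safe to use. */
	}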
[akpm@linux-foundation.org: simplify page_mapping_file(), per Mike]
Link: http://lkml.kernel.org/r/20180305083634.15174-1-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Chen Liqin <liqin.linux@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Zankel <chris@zankel.net>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1c0ff0f1bd
commit cb9f753a37
@@ -833,7 +833,7 @@ void flush_dcache_page(struct page *page)
 	}
 
 	/* don't handle anon pages here */
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!mapping)
 		return;
@@ -70,7 +70,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping(from), from);
+		__flush_dcache_page(page_mapping_file(from), from);
 
 	raw_spin_lock(&minicache_lock);
@@ -76,7 +76,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	unsigned long kfrom, kto;
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping(from), from);
+		__flush_dcache_page(page_mapping_file(from), from);
 
 	/* FIXME: not highmem safe */
 	discard_old_kernel_data(page_address(to));
@@ -90,7 +90,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping(from), from);
+		__flush_dcache_page(page_mapping_file(from), from);
 
 	raw_spin_lock(&minicache_lock);
@@ -195,7 +195,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
 	if (mapping) {
@@ -285,7 +285,7 @@ void __sync_icache_dcache(pte_t pteval)
 
 	page = pfn_to_page(pfn);
 	if (cache_is_vipt_aliasing())
-		mapping = page_mapping(page);
+		mapping = page_mapping_file(page);
 	else
 		mapping = NULL;
@@ -333,7 +333,7 @@ void flush_dcache_page(struct page *page)
 		return;
 	}
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 
 	if (!cache_ops_need_broadcast() &&
 	    mapping && !page_mapcount(page))
@@ -363,7 +363,7 @@ void flush_kernel_dcache_page(struct page *page)
 	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
 		struct address_space *mapping;
 
-		mapping = page_mapping(page);
+		mapping = page_mapping_file(page);
 
 		if (!mapping || mapping_mapped(mapping)) {
 			void *addr;
@@ -86,7 +86,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
 
 void __flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 	unsigned long addr;
 
 	if (mapping && !mapping_mapped(mapping)) {
@@ -180,7 +180,7 @@ void flush_dcache_page(struct page *page)
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 
 	/* Flush this page if there are aliases. */
 	if (mapping && !mapping_mapped(mapping)) {
@@ -215,7 +215,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
@@ -88,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 		return;
 
 	page = pfn_to_page(pfn);
-	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+	if (page_mapping_file(page) &&
+	    test_bit(PG_dcache_dirty, &page->flags)) {
 		flush_kernel_dcache_page_addr(pfn_va(pfn));
 		clear_bit(PG_dcache_dirty, &page->flags);
 	} else if (parisc_requires_coherency())
@@ -304,7 +305,7 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 	struct vm_area_struct *mpnt;
 	unsigned long offset;
 	unsigned long addr, old_addr = 0;
@@ -112,7 +112,7 @@ static void sh4_flush_dcache_page(void *arg)
 	struct page *page = arg;
 	unsigned long addr = (unsigned long)page_address(page);
 #ifndef CONFIG_SMP
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 
 	if (mapping && !mapping_mapped(mapping))
 		clear_bit(PG_dcache_clean, &page->flags);
@@ -136,7 +136,7 @@ static void __flush_dcache_page(unsigned long phys)
 static void sh7705_flush_dcache_page(void *arg)
 {
 	struct page *page = arg;
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 
 	if (mapping && !mapping_mapped(mapping))
 		clear_bit(PG_dcache_clean, &page->flags);
@@ -929,9 +929,9 @@ static inline void __local_flush_dcache_page(struct page *page)
 #ifdef DCACHE_ALIASING_POSSIBLE
 	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
-			     page_mapping(page) != NULL));
+			     page_mapping_file(page) != NULL));
 #else
-	if (page_mapping(page) != NULL &&
+	if (page_mapping_file(page) != NULL &&
 	    tlb_type == spitfire)
 		__flush_icache_page(__pa(page_address(page)));
 #endif
@@ -958,7 +958,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 
 	if (tlb_type == spitfire) {
 		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
-		if (page_mapping(page) != NULL)
+		if (page_mapping_file(page) != NULL)
 			data0 |= ((u64)1 << 32);
 	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
@@ -994,7 +994,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	pg_addr = page_address(page);
 	if (tlb_type == spitfire) {
 		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
-		if (page_mapping(page) != NULL)
+		if (page_mapping_file(page) != NULL)
 			data0 |= ((u64)1 << 32);
 	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
@@ -206,9 +206,9 @@ inline void flush_dcache_page_impl(struct page *page)
 #ifdef DCACHE_ALIASING_POSSIBLE
 	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
-			     page_mapping(page) != NULL));
+			     page_mapping_file(page) != NULL));
 #else
-	if (page_mapping(page) != NULL &&
+	if (page_mapping_file(page) != NULL &&
 	    tlb_type == spitfire)
 		__flush_icache_page(__pa(page_address(page)));
 #endif
@@ -490,7 +490,7 @@ void flush_dcache_page(struct page *page)
 
 	this_cpu = get_cpu();
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (mapping && !mapping_mapped(mapping)) {
 		int dirty = test_bit(PG_dcache_dirty, &page->flags);
 		if (dirty) {
@@ -128,7 +128,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		goto no_cache_flush;
 
 	/* A real file page? */
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!mapping)
 		goto no_cache_flush;
@@ -83,7 +83,7 @@ void flush_dcache_page(struct page *page)
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 
 	if (mapping && !mapping_mapped(mapping))
 		clear_bit(PG_dcache_clean, &page->flags);
@@ -503,7 +503,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
 	if (mapping)
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(copy_user_highpage);
 
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 
 	/*
 	 * If we have a mapping but the page is not mapped to user-space
@@ -1155,6 +1155,7 @@ static inline pgoff_t page_index(struct page *page)
 
 bool page_mapped(struct page *page);
 struct address_space *page_mapping(struct page *page);
+struct address_space *page_mapping_file(struct page *page);
 
 /*
  * Return true only if the page has been allocated with
mm/util.c | 10 ++++++++++
@@ -515,6 +515,16 @@ struct address_space *page_mapping(struct page *page)
 }
 EXPORT_SYMBOL(page_mapping);
 
+/*
+ * For file cache pages, return the address_space, otherwise return NULL
+ */
+struct address_space *page_mapping_file(struct page *page)
+{
+	if (unlikely(PageSwapCache(page)))
+		return NULL;
+	return page_mapping(page);
+}
+
 /* Slow path of page_mapcount() for compound pages */
 int __page_mapcount(struct page *page)
 {