mm/ksm: handle protnone saved writes when making page write protect

Without this, KSM will consider the page write protected, but a NUMA
fault can later mark the page writable. This can result in memory
corruption.

Link: http://lkml.kernel.org/r/1487498625-10891-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

commit 595cd8f256
parent 288bc54949
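
Background for the commit message above: a PTE mapped PROT_NONE for automatic
NUMA balancing can carry a "saved write" bit, and the next NUMA hint fault
restores write access from that bit. So a PTE that is neither writable nor
dirty today can still become writable behind KSM's back. The following is a
minimal user-space sketch, not kernel code; the names fake_pte and
may_become_writable are invented for illustration, and the real check is the
one added to write_protect_page() in the diff below.

/*
 * Illustrative sketch only.  Models the state a KSM scanner must treat
 * as "may still become writable" before sharing a page.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_pte {
	bool write;      /* hardware write permission is set          */
	bool dirty;      /* page has been written to                  */
	bool protnone;   /* mapped PROT_NONE for NUMA balancing       */
	bool savedwrite; /* write permission stashed across protnone  */
};

/* Mirrors the condition write_protect_page() now uses. */
static bool may_become_writable(const struct fake_pte *pte)
{
	return pte->write || pte->dirty ||
	       (pte->protnone && pte->savedwrite);
}

int main(void)
{
	/* The case the fix is about: not writable, not dirty, but a NUMA
	 * hint fault would restore write access from the saved bit.    */
	struct fake_pte pte = { .protnone = true, .savedwrite = true };

	printf("must be treated as writable: %s\n",
	       may_become_writable(&pte) ? "yes" : "no");
	return 0;
}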

--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -233,6 +233,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pte_mk_savedwrite pte_mkwrite
 #endif
 
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
 #ifndef pmd_savedwrite
 #define pmd_savedwrite pmd_write
 #endif
@@ -241,6 +245,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pmd_mk_savedwrite pmd_mkwrite
 #endif
 
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,

mm/ksm.c (9 changed lines)

--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -880,7 +880,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
 		goto out_unlock;
 
-	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
+	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
+	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
 		pte_t entry;
 
 		swapped = PageSwapCache(page);
@@ -905,7 +906,11 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		}
 		if (pte_dirty(entry))
 			set_page_dirty(page);
-		entry = pte_mkclean(pte_wrprotect(entry));
+
+		if (pte_protnone(entry))
+			entry = pte_mkclean(pte_clear_savedwrite(entry));
+		else
+			entry = pte_mkclean(pte_wrprotect(entry));
 		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
 	}
 	*orig_pte = *pvmw.pte;
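
The write-protect step has to clear the right bit as well. On an architecture
that really tracks a saved write bit (powerpc with NUMA balancing), a protnone
PTE already has the hardware write bit clear, so pte_wrprotect() alone would
change nothing and the saved bit would survive; pte_clear_savedwrite() is what
removes it. Everywhere else the new asm-generic fallbacks above define
pte_clear_savedwrite as pte_wrprotect, so both branches behave identically.
A rough user-space model of the two branches, with invented names (fake_pte,
ksm_downgrade), not kernel code:

#include <assert.h>
#include <stdbool.h>

struct fake_pte {
	bool write;
	bool dirty;
	bool protnone;
	bool savedwrite;
};

/* Rough model of the downgrade now done by write_protect_page(). */
static struct fake_pte ksm_downgrade(struct fake_pte pte)
{
	if (pte.protnone)
		pte.savedwrite = false;	/* pte_clear_savedwrite() */
	else
		pte.write = false;	/* pte_wrprotect()        */
	pte.dirty = false;		/* pte_mkclean()          */
	return pte;
}

int main(void)
{
	struct fake_pte ordinary = { .write = true, .dirty = true };
	struct fake_pte numa     = { .protnone = true, .savedwrite = true };

	/* Neither flavour of PTE can hand out write access afterwards. */
	assert(!ksm_downgrade(ordinary).write);
	assert(!ksm_downgrade(numa).savedwrite);
	return 0;
}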