[PATCH] lock PTE before updating it in 440/BookE page fault handler
Fix the 44x and BookE page fault handlers to take the PTE lock before calling pte_update() on a PTE; otherwise the PTE can be swapped out after the pte_present() check but before the pte_update() call, corrupting the PTE. This can happen with preemption enabled under low-memory conditions.

Signed-off-by: Eugene Surovegin <ebs@ebshome.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent bac30d1a78
commit bab70a4af7
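The fix, in brief: the old code tested pte_present() and then called pte_update() with no PTE lock held, so a concurrent swap-out could replace the PTE between the check and the update. The patch takes the split page-table lock first and re-checks pte_present() under it. A condensed before/after sketch of the pattern, abridged from the hunks below (the dcache/icache flush and the fault-return path are elided):

    /* Before: pte_present() is checked without the PTE lock -- with
     * preemption enabled and memory pressure, the page can be swapped
     * out right here, so pte_update() may corrupt what has become a
     * swap entry. */
    if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
            pte_update(ptep, 0, _PAGE_HWEXEC);
            _tlbie(address);
            pte_unmap(ptep);
    }

    /* After: look up the lock for this page table via the pmd, take
     * it, and only touch the PTE after re-checking pte_present()
     * while holding the lock. */
    if (get_pteptr(mm, address, &ptep, &pmdp)) {
            spinlock_t *ptl = pte_lockptr(mm, pmdp);
            spin_lock(ptl);
            if (pte_present(*ptep)) {
                    pte_update(ptep, 0, _PAGE_HWEXEC);
                    _tlbie(address);
            }
            pte_unmap_unlock(ptep, ptl);    /* drop lock and kmap together */
    }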
@@ -267,25 +267,29 @@ good_area:
 #endif
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
     pte_t *ptep;
+    pmd_t *pmdp;
 
     /* Since 4xx/Book-E supports per-page execute permission,
      * we lazily flush dcache to icache. */
     ptep = NULL;
-    if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-        struct page *page = pte_page(*ptep);
-
-        if (! test_bit(PG_arch_1, &page->flags)) {
-            flush_dcache_icache_page(page);
-            set_bit(PG_arch_1, &page->flags);
+    if (get_pteptr(mm, address, &ptep, &pmdp)) {
+        spinlock_t *ptl = pte_lockptr(mm, pmdp);
+        spin_lock(ptl);
+        if (pte_present(*ptep)) {
+            struct page *page = pte_page(*ptep);
+
+            if (!test_bit(PG_arch_1, &page->flags)) {
+                flush_dcache_icache_page(page);
+                set_bit(PG_arch_1, &page->flags);
+            }
+            pte_update(ptep, 0, _PAGE_HWEXEC);
+            _tlbie(address);
+            pte_unmap_unlock(ptep, ptl);
+            up_read(&mm->mmap_sem);
+            return 0;
         }
-        pte_update(ptep, 0, _PAGE_HWEXEC);
-        _tlbie(address);
-        pte_unmap(ptep);
-        up_read(&mm->mmap_sem);
-        return 0;
+        pte_unmap_unlock(ptep, ptl);
     }
-    if (ptep != NULL)
-        pte_unmap(ptep);
 #endif
 /* a write */
     } else if (is_write) {

@@ -372,7 +372,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
     pgd_t *pgd;
     pmd_t *pmd;

@@ -387,6 +387,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
             if (pte) {
                 retval = 1;
                 *ptep = pte;
+                if (pmdp)
+                    *pmdp = pmd;
                 /* XXX caller needs to do pte_unmap, yuck */
             }
         }

@@ -424,7 +426,7 @@ unsigned long iopa(unsigned long addr)
         mm = &init_mm;
 
     pa = 0;
-    if (get_pteptr(mm, addr, &pte)) {
+    if (get_pteptr(mm, addr, &pte, NULL)) {
         pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
         pte_unmap(pte);
     }

@@ -202,6 +202,7 @@ good_area:
     /* an exec - 4xx/Book-E allows for per-page execute permission */
     } else if (TRAP(regs) == 0x400) {
         pte_t *ptep;
+        pmd_t *pmdp;
 
 #if 0
         /* It would be nice to actually enforce the VM execute

@@ -215,21 +216,24 @@ good_area:
         /* Since 4xx/Book-E supports per-page execute permission,
          * we lazily flush dcache to icache. */
         ptep = NULL;
-        if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-            struct page *page = pte_page(*ptep);
-
-            if (! test_bit(PG_arch_1, &page->flags)) {
-                flush_dcache_icache_page(page);
-                set_bit(PG_arch_1, &page->flags);
+        if (get_pteptr(mm, address, &ptep, &pmdp)) {
+            spinlock_t *ptl = pte_lockptr(mm, pmdp);
+            spin_lock(ptl);
+            if (pte_present(*ptep)) {
+                struct page *page = pte_page(*ptep);
+
+                if (!test_bit(PG_arch_1, &page->flags)) {
+                    flush_dcache_icache_page(page);
+                    set_bit(PG_arch_1, &page->flags);
+                }
+                pte_update(ptep, 0, _PAGE_HWEXEC);
+                _tlbie(address);
+                pte_unmap_unlock(ptep, ptl);
+                up_read(&mm->mmap_sem);
+                return 0;
             }
-            pte_update(ptep, 0, _PAGE_HWEXEC);
-            _tlbie(address);
-            pte_unmap(ptep);
-            up_read(&mm->mmap_sem);
-            return 0;
+            pte_unmap_unlock(ptep, ptl);
         }
-        if (ptep != NULL)
-            pte_unmap(ptep);
 #endif
 /* a read */
     } else {

@@ -368,7 +368,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
     pgd_t *pgd;
     pmd_t *pmd;

@@ -383,6 +383,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
             if (pte) {
                 retval = 1;
                 *ptep = pte;
+                if (pmdp)
+                    *pmdp = pmd;
                 /* XXX caller needs to do pte_unmap, yuck */
             }
         }

@@ -420,7 +422,7 @@ unsigned long iopa(unsigned long addr)
         mm = &init_mm;
 
     pa = 0;
-    if (get_pteptr(mm, addr, &pte)) {
+    if (get_pteptr(mm, addr, &pte, NULL)) {
         pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
         pte_unmap(pte);
     }

@@ -837,7 +837,8 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
  */
 #define pgtable_cache_init() do { } while (0)
 
-extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+                      pmd_t **pmdp);
 
 #include <asm-generic/pgtable.h>