ARM: 6380/1: Introduce __sync_icache_dcache() for VIPT caches
On SMP systems, there is a small chance of a PTE becoming visible to a different CPU before the current cache maintenance operations in update_mmu_cache(). To avoid this, cache maintenance must be handled in set_pte_at() (similar to IA-64 and PowerPC).

This patch provides a unified VIPT cache handling mechanism and implements the __sync_icache_dcache() function for ARMv6 onwards architectures. It is called from set_pte_at() and replaces update_mmu_cache(). The latter is still used on VIVT hardware where a vm_area_struct is required.

Tested-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit 6012191aa9
parent c01778001a
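For reference, the ordering this patch enforces can be illustrated with a stand-alone sketch (not kernel code: the names mirror set_pte_at(), __sync_icache_dcache() and set_pte_ext() from the diff below, but the types and bodies are simplified stand-ins). The point is that cache maintenance happens before the PTE is written, because on SMP another CPU may start using the mapping as soon as the PTE becomes visible, i.e. before update_mmu_cache() would have run.

#include <stdio.h>

typedef unsigned long pte_t;

/* Stand-in for the real cache maintenance added in arch/arm/mm/flush.c. */
static void __sync_icache_dcache(pte_t pteval)
{
	printf("sync D/I-caches for pte %#lx\n", pteval);
}

/* Stand-in: the moment the PTE becomes visible to other CPUs. */
static void set_pte_ext(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

/* Mirrors the new set_pte_at(): maintenance first, then publish the PTE. */
static void set_pte_at(pte_t *ptep, pte_t pteval)
{
	__sync_icache_dcache(pteval);
	set_pte_ext(ptep, pteval);
}

int main(void)
{
	pte_t pte = 0;

	set_pte_at(&pte, 0x1234);
	return 0;
}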
arch/arm/include/asm/pgtable.h:

@@ -278,9 +278,24 @@ extern struct page *empty_zero_page;
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 
-#define set_pte_at(mm,addr,ptep,pteval) do { \
-	set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
-} while (0)
+#if __LINUX_ARM_ARCH__ < 6
+static inline void __sync_icache_dcache(pte_t pteval)
+{
+}
+#else
+extern void __sync_icache_dcache(pte_t pteval);
+#endif
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	if (addr >= TASK_SIZE)
+		set_pte_ext(ptep, pteval, 0);
+	else {
+		__sync_icache_dcache(pteval);
+		set_pte_ext(ptep, pteval, PTE_EXT_NG);
+	}
+}
 
 /*
  * The following only work if pte_present() is true.
@@ -290,8 +305,13 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
 #define pte_special(pte)	(0)
 
+#define pte_present_user(pte) \
+	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+	 (L_PTE_PRESENT | L_PTE_USER))
+
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
arch/arm/include/asm/tlbflush.h:

@@ -562,10 +562,18 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 /*
  * If PG_dcache_clean is not set for the page, we need to ensure that any
  * cache entries for the kernels virtual memory range are written
- * back to the page.
+ * back to the page. On ARMv6 and later, the cache coherency is handled via
+ * the set_pte_at() function.
  */
+#if __LINUX_ARM_ARCH__ < 6
 extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 	pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep)
+{
+}
+#endif
 
 #endif
arch/arm/mm/fault-armv.c:

@@ -28,6 +28,7 @@
 
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
+#if __LINUX_ARM_ARCH__ < 6
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable.  However, we leave the write buffer on.
@@ -168,10 +169,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 		return;
 
 	mapping = page_mapping(page);
-#ifndef CONFIG_SMP
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
-#endif
 	if (mapping) {
 		if (cache_is_vivt())
 			make_coherent(mapping, vma, addr, ptep, pfn);
@@ -179,6 +178,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 			__flush_icache_all();
 	}
 }
+#endif	/* __LINUX_ARM_ARCH__ < 6 */
 
 /*
  * Check whether the write buffer has physical address aliasing
arch/arm/mm/flush.c:

@@ -215,6 +215,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 	flush_dcache_mmap_unlock(mapping);
 }
 
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+	unsigned long pfn;
+	struct page *page;
+	struct address_space *mapping;
+
+	if (!pte_present_user(pteval))
+		return;
+	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+		/* only flush non-aliasing VIPT caches for exec mappings */
+		return;
+	pfn = pte_pfn(pteval);
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	if (cache_is_vipt_aliasing())
+		mapping = page_mapping(page);
+	else
+		mapping = NULL;
+
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		__flush_dcache_page(mapping, page);
+	/* pte_exec() already checked above for non-aliasing VIPT cache */
+	if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+		__flush_icache_all();
+}
+#endif
+
 /*
  * Ensure cache coherency between kernel mapping and userspace mapping
  * of this page.