commit c01778001a
There are places in Linux where writes to newly allocated page cache
pages happen without a subsequent call to flush_dcache_page() (several
PIO drivers including USB HCD). This patch changes the meaning of
PG_arch_1 to be PG_dcache_clean and always flushes the D-cache for a
newly mapped page in update_mmu_cache(). The patch also sets the
PG_arch_1 bit in the DMA cache maintenance function to avoid additional
cache flushing in update_mmu_cache().

Tested-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
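
The PG_dcache_clean handling described above reduces to a lazy-flush
pattern that also appears in the file below. Here is a minimal sketch of
that pattern; the helper name sync_dcache_if_dirty is made up for
illustration, while test_and_set_bit(), PG_dcache_clean and
__flush_dcache_page() are the real symbols used in the code that follows.

/*
 * Illustrative sketch only, not the kernel's update_mmu_cache():
 * flush the D-cache for a page at most once, then record that it
 * is clean via PG_dcache_clean (the repurposed PG_arch_1 bit).
 */
static inline void sync_dcache_if_dirty(struct page *page)
{
	/* test_and_set_bit() returns the previous value of the bit */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(page_mapping(page), page);
}
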
/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

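/*
 * Two fixed kernel virtual windows used below for temporary,
 * colour-indexed mappings of the source and destination pages:
 * each window is 16 KiB, one page slot per possible cache colour
 * (hence the SHMLBA check above).
 */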
#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

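/*
 * Serialises use of the shared from_address/to_address mapping
 * windows: only one copy/clear operation may own their PTEs at a time.
 */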
static DEFINE_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
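	/*
	 * Clean+invalidate the D-cache lines covering the destination's
	 * kernel mapping so the copied data is not left dirty in the cache.
	 */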
	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
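	/* MCRR p15, 0, <end>, <start>, c6: ARMv6 "invalidate D-cache range" */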
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");
}

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

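	/*
	 * If the source page is not yet marked PG_dcache_clean it may have
	 * dirty D-cache lines: flush them first so the copy below reads
	 * up-to-date data (test_and_set_bit() also marks the page clean).
	 */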
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * pages ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

	kfrom = from_address + (offset << PAGE_SHIFT);
	kto = to_address + (offset << PAGE_SHIFT);

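	/* evict any stale TLB entries for the window addresses before use */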
	flush_tlb_kernel_page(kfrom);
	flush_tlb_kernel_page(kto);

	copy_page((void *)kto, (void *)kfrom);

	spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the pages ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};
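
/*
 * The non-aliasing handlers above are the defaults; v6_userpage_init()
 * switches to the colour-aware versions at boot when the cache is
 * VIPT aliasing.
 */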
static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);