linux/include/asm-powerpc/hugetlb.h
Andy Whitcroft 016b33c495 powerpc: Add 64 bit version of huge_ptep_set_wrprotect
The implementation of huge_ptep_set_wrprotect() directly calls
ptep_set_wrprotect() to mark a hugepte write-protected.  However, this
call is not appropriate on ppc64 kernels, where ptep_set_wrprotect() is
a small-page-only implementation.  This can lead to the hash not being
flushed correctly when a mapping is converted to COW, allowing
processes to continue using the original copy.

Currently huge_ptep_set_wrprotect() unconditionally calls
ptep_set_wrprotect().  This is fine on ppc32 kernels, as the call is
generic there.  On 64-bit it is implemented as:

	pte_update(mm, addr, ptep, _PAGE_RW, 0);

On ppc64 this last parameter is the huge flag, which determines the
page size and is passed straight through to hpte_need_flush():

	hpte_need_flush(mm, addr, ptep, old, huge);

And this directly affects the page size we pass to flush_hash_page():

	flush_hash_page(vaddr, rpte, psize, ssize, 0);

As this changes the way the hash is calculated, we will flush the
wrong pages, potentially leaving live hash entries for the original
page.
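
For context, a simplified sketch of the ppc64 pte_update() whose final
argument is forwarded verbatim as the huge flag (the real
implementation clears the bits with an ldarx/stdcx. loop rather than a
plain store):

	static inline unsigned long pte_update(struct mm_struct *mm,
					       unsigned long addr,
					       pte_t *ptep, unsigned long clr,
					       int huge)
	{
		unsigned long old = pte_val(*ptep);

		*ptep = __pte(old & ~clr);	/* atomic in the real code */
		if (old & _PAGE_HASHPTE)
			hpte_need_flush(mm, addr, ptep, old, huge);
		return old;
	}

Passing 0 here therefore tells the flush path to treat the mapping as a
normal small page.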

Move the definition of huge_ptep_set_wrprotect() to the 32-bit and
64-bit specific headers.
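
A sketch of what the split looks like: the 32-bit header can keep the
generic call, while the 64-bit header must pass huge = 1 down to
pte_update() (the guard mirrors the existing ptep_set_wrprotect()):

	/* pgtable-ppc32.h: the generic call remains correct here */
	static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
						   unsigned long addr, pte_t *ptep)
	{
		ptep_set_wrprotect(mm, addr, ptep);
	}

	/* pgtable-ppc64.h: tell pte_update() the mapping is huge so
	 * hpte_need_flush() computes the hash with the huge page size */
	static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
						   unsigned long addr, pte_t *ptep)
	{
		if ((pte_val(*ptep) & _PAGE_RW) == 0)
			return;
		pte_update(mm, addr, ptep, _PAGE_RW, 1);
	}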

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2008-07-01 11:28:56 +10:00

#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#include <asm/page.h>

int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);

void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#endif /* _ASM_POWERPC_HUGETLB_H */
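
For orientation, the caller this fix protects is the fork()-time copy
loop in mm/hugetlb.c; roughly (a fragment, simplified from
copy_hugetlb_page_range()):

	/* Write-protecting the parent's huge PTE is what forces the
	 * copy-on-write fault on the next store; if the hash entry for
	 * the huge mapping is not flushed here, the parent keeps
	 * writing through the old translation. */
	if (!huge_pte_none(huge_ptep_get(src_pte))) {
		if (cow)
			huge_ptep_set_wrprotect(src, addr, src_pte);
		entry = huge_ptep_get(src_pte);
		ptepage = pte_page(entry);
		get_page(ptepage);
		set_huge_pte_at(dst, addr, dst_pte, entry);
	}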