forked from Minki/linux
20a004e7b0
In many cases, page tables can be accessed concurrently by either another CPU (due to things like fast gup) or by the hardware page table walker itself, which may set access/dirty bits. In such cases, it is important to use READ_ONCE/WRITE_ONCE when accessing page table entries so that entries cannot be torn, merged or subject to apparent loss of coherence due to compiler transformations. Whilst there are some scenarios where this cannot happen (e.g. pinned kernel mappings for the linear region), the overhead of using READ_ONCE/WRITE_ONCE everywhere is minimal and makes the code an awful lot easier to reason about. This patch consistently uses these macros in the arch code, as well as explicitly namespacing pointers to page table entries from the entries themselves by adopting a 'p' suffix for the former (as is sometimes used elsewhere in the kernel source). Tested-by: Yury Norov <ynorov@caviumnetworks.com> Tested-by: Richard Ruigrok <rruigrok@codeaurora.org> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
98 lines
2.7 KiB
C
98 lines
2.7 KiB
C
/*
|
|
* arch/arm64/include/asm/hugetlb.h
|
|
*
|
|
* Copyright (C) 2013 Linaro Ltd.
|
|
*
|
|
* Based on arch/x86/include/asm/hugetlb.h
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*/
|
|
|
|
#ifndef __ASM_HUGETLB_H
|
|
#define __ASM_HUGETLB_H
|
|
|
|
#include <asm/page.h>
|
|
|
|
/*
 * Read a huge PTE via READ_ONCE() so the load is performed exactly once
 * and cannot be torn or re-fetched by the compiler while the hardware
 * page table walker may be updating access/dirty bits concurrently.
 */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = READ_ONCE(*ptep);

	return pte;
}
|
|
|
|
|
|
|
|
/*
 * arm64 needs no special handling when tearing down page tables for a
 * hugetlb region: delegate straight to the generic free_pgd_range().
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr,
					  unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
|
|
|
|
/*
 * arm64 reserves no address ranges exclusively for huge pages, so any
 * range is always reported as not hugepage-only.
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
|
|
|
|
static inline int prepare_hugepage_range(struct file *file,
|
|
unsigned long addr, unsigned long len)
|
|
{
|
|
struct hstate *h = hstate_file(file);
|
|
if (len & ~huge_page_mask(h))
|
|
return -EINVAL;
|
|
if (addr & ~huge_page_mask(h))
|
|
return -EINVAL;
|
|
return 0;
|
|
}
|
|
|
|
/* A huge PTE is "none" under exactly the same conditions as a regular PTE. */
static inline int huge_pte_none(pte_t pte)
{
	int none = pte_none(pte);

	return none;
}
|
|
|
|
/* Write-protecting a huge PTE reuses the regular PTE helper unchanged. */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	pte_t wrprotected = pte_wrprotect(pte);

	return wrprotected;
}
|
|
|
|
/*
 * Clear the PG_dcache_clean bit on the page so that the next time the
 * page is mapped the required cache maintenance is performed again.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
	clear_bit(PG_dcache_clean, &page->flags);
}
|
|
|
|
/*
 * Out-of-line arch-specific hugetlb helpers. The #define-to-same-name
 * lines below mark each helper as overridden so that the subsequent
 * #include of <asm-generic/hugetlb.h> does not supply its generic
 * fallback for it.
 */
extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				struct page *page, int writable);
#define arch_make_huge_pte arch_make_huge_pte
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte);
extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep,
				      pte_t pte, int dirty);
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep);
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep);
extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep);
extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, unsigned long sz);
#define huge_pte_clear huge_pte_clear
extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte, unsigned long sz);
#define set_huge_swap_pte_at set_huge_swap_pte_at
|
|
|
|
#include <asm-generic/hugetlb.h>
|
|
|
|
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
|
|
/* Gigantic (PUD-level) hugetlb pages are always usable on this arch. */
static inline bool gigantic_page_supported(void)
{
	return true;
}
|
|
#endif
|
|
|
|
#endif /* __ASM_HUGETLB_H */
|