linux/arch/sh/include/asm/pgtable.h
Matthew Wilcox (Oracle) 157efa2904 sh: implement the new page table range API
Add PFN_PTE_SHIFT, update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().  Change the PG_dcache_clean flag from being per-page
to per-folio.  Flush the entire folio containing the pages in
flush_icache_pages() for ease of implementation.

Link: https://lkml.kernel.org/r/20230802151406.3735276-25-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2023-08-24 16:20:23 -07:00
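
For context, the "Add PFN_PTE_SHIFT" part of this change refers to the constant consumed by the generic set_ptes() fallback in include/linux/pgtable.h, which steps the PFN between consecutive PTEs so architectures only need to say where the PFN sits in a PTE. Below is a simplified paraphrase of that generic loop (the real helper also calls the page_table_check and lazy-MMU hooks, and details may differ by kernel version):

/* Simplified paraphrase of the generic set_ptes() fallback. */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		/* PFN_PTE_SHIFT lets generic code advance to the next PFN. */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}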


/* SPDX-License-Identifier: GPL-2.0
*
* This file contains the functions and defines necessary to modify and
* use the SuperH page table tree.
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2002 - 2007 Paul Mundt
*/
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H
#ifdef CONFIG_X2TLB
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
#include <asm/page.h>
#include <asm/mmu.h>
#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
#include <asm/fixmap.h>
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/*
* Effective and physical address definitions, to aid with sign
* extension.
*/
#define NEFF 32
#define NEFF_SIGN (1LL << (NEFF - 1))
#define NEFF_MASK (-1LL << NEFF)
static inline unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}
#ifdef CONFIG_29BIT
#define NPHYS 29
#else
#define NPHYS 32
#endif
#define NPHYS_SIGN (1LL << (NPHYS - 1))
#define NPHYS_MASK (-1LL << NPHYS)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/* Entries per level */
#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
#define PHYS_ADDR_MASK29 0x1fffffff
#define PHYS_ADDR_MASK32 0xffffffff
static inline unsigned long phys_addr_mask(void)
{
	/* Is the MMU in 29bit mode? */
	if (__in_29bit_mode())
		return PHYS_ADDR_MASK29;

	return PHYS_ADDR_MASK32;
}
#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
#define VMALLOC_START (P3SEG)
#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#include <asm/pgtable_32.h>
/*
* SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
protection for execute, and consider it the same as a read. Also, write
* permission implies read permission. This is the closest we can get..
*
* SH-X2 (SH7785) and later parts take this to the opposite end of the extreme,
* not only supporting separate execute, read, and write bits, but having
* completely separate permission bits for user and kernel space.
*/
/*xwr*/
typedef pte_t *pte_addr_t;
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
struct vm_area_struct;
struct mm_struct;
extern void __update_cache(struct vm_area_struct *vma,
			   unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
			 unsigned long address, pte_t pte);
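
/*
 * Batched flavour of update_mmu_cache(): called once after set_ptes()
 * has installed 'nr' consecutive PTEs.  Only the first PTE of the batch
 * is examined here; per the commit message above, PG_dcache_clean is
 * now tracked per folio rather than per page.
 */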
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	pte_t pte = *ptep;
	__update_cache(vma, address, pte);
	__update_tlb(vma, address, pte);
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd);
static inline bool __pte_access_permitted(pte_t pte, u64 prot)
{
	return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
}
#ifdef CONFIG_X2TLB
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot = _PAGE_PRESENT;

	prot |= _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
	if (write)
		prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
	return __pte_access_permitted(pte, prot);
}
#else
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot = _PAGE_PRESENT | _PAGE_USER;

	if (write)
		prot |= _PAGE_RW;
	return __pte_access_permitted(pte, prot);
}
#endif
#define pte_access_permitted pte_access_permitted
/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* __ASM_SH_PGTABLE_H */
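
A minimal, hypothetical caller sketch of the batched API (example_map_folio_range is an invented name; the real callers live in the core mm fault path): after writing a batch of PTEs with set_ptes(), the range API expects a single update_mmu_cache_range() call for the whole batch rather than one update_mmu_cache() per page.

/* Hypothetical illustration only -- not part of this header. */
static void example_map_folio_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr,
		pte_t *ptep, pte_t first_pte, unsigned int nr)
{
	/* Install nr consecutive PTEs; PFNs advance via PFN_PTE_SHIFT. */
	set_ptes(vma->vm_mm, addr, ptep, first_pte, nr);

	/* One batched cache/TLB preload for the whole range. */
	update_mmu_cache_range(vmf, vma, addr, ptep, nr);
}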