commit 1d4d37159d

Modern ARMv7-A/R cores optionally implement the following hardware
feature:

- PXN: Privileged execute-never. The PXN bit determines whether the
  processor may execute software from the region, which makes it an
  effective defence against ret2usr attacks. On implementations
  without LPAE, PXN support is optional.

This patch sets the PXN bit in user page table entries to prevent
user code from being executed in privileged mode.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Jungseung Lee <js07.lee@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
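Before the file itself, a brief sketch connects the commit message to the
pmd_populate() helper near the end of the header. This is a reconstruction,
not verbatim kernel code: the helper name and the detection condition are
assumptions, while user_pmd_table is the variable the header really
references and PMD_PXNTABLE is the table-descriptor PXN bit:

pmdval_t user_pmd_table = _PAGE_USER_TABLE;

/* Hypothetical setup helper; the real logic lives in arch/arm/mm */
static void __init setup_user_pmd_prot(void)
{
	/* assumed check: PXN without LPAE is an ARMv7 feature */
	if (cpu_architecture() == CPU_ARCH_ARMv7)
		user_pmd_table |= PMD_PXNTABLE;	/* bit 2 of the descriptor */
}

With that in place, every user first-level entry written through
pmd_populate() inherits the PXN bit for free.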
/*
 *  arch/arm/include/asm/pgalloc.h
 *
 *  Copyright (C) 2000-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H

#include <linux/pagemap.h>

#include <asm/domain.h>
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define check_pgt_cache()		do { } while (0)

#ifdef CONFIG_MMU

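/*
 * Protection bits for a first-level page table descriptor: the
 * descriptor type, the legacy bit-4 flag required by earlier cores,
 * and the domain field selecting the user or kernel domain.
 */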
#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))

#ifdef CONFIG_ARM_LPAE

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
}

#else	/* !CONFIG_ARM_LPAE */

/*
 * Since we have only two-level page tables, these are trivial
 */
#define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd)		do { } while (0)
#define pud_populate(mm,pmd,pte)	BUG()

#endif	/* CONFIG_ARM_LPAE */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

/*
 * The MMU walks the hardware tables directly from memory, so the
 * hardware half of a freshly zeroed PTE page must be cleaned out of
 * the data cache before it is wired into a pmd.
 */
static inline void clean_pte_table(pte_t *pte)
{
	clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
}

/*
 * Allocate one PTE table.
 *
 * This actually allocates two hardware PTE tables, but we wrap this up
 * into one table thus:
 *
 *  +------------+
 *  | Linux pt 0 |
 *  +------------+
 *  | Linux pt 1 |
 *  +------------+
 *  |  h/w pt 0  |
 *  +------------+
 *  |  h/w pt 1  |
 *  +------------+
 */
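/*
 * In the classic 2-level configuration (constants from
 * asm/pgtable-2level.h) the Linux tables occupy the first half of the
 * page and the hardware tables the second: PTE_HWTABLE_PTRS skips the
 * 512 Linux entries, PTE_HWTABLE_OFF is the byte offset of h/w pt 0,
 * and PTE_HWTABLE_SIZE spans both 256-entry hardware tables; this is
 * why clean_pte_table() above cleans only the hardware half.
 */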
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
	if (pte)
		clean_pte_table(pte);

	return pte;
}

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
#else
	pte = alloc_pages(PGALLOC_GFP, 0);
#endif
	if (!pte)
		return NULL;
	if (!PageHighMem(pte))
		clean_pte_table(page_address(pte));
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

/*
 * Free one PTE table.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	if (pte)
		free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
				  pmdval_t prot)
{
	/* Point the entry at h/w pt 0, past the two Linux tables */
	pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
	pmdp[0] = __pmd(pmdval);
#ifndef CONFIG_ARM_LPAE
	/* The second 256-entry hardware table follows 1K further on */
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
#endif
	flush_pmd_entry(pmdp);
}

/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 *
 * Ensure that we always set both PMD entries.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	/*
	 * The pmd must be loaded with the physical address of the PTE table
	 */
	__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	extern pmdval_t user_pmd_table;
	pmdval_t prot;

	/*
	 * On non-LPAE ARMv6+ the user table protection comes from
	 * user_pmd_table, which may carry the PXN bit described in the
	 * commit message above.
	 */
	if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE))
		prot = user_pmd_table;
	else
		prot = _PAGE_USER_TABLE;

	__pmd_populate(pmdp, page_to_phys(ptep), prot);
}

#define pmd_pgtable(pmd) pmd_page(pmd)

#endif /* CONFIG_MMU */

#endif
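For completeness, here is a short, hedged sketch of how generic mm code
ends up calling these helpers when a user PTE table has to be faulted in.
The function is hypothetical; the real __pte_alloc() path of this era also
handles locking and a racing allocation, both omitted here:

/* Hypothetical caller, simplified from the generic __pte_alloc() path */
static int example_pte_alloc(struct mm_struct *mm, pmd_t *pmdp,
			     unsigned long addr)
{
	pgtable_t new = pte_alloc_one(mm, addr);	/* page ctor + cache clean */
	if (!new)
		return -ENOMEM;

	pmd_populate(mm, pmdp, new);	/* writes both hardware PMD entries */
	return 0;
}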