linux/arch/x86/mm/pgprot.c
Christoph Hellwig e10cd4b009 x86/mm: enable ARCH_HAS_VM_GET_PAGE_PROT
This defines and exports a platform-specific vm_get_page_prot() by
subscribing to ARCH_HAS_VM_GET_PAGE_PROT.  It also unsubscribes from the
config option ARCH_HAS_FILTER_PGPROT, after removing arch_filter_pgprot()
and arch_vm_get_page_prot().

Link: https://lkml.kernel.org/r/20220414062125.609297-6-anshuman.khandual@arm.com
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David S. Miller <davem@davemloft.net>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2022-04-28 23:16:13 -07:00

36 lines
918 B
C

// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
/*
 * Translate a VMA's vm_flags into the page protection bits that go
 * into a PTE for this architecture.
 */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/*
	 * Start from the baseline protections selected by the
	 * read/write/exec/shared portion of the VMA flags.
	 */
	unsigned long prot = pgprot_val(protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * Translate the four protection-key bits carried in vm_flags
	 * into their corresponding PTE bits.  Protection Keys are only
	 * available on 64-bit, so these bits are a no-op otherwise.
	 */
	prot |= (vm_flags & VM_PKEY_BIT0) ? _PAGE_PKEY_BIT0 : 0;
	prot |= (vm_flags & VM_PKEY_BIT1) ? _PAGE_PKEY_BIT1 : 0;
	prot |= (vm_flags & VM_PKEY_BIT2) ? _PAGE_PKEY_BIT2 : 0;
	prot |= (vm_flags & VM_PKEY_BIT3) ? _PAGE_PKEY_BIT3 : 0;
#endif

	/* Fold in the memory-encryption (SME) mask. */
	prot = __sme_set(prot);

	/* Present PTEs must not carry bits the hardware doesn't support. */
	if (prot & _PAGE_PRESENT)
		prot &= __supported_pte_mask;

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);