mirror of
https://github.com/torvalds/linux.git
synced 2024-12-01 08:31:37 +00:00
8b1e0f81fb
Drop the pgtable_t variable from all implementation for pte_fn_t as none of them use it. apply_to_pte_range() should stop computing it as well. Should help us save some cycles. Link: http://lkml.kernel.org/r/1556803126-26596-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com> Acked-by: Matthew Wilcox <willy@infradead.org> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Logan Gunthorpe <logang@deltatee.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: <jglisse@redhat.com> Cc: Mike Rapoport <rppt@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
91 lines
2.0 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>

struct page_change_data {
|
|
pgprot_t set_mask;
|
|
pgprot_t clear_mask;
|
|
};
|
|
|
|
/*
 * pte_fn_t callback invoked by apply_to_page_range() for every PTE in
 * the walked range: apply the clear/set masks carried in @data to the
 * PTE at @ptep and write it back.
 *
 * Always returns 0 so the page-table walk continues over the whole range.
 */
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	/* Clear before set, so a bit present in both masks ends up set. */
	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	/* Install the updated PTE (ext argument 0). */
	set_pte_ext(ptep, pte, 0);
	return 0;
}

/*
 * Return true iff the region [start, start + size) lies entirely inside
 * [range_start, range_end).
 *
 * The size check is written as "size <= range_end - start" (after
 * establishing start < range_end) so it cannot overflow for any
 * unsigned long inputs.
 */
static bool in_range(unsigned long start, unsigned long size,
	unsigned long range_start, unsigned long range_end)
{
	return start >= range_start && start < range_end &&
		size <= range_end - start;
}

/*
 * Apply @set_mask / @clear_mask to the PTEs covering @numpages pages
 * starting at @addr, then flush the kernel TLB for that range.
 *
 * Only module space and the vmalloc area may be modified; anything else
 * returns -EINVAL.  @addr is expected to be page-aligned already
 * (WARN_ON_ONCE fires otherwise, and the rounded-down address is used).
 *
 * Returns 0 on success or a negative errno from apply_to_page_range().
 */
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr & PAGE_MASK;
	unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
	unsigned long size = end - start;
	struct page_change_data data = {
		.set_mask = set_mask,
		.clear_mask = clear_mask,
	};
	int ret;

	/* Callers are supposed to pass a page-aligned address. */
	WARN_ON_ONCE(start != addr);

	if (!size)
		return 0;

	/* Refuse to touch anything outside module or vmalloc space. */
	if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
	    !in_range(start, size, VMALLOC_START, VMALLOC_END))
		return -EINVAL;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	/* Flush even on partial failure: some PTEs may have changed. */
	flush_tlb_kernel_range(start, end);
	return ret;
}

int set_memory_ro(unsigned long addr, int numpages)
|
|
{
|
|
return change_memory_common(addr, numpages,
|
|
__pgprot(L_PTE_RDONLY),
|
|
__pgprot(0));
|
|
}
|
|
|
|
int set_memory_rw(unsigned long addr, int numpages)
|
|
{
|
|
return change_memory_common(addr, numpages,
|
|
__pgprot(0),
|
|
__pgprot(L_PTE_RDONLY));
|
|
}
|
|
|
|
int set_memory_nx(unsigned long addr, int numpages)
|
|
{
|
|
return change_memory_common(addr, numpages,
|
|
__pgprot(L_PTE_XN),
|
|
__pgprot(0));
|
|
}
|
|
|
|
int set_memory_x(unsigned long addr, int numpages)
|
|
{
|
|
return change_memory_common(addr, numpages,
|
|
__pgprot(0),
|
|
__pgprot(L_PTE_XN));
|
|
}
|