d4bbc30bb0

The set_memory_* functions have moved to set_memory.h. Use that header
explicitly.

Link: http://lkml.kernel.org/r/1488920133-27229-4-git-send-email-labbott@redhat.com
Signed-off-by: Laura Abbott <labbott@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
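With this change, users of the set_memory_* API include <asm/set_memory.h>
themselves rather than picking it up indirectly through <asm/cacheflush.h>.
As a rough illustration of the API this file implements (the function
demo_protect and its four-page buffer are hypothetical, not part of the
commit), a caller might look like:

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <asm/set_memory.h>

/* Hypothetical example: protect a vmalloc'd buffer, then restore it. */
static int demo_protect(void)
{
	int nr = 4;				/* arbitrary page count */
	void *buf = vmalloc(nr * PAGE_SIZE);	/* a VM_ALLOC area, so the range check passes */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = set_memory_ro((unsigned long)buf, nr);	/* set PTE_RDONLY, clear PTE_WRITE */
	if (!ret)
		ret = set_memory_rw((unsigned long)buf, nr);	/* undo: back to read-write */

	vfree(buf);
	return ret;
}

Note that change_memory_common() below rejects any range not entirely inside
a single VM_ALLOC area, so such a sketch only works on vmalloc/vmap memory,
not on the linear map or the stack.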
186 lines | 4.8 KiB | C
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

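/* Attribute bits to clear and set on each PTE visited by the walker. */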
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

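/*
 * apply_to_page_range() callback: rewrite one PTE, removing clear_mask
 * before applying set_mask, then install the result with set_pte().
 */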
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

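/*
 * Common entry point for the set_memory_* helpers: sanity-check the range,
 * require it to sit entirely inside a single vmalloc'd VM area, then apply
 * the set/clear masks one page at a time.
 */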
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	return __change_memory_common(start, size, set_mask, clear_mask);
}

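/*
 * Each helper below pairs a set mask with the complementary clear mask,
 * so set_memory_ro()/set_memory_rw() and set_memory_nx()/set_memory_x()
 * are exact inverses of one another.
 */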
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

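/*
 * Toggle PTE_VALID directly through __change_memory_common(), bypassing
 * the vmalloc-area check above: the caller (__kernel_map_pages() below)
 * operates on linear map pages, which no VM area covers.
 */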
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

#ifdef CONFIG_DEBUG_PAGEALLOC
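/*
 * DEBUG_PAGEALLOC hook: map (enable != 0) or unmap pages in the kernel
 * linear map by toggling their PTE_VALID bit.
 */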
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr = (unsigned long)page_address(page);

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return false;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return false;
	if (pud_sect(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_sect(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_valid(*pte);
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */