/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
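
/*
 * Look up the kernel page-table entry that maps @address. The level the
 * entry was found at is reported via @level: 2 when the address is covered
 * by a large-page PMD (the returned pointer then actually points at the
 * PMD entry), 3 when it is a regular 4k PTE.
 */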
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	*level = 2;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	*level = 3;

	return pte_offset_kernel(pmd, address);
}
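
/*
 * Split a kernel large-page mapping into PTRS_PER_PTE individual PTEs.
 * Every new PTE gets @ref_prot, except the one covering @address, which
 * gets @prot. Returns the freshly allocated page-table page, or NULL on
 * allocation failure; the caller is expected to install it via
 * set_pmd_pte().
 */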
static struct page *
split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
{
	unsigned long addr;
	struct page *base;
	pte_t *pbase;
	int i;

	base = alloc_pages(GFP_KERNEL, 0);
	if (!base)
		return NULL;

	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
					   addr == address ? prot : ref_prot));
	}
	return base;
}
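
/*
 * Set a PMD-level entry in init_mm. With !SHARED_KERNEL_PMD (e.g. Xen PAE
 * guests, which cannot share the kernel pmd between pagetables), every
 * process carries its own copy of the kernel pmd, so the update also has
 * to be propagated to every pgd on pgd_list.
 */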
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	unsigned long flags;
	struct page *page;

	/* change init_mm */
	set_pte_atomic(kpte, pte);
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}
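
/*
 * Change the attributes of a single kernel page. If the page is currently
 * part of a large mapping, that mapping is first split so that only this
 * page receives the new protections.
 */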
static int __change_page_attr(struct page *page, pgprot_t prot)
{
	pgprot_t ref_prot = PAGE_KERNEL;
	struct page *kpte_page;
	unsigned long address;
	pgprot_t oldprot;
	pte_t *kpte;
	int level;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	oldprot = pte_pgprot(*kpte);
	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext.
	 */
	BUG_ON(address >= (unsigned long)&_text &&
	       address < (unsigned long)&_etext &&
	       (pgprot_val(prot) & _PAGE_NX));

	if ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
		ref_prot = PAGE_KERNEL_EXEC;

	ref_prot = canon_pgprot(ref_prot);
	prot = canon_pgprot(prot);

	if (level == 3) {
		set_pte_atomic(kpte, mk_pte(page, prot));
	} else {
		struct page *split;

		split = split_large_page(address, prot, ref_prot);
		if (!split)
			return -ENOMEM;

		/*
		 * There's a small window here to waste a bit of RAM:
		 */
		set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
	}
	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes in
 * the kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0, i;

	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL(change_page_attr);
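
/*
 * Illustrative usage sketch (assumes a lowmem buffer "buf" that should be
 * mapped uncached; not part of this file):
 *
 *	struct page *pg = virt_to_page(buf);
 *	int err = change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE);
 *	if (!err)
 *		global_flush_tlb();
 */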
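
/*
 * Address-based variant of change_page_attr(): walks @numpages pages
 * starting at kernel virtual address @addr, stops early at the first
 * invalid pfn, and changes the attributes of only the pages scanned up
 * to that point.
 */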
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
{
	int i;
	unsigned long pfn = (addr >> PAGE_SHIFT);

	for (i = 0; i < numpages; i++) {
		if (!pfn_valid(pfn + i)) {
			break;
		} else {
			int level;
			pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);
			BUG_ON(pte && !pte_none(*pte));
		}
	}

	return change_page_attr(virt_to_page(addr), i, prot);
}
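
/*
 * IPI callback used by global_flush_tlb(): flushes this CPU's TLB and,
 * on model >= 4 CPUs, also writes back and invalidates the caches, since
 * changed caching attributes may require cache contents to be written
 * back first.
 */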
static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
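/*
 * Map or unmap a range of pages in the kernel linear mapping, so that
 * use-after-free accesses fault immediately. Highmem pages are skipped
 * because they are not part of the linear mapping.
 */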
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * The return value is ignored - the calls cannot fail,
	 * since large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock - so flush only the current CPU.
	 */
	__flush_tlb_all();
}
#endif