#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        p4d = p4d_offset(pgd, vaddr);
        if (p4d_none(*p4d)) {
                BUG();
                return;
        }
        pud = pud_offset(p4d, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(pteval))
                set_pte_at(&init_mm, vaddr, pte, pteval);
        else
                pte_clear(&init_mm, vaddr, pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
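
/*
 * Illustrative sketch only (not part of this file): callers typically build
 * the pte with pfn_pte() and hand it to set_pte_vaddr().  A hypothetical
 * fixmap-style helper might look roughly like:
 *
 *	static void example_map_fixed_slot(enum fixed_addresses idx,
 *					   phys_addr_t phys, pgprot_t flags)
 *	{
 *		set_pte_vaddr(__fix_to_virt(idx),
 *			      pfn_pte(phys >> PAGE_SHIFT, flags));
 *	}
 *
 * Passing a pte that is pte_none() instead clears the slot, per the
 * pte_clear() branch above.
 */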

unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 */
static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;

        /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole */
        __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
        return 0;
}
early_param("vmalloc", parse_vmalloc);
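
/*
 * Example (illustrative, not from this file): booting with "vmalloc=192M"
 * sets __VMALLOC_RESERVE to 192 MiB plus VMALLOC_OFFSET for the guard hole;
 * memparse() understands the usual K/M/G suffixes.
 */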

/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 */
static int __init parse_reservetop(char *arg)
{
        unsigned long address;

        if (!arg)
                return -EINVAL;

        address = memparse(arg, &arg);
        reserve_top_address(address);
        early_ioremap_init();
        return 0;
}
early_param("reservetop", parse_reservetop);
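
/*
 * Example (illustrative, not from this file): booting with "reservetop=16M"
 * calls reserve_top_address(16 << 20), leaving the top 16 MiB of the address
 * space free for a hypervisor and relocating the fixmap below it;
 * early_ioremap_init() is then called so the early ioremap fixmap slots are
 * set up below the new top.
 */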