xen/setup: update VA mapping when releasing memory during setup
In xen_memory_setup(), if a page that is being released has a VA mapping, this must also be updated. Otherwise the page is not completely released: it is still referenced in Xen and will not be freed until the mapping is removed, and this prevents it from being reallocated at a different PFN.

This was already being done for the ISA memory region in xen_ident_map_ISA(), but on many systems a few pages were missed, because the e820 map on those systems marks a few pages below the ISA memory region as reserved.

This fixes errors such as:

(XEN) page_alloc.c:1148:d0 Over-allocation for domain 0: 2097153 > 2097152
(XEN) memory.c:133:d0 Could not allocate order=0 extent: id=0 memflags=0 (0 of 17)

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 96dc08b35c
commit 83d51ab473
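The core of the change, distilled from the hunks below: before a chunk of PFNs is handed back to Xen, any live kernel VA mapping of those PFNs is rewritten to the 1:1 (identity) layout, so Xen holds no stale reference and can actually free the pages. A minimal sketch of that pattern, assuming the same Xen/kernel interfaces the patch itself uses (HYPERVISOR_update_va_mapping, mfn_pte, __va, max_pfn_mapped); the helper name is illustrative only and not part of the commit:

	/*
	 * Hypothetical helper (for illustration only): rewrite the
	 * kernel's linear-map PTEs for [start_pfn, end_pfn) so each PFN
	 * maps the machine frame of the same number (a 1:1 mapping).
	 * Only PFNs up to max_pfn_mapped have a VA mapping to fix.
	 */
	static void sketch_remap_identity(unsigned long start_pfn,
					  unsigned long end_pfn)
	{
		unsigned long pfn;

		for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
			(void)HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(pfn, PAGE_KERNEL_IO), 0);
	}

Only after this remap does the patch release the chunk to Xen and mark the range as identity in the p2m; see xen_set_identity_and_release_chunk() below.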
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1308,7 +1308,6 @@ asmlinkage void __init xen_start_kernel(void)
 
 	xen_raw_console_write("mapping kernel into physical memory\n");
 	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
-	xen_ident_map_ISA();
 
 	/* Allocate and initialize top and mid mfn levels for p2m structure */
 	xen_build_mfn_list_list();
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1929,29 +1929,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-void __init xen_ident_map_ISA(void)
-{
-	unsigned long pa;
-
-	/*
-	 * If we're dom0, then linear map the ISA machine addresses into
-	 * the kernel's address space.
-	 */
-	if (!xen_initial_domain())
-		return;
-
-	xen_raw_printk("Xen: setup ISA identity maps\n");
-
-	for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
-		pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
-		if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
-			BUG();
-	}
-
-	xen_flush_tlb();
-}
-
 static void __init xen_post_allocator_init(void)
 {
 	pv_mmu_ops.set_pte = xen_set_pte;
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -139,6 +139,13 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 
 	return len;
 }
+
+static unsigned long __init xen_release_chunk(unsigned long start,
+					      unsigned long end)
+{
+	return xen_do_chunk(start, end, true);
+}
+
 static unsigned long __init xen_populate_chunk(
 	const struct e820entry *list, size_t map_size,
 	unsigned long max_pfn, unsigned long *last_pfn,
@@ -197,6 +204,29 @@ static unsigned long __init xen_populate_chunk(
 	}
 	return done;
 }
+
+static void __init xen_set_identity_and_release_chunk(
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long *released, unsigned long *identity)
+{
+	unsigned long pfn;
+
+	/*
+	 * If the PFNs are currently mapped, the VA mapping also needs
+	 * to be updated to be 1:1.
+	 */
+	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+		(void)HYPERVISOR_update_va_mapping(
+			(unsigned long)__va(pfn << PAGE_SHIFT),
+			mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+	if (start_pfn < nr_pages)
+		*released += xen_release_chunk(
+			start_pfn, min(end_pfn, nr_pages));
+
+	*identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
 static unsigned long __init xen_set_identity_and_release(
 	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
@@ -226,14 +256,11 @@ static unsigned long __init xen_set_identity_and_release(
 			if (entry->type == E820_RAM)
 				end_pfn = PFN_UP(entry->addr);
 
-			if (start_pfn < end_pfn) {
-				if (start_pfn < nr_pages)
-					released += xen_do_chunk(
-						start_pfn, min(end_pfn, nr_pages), true);
+			if (start_pfn < end_pfn)
+				xen_set_identity_and_release_chunk(
+					start_pfn, end_pfn, nr_pages,
+					&released, &identity);
 
-				identity += set_phys_range_identity(
-					start_pfn, end_pfn);
-			}
 			start = end;
 		}
 	}
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
 extern unsigned long xen_max_p2m_pfn;
 