Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - a few misc bits

 - ocfs2 updates

 - almost all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (131 commits)
  memory hotplug: fix comments when adding section
  mm: make alloc_node_mem_map a void call if we don't have CONFIG_FLAT_NODE_MEM_MAP
  mm: simplify nodemask printing
  mm,oom_reaper: remove pointless kthread_run() error check
  mm/page_ext.c: check if page_ext is not prepared
  writeback: remove unused function parameter
  mm: do not rely on preempt_count in print_vma_addr
  mm, sparse: do not swamp log with huge vmemmap allocation failures
  mm/hmm: remove redundant variable align_end
  mm/list_lru.c: mark expected switch fall-through
  mm/shmem.c: mark expected switch fall-through
  mm/page_alloc.c: broken deferred calculation
  mm: don't warn about allocations which stall for too long
  fs: fuse: account fuse_inode slab memory as reclaimable
  mm, page_alloc: fix potential false positive in __zone_watermark_ok
  mm: mlock: remove lru_add_drain_all()
  mm, sysctl: make NUMA stats configurable
  shmem: convert shmem_init_inodecache() to void
  Unify migrate_pages and move_pages access checks
  mm, pagevec: rename pagevec drained field
  ...
@@ -231,6 +231,36 @@ extern unsigned long _PAGE_ALL_SZ_BITS;
 extern struct page *mem_map_zero;
 #define ZERO_PAGE(vaddr)	(mem_map_zero)
 
+/* This macro must be updated when the size of struct page grows above 80
+ * or reduces below 64.
+ * The idea that compiler optimizes out switch() statement, and only
+ * leaves clrx instructions
+ */
+#define mm_zero_struct_page(pp) do {					\
+	unsigned long *_pp = (void *)(pp);				\
+									\
+	/* Check that struct page is either 64, 72, or 80 bytes */	\
+	BUILD_BUG_ON(sizeof(struct page) & 7);				\
+	BUILD_BUG_ON(sizeof(struct page) < 64);				\
+	BUILD_BUG_ON(sizeof(struct page) > 80);				\
+									\
+	switch (sizeof(struct page)) {					\
+	case 80:							\
+		_pp[9] = 0;	/* fallthrough */			\
+	case 72:							\
+		_pp[8] = 0;	/* fallthrough */			\
+	default:							\
+		_pp[7] = 0;						\
+		_pp[6] = 0;						\
+		_pp[5] = 0;						\
+		_pp[4] = 0;						\
+		_pp[3] = 0;						\
+		_pp[2] = 0;						\
+		_pp[1] = 0;						\
+		_pp[0] = 0;						\
+	}								\
+} while (0)
+
 /* PFNs are real physical page numbers. However, mem_map only begins to record
  * per-page information starting at pfn_base. This is to handle systems where
  * the first physical page in the machine is at some huge physical address,
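The comment above hinges on the switch being folded at compile time: sizeof(struct page) is a constant, so the compiler drops the dead cases and emits only the stores for the actual size (straight-line clrx stores on sparc64); generic code that does not override the hook typically falls back to zeroing the whole structure with memset. Below is a minimal, hypothetical user-space sketch of the same pattern; struct foo, its 80-byte layout on an LP64 target, and the _Static_assert checks are stand-ins for struct page and BUILD_BUG_ON, not kernel code.

/* Hypothetical stand-in for struct page: 10 longs = 80 bytes on LP64. */
#include <stdio.h>
#include <string.h>

struct foo {
	unsigned long words[10];
};

/* Same shape as mm_zero_struct_page(): the switch is resolved at build
 * time because sizeof(struct foo) is a constant, leaving only stores.
 */
#define zero_struct_foo(pp) do {					\
	unsigned long *_pp = (void *)(pp);				\
	_Static_assert(!(sizeof(struct foo) & 7), "multiple of 8");	\
	_Static_assert(sizeof(struct foo) >= 64, "at least 64");	\
	_Static_assert(sizeof(struct foo) <= 80, "at most 80");	\
	switch (sizeof(struct foo)) {					\
	case 80:							\
		_pp[9] = 0;	/* fall through */			\
	case 72:							\
		_pp[8] = 0;	/* fall through */			\
	default:							\
		_pp[7] = 0; _pp[6] = 0; _pp[5] = 0; _pp[4] = 0;		\
		_pp[3] = 0; _pp[2] = 0; _pp[1] = 0; _pp[0] = 0;		\
	}								\
} while (0)

int main(void)
{
	struct foo f;

	memset(&f, 0xff, sizeof(f));
	zero_struct_foo(&f);
	printf("word[0] after zeroing: %lu\n", f.words[0]);	/* prints 0 */
	return 0;
}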
@@ -397,7 +397,7 @@ static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 
 	pmd_clear(pmd);
 	pte_free_tlb(tlb, token, addr);
-	atomic_long_dec(&tlb->mm->nr_ptes);
+	mm_dec_nr_ptes(tlb->mm);
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -472,6 +472,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	pud = pud_offset(pgd, start);
 	pgd_clear(pgd);
 	pud_free_tlb(tlb, pud, start);
+	mm_dec_nr_puds(tlb->mm);
 }
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb,
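The two hunks above replace the open-coded atomic_long_dec() on mm->nr_ptes with accounting helpers. As a rough sketch only (assuming the counters remain atomic_long_t fields of struct mm_struct; the real helpers live in include/linux/mm.h and their exact definitions and config guards may differ), the wrappers amount to:

/* Sketch only: page-table accounting wrappers of the kind called above. */
static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_dec(&mm->nr_ptes);	/* one fewer PTE page in this mm */
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	atomic_long_dec(&mm->nr_puds);	/* one fewer PUD page in this mm */
}

Centralising the bookkeeping behind helpers means the representation of the counters can change later without touching every architecture's page-table teardown paths.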
@@ -2540,9 +2540,16 @@ void __init mem_init(void)
 {
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
-	register_page_bootmem_info();
 	free_all_bootmem();
 
+	/*
+	 * Must be done after boot memory is put on freelist, because here we
+	 * might set fields in deferred struct pages that have not yet been
+	 * initialized, and free_all_bootmem() initializes all the reserved
+	 * deferred pages for us.
+	 */
+	register_page_bootmem_info();
+
 	/*
 	 * Set up the zero page, mark it reserved, so that page count
 	 * is not manipulated when freeing the page from user ptes.
@@ -2637,30 +2644,19 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
 	vstart = vstart & PMD_MASK;
 	vend = ALIGN(vend, PMD_SIZE);
 	for (; vstart < vend; vstart += PMD_SIZE) {
-		pgd_t *pgd = pgd_offset_k(vstart);
+		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
 		unsigned long pte;
 		pud_t *pud;
 		pmd_t *pmd;
 
-		if (pgd_none(*pgd)) {
-			pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!pgd)
+			return -ENOMEM;
 
-			if (!new)
-				return -ENOMEM;
-			pgd_populate(&init_mm, pgd, new);
-		}
-
-		pud = pud_offset(pgd, vstart);
-		if (pud_none(*pud)) {
-			pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
-
-			if (!new)
-				return -ENOMEM;
-			pud_populate(&init_mm, pud, new);
-		}
+		pud = vmemmap_pud_populate(pgd, vstart, node);
+		if (!pud)
+			return -ENOMEM;
 
 		pmd = pmd_offset(pud, vstart);
-
 		pte = pmd_val(*pmd);
 		if (!(pte & _PAGE_VALID)) {
 			void *block = vmemmap_alloc_block(PMD_SIZE, node);
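The loop above now relies on the generic vmemmap population helpers instead of open-coding the pgd/pud checks. A sketch of the shape of such a helper, inferred from the removed block (the real vmemmap_pgd_populate() lives in mm/sparse-vmemmap.c and may additionally zero the freshly allocated table):

/* Sketch: allocate and hook up a PUD table for addr if the kernel PGD
 * entry is still empty, mirroring the open-coded block removed above.
 */
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);

	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);

		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

vmemmap_pud_populate() follows the same pattern one level down, which is why the caller above only needs the NULL checks that the new code adds.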
@@ -2927,7 +2923,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 			    unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	pte_t *pte = NULL;
 
 	if (page)
@@ -2939,11 +2935,11 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
 			unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
-		free_hot_cold_page(page, 0);
+		free_unref_page(page);
 		return NULL;
 	}
 	return (pte_t *) page_address(page);