mirror of https://github.com/torvalds/linux.git
commit 645d5ce2f7
We can hit the following BUG_ON during memory unplug:

kernel BUG at arch/powerpc/mm/book3s64/pgtable.c:342!
Oops: Exception in kernel mode, sig: 5 [#1]
LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA pSeries
NIP [c000000000093308] pmd_fragment_free+0x48/0xc0
LR [c00000000147bfec] remove_pagetable+0x578/0x60c
Call Trace:
0xc000008050000000 (unreliable)
remove_pagetable+0x384/0x60c
radix__remove_section_mapping+0x18/0x2c
remove_section_mapping+0x1c/0x3c
arch_remove_memory+0x11c/0x180
try_remove_memory+0x120/0x1b0
__remove_memory+0x20/0x40
dlpar_remove_lmb+0xc0/0x114
dlpar_memory+0x8b0/0xb20
handle_dlpar_errorlog+0xc0/0x190
pseries_hp_work_fn+0x2c/0x60
process_one_work+0x30c/0x810
worker_thread+0x98/0x540
kthread+0x1c4/0x1d0
ret_from_kernel_thread+0x5c/0x74

This occurs when unplug is attempted for memory that was mapped using
memblock pages as part of early kernel page table setup. We wouldn't
have initialized the PMD or PTE fragment count for those PMD or PTE
pages.

This can be fixed by allocating memory in PAGE_SIZE granularity during
early page table allocation. This makes sure a specific page is not
shared with another memblock allocation, so we can free the page-table
pages correctly on removal.

Since we now do PAGE_SIZE allocations for both PUD tables and PMD
tables (note that PTE table allocation is already of PAGE_SIZE), we end
up allocating more memory for the same amount of system RAM. Here is a
comparison of how much more we need for a 64T and a 2G system after
this patch:

1. 64T system
-------------
64T RAM would need 64G for vmemmap with struct page size being 64B.

128 PUD tables for 64T memory (1G mappings)
1 PUD table and 64 PMD tables for 64G vmemmap (2M mappings)

With default PUD[PMD]_TABLE_SIZE(4K), (128+1+64)*4K=772K
With PAGE_SIZE(64K) table allocations, (128+1+64)*64K=12352K

2. 2G system
------------
2G RAM would need 2M for vmemmap with struct page size being 64B.

1 PUD table for 2G memory (1G mapping)
1 PUD table and 1 PMD table for 2M vmemmap (2M mappings)

With default PUD[PMD]_TABLE_SIZE(4K), (1+1+1)*4K=12K
With new PAGE_SIZE(64K) table allocations, (1+1+1)*64K=192K

Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200709131925.922266-2-aneesh.kumar@linux.ibm.com
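A minimal sketch of the change described above (illustrative only: the
caller and argument names follow arch/powerpc/mm/book3s64/radix_pgtable.c,
but this is not the exact upstream diff):

	/* Before: tables carved out of a shared memblock chunk. */
	pud = early_alloc_pgtable(PUD_TABLE_SIZE, nid, region_start, region_end);
	pmd = early_alloc_pgtable(PMD_TABLE_SIZE, nid, region_start, region_end);

	/*
	 * After: each table takes a full PAGE_SIZE allocation, so its page
	 * is never shared with another memblock user and can be freed on
	 * memory unplug.
	 */
	pud = early_alloc_pgtable(PAGE_SIZE, nid, region_start, region_end);
	pmd = early_alloc_pgtable(PAGE_SIZE, nid, region_start, region_end);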
123 lines · 2.6 KiB · C
// SPDX-License-Identifier: GPL-2.0

/*
 * Handling Page Tables through page fragments
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
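
/*
 * Each PAGE_SIZE page is carved into PTE_FRAG_NR fragments of
 * PTE_FRAG_SIZE bytes. The page's pt_frag_refcount tracks how many
 * fragments are still live, and the per-mm context caches a pointer
 * to the next free fragment in the most recently allocated page.
 *
 * pte_frag_destroy() runs at context teardown: it drops the
 * references held for the fragments that were never handed out
 * (PTE_FRAG_NR minus the count consumed so far) and frees the page
 * once the last reference goes.
 */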
void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pte_page_dtor(page);
		__free_page(page);
	}
}
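
/*
 * Fast path: hand out the next free fragment from the per-mm cache.
 * Returns NULL when fragments are disabled (PTE_FRAG_NR == 1) or the
 * cache is empty, leaving the caller to allocate a fresh page.
 */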
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	if (PTE_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = pte_frag_get(&mm->context);
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		pte_frag_set(&mm->context, pte_frag);
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
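
/*
 * Slow path: allocate a fresh page and try to seed the per-mm cache
 * with its remaining fragments. User page tables are charged to the
 * memcg (__GFP_ACCOUNT) and get the page-table constructor; kernel
 * page tables need neither.
 */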
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_pte_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find the fragment cache already set (another thread
	 * beat us to it), return our page with a single fragment
	 * count; otherwise install our page as the new cache.
	 */
	if (likely(!pte_frag_get(&mm->context))) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}
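
/*
 * Allocate one PTE fragment: try the per-mm cache first, then fall
 * back to carving up a newly allocated page.
 */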
pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}
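
/*
 * Page tables mapped during early boot come straight from memblock
 * and are marked reserved: they carry no constructor state or
 * fragment count, so hand them back with free_reserved_page()
 * instead of going through the refcount path below.
 */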
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (PageReserved(page))
		return free_reserved_page(page);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (!kernel)
			pgtable_pte_page_dtor(page);
		__free_page(page);
	}
}
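
For context, user and kernel PTE allocations reach pte_fragment_alloc()
through the powerpc pgalloc wrappers. A minimal sketch of those wrappers,
paraphrased from arch/powerpc/include/asm/pgalloc.h (the exact form varies
by kernel version):

	static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
	{
		return (pte_t *)pte_fragment_alloc(mm, 1);	/* kernel table */
	}

	static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
	{
		return (pgtable_t)pte_fragment_alloc(mm, 0);	/* user table */
	}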