mirror of https://github.com/torvalds/linux.git
mm: hugetlb_vmemmap: move PageVmemmapSelfHosted() check to split_vmemmap_huge_pmd()
Checking whether a page is self-hosted requires traversing the page table (e.g. via pmd_off_k()); however, the subsequent call to vmemmap_remap_range() already performs that traversal. Moving the PageVmemmapSelfHosted() check into vmemmap_pmd_entry() simplifies the code a bit.

Link: https://lkml.kernel.org/r/20231127084645.27017-4-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit be035a2acf
parent fb93ed6334
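For orientation, here is a minimal sketch (not part of the patch; the helper name is hypothetical) of the lookup both sides of the diff below perform: resolving the struct page that backs a vmemmap address once its kernel PMD entry is known. The old vmemmap_should_optimize() did this with its own pmd_off_k() walk; after this patch, vmemmap_pmd_entry() reuses the PMD it is already visiting.

/*
 * Hypothetical helper, distilled from the code below: find the struct
 * page backing the vmemmap address @addr given its kernel PMD entry.
 * For a PMD leaf, pmd_page() yields the first page of the mapping and
 * pte_index(addr) the page-sized offset of @addr within the PMD
 * range; otherwise the PTE level is read directly.
 */
static struct page *vmemmap_backing_page(pmd_t *pmdp, unsigned long addr)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	if (pmd_leaf(pmd))
		return pmd_page(pmd) + pte_index(addr);
	return pte_page(ptep_get(pte_offset_kernel(pmdp, addr)));
}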
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -95,6 +95,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
 static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long addr,
 			     unsigned long next, struct mm_walk *walk)
 {
+	int ret = 0;
 	struct page *head;
 	struct vmemmap_remap_walk *vmemmap_walk = walk->private;
 
@@ -104,9 +105,30 @@ static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long addr,
 
 	spin_lock(&init_mm.page_table_lock);
 	head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
+	/*
+	 * Due to HugeTLB alignment requirements and the vmemmap
+	 * pages being at the start of the hotplugged memory
+	 * region in memory_hotplug.memmap_on_memory case. Checking
+	 * the vmemmap page associated with the first vmemmap page
+	 * if it is self-hosted is sufficient.
+	 *
+	 * [                 hotplugged memory             ]
+	 * [        section        ][...][        section        ]
+	 * [ vmemmap ][              usable memory               ]
+	 *   ^  |          ^                        |
+	 *   +--+          |                        |
+	 *                 +------------------------+
+	 */
+	if (unlikely(!vmemmap_walk->nr_walked)) {
+		struct page *page = head ? head + pte_index(addr) :
+				    pte_page(ptep_get(pte_offset_kernel(pmd, addr)));
+
+		if (PageVmemmapSelfHosted(page))
+			ret = -ENOTSUPP;
+	}
 	spin_unlock(&init_mm.page_table_lock);
-	if (!head)
-		return 0;
+	if (!head || ret)
+		return ret;
 
 	return vmemmap_split_pmd(pmd, head, addr & PMD_MASK, vmemmap_walk);
 }
@@ -524,50 +546,6 @@ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
 	if (!hugetlb_vmemmap_optimizable(h))
 		return false;
 
-	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
-		pmd_t *pmdp, pmd;
-		struct page *vmemmap_page;
-		unsigned long vaddr = (unsigned long)head;
-
-		/*
-		 * Only the vmemmap page's vmemmap page can be self-hosted.
-		 * Walking the page tables to find the backing page of the
-		 * vmemmap page.
-		 */
-		pmdp = pmd_off_k(vaddr);
-		/*
-		 * The READ_ONCE() is used to stabilize *pmdp in a register or
-		 * on the stack so that it will stop changing under the code.
-		 * The only concurrent operation where it can be changed is
-		 * split_vmemmap_huge_pmd() (*pmdp will be stable after this
-		 * operation).
-		 */
-		pmd = READ_ONCE(*pmdp);
-		if (pmd_leaf(pmd))
-			vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
-		else
-			vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
-		/*
-		 * Due to HugeTLB alignment requirements and the vmemmap pages
-		 * being at the start of the hotplugged memory region in
-		 * memory_hotplug.memmap_on_memory case. Checking any vmemmap
-		 * page's vmemmap page if it is marked as VmemmapSelfHosted is
-		 * sufficient.
-		 *
-		 * [                 hotplugged memory             ]
-		 * [        section        ][...][        section        ]
-		 * [ vmemmap ][              usable memory               ]
-		 *   ^   |     |                                        |
-		 *   +---+     |                                        |
-		 *     ^       |                                        |
-		 *     +-------+                                        |
-		 *          ^                                           |
-		 *          +-------------------------------------------+
-		 */
-		if (PageVmemmapSelfHosted(vmemmap_page))
-			return false;
-	}
-
 	return true;
 }