mm: use nth_page instead of mem_map_offset mem_map_next
To handle the discontiguous case, mem_map_next() has a parameter named
"offset". From a caller's point of view it is confusing that "get the
next entry" needs a parameter named "offset". The other drawback of
mem_map_next() is that callers must keep the "iter" and "offset"
arguments in sync themselves; if the two fall out of step, the iteration
can skip or repeat a subpage. So use nth_page() instead of
mem_map_next(), which derives each subpage from the base page and an
index alone.

Also replace mem_map_offset() with nth_page(), per Matthew's comments.
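
For example, the conversion in clear_gigantic_page() turns the stateful
iterator

	for (i = 0; i < pages_per_huge_page;
	     i++, p = mem_map_next(p, page, i)) {

into a loop whose body recomputes the subpage from scratch on each pass:

	for (i = 0; i < pages_per_huge_page; i++) {
		p = nth_page(page, i);

There is no longer any iterator state to keep consistent with "i".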
Link: https://lkml.kernel.org/r/1662708669-9395-1-git-send-email-lic121@chinatelecom.cn
Signed-off-by: Cheng Li <lic121@chinatelecom.cn>
Fixes: 69d177c2fc ("hugetlbfs: handle pages higher order than MAX_ORDER")
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 14455eabd8
parent 0d83b2d89d

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1306,12 +1306,13 @@ static void __destroy_compound_gigantic_page(struct page *page,
 {
 	int i;
 	int nr_pages = 1 << order;
-	struct page *p = page + 1;
+	struct page *p;
 
 	atomic_set(compound_mapcount_ptr(page), 0);
 	atomic_set(compound_pincount_ptr(page), 0);
 
-	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+	for (i = 1; i < nr_pages; i++) {
+		p = nth_page(page, i);
 		p->mapping = NULL;
 		clear_compound_head(p);
 		if (!demote)
@@ -1532,7 +1533,7 @@ static void add_hugetlb_page(struct hstate *h, struct page *page,
 static void __update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
-	struct page *subpage = page;
+	struct page *subpage;
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
@@ -1563,8 +1564,8 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	if (unlikely(PageHWPoison(page)))
 		hugetlb_clear_page_hwpoison(page);
 
-	for (i = 0; i < pages_per_huge_page(h);
-	     i++, subpage = mem_map_next(subpage, page, i)) {
+	for (i = 0; i < pages_per_huge_page(h); i++) {
+		subpage = nth_page(page, i);
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
 				1 << PG_active | 1 << PG_private |
@@ -1771,13 +1772,15 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
 {
 	int i, j;
 	int nr_pages = 1 << order;
-	struct page *p = page + 1;
+	struct page *p;
 
 	/* we rely on prep_new_huge_page to set the destructor */
 	set_compound_order(page, order);
 	__ClearPageReserved(page);
 	__SetPageHead(page);
-	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+	for (i = 1; i < nr_pages; i++) {
+		p = nth_page(page, i);
+
 		/*
 		 * For gigantic hugepages allocated through bootmem at
 		 * boot, it's safer to be consistent with the not-gigantic
@@ -1824,14 +1827,16 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
 
 out_error:
 	/* undo tail page modifications made above */
-	p = page + 1;
-	for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
+	for (j = 1; j < i; j++) {
+		p = nth_page(page, j);
 		clear_compound_head(p);
 		set_page_refcounted(p);
 	}
 	/* need to clear PG_reserved on remaining tail pages */
-	for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
+	for (; j < nr_pages; j++) {
+		p = nth_page(page, j);
 		__ClearPageReserved(p);
+	}
 	set_compound_order(page, 0);
 #ifdef CONFIG_64BIT
 	page[1].compound_nr = 0;
@@ -6128,7 +6133,7 @@ static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
 
 	for (nr = 0; nr < refs; nr++) {
 		if (likely(pages))
-			pages[nr] = mem_map_offset(page, nr);
+			pages[nr] = nth_page(page, nr);
 		if (vmas)
 			vmas[nr] = vma;
 	}
@@ -6292,7 +6297,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			 (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
 
 		if (pages || vmas)
-			record_subpages_vmas(mem_map_offset(page, pfn_offset),
+			record_subpages_vmas(nth_page(page, pfn_offset),
 					vma, refs,
 					likely(pages) ? pages + i : NULL,
 					vmas ? vmas + i : NULL);
diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -638,34 +638,6 @@ static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 }
 #endif /* !CONFIG_MMU */
 
-/*
- * Return the mem_map entry representing the 'offset' subpage within
- * the maximally aligned gigantic page 'base'. Handle any discontiguity
- * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
- */
-static inline struct page *mem_map_offset(struct page *base, int offset)
-{
-	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
-		return nth_page(base, offset);
-	return base + offset;
-}
-
-/*
- * Iterator over all subpages within the maximally aligned gigantic
- * page 'base'. Handle any discontiguity in the mem_map.
- */
-static inline struct page *mem_map_next(struct page *iter,
-						struct page *base, int offset)
-{
-	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
-		unsigned long pfn = page_to_pfn(base) + offset;
-		if (!pfn_valid(pfn))
-			return NULL;
-		return pfn_to_page(pfn);
-	}
-	return iter + 1;
-}
-
 /* Memory initialisation debug and verification */
 enum mminit_level {
 	MMINIT_WARNING,
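
For reference, nth_page() (defined in include/linux/mm.h) already copes
with a discontiguous mem_map by going through the pfn when
CONFIG_SPARSEMEM is enabled without CONFIG_SPARSEMEM_VMEMMAP, so the
open-coded MAX_ORDER_NR_PAGES handling removed above is not needed by
any caller:

	#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
	#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
	#else
	#define nth_page(page,n) ((page) + (n))
	#endif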
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5690,11 +5690,11 @@ static void clear_gigantic_page(struct page *page,
 				unsigned int pages_per_huge_page)
 {
 	int i;
-	struct page *p = page;
+	struct page *p;
 
 	might_sleep();
-	for (i = 0; i < pages_per_huge_page;
-	     i++, p = mem_map_next(p, page, i)) {
+	for (i = 0; i < pages_per_huge_page; i++) {
+		p = nth_page(page, i);
 		cond_resched();
 		clear_user_highpage(p, addr + i * PAGE_SIZE);
 	}
@@ -5730,13 +5730,12 @@ static void copy_user_gigantic_page(struct page *dst, struct page *src,
 	struct page *dst_base = dst;
 	struct page *src_base = src;
 
-	for (i = 0; i < pages_per_huge_page; ) {
+	for (i = 0; i < pages_per_huge_page; i++) {
+		dst = nth_page(dst_base, i);
+		src = nth_page(src_base, i);
+
 		cond_resched();
 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
 	}
 }
 
@@ -5783,10 +5782,10 @@ long copy_huge_page_from_user(struct page *dst_page,
 	void *page_kaddr;
 	unsigned long i, rc = 0;
 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
-	struct page *subpage = dst_page;
+	struct page *subpage;
 
-	for (i = 0; i < pages_per_huge_page;
-	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
+	for (i = 0; i < pages_per_huge_page; i++) {
+		subpage = nth_page(dst_page, i);
 		if (allow_pagefault)
 			page_kaddr = kmap(subpage);
 		else