mm/hugetlb: make hugetlb migration callback CMA aware
new_non_cma_page() in gup.c needs to allocate a new page that is not in a CMA area. new_non_cma_page() implements this by using the allocation scope APIs. However, there is a work-around for hugetlb. The normal hugetlb page allocation API for migration is alloc_huge_page_nodemask(), which consists of two steps: first, dequeue a page from the free pool; second, if no page is available in the pool, allocate one with the page allocator. new_non_cma_page() can't use this API, since the first step (dequeue) isn't aware of the scope API that excludes CMA areas. So new_non_cma_page() exports the hugetlb-internal function for the second step, alloc_migrate_huge_page(), to global scope and calls it directly. This is suboptimal, since hugetlb pages already sitting in the pool cannot be utilized.

This patch fixes the situation by making the hugetlb dequeue function CMA aware: the dequeue skips CMA memory when the PF_MEMALLOC_NOCMA flag is set on the current task.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Roman Gushchin <guro@fb.com>
Link: http://lkml.kernel.org/r/1596180906-8442-2-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 41b4dc14ee
commit bbe88753bd
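For context on the "allocation scope APIs" the message refers to: the long-term-pinning path avoids CMA by entering a task-wide scope rather than by threading a flag through every callee. A minimal caller-side sketch follows, in the shape the parent commit 41b4dc14ee gave __gup_longterm_locked(); memalloc_nocma_save()/memalloc_nocma_restore() are the real scope API of this era (linux/sched/mm.h), while gup_longterm_example() is a made-up name for illustration only.

/* Hypothetical caller; sketch of the nocma scope pattern, not this patch. */
static long gup_longterm_example(void)
{
	unsigned int flags;

	flags = memalloc_nocma_save();	/* sets PF_MEMALLOC_NOCMA on current */
	/*
	 * Every allocation made inside this scope must stay out of CMA
	 * pageblocks. Before this patch, the hugetlb free-pool dequeue
	 * ignored the scope, which is why gup had to bypass the pool and
	 * call alloc_migrate_huge_page() directly.
	 */
	memalloc_nocma_restore(flags);
	return 0;
}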
2 include/linux/hugetlb.h

@@ -511,8 +511,6 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask, gfp_t gfp_mask);
 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 				unsigned long address);
-struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
-				int nid, nodemask_t *nmask);
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			pgoff_t idx);
 
6 mm/gup.c
@@ -1635,11 +1635,7 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private)
 		struct hstate *h = page_hstate(page);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
-		/*
-		 * We don't want to dequeue from the pool because pool pages will
-		 * mostly be from the CMA region.
-		 */
-		return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
+		return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
 	}
 #endif
 	if (PageTransHuge(page)) {
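The one-line replacement works because alloc_huge_page_nodemask() already performs both steps the commit message describes. A simplified paraphrase of its v5.9 shape in mm/hugetlb.c is shown below (not part of this diff; details trimmed and may differ slightly from the actual source):

/* Paraphrase of alloc_huge_page_nodemask(), v5.9 era; simplified sketch. */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				      nodemask_t *nmask, gfp_t gfp_mask)
{
	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		/*
		 * Step 1: dequeue a pre-allocated huge page from the pool.
		 * With this patch, the dequeue skips CMA pages whenever
		 * PF_MEMALLOC_NOCMA is set on the current task.
		 */
		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock(&hugetlb_lock);

	/* Step 2: no pooled page available; allocate a fresh one. */
	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}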
11 mm/hugetlb.c
@@ -19,6 +19,7 @@
 #include <linux/memblock.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/sched/mm.h>
 #include <linux/mmdebug.h>
 #include <linux/sched/signal.h>
 #include <linux/rmap.h>
@@ -1040,10 +1041,16 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 {
 	struct page *page;
+	bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA);
 
-	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
+	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
+		if (nocma && is_migrate_cma_page(page))
+			continue;
+
 		if (!PageHWPoison(page))
 			break;
+	}
+
 	/*
 	 * if 'non-isolated free hugepage' not found on the list,
 	 * the allocation fails.
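The per-page check added above is cheap: is_migrate_cma_page() only compares the pageblock's migratetype. Roughly, paraphrasing the CONFIG_CMA definition from include/linux/mmzone.h of this era (rewritten here from a macro to an inline function for readability):

/* Paraphrase of is_migrate_cma_page(); the real definition is a macro. */
static inline bool is_migrate_cma_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_CMA;
}

Reading current->flags inside the dequeue path also means no function signature has to change: any task that entered a memalloc_nocma_save()/memalloc_nocma_restore() scope automatically gets CMA-free huge pages from the pool.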
@@ -1935,7 +1942,7 @@ out_unlock:
 	return page;
 }
 
-struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
 		int nid, nodemask_t *nmask)
 {
 	struct page *page;