mm/sparse.c: move subsection_map related functions together
No functional change.

[bhe@redhat.com: move functions into CONFIG_MEMORY_HOTPLUG ifdeffery scope]
  Link: http://lkml.kernel.org/r/20200316045804.GC3486@MiWiFi-R3L-srv
Signed-off-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Link: http://lkml.kernel.org/r/20200312124414.439-6-bhe@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 6ecb0fc612
parent 95a5a34dfe
 mm/sparse.c | 114 ++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 55 insertions(+), 59 deletions(-)
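
The move is purely organizational: the CONFIG_SPARSEMEM_VMEMMAP implementations of the three subsection_map helpers end up next to the vmemmap populate/depopulate code, and their !VMEMMAP stubs end up in the matching #else branch. A condensed sketch of the resulting grouping, derived from the diff below (an illustration only, not the literal file contents):

/*
 * Condensed sketch of the resulting grouping in mm/sparse.c;
 * function bodies are elided here, see the diff below for the real code.
 */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* vmemmap populate/depopulate helpers, followed by the real helpers:     */
/*   clear_subsection_map()    - bitmap_xor() the range out of the map    */
/*   is_subsection_map_empty() - bitmap_empty() on the whole map          */
/*   fill_subsection_map()     - bitmap_or() the range into the map       */
#else
/* classic memmap helpers, followed by the !VMEMMAP stubs:                */
/*   clear_subsection_map()    - return 0                                 */
/*   is_subsection_map_empty() - return true                              */
/*   fill_subsection_map()     - return 0                                 */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */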
diff --git a/mm/sparse.c b/mm/sparse.c
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -666,6 +666,55 @@ static void free_map_bootmem(struct page *memmap)
 
 	vmemmap_free(start, end, NULL);
 }
+
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
+	struct mem_section *ms = __pfn_to_section(pfn);
+	unsigned long *subsection_map = ms->usage
+		? &ms->usage->subsection_map[0] : NULL;
+
+	subsection_mask_set(map, pfn, nr_pages);
+	if (subsection_map)
+		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
+
+	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
+				"section already deactivated (%#lx + %ld)\n",
+				pfn, nr_pages))
+		return -EINVAL;
+
+	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+	return 0;
+}
+
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+	return bitmap_empty(&ms->usage->subsection_map[0],
+			    SUBSECTIONS_PER_SECTION);
+}
+
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+	struct mem_section *ms = __pfn_to_section(pfn);
+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+	unsigned long *subsection_map;
+	int rc = 0;
+
+	subsection_mask_set(map, pfn, nr_pages);
+
+	subsection_map = &ms->usage->subsection_map[0];
+
+	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
+		rc = -EINVAL;
+	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
+		rc = -EEXIST;
+	else
+		bitmap_or(subsection_map, map, subsection_map,
+				SUBSECTIONS_PER_SECTION);
+
+	return rc;
+}
 #else
 struct page * __meminit populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
@@ -709,36 +758,7 @@ static void free_map_bootmem(struct page *memmap)
 			put_page_bootmem(page);
 	}
 }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
-	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
-	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
-	struct mem_section *ms = __pfn_to_section(pfn);
-	unsigned long *subsection_map = ms->usage
-		? &ms->usage->subsection_map[0] : NULL;
-
-	subsection_mask_set(map, pfn, nr_pages);
-	if (subsection_map)
-		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
-
-	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
-				"section already deactivated (%#lx + %ld)\n",
-				pfn, nr_pages))
-		return -EINVAL;
-
-	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
-	return 0;
-}
-
-static bool is_subsection_map_empty(struct mem_section *ms)
-{
-	return bitmap_empty(&ms->usage->subsection_map[0],
-			    SUBSECTIONS_PER_SECTION);
-}
-#else
 static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
 	return 0;
@@ -748,7 +768,12 @@ static bool is_subsection_map_empty(struct mem_section *ms)
 {
 	return true;
 }
-#endif
+
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+	return 0;
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 /*
  * To deactivate a memory region, there are 3 cases to handle across
@@ -810,35 +835,6 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 	ms->section_mem_map = (unsigned long)NULL;
 }
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
-	struct mem_section *ms = __pfn_to_section(pfn);
-	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
-	unsigned long *subsection_map;
-	int rc = 0;
-
-	subsection_mask_set(map, pfn, nr_pages);
-
-	subsection_map = &ms->usage->subsection_map[0];
-
-	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
-		rc = -EINVAL;
-	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
-		rc = -EEXIST;
-	else
-		bitmap_or(subsection_map, map, subsection_map,
-				SUBSECTIONS_PER_SECTION);
-
-	return rc;
-}
-#else
-static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
-	return 0;
-}
-#endif
-
 static struct page * __meminit section_activate(int nid, unsigned long pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
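
To make the bitmap semantics of the moved helpers easier to follow, here is a small userspace model. It is an illustration only: a single 64-bit word stands in for the kernel's DECLARE_BITMAP()/SUBSECTIONS_PER_SECTION machinery, toy_fill()/toy_clear()/mask_of() are made-up names, and plain negative return codes stand in for -EINVAL/-EEXIST.

/* Toy userspace model of the subsection map fill/clear semantics. */
#include <stdio.h>
#include <stdint.h>

/* Mask covering nr subsections starting at bit 'start' (assumes start+nr <= 64). */
static uint64_t mask_of(unsigned int start, unsigned int nr)
{
	return (nr >= 64 ? ~0ULL : ((1ULL << nr) - 1)) << start;
}

/* Mirrors fill_subsection_map(): reject empty or already-populated ranges. */
static int toy_fill(uint64_t *map, unsigned int start, unsigned int nr)
{
	uint64_t m = mask_of(start, nr);

	if (!m)
		return -1;	/* like -EINVAL: nothing to set */
	if (*map & m)
		return -2;	/* like -EEXIST: overlaps a live subsection */
	*map |= m;		/* like bitmap_or() */
	return 0;
}

/* Mirrors clear_subsection_map(): the range must be fully populated. */
static int toy_clear(uint64_t *map, unsigned int start, unsigned int nr)
{
	uint64_t m = mask_of(start, nr);

	if ((*map & m) != m)
		return -1;	/* like the "section already deactivated" WARN */
	*map ^= m;		/* like bitmap_xor(): clears exactly those bits */
	return 0;
}

int main(void)
{
	uint64_t map = 0;

	printf("fill 0..3   -> %d, map=%#llx\n", toy_fill(&map, 0, 4),
	       (unsigned long long)map);
	printf("fill 2..5   -> %d (overlap rejected)\n", toy_fill(&map, 2, 4));
	printf("clear 0..3  -> %d, map=%#llx\n", toy_clear(&map, 0, 4),
	       (unsigned long long)map);
	printf("clear again -> %d (already clear)\n", toy_clear(&map, 0, 4));
	return 0;
}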