mm: simplify free_highmem_page() and free_reserved_page()
adjust_managed_page_count() as called by free_reserved_page() properly
handles pages in a highmem zone, so we can reuse it for
free_highmem_page().

We can now get rid of totalhigh_pages_inc() and simplify
free_reserved_page().

Link: https://lkml.kernel.org/r/20210126182113.19892-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: "Gustavo A. R. Silva" <gustavoars@kernel.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a0cd7a7c4b
parent b3880c690b
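
The key fact the patch relies on: adjust_managed_page_count() already updates the zone's managed-page counter, totalram_pages, and, for highmem pages, totalhigh_pages. Below is a minimal reference sketch of that helper as it looks in mm/page_alloc.c around this series; it is reproduced for orientation only and is not part of this diff.

void adjust_managed_page_count(struct page *page, long count)
{
	/* Per-zone accounting of pages managed by the buddy allocator. */
	atomic_long_add(count, &page_zone(page)->managed_pages);
	/* Global RAM accounting. */
	totalram_pages_add(count);
#ifdef CONFIG_HIGHMEM
	/* Highmem pages are additionally counted in totalhigh_pages. */
	if (PageHighMem(page))
		totalhigh_pages_add(count);
#endif
}

So calling it with a count of 1 from free_reserved_page() performs exactly the accounting that free_highmem_page() used to open-code, which is what lets the highmem-specific helper and totalhigh_pages_inc() go away.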
@@ -127,11 +127,6 @@ static inline unsigned long totalhigh_pages(void)
 	return (unsigned long)atomic_long_read(&_totalhigh_pages);
 }
 
-static inline void totalhigh_pages_inc(void)
-{
-	atomic_long_inc(&_totalhigh_pages);
-}
-
 static inline void totalhigh_pages_add(long count)
 {
 	atomic_long_add(count, &_totalhigh_pages);
@@ -2310,32 +2310,20 @@ extern void free_initmem(void);
 extern unsigned long free_reserved_area(void *start, void *end,
 					int poison, const char *s);
 
-#ifdef CONFIG_HIGHMEM
-/*
- * Free a highmem page into the buddy system, adjusting totalhigh_pages
- * and totalram_pages.
- */
-extern void free_highmem_page(struct page *page);
-#endif
-
 extern void adjust_managed_page_count(struct page *page, long count);
 extern void mem_init_print_info(const char *str);
 
 extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
 
 /* Free the reserved page into the buddy system, so it gets managed. */
-static inline void __free_reserved_page(struct page *page)
+static inline void free_reserved_page(struct page *page)
 {
 	ClearPageReserved(page);
 	init_page_count(page);
 	__free_page(page);
-}
-
-static inline void free_reserved_page(struct page *page)
-{
-	__free_reserved_page(page);
 	adjust_managed_page_count(page, 1);
 }
+#define free_highmem_page(page) free_reserved_page(page)
 
 static inline void mark_page_reserved(struct page *page)
 {
@@ -7691,17 +7691,6 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
 	return pages;
 }
 
-#ifdef CONFIG_HIGHMEM
-void free_highmem_page(struct page *page)
-{
-	__free_reserved_page(page);
-	totalram_pages_inc();
-	atomic_long_inc(&page_zone(page)->managed_pages);
-	totalhigh_pages_inc();
-}
-#endif
-
-
 void __init mem_init_print_info(const char *str)
 {
 	unsigned long physpages, codesize, datasize, rosize, bss_size;
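
After this change, free_highmem_page() is just an alias for free_reserved_page(), so a caller hands reserved pages back to the buddy allocator with one helper and the managed/total/highmem accounting follows automatically. A hypothetical caller sketch follows; the function name free_region_pages() and the pfn loop are illustrative, not taken from this commit.

/* Hypothetical helper: release a range of reserved pages to the buddy allocator. */
static void free_region_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		free_reserved_page(pfn_to_page(pfn));	/* lowmem and highmem pages alike */
}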