mirror of
https://github.com/torvalds/linux.git
synced 2024-11-25 05:32:00 +00:00
mm/mm_init: use helper macro BITS_PER_LONG and BITS_PER_BYTE
It's more readable to use helper macro BITS_PER_LONG and BITS_PER_BYTE. No functional change intended. Link: https://lkml.kernel.org/r/20230807023528.325191-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin <linmiaohe@huawei.com> Reviewed-by: David Hildenbrand <david@redhat.com> Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
6379693e3c
commit
daee07bfba
@@ -79,7 +79,7 @@ void __init mminit_verify_pageflags_layout(void)
 	int shift, width;
 	unsigned long or_mask, add_mask;

-	shift = 8 * sizeof(unsigned long);
+	shift = BITS_PER_LONG;
 	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
 		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
@@ -1426,9 +1426,9 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
 	usemapsize = roundup(zonesize, pageblock_nr_pages);
 	usemapsize = usemapsize >> pageblock_order;
 	usemapsize *= NR_PAGEBLOCK_BITS;
-	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
+	usemapsize = roundup(usemapsize, BITS_PER_LONG);

-	return usemapsize / 8;
+	return usemapsize / BITS_PER_BYTE;
 }

 static void __ref setup_usemap(struct zone *zone)
|
Loading…
Reference in New Issue
Block a user