arch, drivers: replace for_each_memblock() with for_each_mem_range()
There are several occurrences of the following pattern:
for_each_memblock(memory, reg) {
start = __pfn_to_phys(memblock_region_memory_base_pfn(reg));
end = __pfn_to_phys(memblock_region_memory_end_pfn(reg));
/* do something with start and end */
}
Using for_each_mem_range() iterator is more appropriate in such cases and
allows simpler and cleaner code.
[akpm@linux-foundation.org: fix arch/arm/mm/pmsa-v7.c build]
[rppt@linux.ibm.com: mips: fix cavium-octeon build caused by memblock refactoring]
Link: http://lkml.kernel.org/r/20200827124549.GD167163@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Emil Renner Berthing <kernel@esmil.dk>
Cc: Hari Bathini <hbathini@linux.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: https://lkml.kernel.org/r/20200818151634.14343-13-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
c9118e6c37
commit
b10d6bca87
@@ -191,13 +191,13 @@ int is_fadump_active(void)
|
||||
*/
|
||||
static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t reg_start, reg_end;
|
||||
bool ret = false;
|
||||
u64 start, end;
|
||||
u64 i, start, end;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
start = max_t(u64, d_start, reg->base);
|
||||
end = min_t(u64, d_end, (reg->base + reg->size));
|
||||
for_each_mem_range(i, &reg_start, &reg_end) {
|
||||
start = max_t(u64, d_start, reg_start);
|
||||
end = min_t(u64, d_end, reg_end);
|
||||
if (d_start < end) {
|
||||
/* Memory hole from d_start to start */
|
||||
if (start > d_start)
|
||||
@@ -422,34 +422,34 @@ static int __init add_boot_mem_regions(unsigned long mstart,
|
||||
|
||||
static int __init fadump_get_boot_mem_regions(void)
|
||||
{
|
||||
unsigned long base, size, cur_size, hole_size, last_end;
|
||||
unsigned long size, cur_size, hole_size, last_end;
|
||||
unsigned long mem_size = fw_dump.boot_memory_size;
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t reg_start, reg_end;
|
||||
int ret = 1;
|
||||
u64 i;
|
||||
|
||||
fw_dump.boot_mem_regs_cnt = 0;
|
||||
|
||||
last_end = 0;
|
||||
hole_size = 0;
|
||||
cur_size = 0;
|
||||
for_each_memblock(memory, reg) {
|
||||
base = reg->base;
|
||||
size = reg->size;
|
||||
hole_size += (base - last_end);
|
||||
for_each_mem_range(i, &reg_start, &reg_end) {
|
||||
size = reg_end - reg_start;
|
||||
hole_size += (reg_start - last_end);
|
||||
|
||||
if ((cur_size + size) >= mem_size) {
|
||||
size = (mem_size - cur_size);
|
||||
ret = add_boot_mem_regions(base, size);
|
||||
ret = add_boot_mem_regions(reg_start, size);
|
||||
break;
|
||||
}
|
||||
|
||||
mem_size -= size;
|
||||
cur_size += size;
|
||||
ret = add_boot_mem_regions(base, size);
|
||||
ret = add_boot_mem_regions(reg_start, size);
|
||||
if (!ret)
|
||||
break;
|
||||
|
||||
last_end = base + size;
|
||||
last_end = reg_end;
|
||||
}
|
||||
fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size);
|
||||
|
||||
@@ -985,9 +985,8 @@ static int fadump_init_elfcore_header(char *bufp)
|
||||
*/
|
||||
static int fadump_setup_crash_memory_ranges(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
u64 start, end;
|
||||
int i, ret;
|
||||
u64 i, start, end;
|
||||
int ret;
|
||||
|
||||
pr_debug("Setup crash memory ranges.\n");
|
||||
crash_mrange_info.mem_range_cnt = 0;
|
||||
@@ -1005,10 +1004,7 @@ static int fadump_setup_crash_memory_ranges(void)
|
||||
return ret;
|
||||
}
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
start = (u64)reg->base;
|
||||
end = start + (u64)reg->size;
|
||||
|
||||
for_each_mem_range(i, &start, &end) {
|
||||
/*
|
||||
* skip the memory chunk that is already added
|
||||
* (0 through boot_memory_top).
|
||||
@@ -1242,7 +1238,9 @@ static void fadump_free_reserved_memory(unsigned long start_pfn,
|
||||
*/
|
||||
static void fadump_release_reserved_area(u64 start, u64 end)
|
||||
{
|
||||
u64 tstart, tend, spfn, epfn, reg_spfn, reg_epfn, i;
|
||||
unsigned long reg_spfn, reg_epfn;
|
||||
u64 tstart, tend, spfn, epfn;
|
||||
int i;
|
||||
|
||||
spfn = PHYS_PFN(start);
|
||||
epfn = PHYS_PFN(end);
|
||||
@@ -1685,12 +1683,10 @@ int __init fadump_reserve_mem(void)
|
||||
/* Preserve everything above the base address */
|
||||
static void __init fadump_reserve_crash_area(u64 base)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
u64 mstart, msize;
|
||||
u64 i, mstart, mend, msize;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
mstart = reg->base;
|
||||
msize = reg->size;
|
||||
for_each_mem_range(i, &mstart, &mend) {
|
||||
msize = mend - mstart;
|
||||
|
||||
if ((mstart + msize) < base)
|
||||
continue;
|
||||
|
||||
@@ -138,15 +138,13 @@ out:
|
||||
*/
|
||||
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t base, end;
|
||||
struct crash_mem *tmem;
|
||||
u64 i;
|
||||
int ret;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
u64 base, size;
|
||||
|
||||
base = (u64)reg->base;
|
||||
size = (u64)reg->size;
|
||||
for_each_mem_range(i, &base, &end) {
|
||||
u64 size = end - base;
|
||||
|
||||
/* Skip backup memory region, which needs a separate entry */
|
||||
if (base == BACKUP_SRC_START) {
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
*
|
||||
* SMP scalability work:
|
||||
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
|
||||
*
|
||||
*
|
||||
* Module name: htab.c
|
||||
*
|
||||
* Description:
|
||||
@@ -867,8 +867,8 @@ static void __init htab_initialize(void)
|
||||
unsigned long table;
|
||||
unsigned long pteg_count;
|
||||
unsigned long prot;
|
||||
unsigned long base = 0, size = 0;
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t base = 0, size = 0, end;
|
||||
u64 i;
|
||||
|
||||
DBG(" -> htab_initialize()\n");
|
||||
|
||||
@@ -884,7 +884,7 @@ static void __init htab_initialize(void)
|
||||
/*
|
||||
* Calculate the required size of the htab. We want the number of
|
||||
* PTEGs to equal one half the number of real pages.
|
||||
*/
|
||||
*/
|
||||
htab_size_bytes = htab_get_table_size();
|
||||
pteg_count = htab_size_bytes >> 7;
|
||||
|
||||
@@ -894,7 +894,7 @@ static void __init htab_initialize(void)
|
||||
firmware_has_feature(FW_FEATURE_PS3_LV1)) {
|
||||
/* Using a hypervisor which owns the htab */
|
||||
htab_address = NULL;
|
||||
_SDR1 = 0;
|
||||
_SDR1 = 0;
|
||||
#ifdef CONFIG_FA_DUMP
|
||||
/*
|
||||
* If firmware assisted dump is active firmware preserves
|
||||
@@ -960,9 +960,9 @@ static void __init htab_initialize(void)
|
||||
#endif /* CONFIG_DEBUG_PAGEALLOC */
|
||||
|
||||
/* create bolted the linear mapping in the hash table */
|
||||
for_each_memblock(memory, reg) {
|
||||
base = (unsigned long)__va(reg->base);
|
||||
size = reg->size;
|
||||
for_each_mem_range(i, &base, &end) {
|
||||
size = end - base;
|
||||
base = (unsigned long)__va(base);
|
||||
|
||||
DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
|
||||
base, size, prot);
|
||||
|
||||
@@ -329,7 +329,8 @@ static int __meminit create_physical_mapping(unsigned long start,
|
||||
static void __init radix_init_pgtable(void)
|
||||
{
|
||||
unsigned long rts_field;
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t start, end;
|
||||
u64 i;
|
||||
|
||||
/* We don't support slb for radix */
|
||||
mmu_slb_size = 0;
|
||||
@@ -337,20 +338,19 @@ static void __init radix_init_pgtable(void)
|
||||
/*
|
||||
* Create the linear mapping
|
||||
*/
|
||||
for_each_memblock(memory, reg) {
|
||||
for_each_mem_range(i, &start, &end) {
|
||||
/*
|
||||
* The memblock allocator is up at this point, so the
|
||||
* page tables will be allocated within the range. No
|
||||
* need for a node (which we don't have yet).
|
||||
*/
|
||||
|
||||
if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
|
||||
if (end >= RADIX_VMALLOC_START) {
|
||||
pr_warn("Outside the supported range\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
WARN_ON(create_physical_mapping(reg->base,
|
||||
reg->base + reg->size,
|
||||
WARN_ON(create_physical_mapping(start, end,
|
||||
radix_mem_block_size,
|
||||
-1, PAGE_KERNEL));
|
||||
}
|
||||
|
||||
@@ -138,11 +138,11 @@ void __init kasan_mmu_init(void)
|
||||
|
||||
void __init kasan_init(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t base, end;
|
||||
u64 i;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
phys_addr_t base = reg->base;
|
||||
phys_addr_t top = min(base + reg->size, total_lowmem);
|
||||
for_each_mem_range(i, &base, &end) {
|
||||
phys_addr_t top = min(end, total_lowmem);
|
||||
int ret;
|
||||
|
||||
if (base >= top)
|
||||
|
||||
@@ -585,20 +585,24 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
*/
|
||||
static int __init add_system_ram_resources(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t start, end;
|
||||
u64 i;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
for_each_mem_range(i, &start, &end) {
|
||||
struct resource *res;
|
||||
unsigned long base = reg->base;
|
||||
unsigned long size = reg->size;
|
||||
|
||||
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
|
||||
WARN_ON(!res);
|
||||
|
||||
if (res) {
|
||||
res->name = "System RAM";
|
||||
res->start = base;
|
||||
res->end = base + size - 1;
|
||||
res->start = start;
|
||||
/*
|
||||
* In memblock, end points to the first byte after
|
||||
* the range while in resourses, end points to the
|
||||
* last byte in the range.
|
||||
*/
|
||||
res->end = end - 1;
|
||||
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
|
||||
WARN_ON(request_resource(&iomem_resource, res) < 0);
|
||||
}
|
||||
|
||||
@@ -123,11 +123,11 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
|
||||
|
||||
void __init mapin_ram(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t base, end;
|
||||
u64 i;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
phys_addr_t base = reg->base;
|
||||
phys_addr_t top = min(base + reg->size, total_lowmem);
|
||||
for_each_mem_range(i, &base, &end) {
|
||||
phys_addr_t top = min(end, total_lowmem);
|
||||
|
||||
if (base >= top)
|
||||
continue;
|
||||
|
||||
Reference in New Issue
Block a user