arch, drivers: replace for_each_memblock() with for_each_mem_range()
There are several occurrences of the following pattern:
for_each_memblock(memory, reg) {
start = __pfn_to_phys(memblock_region_memory_base_pfn(reg));
end = __pfn_to_phys(memblock_region_memory_end_pfn(reg));
/* do something with start and end */
}
Using for_each_mem_range() iterator is more appropriate in such cases and
allows simpler and cleaner code.
[akpm@linux-foundation.org: fix arch/arm/mm/pmsa-v7.c build]
[rppt@linux.ibm.com: mips: fix cavium-octeon build caused by memblock refactoring]
Link: http://lkml.kernel.org/r/20200827124549.GD167163@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Emil Renner Berthing <kernel@esmil.dk>
Cc: Hari Bathini <hbathini@linux.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: https://lkml.kernel.org/r/20200818151634.14343-13-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
c9118e6c37
commit
b10d6bca87
@@ -843,19 +843,25 @@ early_param("mem", early_mem);
|
||||
|
||||
static void __init request_standard_resources(const struct machine_desc *mdesc)
|
||||
{
|
||||
struct memblock_region *region;
|
||||
phys_addr_t start, end, res_end;
|
||||
struct resource *res;
|
||||
u64 i;
|
||||
|
||||
kernel_code.start = virt_to_phys(_text);
|
||||
kernel_code.end = virt_to_phys(__init_begin - 1);
|
||||
kernel_data.start = virt_to_phys(_sdata);
|
||||
kernel_data.end = virt_to_phys(_end - 1);
|
||||
|
||||
for_each_memblock(memory, region) {
|
||||
phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
|
||||
phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
|
||||
for_each_mem_range(i, &start, &end) {
|
||||
unsigned long boot_alias_start;
|
||||
|
||||
/*
|
||||
* In memblock, end points to the first byte after the
|
||||
* range while in resourses, end points to the last byte in
|
||||
* the range.
|
||||
*/
|
||||
res_end = end - 1;
|
||||
|
||||
/*
|
||||
* Some systems have a special memory alias which is only
|
||||
* used for booting. We need to advertise this region to
|
||||
@@ -869,7 +875,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
|
||||
__func__, sizeof(*res));
|
||||
res->name = "System RAM (boot alias)";
|
||||
res->start = boot_alias_start;
|
||||
res->end = phys_to_idmap(end);
|
||||
res->end = phys_to_idmap(res_end);
|
||||
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
|
||||
request_resource(&iomem_resource, res);
|
||||
}
|
||||
@@ -880,7 +886,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
|
||||
sizeof(*res));
|
||||
res->name = "System RAM";
|
||||
res->start = start;
|
||||
res->end = end;
|
||||
res->end = res_end;
|
||||
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
|
||||
|
||||
request_resource(&iomem_resource, res);
|
||||
|
||||
@@ -1154,9 +1154,8 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
|
||||
|
||||
void __init adjust_lowmem_bounds(void)
|
||||
{
|
||||
phys_addr_t memblock_limit = 0;
|
||||
u64 vmalloc_limit;
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t block_start, block_end, memblock_limit = 0;
|
||||
u64 vmalloc_limit, i;
|
||||
phys_addr_t lowmem_limit = 0;
|
||||
|
||||
/*
|
||||
@@ -1172,26 +1171,18 @@ void __init adjust_lowmem_bounds(void)
|
||||
* The first usable region must be PMD aligned. Mark its start
|
||||
* as MEMBLOCK_NOMAP if it isn't
|
||||
*/
|
||||
for_each_memblock(memory, reg) {
|
||||
if (!memblock_is_nomap(reg)) {
|
||||
if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
|
||||
phys_addr_t len;
|
||||
for_each_mem_range(i, &block_start, &block_end) {
|
||||
if (!IS_ALIGNED(block_start, PMD_SIZE)) {
|
||||
phys_addr_t len;
|
||||
|
||||
len = round_up(reg->base, PMD_SIZE) - reg->base;
|
||||
memblock_mark_nomap(reg->base, len);
|
||||
}
|
||||
break;
|
||||
len = round_up(block_start, PMD_SIZE) - block_start;
|
||||
memblock_mark_nomap(block_start, len);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
phys_addr_t block_start = reg->base;
|
||||
phys_addr_t block_end = reg->base + reg->size;
|
||||
|
||||
if (memblock_is_nomap(reg))
|
||||
continue;
|
||||
|
||||
if (reg->base < vmalloc_limit) {
|
||||
for_each_mem_range(i, &block_start, &block_end) {
|
||||
if (block_start < vmalloc_limit) {
|
||||
if (block_end > lowmem_limit)
|
||||
/*
|
||||
* Compare as u64 to ensure vmalloc_limit does
|
||||
@@ -1440,19 +1431,15 @@ static void __init kmap_init(void)
|
||||
|
||||
static void __init map_lowmem(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
|
||||
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
|
||||
phys_addr_t start, end;
|
||||
u64 i;
|
||||
|
||||
/* Map all the lowmem memory banks. */
|
||||
for_each_memblock(memory, reg) {
|
||||
phys_addr_t start = reg->base;
|
||||
phys_addr_t end = start + reg->size;
|
||||
for_each_mem_range(i, &start, &end) {
|
||||
struct map_desc map;
|
||||
|
||||
if (memblock_is_nomap(reg))
|
||||
continue;
|
||||
|
||||
if (end > arm_lowmem_limit)
|
||||
end = arm_lowmem_limit;
|
||||
if (start >= end)
|
||||
|
||||
@@ -231,12 +231,12 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
|
||||
void __init pmsav7_adjust_lowmem_bounds(void)
|
||||
{
|
||||
phys_addr_t specified_mem_size = 0, total_mem_size = 0;
|
||||
struct memblock_region *reg;
|
||||
bool first = true;
|
||||
phys_addr_t mem_start;
|
||||
phys_addr_t mem_end;
|
||||
phys_addr_t reg_start, reg_end;
|
||||
unsigned int mem_max_regions;
|
||||
int num, i;
|
||||
int num;
|
||||
u64 i;
|
||||
|
||||
/* Free-up PMSAv7_PROBE_REGION */
|
||||
mpu_min_region_order = __mpu_min_region_order();
|
||||
@@ -262,20 +262,19 @@ void __init pmsav7_adjust_lowmem_bounds(void)
|
||||
mem_max_regions -= num;
|
||||
#endif
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
if (first) {
|
||||
for_each_mem_range(i, &reg_start, &reg_end) {
|
||||
if (i == 0) {
|
||||
phys_addr_t phys_offset = PHYS_OFFSET;
|
||||
|
||||
/*
|
||||
* Initially only use memory continuous from
|
||||
* PHYS_OFFSET */
|
||||
if (reg->base != phys_offset)
|
||||
if (reg_start != phys_offset)
|
||||
panic("First memory bank must be contiguous from PHYS_OFFSET");
|
||||
|
||||
mem_start = reg->base;
|
||||
mem_end = reg->base + reg->size;
|
||||
specified_mem_size = reg->size;
|
||||
first = false;
|
||||
mem_start = reg_start;
|
||||
mem_end = reg_end;
|
||||
specified_mem_size = mem_end - mem_start;
|
||||
} else {
|
||||
/*
|
||||
* memblock auto merges contiguous blocks, remove
|
||||
@@ -283,8 +282,8 @@ void __init pmsav7_adjust_lowmem_bounds(void)
|
||||
* blocks separately while iterating)
|
||||
*/
|
||||
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
|
||||
&mem_end, &reg->base);
|
||||
memblock_remove(reg->base, 0 - reg->base);
|
||||
&mem_end, &reg_start);
|
||||
memblock_remove(reg_start, 0 - reg_start);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -94,20 +94,19 @@ static __init bool is_region_fixed(int number)
|
||||
void __init pmsav8_adjust_lowmem_bounds(void)
|
||||
{
|
||||
phys_addr_t mem_end;
|
||||
struct memblock_region *reg;
|
||||
bool first = true;
|
||||
phys_addr_t reg_start, reg_end;
|
||||
u64 i;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
if (first) {
|
||||
for_each_mem_range(i, &reg_start, &reg_end) {
|
||||
if (i == 0) {
|
||||
phys_addr_t phys_offset = PHYS_OFFSET;
|
||||
|
||||
/*
|
||||
* Initially only use memory continuous from
|
||||
* PHYS_OFFSET */
|
||||
if (reg->base != phys_offset)
|
||||
if (reg_start != phys_offset)
|
||||
panic("First memory bank must be contiguous from PHYS_OFFSET");
|
||||
mem_end = reg->base + reg->size;
|
||||
first = false;
|
||||
mem_end = reg_end;
|
||||
} else {
|
||||
/*
|
||||
* memblock auto merges contiguous blocks, remove
|
||||
@@ -115,8 +114,8 @@ void __init pmsav8_adjust_lowmem_bounds(void)
|
||||
* blocks separately while iterating)
|
||||
*/
|
||||
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
|
||||
&mem_end, &reg->base);
|
||||
memblock_remove(reg->base, 0 - reg->base);
|
||||
&mem_end, &reg_start);
|
||||
memblock_remove(reg_start, 0 - reg_start);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,11 +25,12 @@
|
||||
|
||||
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t base;
|
||||
gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
|
||||
u64 i;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
if (reg->base < (phys_addr_t)0xffffffff) {
|
||||
for_each_mem_range(i, &base, NULL) {
|
||||
if (base < (phys_addr_t)0xffffffff) {
|
||||
if (IS_ENABLED(CONFIG_ZONE_DMA32))
|
||||
flags |= __GFP_DMA32;
|
||||
else
|
||||
|
||||
Reference in New Issue
Block a user