a9ff696160
Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit, and exposes any misuse of the macro virt_to_pfn() acting polymorphic and accepting many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warnings.

Doing this is a bit intrusive: virt_to_pfn() requires PHYS_PFN_OFFSET and PAGE_SHIFT to be defined, and these are defined in <asm/page.h>, so that header must be included *before* <asm/memory.h>. The use of macros was obscuring this unclear inclusion order, as the macros would eventually be resolved, but a static inline like this cannot be compiled with unresolved macros.

The naive solution of including <asm/page.h> at the top of <asm/memory.h> does not work, because <asm/memory.h> sometimes includes <asm/page.h> at the end of itself, which would create a confusing inclusion loop. So instead, take the approach of always unconditionally including <asm/page.h> at the end of <asm/memory.h>.

arch/arm uses <asm/memory.h> explicitly in a lot of places; however, it turns out that if we just unconditionally include <asm/memory.h> into <asm/page.h> and switch all inclusions of <asm/memory.h> to <asm/page.h> instead, we enforce the right order and <asm/memory.h> will always have access to the definitions. Put an inclusion guard in place making it impossible to include <asm/memory.h> explicitly.

Link: https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
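To illustrate the shape of the change, here is a minimal sketch (not the actual kernel diff; only PAGE_OFFSET, PAGE_SHIFT and PHYS_PFN_OFFSET are the identifiers the message refers to, and the two definitions below are alternatives, not meant to be compiled together):

	/* Before: a macro, polymorphic by accident -- it silently accepts
	 * (void *), (uintptr_t), (unsigned long), ... without any warning.
	 */
	#define virt_to_pfn(kaddr) \
		((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
		 PHYS_PFN_OFFSET)

	/* After: a static inline with a strongly typed (const void *) argument.
	 * PAGE_OFFSET, PAGE_SHIFT and PHYS_PFN_OFFSET must already be defined
	 * when this is compiled, i.e. <asm/page.h> must have been included
	 * first, which is why the inclusion order has to be sorted out.
	 */
	static inline unsigned long virt_to_pfn(const void *kaddr)
	{
		return (((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT) +
			PHYS_PFN_OFFSET;
	}

The macro version compiles as long as the macros are eventually resolved somewhere before use; the static inline forces the definitions to exist at the point of definition, which is what makes the header ordering visible.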
34 lines
800 B
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2003 ARM Limited
 * Copyright (c) u-boot contributors
 * Copyright (c) 2012 Pavel Machek <pavel@denx.de>
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/assembler.h>

	.arch	armv7-a
	.arm

ENTRY(secondary_trampoline)
	/* CPU1 will always fetch from 0x0 when it is brought out of reset.
	 * Thus, we can just subtract the PAGE_OFFSET to get the physical
	 * address of &cpu1start_addr. This would not work for platforms
	 * where the physical memory does not start at 0x0.
	 */
ARM_BE8(setend	be)
	adr	r0, 1f
	ldmia	r0, {r1, r2}
	sub	r2, r2, #PAGE_OFFSET
	ldr	r3, [r2]
	ldr	r4, [r3]
ARM_BE8(rev	r4, r4)
	bx	r4

	.align
1:	.long	.
	.long	socfpga_cpu1start_addr
ENTRY(secondary_trampoline_end)
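For readers following the comment in the trampoline from the C side, the "sub r2, r2, #PAGE_OFFSET" step is the trivial virtual-to-physical translation that only holds when physical RAM starts at 0x0. A hedged sketch (the helper name is made up for illustration and does not exist in the kernel):

	/* Illustration only: what "sub r2, r2, #PAGE_OFFSET" computes.
	 * Valid only when physical memory starts at 0x0, so that
	 * phys == virt - PAGE_OFFSET for addresses in the kernel's linear map.
	 */
	static inline unsigned long demo_virt_to_phys(unsigned long vaddr)
	{
		return vaddr - PAGE_OFFSET;
	}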