Mirror of https://github.com/torvalds/linux.git, synced 2024-11-30 16:11:38 +00:00
a9ff696160
Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit, and exposes any misuse of the macro virt_to_pfn() acting polymorphic and accepting many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warnings.

Doing this is a bit intrusive: virt_to_pfn() requires PHYS_PFN_OFFSET and PAGE_SHIFT to be defined, and these are defined in <asm/page.h>, so that header must be included *before* <asm/memory.h>. The use of macros was obscuring the unclear inclusion order here, as the macros would eventually be resolved, but a static inline like this cannot be compiled with unresolved macros.

The naive solution of including <asm/page.h> at the top of <asm/memory.h> does not work, because <asm/memory.h> sometimes includes <asm/page.h> at the end of itself, which would create a confusing inclusion loop. So instead, take the approach of always unconditionally including <asm/page.h> at the end of <asm/memory.h>.

arch/arm uses <asm/memory.h> explicitly in a lot of places; however, it turns out that if we just unconditionally include <asm/memory.h> into <asm/page.h> and switch all inclusions of <asm/memory.h> to <asm/page.h> instead, we enforce the right order and <asm/memory.h> will always have access to the definitions. Put an inclusion guard in place making it impossible to include <asm/memory.h> explicitly.

Link: https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
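For illustration, here is a minimal before/after sketch of the virt_to_pfn() change described above, assuming the PAGE_OFFSET, PAGE_SHIFT and PHYS_PFN_OFFSET definitions that arch/arm provides; the actual in-tree code in <asm/memory.h> may differ in detail.

/* Before: a polymorphic macro that silently accepts (void *),
 * (uintptr_t), (unsigned long), ... as its argument.
 */
#define virt_to_pfn(kaddr) \
        ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
         PHYS_PFN_OFFSET)

/* After: a static inline with an explicit (const void *) contract,
 * so passing anything other than a pointer now produces a warning.
 */
static inline unsigned long virt_to_pfn(const void *p)
{
        unsigned long kaddr = (unsigned long)p;

        return ((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET;
}

Because the inline form needs PAGE_SHIFT and PHYS_PFN_OFFSET to be resolvable where it is parsed, rather than where the old macro happened to be expanded, <asm/page.h> must already be visible, which is what the inclusion-order rework described above arranges.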
86 lines
1.8 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Russell King
 *
 * This assembly is required to safely remap the physical address space
 * for Keystone 2
 */
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/cp15.h>
#include <asm/page.h>

        .section ".idmap.text", "ax"

#define L1_ORDER 3
#define L2_ORDER 3

ENTRY(lpae_pgtables_remap_asm)
        stmfd   sp!, {r4-r8, lr}

        mrc     p15, 0, r8, c1, c0, 0           @ read control reg
        bic     ip, r8, #CR_M                   @ disable caches and MMU
        mcr     p15, 0, ip, c1, c0, 0
        dsb
        isb

        /* Update level 2 entries covering the kernel */
        ldr     r6, =(_end - 1)
        add     r7, r2, #0x1000
        add     r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
        add     r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
1:      ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
        strd    r4, r5, [r7], #1 << L2_ORDER
        cmp     r7, r6
        bls     1b

        /* Update level 2 entries for the boot data */
        add     r7, r2, #0x1000
        movw    r3, #FDT_FIXED_BASE >> (SECTION_SHIFT - L2_ORDER)
        add     r7, r7, r3
        ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
        strd    r4, r5, [r7], #1 << L2_ORDER
        ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
        strd    r4, r5, [r7]

        /* Update level 1 entries */
        mov     r6, #4
        mov     r7, r2
2:      ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
        strd    r4, r5, [r7], #1 << L1_ORDER
        subs    r6, r6, #1
        bne     2b

        mrrc    p15, 0, r4, r5, c2              @ read TTBR0
        adds    r4, r4, r0                      @ update physical address
        adc     r5, r5, r1
        mcrr    p15, 0, r4, r5, c2              @ write back TTBR0
        mrrc    p15, 1, r4, r5, c2              @ read TTBR1
        adds    r4, r4, r0                      @ update physical address
        adc     r5, r5, r1
        mcrr    p15, 1, r4, r5, c2              @ write back TTBR1

        dsb

        mov     ip, #0
        mcr     p15, 0, ip, c7, c5, 0           @ I+BTB cache invalidate
        mcr     p15, 0, ip, c8, c7, 0           @ local_flush_tlb_all()
        dsb
        isb

        mcr     p15, 0, r8, c1, c0, 0           @ re-enable MMU
        dsb
        isb

        ldmfd   sp!, {r4-r8, pc}
ENDPROC(lpae_pgtables_remap_asm)