From ba090f9cafd53dbabe0f0a8c4ccae44203d3731b Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Tue, 3 Nov 2020 14:49:04 +0100
Subject: [PATCH 1/7] arm64: kprobes: Remove redundant kprobe_step_ctx

The kprobe_step_ctx (kcb->ss_ctx) has ss_pending and match_addr, but
both are redundant: they can be replaced by the KPROBE_HIT_SS kprobe
status and by &cur_kprobe->ainsn.api.insn[1] respectively. To simplify
the code, remove the kprobe_step_ctx.

Signed-off-by: Masami Hiramatsu
Reviewed-by: Jean-Philippe Brucker
Acked-by: Will Deacon
Link: https://lore.kernel.org/r/20201103134900.337243-2-jean-philippe@linaro.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/kprobes.h   |  7 ----
 arch/arm64/kernel/probes/kprobes.c | 53 +++++++-----------------
 2 files changed, 12 insertions(+), 48 deletions(-)

diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 8699ce30f587..5d38ff4a4806 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -28,18 +28,11 @@ struct prev_kprobe {
         unsigned int status;
 };
 
-/* Single step context for kprobe */
-struct kprobe_step_ctx {
-        unsigned long ss_pending;
-        unsigned long match_addr;
-};
-
 /* per-cpu kprobe control block */
 struct kprobe_ctlblk {
         unsigned int kprobe_status;
         unsigned long saved_irqflag;
         struct prev_kprobe prev_kprobe;
-        struct kprobe_step_ctx ss_ctx;
 };
 
 void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index f11a1a1f7026..89c64ada8732 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
 
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
@@ -68,7 +68,7 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
                 p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
 
         /* single step simulated, now go for post processing */
-        post_kprobe_handler(kcb, regs);
+        post_kprobe_handler(p, kcb, regs);
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -177,19 +177,6 @@ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
                 regs->pstate |= kcb->saved_irqflag;
 }
 
-static void __kprobes
-set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
-{
-        kcb->ss_ctx.ss_pending = true;
-        kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
-}
-
-static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
-{
-        kcb->ss_ctx.ss_pending = false;
-        kcb->ss_ctx.match_addr = 0;
-}
-
 static void __kprobes setup_singlestep(struct kprobe *p,
                                        struct pt_regs *regs,
                                        struct kprobe_ctlblk *kcb, int reenter)
@@ -209,7 +196,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
                 /* prepare for single stepping */
                 slot = (unsigned long)p->ainsn.api.insn;
 
-                set_ss_context(kcb, slot);      /* mark pending ss */
                 kprobes_save_local_irqflag(kcb, regs);
                 instruction_pointer_set(regs, slot);
         } else {
@@ -243,13 +229,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
 }
 
 static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
 {
-        struct kprobe *cur = kprobe_running();
-
-        if (!cur)
-                return;
-
         /* return addr restore if non-branching insn */
         if (cur->ainsn.api.restore != 0)
                 instruction_pointer_set(regs, cur->ainsn.api.restore);
@@ -364,33 +345,23 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
          */
 }
 
-static int __kprobes
-kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
-{
-        if ((kcb->ss_ctx.ss_pending)
-            && (kcb->ss_ctx.match_addr == addr)) {
-                clear_ss_context(kcb);  /* clear pending ss */
-                return DBG_HOOK_HANDLED;
-        }
-        /* not ours, kprobes should ignore it */
-        return DBG_HOOK_ERROR;
-}
-
 static int __kprobes
 kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
 {
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-        int retval;
+        unsigned long addr = instruction_pointer(regs);
+        struct kprobe *cur = kprobe_running();
 
-        /* return error if this is not our step */
-        retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
-
-        if (retval == DBG_HOOK_HANDLED) {
+        if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
+            && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
                 kprobes_restore_local_irqflag(kcb, regs);
-                post_kprobe_handler(kcb, regs);
+                post_kprobe_handler(cur, kcb, regs);
+
+                return DBG_HOOK_HANDLED;
         }
 
-        return retval;
+        /* not ours, kprobes should ignore it */
+        return DBG_HOOK_ERROR;
 }
 
 static struct break_hook kprobes_break_ss_hook = {

From 833be850f1cabd0e3b5337c0fcab20a6e936dd48 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 3 Nov 2020 10:22:29 +0000
Subject: [PATCH 2/7] arm64: consistently use reserved_pg_dir

Depending on configuration options and specific code paths, we either
use the empty_zero_page or the configuration-dependent reserved_ttbr0
as a reserved value for TTBR{0,1}_EL1.

To simplify this code, let's always allocate and use the same
reserved_pg_dir, replacing reserved_ttbr0. Note that this is
allocated (and hence pre-zeroed), and is also marked as read-only in
the kernel Image mapping.

Keeping this separate from the empty_zero_page potentially helps with
robustness as the empty_zero_page is used in a number of cases where
a failure to map it read-only could allow it to become corrupted.

The (presently unused) swapper_pg_end symbol is also removed, and
comments are added wherever we rely on the offsets between the
pre-allocated pg_dirs to keep these cases easily identifiable.
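
As a rough illustration of the layout this relies on, consider the
following standalone C sketch (illustrative only, not kernel code: the
wrapper struct merely stands in for the adjacency that vmlinux.lds.S
guarantees, since plain C gives no ordering guarantee between separate
globals):

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define DEMO_PAGE_SIZE 4096

  /* reserved_pg_dir sits exactly one page before swapper_pg_dir. */
  struct pg_dirs {
          uint8_t reserved_pg_dir[DEMO_PAGE_SIZE]; /* all-zero: no valid translations */
          uint8_t swapper_pg_dir[DEMO_PAGE_SIZE];
  };

  static _Alignas(DEMO_PAGE_SIZE) struct pg_dirs dirs;

  int main(void)
  {
          uintptr_t swapper = (uintptr_t)dirs.swapper_pg_dir;

          /* What the entry code relies on: derive one pg_dir from the
           * other with a fixed offset, no extra symbol load needed. */
          uintptr_t reserved = swapper - DEMO_PAGE_SIZE;

          assert(reserved == (uintptr_t)dirs.reserved_pg_dir);
          printf("reserved_pg_dir == swapper_pg_dir - PAGE_SIZE\n");
          return 0;
  }

Keeping the offset a build-time constant is what lets
__uaccess_ttbr0_disable and the entry trampoline switch pg_dirs with
simple add/sub instructions, as the hunks below show.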
Signed-off-by: Mark Rutland
Cc: Will Deacon
Link: https://lore.kernel.org/r/20201103102229.8542-1-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/asm-uaccess.h    | 4 ++--
 arch/arm64/include/asm/kernel-pgtable.h | 6 ------
 arch/arm64/include/asm/mmu_context.h    | 6 +++---
 arch/arm64/include/asm/pgtable.h        | 1 +
 arch/arm64/include/asm/uaccess.h        | 4 ++--
 arch/arm64/kernel/entry.S               | 6 ++++--
 arch/arm64/kernel/setup.c               | 2 +-
 arch/arm64/kernel/vmlinux.lds.S         | 8 +++-----
 arch/arm64/mm/proc.S                    | 2 +-
 9 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index f68a0e64482a..5ef624fef44a 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -15,10 +15,10 @@
         .macro  __uaccess_ttbr0_disable, tmp1
         mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
         bic     \tmp1, \tmp1, #TTBR_ASID_MASK
-        sub     \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir
+        sub     \tmp1, \tmp1, #PAGE_SIZE        // reserved_pg_dir just before swapper_pg_dir
         msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
         isb
-        add     \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
+        add     \tmp1, \tmp1, #PAGE_SIZE
         msr     ttbr1_el1, \tmp1                // set reserved ASID
         isb
         .endm
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 19ca76ea60d9..587c504a4c8b 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -89,12 +89,6 @@
 #define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
 #define IDMAP_DIR_SIZE          (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-#define RESERVED_TTBR0_SIZE     (PAGE_SIZE)
-#else
-#define RESERVED_TTBR0_SIZE     (0)
-#endif
-
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT     SECTION_SHIFT
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0672236e1aea..5c72c20bd300 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -36,11 +36,11 @@ static inline void contextidr_thread_switch(struct task_struct *next)
 }
 
 /*
- * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
+ * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-        unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
+        unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 
         write_sysreg(ttbr, ttbr0_el1);
         isb();
@@ -195,7 +195,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
                 return;
 
         if (mm == &init_mm)
-                ttbr = __pa_symbol(empty_zero_page);
+                ttbr = __pa_symbol(reserved_pg_dir);
         else
                 ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
 
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4ff12a7adcfd..74694e129f9a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -519,6 +519,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_end[];
 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
+extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
 
 extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
 
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 991dd5f031e4..385a189f7d39 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -113,8 +113,8 @@ static inline void __uaccess_ttbr0_disable(void)
         local_irq_save(flags);
         ttbr = read_sysreg(ttbr1_el1);
         ttbr &= ~TTBR_ASID_MASK;
-        /* reserved_ttbr0 placed before swapper_pg_dir */
-        write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
+        /* reserved_pg_dir placed before swapper_pg_dir */
+        write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
         isb();
         /* Set reserved ASID */
         write_sysreg(ttbr, ttbr1_el1);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b295fb912b12..6f31c2c06788 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -807,9 +807,10 @@ SYM_CODE_END(ret_to_user)
  */
         .pushsection ".entry.tramp.text", "ax"
 
+        // Move from tramp_pg_dir to swapper_pg_dir
         .macro tramp_map_kernel, tmp
         mrs     \tmp, ttbr1_el1
-        add     \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+        add     \tmp, \tmp, #(2 * PAGE_SIZE)
         bic     \tmp, \tmp, #USER_ASID_FLAG
         msr     ttbr1_el1, \tmp
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
@@ -826,9 +827,10 @@ alternative_else_nop_endif
 #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
         .endm
 
+        // Move from swapper_pg_dir to tramp_pg_dir
         .macro tramp_unmap_kernel, tmp
         mrs     \tmp, ttbr1_el1
-        sub     \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+        sub     \tmp, \tmp, #(2 * PAGE_SIZE)
         orr     \tmp, \tmp, #USER_ASID_FLAG
         msr     ttbr1_el1, \tmp
         /*
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 133257ffd859..c28a9ec76b11 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -366,7 +366,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
          * faults in case uaccess_enable() is inadvertently called by the init
          * thread.
          */
-        init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
+        init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
 #endif
 
         if (boot_args[1] || boot_args[2] || boot_args[3]) {
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 1bda604f4c70..30c102978942 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -164,13 +164,11 @@ SECTIONS
         . += PAGE_SIZE;
 #endif
 
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-        reserved_ttbr0 = .;
-        . += RESERVED_TTBR0_SIZE;
-#endif
+        reserved_pg_dir = .;
+        . += PAGE_SIZE;
+
         swapper_pg_dir = .;
         . += PAGE_SIZE;
-        swapper_pg_end = .;
 
         . = ALIGN(SEGMENT_ALIGN);
         __init_begin = .;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 23c326a06b2d..0eaf16b0442a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -168,7 +168,7 @@ SYM_FUNC_END(cpu_do_resume)
         .pushsection ".idmap.text", "awx"
 
 .macro  __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
-        adrp    \tmp1, empty_zero_page
+        adrp    \tmp1, reserved_pg_dir
         phys_to_ttbr \tmp2, \tmp1
         offset_ttbr1 \tmp2, \tmp1
         msr     ttbr1_el1, \tmp2

From 0edaee42eb8c0f3b767ec3c51bee4a3855aa2555 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Mon, 9 Nov 2020 17:08:36 +0530
Subject: [PATCH 3/7] arm64/smp: Drop the macro S(x,s)

Mapping between IPI type index and its string is direct without
requiring an additional offset. Hence the existing macro S(x, s) is
now redundant and can just be dropped. This also makes the code
cleaner and simpler.

Signed-off-by: Anshuman Khandual
Acked-by: Mark Rutland
Cc: Will Deacon
Cc: Mark Rutland
Cc: Marc Zyngier
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Link: https://lore.kernel.org/r/1604921916-23368-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/smp.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 09c96f57818c..65d18a618abe 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -786,14 +786,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 }
 
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
-#define S(x,s)  [x] = s
-        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
-        S(IPI_CALL_FUNC, "Function call interrupts"),
-        S(IPI_CPU_STOP, "CPU stop interrupts"),
-        S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
-        S(IPI_TIMER, "Timer broadcast interrupts"),
-        S(IPI_IRQ_WORK, "IRQ work interrupts"),
-        S(IPI_WAKEUP, "CPU wake-up interrupts"),
+        [IPI_RESCHEDULE]        = "Rescheduling interrupts",
+        [IPI_CALL_FUNC]         = "Function call interrupts",
+        [IPI_CPU_STOP]          = "CPU stop interrupts",
+        [IPI_CPU_CRASH_STOP]    = "CPU stop (for crash dump) interrupts",
+        [IPI_TIMER]             = "Timer broadcast interrupts",
+        [IPI_IRQ_WORK]          = "IRQ work interrupts",
+        [IPI_WAKEUP]            = "CPU wake-up interrupts",
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

From 9f84f39f5515fd412398a1019e3f50ac3ab51a80 Mon Sep 17 00:00:00 2001
From: Sudarshan Rajagopalan
Date: Wed, 14 Oct 2020 17:51:23 -0700
Subject: [PATCH 4/7] arm64/mm: add fallback option to allocate virtually
 contiguous memory

When section mappings are enabled, we allocate vmemmap pages from
physically contiguous memory of size PMD_SIZE using
vmemmap_alloc_block_buf(). Section mappings help reduce TLB pressure,
but when the system is highly fragmented and memory blocks are being
hot-added at runtime, it's possible that such physically contiguous
allocations fail. Rather than failing the memory hot-add procedure,
add a fallback option to allocate vmemmap pages from discontiguous
pages using vmemmap_populate_basepages().
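
The shape of that fallback, as a standalone sketch (illustrative only:
alloc_contig() and map_base_pages() are hypothetical stand-ins for
vmemmap_alloc_block_buf() and vmemmap_populate_basepages(), with a
randomized failure to mimic fragmentation; allocations are
deliberately leaked in this demo):

  #include <stdio.h>
  #include <stdlib.h>

  /* Demo-sized "PMD" and page; the real values would be 2MiB and 4KiB. */
  #define DEMO_PAGE_SIZE  4096UL
  #define DEMO_PMD_SIZE   (4 * DEMO_PAGE_SIZE)

  /* Contiguous allocations "fail" at random to mimic fragmentation. */
  static void *alloc_contig(size_t size)
  {
          return (rand() % 2) ? malloc(size) : NULL;
  }

  /* Cover the same range with individual base pages instead. */
  static int map_base_pages(unsigned long addr, unsigned long next)
  {
          for (; addr < next; addr += DEMO_PAGE_SIZE)
                  if (!malloc(DEMO_PAGE_SIZE))
                          return -1;      /* genuinely out of memory */
          printf("  fallback: base pages\n");
          return 0;
  }

  /* Populate [start, end) one PMD at a time: prefer a section mapping,
   * fall back to base pages rather than failing the whole hot-add. */
  static int populate_range(unsigned long start, unsigned long end)
  {
          unsigned long addr, next;

          for (addr = start; addr < end; addr = next) {
                  next = addr + DEMO_PMD_SIZE;
                  void *p = alloc_contig(DEMO_PMD_SIZE);
                  if (!p) {
                          if (map_base_pages(addr, next))
                                  return -1;
                          continue;
                  }
                  printf("section mapping at %#lx\n", addr);
          }
          return 0;
  }

  int main(void)
  {
          return populate_range(0, 4 * DEMO_PMD_SIZE);
  }

The key design point, mirrored in the mmu.c hunk below, is the
"continue": a failed PMD-sized allocation degrades only that range to
base pages instead of aborting with -ENOMEM.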
Signed-off-by: Sudarshan Rajagopalan
Reviewed-by: Gavin Shan
Reviewed-by: Anshuman Khandual
Acked-by: Will Deacon
Cc: Will Deacon
Cc: Anshuman Khandual
Cc: Mark Rutland
Cc: Logan Gunthorpe
Cc: David Hildenbrand
Cc: Andrew Morton
Cc: Steven Price
Link: https://lore.kernel.org/r/d6c06f2ef39bbe6c715b2f6db76eb16155fdcee6.1602722808.git.sudaraja@codeaurora.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/mmu.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 1c0f3e02f731..a773d2a1a2bf 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1132,8 +1132,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                         void *p = NULL;
 
                         p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
-                        if (!p)
-                                return -ENOMEM;
+                        if (!p) {
+                                if (vmemmap_populate_basepages(addr, next, node, altmap))
+                                        return -ENOMEM;
+                                continue;
+                        }
 
                         pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
                 } else

From 739003c6428387432d42b9b80be185cde93978aa Mon Sep 17 00:00:00 2001
From: Peter Collingbourne
Date: Tue, 17 Nov 2020 19:20:51 -0800
Subject: [PATCH 5/7] arm64: mte: optimize asynchronous tag check fault flag
 check

We don't need to check for MTE support before checking the flag
because it can only be set if the hardware supports MTE. As a result
we can check the flag bit unconditionally; since the flags word is
expected to be in a register, the check can then be done in a single
instruction instead of first needing to load the hwcaps.

On a DragonBoard 845c with a kernel built with CONFIG_ARM64_MTE=y with
the powersave governor this reduces the cost of a kernel entry/exit
(invalid syscall) from 465.1ns to 463.8ns.

Signed-off-by: Peter Collingbourne
Link: https://linux-review.googlesource.com/id/If4dc3501fd4e4f287322f17805509613cfe47d24
Link: https://lore.kernel.org/r/20201118032051.1405907-1-pcc@google.com
[catalin.marinas@arm.com: remove IS_ENABLED(CONFIG_ARM64_MTE)]
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/syscall.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index e4c0dadf0d92..fe3fa8fd13c8 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -123,7 +123,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
         local_daif_restore(DAIF_PROCCTX);
         user_exit();
 
-        if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
+        if (flags & _TIF_MTE_ASYNC_FAULT) {
                 /*
                  * Process the asynchronous tag check fault before the actual
                  * syscall. do_notify_resume() will send a signal to userspace

From 49b3cf035edc5d7deb3ad1bf6805ce456ababc5b Mon Sep 17 00:00:00 2001
From: Peter Collingbourne
Date: Sat, 21 Nov 2020 01:59:02 -0800
Subject: [PATCH 6/7] kasan: arm64: set TCR_EL1.TBID1 when enabled

On hardware supporting pointer authentication, we previously ended up
enabling TBI on instruction accesses when tag-based ASAN was enabled,
but this was costing us 8 bits of PAC entropy, which was unnecessary
since tag-based ASAN does not require TBI on instruction accesses.
Get those bits back by setting TCR_EL1.TBID1.
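
The entropy arithmetic can be sanity-checked with a small standalone
program (illustrative only; pac_bits() is a hypothetical helper
encoding the ARMv8.3 rule that the PAC occupies the upper non-address
bits other than bit 55, minus the top byte whenever TBI applies to the
access):

  #include <stdio.h>

  /* PAC field width for a TTBR1 code pointer at a given VA size. */
  static int pac_bits(int va_bits, int tbi_applies)
  {
          int bits = 64 - va_bits - 1;    /* all upper bits minus bit 55 */

          if (tbi_applies)
                  bits -= 8;              /* top byte unusable for the PAC */
          return bits;
  }

  int main(void)
  {
          /* 48-bit VA: leaving TBI active on instruction fetch costs 8
           * of the 15 possible PAC bits; TBID1 gets them back. */
          printf("PAC bits with TBI on insn fetch: %d\n", pac_bits(48, 1)); /* 7 */
          printf("PAC bits with TCR_EL1.TBID1 set: %d\n", pac_bits(48, 0)); /* 15 */
          return 0;
  }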
Signed-off-by: Peter Collingbourne
Reviewed-by: Andrey Konovalov
Link: https://linux-review.googlesource.com/id/I3dded7824be2e70ea64df0aabab9598d5aebfcc4
Link: https://lore.kernel.org/r/20f64e26fc8a1309caa446fffcb1b4e2fe9e229f.1605952129.git.pcc@google.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/pgtable-hwdef.h | 1 +
 arch/arm64/mm/proc.S                   | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 01a96d07ae74..42442a0ae2ab 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -260,6 +260,7 @@
 #define TCR_TBI1                (UL(1) << 38)
 #define TCR_HA                  (UL(1) << 39)
 #define TCR_HD                  (UL(1) << 40)
+#define TCR_TBID1               (UL(1) << 52)
 #define TCR_NFD0                (UL(1) << 53)
 #define TCR_NFD1                (UL(1) << 54)
 #define TCR_E0PD0               (UL(1) << 55)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 0eaf16b0442a..04945f72c64d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -40,7 +40,7 @@
 #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
 
 #ifdef CONFIG_KASAN_SW_TAGS
-#define TCR_KASAN_FLAGS TCR_TBI1
+#define TCR_KASAN_FLAGS TCR_TBI1 | TCR_TBID1
 #else
 #define TCR_KASAN_FLAGS 0
 #endif

From 344f2db2a18af45faafce13133c84c4f076876a6 Mon Sep 17 00:00:00 2001
From: Youling Tang
Date: Thu, 19 Nov 2020 09:45:40 +0800
Subject: [PATCH 7/7] arm64: vmlinux.lds.S: Drop redundant *.init.rodata.*

We currently try to emit *.init.rodata.* twice, once in INIT_DATA, and
once in the line immediately following it. As the two section
definitions are identical, the latter is redundant and can be dropped.

This patch drops the redundant *.init.rodata.* section definition.

Signed-off-by: Youling Tang
Acked-by: Will Deacon
Link: https://lore.kernel.org/r/1605750340-910-1-git-send-email-tangyouling@loongson.cn
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/vmlinux.lds.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 30c102978942..746afbc68df9 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -199,7 +199,7 @@ SECTIONS
                 INIT_CALLS
                 CON_INITCALL
                 INIT_RAM_FS
-                *(.init.rodata.* .init.bss)     /* from the EFI stub */
+                *(.init.bss)                    /* from the EFI stub */
         }
         .exit.data : {
                 EXIT_DATA