arm64 fixes for -rc1
- Fix lockdep false alarm on resume-from-cpuidle path
- Fix memory leak in kexec_file
- Fix module linker script to work with GDB
- Fix error code when trying to use uprobes with AArch32 instructions
- Fix late VHE enabling with 64k pages
- Add missing ISBs after TLB invalidation
- Fix seccomp when tracing syscall -1
- Fix stacktrace return code at end of stack
- Fix inconsistent whitespace for pointer return values
- Fix compiler warnings when building with W=1
-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmA40kUQHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNLMUB/93o3Ucd3SeLLmOziyZMWjxCNcuzXAXDhFH
z0q0Zq8U5+xHaCH+jPASNwS7gT6dMX8E60SlXcvVaHuBaH5zsrZnOtpJ5mZQAQ7E
nR1M5ANfusMJ8uRpDHhy5ymJ4IcE/yn74rapBIeGs1e4vWF60Lb6nSVrEJMNRada
zbRr2z9bMecQPGX+KSWpgYg4dLRpyTo8oSYJiYmyoSczGvXhrFHlnIJeaKrJuvGt
IIhil8l9uZd5j0ucVWGiYgAcAuqzgkH2yEiNbkGRwn0nMK+4HGbXpEuzUm/90p3y
lRLQSvx/hKwerIlodUYbFDx4FMXoFfMRQm/8/6tCBrUn/4exDslZ
=wuLk
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "The big one is a fix for the VHE enabling path during early boot,
  where the code enabling the MMU wasn't necessarily in the identity
  map of the new page-tables, resulting in a consistent crash with
  64k pages. In fixing that, we noticed some missing barriers too,
  so we added those for the sake of architectural compliance.

  Other than that, just the usual merge window trickle. There'll be
  more to come, too.

  Summary:

   - Fix lockdep false alarm on resume-from-cpuidle path
   - Fix memory leak in kexec_file
   - Fix module linker script to work with GDB
   - Fix error code when trying to use uprobes with AArch32 instructions
   - Fix late VHE enabling with 64k pages
   - Add missing ISBs after TLB invalidation
   - Fix seccomp when tracing syscall -1
   - Fix stacktrace return code at end of stack
   - Fix inconsistent whitespace for pointer return values
   - Fix compiler warnings when building with W=1"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: stacktrace: Report when we reach the end of the stack
  arm64: ptrace: Fix seccomp of traced syscall -1 (NO_SYSCALL)
  arm64: Add missing ISB after invalidating TLB in enter_vhe
  arm64: Add missing ISB after invalidating TLB in __primary_switch
  arm64: VHE: Enable EL2 MMU from the idmap
  KVM: arm64: make the hyp vector table entries local
  arm64/mm: Fixed some coding style issues
  arm64: uprobe: Return EOPNOTSUPP for AARCH32 instruction probing
  kexec: move machine_kexec_post_load() to public interface
  arm64 module: set plt* section addresses to 0x0
  arm64: kexec_file: fix memory leakage in create_dtb() when fdt_open_into() fails
  arm64: spectre: Prevent lockdep splat on v4 mitigation enable path
commit 8f47d753d4
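Context for the VHE fix in the diffs below: enabling the MMU switches instruction fetches from physical to virtual addressing mid-stream, so the enabling code must sit in the identity map (VA == PA) of the incoming page tables, or the very next fetch lands in unmapped memory. A toy C model of that constraint, not kernel code; idmap_base, idmap_size and translate() are made-up illustrations of a one-region page table:

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model: once the MMU is on, the program counter is treated as a
 * virtual address. Only addresses inside the identity-mapped window
 * translate to themselves; anything else would fault on the very
 * next instruction fetch.
 */
static const uint64_t idmap_base = 0x40000000;	/* hypothetical VA == PA window */
static const uint64_t idmap_size = 0x10000;

static int translate(uint64_t va, uint64_t *pa)
{
	if (va >= idmap_base && va < idmap_base + idmap_size) {
		*pa = va;		/* identity mapped: VA == PA */
		return 0;
	}
	return -1;			/* unmapped: the fetch faults */
}

int main(void)
{
	uint64_t pa;
	uint64_t pc_in = 0x40000080;			/* enabling code inside the idmap */
	uint64_t pc_out = 0xffff000010000000ULL;	/* enabling code outside it */

	printf("fetch at %#llx: %s\n", (unsigned long long)pc_in,
	       translate(pc_in, &pa) ? "faults" : "survives MMU enable");
	printf("fetch at %#llx: %s\n", (unsigned long long)pc_out,
	       translate(pc_out, &pa) ? "faults" : "survives MMU enable");
	return 0;
}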
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -1,7 +1,7 @@
 #ifdef CONFIG_ARM64_MODULE_PLTS
 SECTIONS {
-	.plt (NOLOAD) : { BYTE(0) }
-	.init.plt (NOLOAD) : { BYTE(0) }
-	.text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
+	.plt 0 (NOLOAD) : { BYTE(0) }
+	.init.plt 0 (NOLOAD) : { BYTE(0) }
+	.text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
 }
 #endif
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -837,6 +837,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
 
 	tlbi	vmalle1				// Remove any stale TLB entries
 	dsb	nsh
+	isb
 
 	set_sctlr_el1	x19			// re-enable the MMU
 
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -75,9 +75,6 @@ SYM_CODE_END(el1_sync)
 
 // nVHE? No way! Give me the real thing!
 SYM_CODE_START_LOCAL(mutate_to_vhe)
-	// Be prepared to fail
-	mov_q	x0, HVC_STUB_ERR
-
 	// Sanity check: MMU *must* be off
 	mrs	x1, sctlr_el2
 	tbnz	x1, #0, 1f
@@ -96,8 +93,11 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
 	cmp	x1, xzr
 	and	x2, x2, x1
 	csinv	x2, x2, xzr, ne
-	cbz	x2, 1f
+	cbnz	x2, 2f
 
+1:	mov_q	x0, HVC_STUB_ERR
+	eret
+2:
 	// Engage the VHE magic!
 	mov_q	x0, HCR_HOST_VHE_FLAGS
 	msr	hcr_el2, x0
@@ -131,9 +131,28 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
 	msr	mair_el1, x0
 	isb
 
+	// Hack the exception return to stay at EL2
+	mrs	x0, spsr_el1
+	and	x0, x0, #~PSR_MODE_MASK
+	mov	x1, #PSR_MODE_EL2h
+	orr	x0, x0, x1
+	msr	spsr_el1, x0
+
+	b	enter_vhe
+SYM_CODE_END(mutate_to_vhe)
+
+	// At the point where we reach enter_vhe(), we run with
+	// the MMU off (which is enforced by mutate_to_vhe()).
+	// We thus need to be in the idmap, or everything will
+	// explode when enabling the MMU.
+
+	.pushsection	.idmap.text, "ax"
+
+SYM_CODE_START_LOCAL(enter_vhe)
 	// Invalidate TLBs before enabling the MMU
 	tlbi	vmalle1
 	dsb	nsh
+	isb
 
 	// Enable the EL2 S1 MMU, as set up from EL1
 	mrs_s	x0, SYS_SCTLR_EL12
@@ -143,17 +162,12 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
 	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
 	msr_s	SYS_SCTLR_EL12, x0
 
-	// Hack the exception return to stay at EL2
-	mrs	x0, spsr_el1
-	and	x0, x0, #~PSR_MODE_MASK
-	mov	x1, #PSR_MODE_EL2h
-	orr	x0, x0, x1
-	msr	spsr_el1, x0
-
 	mov	x0, xzr
 
-1:	eret
-SYM_CODE_END(mutate_to_vhe)
+	eret
+SYM_CODE_END(enter_vhe)
+
+	.popsection
 
 .macro invalid_vector	label
 SYM_CODE_START_LOCAL(\label)
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -182,8 +182,10 @@ static int create_dtb(struct kimage *image,
 
 		/* duplicate a device tree blob */
 		ret = fdt_open_into(initial_boot_params, buf, buf_size);
-		if (ret)
+		if (ret) {
+			vfree(buf);
 			return -EINVAL;
+		}
 
 		ret = setup_dtb(image, initrd_load_addr, initrd_len,
 				cmdline, buf);
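The fix above is the standard kernel error-path rule: a buffer allocated before a fallible call must be released when that call fails, or every failed attempt leaks it. A minimal userspace sketch of the same pattern; duplicate_blob() and expand_into() are hypothetical stand-ins for the vmalloc()/fdt_open_into() pair, not kernel APIs:

#include <stdlib.h>
#include <string.h>

/* Stand-in for fdt_open_into(): fails when the target is too small. */
static int expand_into(const char *src, char *dst, size_t dst_size)
{
	size_t need = strlen(src) + 1;

	if (need > dst_size)
		return -1;
	memcpy(dst, src, need);
	return 0;
}

static char *duplicate_blob(const char *src, size_t dst_size)
{
	char *buf = malloc(dst_size);

	if (!buf)
		return NULL;

	if (expand_into(src, buf, dst_size)) {
		free(buf);	/* the fix: release buf on the failure path */
		return NULL;
	}
	return buf;
}

int main(void)
{
	char *blob = duplicate_blob("device tree blob", 4);	/* too small: fails */

	free(blob);	/* free(NULL) is a no-op */
	return 0;
}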
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
 
 	/* TODO: Currently we do not support AARCH32 instruction probing */
 	if (mm->context.flags & MMCF_AARCH32)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
 		return -EINVAL;
 
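Why the errno swap matters: ENOTSUPP (524) is a kernel-internal value with no userspace definition, so when it escapes through a syscall, tools print an unknown-error message, while EOPNOTSUPP is an errno userspace actually recognizes. A small demonstration, assuming a typical Linux libc:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* The userspace-visible errno has a proper description... */
	printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
	/* ...the kernel-internal one does not. */
	printf("524 (kernel ENOTSUPP): %s\n", strerror(524));
	return 0;
}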
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1797,7 +1797,7 @@ int syscall_trace_enter(struct pt_regs *regs)
 
 	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
 		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
-		if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
+		if (flags & _TIF_SYSCALL_EMU)
 			return NO_SYSCALL;
 	}
 
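The bug being fixed: when a tracer rewrites the syscall number to -1 (NO_SYSCALL) to skip a syscall, in_syscall() becomes false, so the old condition returned early and the seccomp check later in syscall_trace_enter() never ran. A simplified model of that control flow; trace_enter() and secure_computing() here are stand-ins for illustration, not the kernel's functions:

#include <stdio.h>

#define NO_SYSCALL	(-1)

static int seccomp_ran;

static int secure_computing(void)	/* stand-in for the seccomp hook */
{
	seccomp_ran = 1;
	return 0;
}

static int trace_enter(int syscallno, int emu)
{
	if (emu)
		return NO_SYSCALL;	/* emulation: skip by design */
	/*
	 * The old condition also returned here when syscallno was
	 * NO_SYSCALL, so secure_computing() below was bypassed for
	 * tracer-skipped syscalls.
	 */
	if (secure_computing())
		return NO_SYSCALL;
	return syscallno;
}

int main(void)
{
	trace_enter(NO_SYSCALL, 0);	/* traced syscall rewritten to -1 */
	printf("seccomp ran: %s\n", seccomp_ran ? "yes" : "no");
	return 0;
}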
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -46,7 +46,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
 	/* Terminal record; nothing to unwind */
 	if (!fp)
-		return -EINVAL;
+		return -ENOENT;
 
 	if (fp & 0xf)
 		return -EINVAL;
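With -ENOENT reserved for the terminal record, callers can tell a complete, healthy unwind apart from one that stopped on a corrupt frame (-EINVAL). A sketch of how a caller distinguishes the two; unwind_next() is a hypothetical stand-in for unwind_frame(), with the real frame-record walk elided:

#include <errno.h>
#include <stdio.h>

struct frame { unsigned long fp; unsigned long pc; };

static int unwind_next(struct frame *f)
{
	if (!f->fp)
		return -ENOENT;	/* terminal record: normal end of stack */
	if (f->fp & 0xf)
		return -EINVAL;	/* misaligned fp: corrupt stack */
	/* ... a real unwinder would load the next fp/pc from *f->fp ... */
	f->fp = 0;		/* pretend the next record is terminal */
	return 0;
}

int main(void)
{
	struct frame f = { .fp = 0x10, .pc = 0 };
	int ret;

	while (!(ret = unwind_next(&f)))
		;
	printf("unwind ended %s\n",
	       ret == -ENOENT ? "cleanly" : "on a corrupt frame");
	return 0;
}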
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		if (!ret)
 			ret = -EOPNOTSUPP;
 	} else {
-		__cpu_suspend_exit();
+		RCU_NONIDLE(__cpu_suspend_exit());
 	}
 
 	unpause_graph_tracing();
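The lockdep splat arose because __cpu_suspend_exit() can reach code that uses RCU (the Spectre-v4 mitigation enable path) while the CPU, resuming from cpuidle, is still in RCU's not-watching idle state. RCU_NONIDLE() brackets the call so RCU is watching for its duration. A userspace model of that bracketing pattern; rcu_enter()/rcu_exit() stand in for the kernel's rcu_irq_enter_irqson()/rcu_irq_exit_irqson():

#include <stdio.h>

static int rcu_watching;

static void rcu_enter(void) { rcu_watching = 1; }
static void rcu_exit(void)  { rcu_watching = 0; }

/* Model of the kernel macro: mark RCU watching around a statement. */
#define RCU_NONIDLE(stmt) do { rcu_enter(); stmt; rcu_exit(); } while (0)

static void cpu_suspend_exit(void)	/* stand-in for __cpu_suspend_exit() */
{
	printf("RCU watching here: %s\n", rcu_watching ? "yes" : "no");
}

int main(void)
{
	cpu_suspend_exit();		 /* old call: RCU not watching */
	RCU_NONIDLE(cpu_suspend_exit()); /* fixed call: RCU watching */
	return 0;
}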
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -119,7 +119,7 @@ el2_error:
 
 .macro invalid_vector	label, target = __guest_exit_panic
 	.align	2
-SYM_CODE_START(\label)
+SYM_CODE_START_LOCAL(\label)
 	b \target
 SYM_CODE_END(\label)
 .endm
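SYM_CODE_START_LOCAL binds \label with local rather than global linkage, so the invalid-vector entries stop being kernel-wide symbols that other code could resolve and branch to by name. Roughly the C-linkage analogue of the change, as a sketch:

#include <stdio.h>

static int local_entry(void)	/* ~ SYM_CODE_START_LOCAL: object-local only */
{
	return 42;
}

int global_entry(void)		/* ~ SYM_CODE_START: visible to other objects */
{
	return local_entry();
}

int main(void)
{
	printf("%d\n", global_entry());
	return 0;
}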
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1155,7 +1155,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-static inline pud_t * fixmap_pud(unsigned long addr)
+static inline pud_t *fixmap_pud(unsigned long addr)
 {
 	pgd_t *pgdp = pgd_offset_k(addr);
 	p4d_t *p4dp = p4d_offset(pgdp, addr);
@@ -1166,7 +1166,7 @@ static inline pud_t * fixmap_pud(unsigned long addr)
 	return pud_offset_kimg(p4dp, addr);
 }
 
-static inline pmd_t * fixmap_pmd(unsigned long addr)
+static inline pmd_t *fixmap_pmd(unsigned long addr)
 {
 	pud_t *pudp = fixmap_pud(addr);
 	pud_t pud = READ_ONCE(*pudp);
@@ -1176,7 +1176,7 @@ static inline pmd_t * fixmap_pmd(unsigned long addr)
 	return pmd_offset_kimg(pudp, addr);
 }
 
-static inline pte_t * fixmap_pte(unsigned long addr)
+static inline pte_t *fixmap_pte(unsigned long addr)
 {
 	return &bm_pte[pte_index(addr)];
 }
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -314,6 +314,8 @@ extern void machine_kexec_cleanup(struct kimage *image);
 extern int kernel_kexec(void);
 extern struct page *kimage_alloc_control_pages(struct kimage *image,
 						unsigned int order);
+int machine_kexec_post_load(struct kimage *image);
+
 extern void __crash_kexec(struct pt_regs *);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -13,8 +13,6 @@ void kimage_terminate(struct kimage *image);
 int kimage_is_destination_range(struct kimage *image,
 				unsigned long start, unsigned long end);
 
-int machine_kexec_post_load(struct kimage *image);
-
 extern struct mutex kexec_mutex;
 
 #ifdef CONFIG_KEXEC_FILE
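Moving the machine_kexec_post_load() declaration from kernel/kexec_internal.h into <linux/kexec.h> makes the arch-override arrangement visible to both sides: kexec core provides a weak default and an architecture supplies the real implementation. A minimal userspace model of that pattern using the GCC/Clang weak attribute (the kernel spells it __weak); the opaque struct and the call here are placeholders:

#include <stddef.h>
#include <stdio.h>

struct kimage;	/* opaque placeholder for the kernel's struct kimage */

/* Weak default, as kexec core provides: do nothing, report success. */
__attribute__((weak)) int machine_kexec_post_load(struct kimage *image)
{
	(void)image;
	return 0;
}

int main(void)
{
	/*
	 * An arch that defines a strong machine_kexec_post_load()
	 * elsewhere overrides the weak default at link time.
	 */
	printf("post_load -> %d\n", machine_kexec_post_load(NULL));
	return 0;
}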