arm64 fixes for 4.3

- Fix corruption in SWP emulation when STXR fails due to contention
- Fix MMU re-initialisation when resuming from a low-power state
- Fix stack unwinding code to match what ftrace expects
- Fix relocation code in the EFI stub when DRAM base is not 2MB aligned

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABCgAGBQJWMlK9AAoJELescNyEwWM0iIcH/AmtOcTR9EZjwxD6szeF1/qg
JuJCW7D0q2TNEw4Ed8TW8W3mSFPBuDWWbypxwpcV7pQZjBKpbDBTd79sdwAbRViZ
mYGgV3NKt6BuChjxxMxnwRXYdF1J38bpFFyXslPue8zmnTkZ5EQ7IR2hMksX2+er
KoPY4Gu0nSTaLD55jFfbtA3pMgoT9gRi+UsMGBseb03TLwAxEmYrtzZNTTMrDqbC
BomRDbE/duOxnhigAMeWb0T/oHY66pZoEjy9YxsSd9HyqZ+l7MbMUkBRkuXj3g5O
9XLsbBWauKp3nGUgBaxu76JovJhbveWaSB4fBF6t5G2WoP47wFjfxhWxpDg6B3Y=
=3Rb/
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Apologies for this being so late, but we've uncovered a few nasty
  issues on arm64 which didn't settle down until yesterday and the
  fixes all look suitable for 4.3.  Of the four patches, three of them
  are Cc'd to stable, with the remaining patch fixing an issue that
  only took effect during the merge window.

  Summary:

  - Fix corruption in SWP emulation when STXR fails due to contention
  - Fix MMU re-initialisation when resuming from a low-power state
  - Fix stack unwinding code to match what ftrace expects
  - Fix relocation code in the EFI stub when DRAM base is not 2MB aligned"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/efi: do not assume DRAM base is aligned to 2 MB
  Revert "ARM64: unwind: Fix PC calculation"
  arm64: kernel: fix tcr_el1.t0sz restore on systems with extended idmap
  arm64: compat: fix stxr failure case in SWP emulation
commit 9b971e771e
@@ -284,21 +284,23 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
 	__asm__ __volatile__(					\
 	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
 		    CONFIG_ARM64_PAN)				\
-	"	mov		%w2, %w1\n"			\
-	"0:	ldxr"B"		%w1, [%3]\n"			\
-	"1:	stxr"B"		%w0, %w2, [%3]\n"		\
+	"0:	ldxr"B"		%w2, [%3]\n"			\
+	"1:	stxr"B"		%w0, %w1, [%3]\n"		\
 	"	cbz		%w0, 2f\n"			\
 	"	mov		%w0, %w4\n"			\
+	"	b		3f\n"				\
 	"2:\n"							\
+	"	mov		%w1, %w2\n"			\
+	"3:\n"							\
 	"	.pushsection	 .fixup,\"ax\"\n"		\
 	"	.align		2\n"				\
-	"3:	mov		%w0, %w5\n"			\
-	"	b		2b\n"				\
+	"4:	mov		%w0, %w5\n"			\
+	"	b		3b\n"				\
 	"	.popsection"					\
 	"	.pushsection	 __ex_table,\"a\"\n"		\
 	"	.align		3\n"				\
-	"	.quad		0b, 3b\n"			\
-	"	.quad		1b, 3b\n"			\
+	"	.quad		0b, 4b\n"			\
+	"	.quad		1b, 4b\n"			\
 	"	.popsection\n"					\
 	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
 		CONFIG_ARM64_PAN)				\
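The register shuffle above is easier to follow in C. Below is a minimal user-space sketch of the emulated SWP semantics, not the kernel's code: the hypothetical emulated_swp() uses GCC's __atomic builtins in place of the raw LDXR/STXR pair, whose store side can fail spuriously under contention and must be retried.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Sketch of a 32-bit SWP: store "newval" into *addr and return the
	 * previous contents.  The weak compare-exchange stands in for one
	 * LDXR/STXR attempt.
	 */
	static uint32_t emulated_swp(uint32_t *addr, uint32_t newval)
	{
		uint32_t oldval = __atomic_load_n(addr, __ATOMIC_RELAXED);

		/*
		 * "newval" must survive a failed attempt; only "oldval" is
		 * refreshed on retry.  The buggy asm reused the register
		 * holding the caller's value as the LDXR destination, so once
		 * an STXR failed, the retry stored whatever had been loaded
		 * from memory instead of the value the caller asked to swap in.
		 */
		while (!__atomic_compare_exchange_n(addr, &oldval, newval, 1,
						    __ATOMIC_RELAXED,
						    __ATOMIC_RELAXED))
			;

		return oldval;
	}

	int main(void)
	{
		uint32_t word = 0xdead;

		printf("old %#x, now %#x\n", emulated_swp(&word, 0xbeef), word);
		return 0;
	}

Only once the store succeeds is the caller's value replaced with the old memory contents, which is what the fixed asm achieves with the extra temporary and the final "mov %w1, %w2".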
@@ -25,10 +25,20 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 	unsigned long kernel_size, kernel_memsize = 0;
 	unsigned long nr_pages;
 	void *old_image_addr = (void *)*image_addr;
+	unsigned long preferred_offset;
+
+	/*
+	 * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
+	 * a 2 MB aligned base, which itself may be lower than dram_base, as
+	 * long as the resulting offset equals or exceeds it.
+	 */
+	preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+	if (preferred_offset < dram_base)
+		preferred_offset += SZ_2M;
 
 	/* Relocate the image, if required. */
 	kernel_size = _edata - _text;
-	if (*image_addr != (dram_base + TEXT_OFFSET)) {
+	if (*image_addr != preferred_offset) {
 		kernel_memsize = kernel_size + (_end - _edata);
 
 		/*
@@ -42,7 +52,7 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 		 * Mustang), we can still place the kernel at the address
 		 * 'dram_base + TEXT_OFFSET'.
 		 */
-		*image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+		*image_addr = *reserve_addr = preferred_offset;
 		nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
 			   EFI_PAGE_SIZE;
 		status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
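The preferred_offset arithmetic is easy to sanity-check with concrete numbers. A small stand-alone sketch follows; the DRAM base values are made up, TEXT_OFFSET is assumed to be the default 0x80000, and round_down()/SZ_2M are redefined locally rather than taken from kernel headers.

	#include <stdio.h>

	#define SZ_2M		0x200000UL
	#define TEXT_OFFSET	0x80000UL	/* assumed default arm64 text offset */

	/* Local stand-in for the kernel's round_down() helper. */
	#define round_down(x, y)	((x) & ~((y) - 1))

	int main(void)
	{
		/* Hypothetical DRAM bases: one 2 MB aligned, one deliberately not. */
		unsigned long bases[] = { 0x80000000UL, 0x800a0000UL };

		for (unsigned int i = 0; i < sizeof(bases) / sizeof(bases[0]); i++) {
			unsigned long dram_base = bases[i];
			unsigned long preferred_offset;

			preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
			if (preferred_offset < dram_base)
				preferred_offset += SZ_2M;

			printf("dram_base %#lx -> preferred_offset %#lx\n",
			       dram_base, preferred_offset);
		}
		return 0;
	}

For the unaligned base 0x800a0000 this prints 0x80280000: still TEXT_OFFSET beyond a 2 MB boundary and above the DRAM base, whereas the old "dram_base + TEXT_OFFSET" placement would not have given the 2 MB-aligned-plus-TEXT_OFFSET address the kernel expects.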
@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame)
 
 	frame->sp = fp + 0x10;
 	frame->fp = *(unsigned long *)(fp);
-	/*
-	 * -4 here because we care about the PC at time of bl,
-	 * not where the return will go.
-	 */
-	frame->pc = *(unsigned long *)(fp + 8) - 4;
+	frame->pc = *(unsigned long *)(fp + 8);
 
 	return 0;
 }
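For context, the unwinder walks AArch64 frame records: x29 points at a two-word record of {previous fp, saved x30}, which is what the fp + 0x10 / fp + 8 offsets above index. A minimal user-space sketch of the same walk is below; it assumes frame pointers are kept (e.g. -O0 or -fno-omit-frame-pointer) and omits the bounds checks the kernel performs on fp.

	#include <stdint.h>
	#include <stdio.h>

	/* AArch64 frame record, as laid out at the address held in x29. */
	struct frame_record {
		uint64_t fp;	/* previous frame record */
		uint64_t lr;	/* saved return address (instruction after the bl) */
	};

	static void dump_stack_sketch(unsigned int limit)
	{
		const struct frame_record *rec = __builtin_frame_address(0);

		while (rec && limit--) {
			/*
			 * Report the saved lr as-is.  The reverted patch
			 * subtracted 4 to point at the bl itself, which is not
			 * what ftrace expects to see (hence the revert).
			 */
			printf("pc %#llx  fp %#llx\n",
			       (unsigned long long)rec->lr,
			       (unsigned long long)rec->fp);
			rec = (const struct frame_record *)rec->fp;
		}
	}

	int main(void)
	{
		dump_stack_sketch(8);
		return 0;
	}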
@@ -80,17 +80,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	if (ret == 0) {
 		/*
 		 * We are resuming from reset with TTBR0_EL1 set to the
-		 * idmap to enable the MMU; restore the active_mm mappings in
-		 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
-		 * the thread entered cpu_suspend with TTBR0_EL1 set to
-		 * reserved TTBR0 page tables and should be restored as such.
+		 * idmap to enable the MMU; set the TTBR0 to the reserved
+		 * page tables to prevent speculative TLB allocations, flush
+		 * the local tlb and set the default tcr_el1.t0sz so that
+		 * the TTBR0 address space set-up is properly restored.
+		 * If the current active_mm != &init_mm we entered cpu_suspend
+		 * with mappings in TTBR0 that must be restored, so we switch
+		 * them back to complete the address space configuration
+		 * restoration before returning.
 		 */
-		if (mm == &init_mm)
-			cpu_set_reserved_ttbr0();
-		else
-			cpu_switch_mm(mm->pgd, mm);
-
+		cpu_set_reserved_ttbr0();
 		flush_tlb_all();
+		cpu_set_default_tcr_t0sz();
+
+		if (mm != &init_mm)
+			cpu_switch_mm(mm->pgd, mm);
 
 		/*
 		 * Restore per-cpu offset before any kernel
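Read top to bottom, the new resume sequence always parks TTBR0 on the reserved tables before any address-space state is restored. The toy model below just prints that ordering; the helpers are stubs named after the kernel functions in the hunk, with simplified bodies and signatures, and it is not the kernel implementation.

	#include <stdio.h>

	/* Stub types/helpers standing in for the kernel ones used in the hunk. */
	struct mm_struct { const char *name; void *pgd; };
	static struct mm_struct init_mm = { "init_mm", 0 };

	static void cpu_set_reserved_ttbr0(void)   { puts("TTBR0_EL1 <- reserved tables"); }
	static void flush_tlb_all(void)            { puts("flush TLB"); }
	static void cpu_set_default_tcr_t0sz(void) { puts("TCR_EL1.T0SZ <- default"); }
	static void cpu_switch_mm(void *pgd, struct mm_struct *mm)
	{
		(void)pgd;
		printf("TTBR0_EL1 <- %s page tables\n", mm->name);
	}

	/* Ordering on the resume path, mirroring the fixed cpu_suspend() hunk. */
	static void resume_restore_ttbr0(struct mm_struct *mm)
	{
		cpu_set_reserved_ttbr0();	/* prevent speculative TLB allocations */
		flush_tlb_all();
		cpu_set_default_tcr_t0sz();	/* undo the extended-idmap T0SZ */

		if (mm != &init_mm)		/* init_mm keeps the reserved tables */
			cpu_switch_mm(mm->pgd, mm);
	}

	int main(void)
	{
		struct mm_struct task_mm = { "active_mm", 0 };

		resume_restore_ttbr0(&task_mm);
		return 0;
	}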