* 'arm64/vmap-stack' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux:
  arm64: add VMAP_STACK overflow detection
  arm64: add on_accessible_stack()
  arm64: add basic VMAP_STACK support
  arm64: use an irq stack pointer
  arm64: assembler: allow adr_this_cpu to use the stack pointer
  arm64: factor out entry stack manipulation
  efi/arm64: add EFI_KIMG_ALIGN
  arm64: move SEGMENT_ALIGN to <asm/memory.h>
  arm64: clean up irq stack definitions
  arm64: clean up THREAD_* definitions
  arm64: factor out PAGE_* and CONT_* definitions
  arm64: kernel: remove {THREAD,IRQ_STACK}_START_SP
  fork: allow arch-override of VMAP stack alignment
  arm64: remove __die()'s stack dump

/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_ventry	label
	.align 7
	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
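	/*
	 * Note: the stacks are sized and aligned so that, once S_FRAME_SIZE
	 * has been subtracted, a valid SP always has bit THREAD_SHIFT clear.
	 * A set bit here therefore means SP has dropped below the base of
	 * the stack, i.e. the stack has overflowed.
	 */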
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	\label
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	*/
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr0_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
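	/*
	 * Bit 4 of the saved SPSR is PSTATE.M[4], which is only set when
	 * returning to an AArch32 (32-bit) EL0 task; the erratum workaround
	 * is not needed for 64-bit tasks.
	 */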
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
wsc_nr	.req	w25		// number of system calls
wscno	.req	w26		// syscall number
xscno	.req	x26		// syscall number (zero-extended)
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	kernel_ventry	el1_sync_invalid		// Synchronous EL1t
	kernel_ventry	el1_irq_invalid			// IRQ EL1t
	kernel_ventry	el1_fiq_invalid			// FIQ EL1t
	kernel_ventry	el1_error_invalid		// Error EL1t

	kernel_ventry	el1_sync			// Synchronous EL1h
	kernel_ventry	el1_irq				// IRQ EL1h
	kernel_ventry	el1_fiq_invalid			// FIQ EL1h
	kernel_ventry	el1_error_invalid		// Error EL1h

	kernel_ventry	el0_sync			// Synchronous 64-bit EL0
	kernel_ventry	el0_irq				// IRQ 64-bit EL0
	kernel_ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	el0_sync_compat			// Synchronous 32-bit EL0
	kernel_ventry	el0_irq_compat			// IRQ 32-bit EL0
	kernel_ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	kernel_ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	kernel_ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	kernel_ventry	el0_irq_invalid			// IRQ 32-bit EL0
	kernel_ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	kernel_ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	bl	do_undefinstr
	ASM_BUG()
el1_dbg:
	/*
	 * Debug exception handling
	 */
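	/*
	 * Debug exception classes taken from the current EL have an odd EC
	 * value, except for BRK64; the cinc below folds BRK64 into the odd
	 * set, so a single test of bit 0 rejects any lower-EL debug EC that
	 * should never have been routed here.
	 */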
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	mov	wscno, w7			// syscall number in w7 (r7)
	mov	wsc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	mov	wscno, w8			// syscall number in w8
	mov	wsc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	cmp	wscno, #NO_SYSCALL		// user-issued syscall(-1)?
	b.ne	1f
	mov	x0, #-ENOSYS			// set default errno if so
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #NO_SYSCALL			// skip the syscall?
	b.eq	__sys_trace_return_skipped
	mov	wscno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
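/*
 * copy_thread() seeds x19/x20 in the new task's cpu_context: for a kernel
 * thread x19 is the thread function and x20 its argument, while for a user
 * task x19 is zero.
 */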
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)