/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <mach/entry-macro.S>

#include "entry-header.S"
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 *
 * Entered with interrupts enabled; r0 holds the syscall return value.
 */
	.align	5
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check work flags with IRQs off
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending		@ pending work -> slow path

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 *
 * "why" is set to 1 so restart handling treats this as a real syscall
 * return; if syscall tracing is active we report the "exit" event first.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
2006-01-19 12:57:01 +00:00
|
|
|
.equ NR_syscalls,0
|
|
|
|
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "calls.S"
|
2006-01-19 12:57:01 +00:00
|
|
|
#undef CALL
|
|
|
|
#define CALL(x) .long x
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-10-06 23:06:12 +00:00
|
|
|
#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

/*
 * Common mcount body.  Relies on the mcount_enter/mcount_get_lr/mcount_exit
 * macros defined (and purged) below for the old-ABI and __gnu_mcount_nc
 * variants respectively.
 */
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2			@ tracer installed?
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2			@ graph return hook set?
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2			@ graph entry hook set?
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1: 	mcount_get_lr	r1			@ lr of instrumented func
	mov	r0, lr				@ instrumented function
	sub	r0, r0, #MCOUNT_INSN_SIZE
	adr	lr, BSYM(2f)
	mov	pc, r2				@ call the tracer
2:	mcount_exit
.endm

/* Dynamic-ftrace call site: ftrace_call\suffix is patched at runtime. */
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mov	r0, lr				@ instrumented function
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

	mcount_exit
.endm

.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
	mov	r1, lr			@ instrumented routine (func)
	sub	r1, r1, #MCOUNT_INSN_SIZE
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_TRACE	@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)
/*
|
|
|
|
* This is the really slow path. We're going to be doing
|
|
|
|
* context switches, and waiting for our parent to respond.
|
|
|
|
*/
|
|
|
|
__sys_trace:
|
2006-01-14 19:30:04 +00:00
|
|
|
mov r2, scno
|
2005-04-16 22:20:36 +00:00
|
|
|
add r1, sp, #S_OFF
|
|
|
|
mov r0, #0 @ trace entry [IP = 0]
|
|
|
|
bl syscall_trace
|
|
|
|
|
2009-07-24 11:32:54 +00:00
|
|
|
adr lr, BSYM(__sys_trace_return) @ return address
|
2006-01-14 19:30:04 +00:00
|
|
|
mov scno, r0 @ syscall number (possibly new)
|
2005-04-16 22:20:36 +00:00
|
|
|
add r1, sp, #S_R0 + S_OFF @ pointer to regs
|
|
|
|
cmp scno, #NR_syscalls @ check upper syscall limit
|
|
|
|
ldmccia r1, {r0 - r3} @ have to reload r0 - r3
|
|
|
|
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
|
|
|
|
b 2b
|
|
|
|
|
|
|
|
__sys_trace_return:
|
|
|
|
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
|
2006-01-14 19:30:04 +00:00
|
|
|
mov r2, scno
|
2005-04-16 22:20:36 +00:00
|
|
|
mov r1, sp
|
|
|
|
mov r0, #1 @ trace exit [IP = 1]
|
|
|
|
bl syscall_trace
|
|
|
|
b ret_slow_syscall
|
|
|
|
|
|
|
|
.align 5
|
|
|
|
#ifdef CONFIG_ALIGNMENT_TRAP
|
|
|
|
.type __cr_alignment, #object
|
|
|
|
__cr_alignment:
|
|
|
|
.word cr_alignment
|
2006-01-14 16:36:12 +00:00
|
|
|
#endif
|
|
|
|
.ltorg
|
|
|
|
|
|
|
|
/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)
/* Pass a pointer to the saved registers as the argument to sys_fork. */
sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)
/* Pass a pointer to the saved registers as the argument to sys_vfork. */
sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)
/* Supply the saved-registers pointer as the fourth argument to sys_execve. */
sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve
ENDPROC(sys_execve_wrapper)
/* Store the saved-registers pointer into the stack argument slot for sys_clone. */
sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone
ENDPROC(sys_clone_wrapper)
/*
 * sigreturn can never legitimately return a restart-worthy error, so
 * clear "why" to suppress syscall-restart handling on the way out;
 * otherwise a restart could re-execute the instruction before the
 * proper return address.
 */
sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
/*
 * Same as sys_sigreturn_wrapper: rt_sigreturn must never trigger the
 * syscall-restart machinery, so clear "why" before the tail call.
 */
sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
/* Load the user sp from the saved registers as the third argument. */
sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)
/* Translate a struct-size argument of 88 to the 84 the kernel expects
   (user-space struct statfs64 carries 4 bytes of padding). */
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)
/* Same size fixup as sys_statfs64_wrapper, for the fd-based variant. */
sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK			@ offset representable in pages?
	moveq	r5, r5, lsr #PAGE_SHIFT - 12	@ convert 4K units -> page units
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]			@ PAGE_SHIFT == 12: units match
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}		@ 64-bit offset goes on the stack
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}		@ 64-bit offset goes on the stack
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2			@ shift 64-bit length into r2/r3
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2			@ shift 64-bit length into r2/r3
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]		@ count goes on the stack
	mov	r3, r2			@ shift 64-bit offset into r2/r3
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif