commit 6de6066c0d
When the kernel binary becomes large enough (32M and more), errors may occur during the final link stage. This happens because the build system uses short relocations for ARC by default. The problem is easily resolved by passing the -mlong-calls option to GCC, so that long absolute jumps (j) are used instead of short relative branches (b).

However, there are fragments of pure assembler code that use branches in inappropriate places and cause a link error because of relocation overflow.

The first of these fragments is the .fixup insertion in futex.h and unaligned.c. It emits code into a separate section (.fixup) that ends with a branch instruction, which leads to a link error once the kernel becomes large.

The second is the calls to scheduler functions (common kernel code) from ARC's entry.S. When the kernel binary becomes large, this may lead to a link error because the scheduler may end up far enough away from ARC's code in the final binary.

Signed-off-by: Yuriy Kolerov <yuriy.kolerov@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
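For illustration only, here is a minimal sketch of the exception-table fixup pattern the message refers to; the helper name __get_user_word is hypothetical and is not code from this patch. The functional change in such fragments is simply that the fixup stub ends with the absolute jump j rather than the pc-relative branch b, so it can reach its return target no matter how far the linker places .fixup from .text:

/* Hypothetical user-space load helper, for illustration only */
static inline int __get_user_word(u32 *val, const u32 __user *addr)
{
	int ret = 0;
	u32 tmp;

	__asm__ __volatile__(
	"1:	ld	%1, [%2]		\n"
	"2:					\n"
	"	.section .fixup,\"ax\"		\n"
	"	.align	4			\n"
	"4:	mov	%0, %3			\n"
	"	j	2b			\n"	/* was: b 2b */
	"	.previous			\n"
	"	.section __ex_table,\"a\"	\n"
	"	.align	4			\n"
	"	.word	1b, 4b			\n"
	"	.previous			\n"
	: "+&r" (ret), "=&r" (tmp)
	: "r" (addr), "ir" (-EFAULT)
	: "memory");

	*val = tmp;
	return ret;
}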
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: August 2010: From Android kernel work
 */

#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H

#include <linux/futex.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <asm/errno.h>

#ifdef CONFIG_ARC_HAS_LLSC

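/*
 * Operand mapping inside __futex_atomic_op (both variants below):
 *   %0 = ret, %1 = oldval, %2 = uaddr, %3 = oparg, %4 = -EFAULT
 * The LL/SC variant retries the llock/scond pair until the store
 * succeeds; a faulting user access is redirected via __ex_table to
 * label 4, whose .fixup stub returns -EFAULT.
 */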
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
							\
	smp_mb();					\
	__asm__ __volatile__(				\
	"1:	llock	%1, [%2]		\n"	\
		insn				"\n"	\
	"2:	scond	%0, [%2]		\n"	\
	"	bnz	1b			\n"	\
	"	mov	%0, 0			\n"	\
	"3:					\n"	\
	"	.section .fixup,\"ax\"		\n"	\
	"	.align	4			\n"	\
	"4:	mov	%0, %4			\n"	\
	"	j	3b			\n"	\
	"	.previous			\n"	\
	"	.section __ex_table,\"a\"	\n"	\
	"	.align	4			\n"	\
	"	.word	1b, 4b			\n"	\
	"	.word	2b, 4b			\n"	\
	"	.previous			\n"	\
							\
	: "=&r" (ret), "=&r" (oldval)			\
	: "r" (uaddr), "r" (oparg), "ir" (-EFAULT)	\
	: "cc", "memory");				\
	smp_mb()					\

#else	/* !CONFIG_ARC_HAS_LLSC */

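/*
 * Without LL/SC the ld/insn/st sequence is not atomic by itself;
 * futex_atomic_op_inuser() below wraps it in preempt_disable() on
 * !CONFIG_ARC_HAS_LLSC configurations to keep the r-m-w atomic.
 */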
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
							\
	smp_mb();					\
	__asm__ __volatile__(				\
	"1:	ld	%1, [%2]		\n"	\
		insn				"\n"	\
	"2:	st	%0, [%2]		\n"	\
	"	mov	%0, 0			\n"	\
	"3:					\n"	\
	"	.section .fixup,\"ax\"		\n"	\
	"	.align	4			\n"	\
	"4:	mov	%0, %4			\n"	\
	"	j	3b			\n"	\
	"	.previous			\n"	\
	"	.section __ex_table,\"a\"	\n"	\
	"	.align	4			\n"	\
	"	.word	1b, 4b			\n"	\
	"	.word	2b, 4b			\n"	\
	"	.previous			\n"	\
							\
	: "=&r" (ret), "=&r" (oldval)			\
	: "r" (uaddr), "r" (oparg), "ir" (-EFAULT)	\
	: "cc", "memory");				\
	smp_mb()					\

#endif

static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
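	/*
	 * encoded_op packing (as decoded below): op in bits 30..28, with
	 * bit 31 (FUTEX_OP_OPARG_SHIFT) meaning oparg is a shift count;
	 * cmp in bits 27..24; signed 12-bit oparg in bits 23..12; signed
	 * 12-bit cmparg in bits 11..0.
	 */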
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

#ifndef CONFIG_ARC_HAS_LLSC
	preempt_disable();	/* to guarantee atomic r-m-w of futex op */
#endif
	pagefault_disable();

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		/* oldval = *uaddr; *uaddr += oparg ; ret = *uaddr */
		__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	pagefault_enable();
#ifndef CONFIG_ARC_HAS_LLSC
	preempt_enable();
#endif

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (oldval == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (oldval != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (oldval < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (oldval >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (oldval <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (oldval > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}

/*
 * cmpxchg of futex (pagefaults disabled by caller)
 * Return 0 for success, -EFAULT otherwise
 */
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 expval,
			      u32 newval)
{
	int ret = 0;
	u32 existval;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

#ifndef CONFIG_ARC_HAS_LLSC
	preempt_disable();	/* to guarantee atomic r-m-w of futex op */
#endif
	smp_mb();

	__asm__ __volatile__(
#ifdef CONFIG_ARC_HAS_LLSC
	"1:	llock	%1, [%4]		\n"
	"	brne	%1, %2, 3f		\n"
	"2:	scond	%3, [%4]		\n"
	"	bnz	1b			\n"
#else
	"1:	ld	%1, [%4]		\n"
	"	brne	%1, %2, 3f		\n"
	"2:	st	%3, [%4]		\n"
#endif
	"3:					\n"
	"	.section .fixup,\"ax\"		\n"
	"4:	mov	%0, %5			\n"
	"	j	3b			\n"
	"	.previous			\n"
	"	.section __ex_table,\"a\"	\n"
	"	.align	4			\n"
	"	.word	1b, 4b			\n"
	"	.word	2b, 4b			\n"
	"	.previous\n"
	: "+&r"(ret), "=&r"(existval)
	: "r"(expval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
	: "cc", "memory");

	smp_mb();

#ifndef CONFIG_ARC_HAS_LLSC
	preempt_enable();
#endif
	*uval = existval;
	return ret;
}

#endif
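
/*
 * Illustrative caller (a sketch, not part of this header): per the
 * comment above futex_atomic_cmpxchg_inatomic(), page faults must be
 * disabled by the caller. The function name below is hypothetical.
 */
static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr,
					     u32 oldval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, oldval, newval);
	pagefault_enable();

	return ret;
}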