/*
 * Provenance: arch/tile/include/asm/futex.h from the Linux kernel
 * (mirror of https://github.com/torvalds/linux.git, commit 0707ad30d1,
 * "sparse/checkpatch cleanups"; Signed-off-by: Chris Metcalf
 * <cmetcalf@tilera.com>, Acked-by: Arnd Bergmann <arnd@arndb.de>).
 */
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */

#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

/* Nothing below is meaningful to the assembler; hide it from .S files. */
#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/*
 * Low-level atomic futex primitives, defined elsewhere (not in this
 * file; presumably in arch assembly/library code -- TODO confirm).
 * Each operates atomically on the user word at "v" and returns a
 * struct __get_user; from the usage below, .err carries an error code
 * (0 on success) and .val carries the word's previous value.
 */
extern struct __get_user futex_set(int __user *v, int i);
extern struct __get_user futex_add(int __user *v, int n);
extern struct __get_user futex_or(int __user *v, int n);
extern struct __get_user futex_andn(int __user *v, int n);
extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);

#ifndef __tilegx__
|
|
extern struct __get_user futex_xor(int __user *v, int n);
|
|
#else
|
|
static inline struct __get_user futex_xor(int __user *uaddr, int n)
|
|
{
|
|
struct __get_user asm_ret = __get_user_4(uaddr);
|
|
if (!asm_ret.err) {
|
|
int oldval, newval;
|
|
do {
|
|
oldval = asm_ret.val;
|
|
newval = oldval ^ n;
|
|
asm_ret = futex_cmpxchg(uaddr, oldval, newval);
|
|
} while (asm_ret.err == 0 && oldval != asm_ret.val);
|
|
}
|
|
return asm_ret;
|
|
}
|
|
#endif
|
|
|
|
/*
 * Decode and perform the futex operation packed into "encoded_op" on
 * the user word at "uaddr", then evaluate the encoded comparison
 * against the word's previous value.
 *
 * Returns a negative errno on failure (-EFAULT for a bad address,
 * -ENOSYS for an unrecognized op or comparison, or the primitive's own
 * error), otherwise the boolean (0/1) result of comparing the old
 * value of *uaddr against cmparg.
 */
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
	/* Field layout of encoded_op, per the generic futex ABI. */
	int op = (encoded_op >> 28) & 7;	/* FUTEX_OP_* opcode */
	int cmp = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_* code */
	/*
	 * Shift left then arithmetic-shift right to sign-extend the
	 * 12-bit operand fields.  NOTE(review): left-shifting a negative
	 * signed int is formally undefined in ISO C; this relies on the
	 * gcc two's-complement behavior the kernel assumes.
	 */
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int ret;
	struct __get_user asm_ret;	/* .err = fault code, .val = old value */

	/* FUTEX_OP_OPARG_SHIFT: operand is a shift count, not a value. */
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	/*
	 * Faults must not be serviced while we hold the atomic lock;
	 * the primitives report them through asm_ret.err instead.
	 */
	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		asm_ret = futex_set(uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		asm_ret = futex_add(uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		asm_ret = futex_or(uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		asm_ret = futex_andn(uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		asm_ret = futex_xor(uaddr, oparg);
		break;
	default:
		asm_ret.err = -ENOSYS;
	}
	pagefault_enable();

	ret = asm_ret.err;

	/* Only evaluate the comparison if the operation itself succeeded. */
	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (asm_ret.val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (asm_ret.val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (asm_ret.val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (asm_ret.val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (asm_ret.val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (asm_ret.val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}

/*
 * Atomically compare-and-exchange the user word at "uaddr": if it
 * holds "oldval", replace it with "newval".  Returns a negative errno
 * on failure, otherwise the value the word held before the operation
 * (equal to "oldval" iff the exchange happened).
 */
static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
						int newval)
{
	struct __get_user result;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	result = futex_cmpxchg(uaddr, oldval, newval);
	if (result.err)
		return result.err;
	return result.val;
}

#ifndef __tilegx__
/*
 * Return failure from the atomic wrappers.
 * (tilepro only; presumably invoked by the out-of-line primitives when
 * handed an invalid address -- definition is not in this file.)
 */
struct __get_user __atomic_bad_address(int __user *addr);
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */