commit d9b9ff8c18

Let's explicitly disable/enable preemption in the !CONFIG_SMP version of
futex_atomic_cmpxchg_inatomic(), to prepare for pagefault_disable() not
touching preemption anymore.

This is needed for this function to be callable from both atomic and
non-atomic context. Otherwise we might break mutual exclusion when relying
on a get_user()/put_user() implementation.

Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: benh@kernel.crashing.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-10-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
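For context, here is a minimal, hypothetical sketch of the two calling situations the commit message refers to. The caller functions are made up for illustration only; futex_atomic_cmpxchg_inatomic(), pagefault_disable() and pagefault_enable() are the interfaces actually defined or used in the header below.

        /* Non-atomic context: preemption is enabled at the call site. */
        static int example_nonatomic_caller(u32 __user *uaddr)
        {
                u32 cur;

                return futex_atomic_cmpxchg_inatomic(&cur, uaddr, 0, 1);
        }

        /* Atomic context: the caller has already disabled pagefaults. */
        static int example_atomic_caller(u32 __user *uaddr)
        {
                u32 cur;
                int ret;

                pagefault_disable();
                ret = futex_atomic_cmpxchg_inatomic(&cur, uaddr, 0, 1);
                pagefault_enable();

                return ret;
        }

While pagefault_disable() still implied preempt_disable(), only the second path was implicitly serialized on UP; with the two decoupled, the explicit preempt_disable()/preempt_enable() inside futex_atomic_cmpxchg_inatomic() provides the mutual exclusion for both.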
175 lines
3.7 KiB
C
#ifndef _ASM_GENERIC_FUTEX_H
#define _ASM_GENERIC_FUTEX_H

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>

#ifndef CONFIG_SMP
/*
 * The following implementation is only for uniprocessor machines.
 * It relies on preempt_disable() ensuring mutual exclusion.
 */
/**
 * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
 *                            argument and comparison of the previous
 *                            futex value with another constant.
 *
 * @encoded_op: encoded operation to execute
 * @uaddr:      pointer to user space address
 *
 * Return:
 * 0 - On success
 * <0 - On error
 */
static inline int
futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
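        /*
         * Layout of encoded_op as decoded below: bit 31 flags a shifted
         * oparg (FUTEX_OP_OPARG_SHIFT), bits 30-28 hold the operation,
         * bits 27-24 the comparison, bits 23-12 a sign-extended 12-bit
         * oparg and bits 11-0 a sign-extended 12-bit cmparg.
         */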
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (encoded_op << 8) >> 20;
        int cmparg = (encoded_op << 20) >> 20;
        int oldval, ret;
        u32 tmp;

        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;

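        /*
         * On UP, disabling preemption is what keeps the get_user()/
         * put_user() read-modify-write below mutually exclusive;
         * pagefaults are disabled so the accesses fail with -EFAULT
         * instead of taking a fault while preemption is off.
         */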
        preempt_disable();
        pagefault_disable();

        ret = -EFAULT;
        if (unlikely(get_user(oldval, uaddr) != 0))
                goto out_pagefault_enable;

        ret = 0;
        tmp = oldval;

        switch (op) {
        case FUTEX_OP_SET:
                tmp = oparg;
                break;
        case FUTEX_OP_ADD:
                tmp += oparg;
                break;
        case FUTEX_OP_OR:
                tmp |= oparg;
                break;
        case FUTEX_OP_ANDN:
                tmp &= ~oparg;
                break;
        case FUTEX_OP_XOR:
                tmp ^= oparg;
                break;
        default:
                ret = -ENOSYS;
        }

        if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
                ret = -EFAULT;

out_pagefault_enable:
        pagefault_enable();
        preempt_enable();

        if (ret == 0) {
                switch (cmp) {
                case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
                case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
                case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
                case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
                case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
                case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
                default: ret = -ENOSYS;
                }
        }
        return ret;
}

/**
 * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
 *                                   uaddr with newval if the current value is
 *                                   oldval.
 * @uval:   pointer to store content of @uaddr
 * @uaddr:  pointer to user space address
 * @oldval: old value
 * @newval: new value to store to @uaddr
 *
 * Return:
 * 0 - On success
 * <0 - On error
 */
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                              u32 oldval, u32 newval)
{
        u32 val;

        preempt_disable();
        if (unlikely(get_user(val, uaddr) != 0)) {
                preempt_enable();
                return -EFAULT;
        }

        if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
                preempt_enable();
                return -EFAULT;
        }

        *uval = val;
        preempt_enable();

        return 0;
}

#else
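/*
 * The generic SMP variant has no way to perform the user-space
 * read-modify-write atomically, so every operation below falls
 * through to -ENOSYS, and futex_atomic_cmpxchg_inatomic() is a
 * stub that returns -ENOSYS as well.
 */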
static inline int
futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (encoded_op << 8) >> 20;
        int cmparg = (encoded_op << 20) >> 20;
        int oldval = 0, ret;
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;

        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;

        pagefault_disable();

        switch (op) {
        case FUTEX_OP_SET:
        case FUTEX_OP_ADD:
        case FUTEX_OP_OR:
        case FUTEX_OP_ANDN:
        case FUTEX_OP_XOR:
        default:
                ret = -ENOSYS;
        }

        pagefault_enable();

        if (!ret) {
                switch (cmp) {
                case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
                case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
                case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
                case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
                case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
                case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
                default: ret = -ENOSYS;
                }
        }
        return ret;
}

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                              u32 oldval, u32 newval)
{
        return -ENOSYS;
}

#endif /* CONFIG_SMP */
#endif