forked from Minki/linux
12249b3441
The rwsem initializers and related macros and functions are mostly the same. Some of them lack the lockdep initializer, but having it in place does not matter for architectures which do not support lockdep. powerpc, sparc, x86: No functional change sh, s390: Removes the duplicate init_rwsem (inline and #define) alpha, ia64, xtensa: Use the lockdep capable init function in lib/rwsem.c which is just uninlining the init function for the LOCKDEP=n case Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: David Howells <dhowells@redhat.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Matt Turner <mattst88@gmail.com> Acked-by: Tony Luck <tony.luck@intel.com> Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Paul Mundt <lethal@linux-sh.org> Acked-by: David Miller <davem@davemloft.net> Cc: Chris Zankel <chris@zankel.net> LKML-Reference: <20110126195833.771812729@linutronix.de>
234 lines
5.0 KiB
C
234 lines
5.0 KiB
C
#ifndef _ALPHA_RWSEM_H
|
|
#define _ALPHA_RWSEM_H
|
|
|
|
/*
|
|
* Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
|
|
* Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
|
|
*/
|
|
|
|
#ifndef _LINUX_RWSEM_H
|
|
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
|
|
#endif
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/compiler.h>
|
|
|
|
/*
 * Out-of-line slow paths, called only when the inline fast paths below
 * detect contention (per the changelog, these live in lib/rwsem.c).
 */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/*
 * 64-bit count layout (follows from the values below):
 *  - low 32 bits (RWSEM_ACTIVE_MASK) hold the active holder count;
 *    each reader contributes RWSEM_ACTIVE_BIAS (+1)
 *  - the high 32 bits are biased by RWSEM_WAITING_BIAS (-2^32) while
 *    anything is queued, driving the whole count negative
 *  - a writer adds RWSEM_ACTIVE_WRITE_BIAS = WAITING + ACTIVE, so a
 *    write-locked count is negative and readers fail their fast path
 */
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
|
|
/*
 * Lock for reading (fast path).
 *
 * Atomically adds RWSEM_ACTIVE_READ_BIAS to sem->count.  If the value
 * observed before the add was negative (writer active or waiters
 * queued), fall back to the sleeping slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/* value of sem->count just before our increment */
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU race, a plain read-modify-write suffices */
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/*
	 * SMP: Alpha ldq_l/stq_c (load-locked/store-conditional) retry
	 * loop.  stq_c leaves 0 in %2 when the conditional store loses
	 * its reservation, so "beq %2,2f" branches to the out-of-line
	 * retry stub in .subsection 2.  The "mb" on the success path is
	 * the acquire barrier.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	/* negative old count => contended; let the slow path sleep */
	if (unlikely(oldcount < 0))
		rwsem_down_read_failed(sem);
}
|
|
|
|
/*
|
|
* trylock for reading -- returns 1 if successful, 0 if contention
|
|
*/
|
|
static inline int __down_read_trylock(struct rw_semaphore *sem)
|
|
{
|
|
long old, new, res;
|
|
|
|
res = sem->count;
|
|
do {
|
|
new = res + RWSEM_ACTIVE_READ_BIAS;
|
|
if (new <= 0)
|
|
break;
|
|
old = res;
|
|
res = cmpxchg(&sem->count, old, new);
|
|
} while (res != old);
|
|
return res >= 0 ? 1 : 0;
|
|
}
|
|
|
|
/*
 * Lock for writing (fast path).
 *
 * Atomically adds RWSEM_ACTIVE_WRITE_BIAS to sem->count.  Any non-zero
 * value observed before the add means the semaphore was not idle
 * (readers, a writer, or waiters), so take the slow path.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	/* value of sem->count just before our bias was applied */
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU race, a plain read-modify-write suffices */
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
#else
	long temp;
	/*
	 * SMP: same ldq_l/stq_c retry pattern as __down_read(), with
	 * the write bias as the addend; "mb" on success is the acquire
	 * barrier.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	/* semaphore was not completely idle => contended */
	if (unlikely(oldcount))
		rwsem_down_write_failed(sem);
}
|
|
|
|
/*
|
|
* trylock for writing -- returns 1 if successful, 0 if contention
|
|
*/
|
|
static inline int __down_write_trylock(struct rw_semaphore *sem)
|
|
{
|
|
long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
|
|
RWSEM_ACTIVE_WRITE_BIAS);
|
|
if (ret == RWSEM_UNLOCKED_VALUE)
|
|
return 1;
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Release a read lock.
 *
 * Atomically subtracts RWSEM_ACTIVE_READ_BIAS.  If waiters exist
 * (old count negative) and we were the last active holder (the low
 * 32 active bits of the new value are zero), wake a waiter up.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	/* value of sem->count just before our decrement */
	long oldcount;
#ifndef CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/*
	 * SMP: ldq_l/stq_c retry loop; here the "mb" comes BEFORE the
	 * update, ordering the critical section's accesses ahead of
	 * the release.
	 */
	__asm__ __volatile__(
	" mb\n"
	"1: ldq_l %0,%1\n"
	" subq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	/* waiters queued (negative) ... */
	if (unlikely(oldcount < 0))
		/* ... and the active count just dropped to zero: wake one */
		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
			rwsem_wake(sem);
}
|
|
|
|
/*
 * Release the write lock.
 *
 * Atomically subtracts RWSEM_ACTIVE_WRITE_BIAS.  If the resulting
 * count is non-zero but its low 32 active bits are zero, waiters
 * remain with no active holder, so wake one up.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	/* value of sem->count AFTER the write bias is removed */
	long count;
#ifndef CONFIG_SMP
	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
	count = sem->count;
#else
	long temp;
	/*
	 * SMP: ldq_l/stq_c retry loop with the release "mb" up front.
	 * The extra "subq %0,%3,%0" after a successful store turns the
	 * loaded old value in %0 into the new count for the check
	 * below (on a failed store we branch back and reload instead).
	 */
	__asm__ __volatile__(
	" mb\n"
	"1: ldq_l %0,%1\n"
	" subq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" subq %0,%3,%0\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	/* new count non-zero => waiters still queued ... */
	if (unlikely(count))
		/* ... and no active holders left: wake one */
		if ((int)count == 0)
			rwsem_wake(sem);
}
|
|
|
|
/*
 * downgrade write lock to read lock
 *
 * Atomically adds -RWSEM_WAITING_BIAS (i.e. +2^32), which converts
 * the write bias (WAITING + ACTIVE) into a plain reader bias while
 * keeping the lock held.  If the old count was negative, waiters are
 * queued and the readers among them can now be woken.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	/* value of sem->count just before the bias conversion */
	long oldcount;
#ifndef CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_WAITING_BIAS;
#else
	long temp;
	/*
	 * SMP: ldq_l/stq_c retry loop; addq with the negated waiting
	 * bias, "mb" on the success path.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
#endif
	/* waiters were queued: let the slow path wake pending readers */
	if (unlikely(oldcount < 0))
		rwsem_downgrade_wake(sem);
}
|
|
|
|
/*
 * Atomically add 'val' to sem->count, discarding the result.
 *
 * NOTE(review): unlike the lock/unlock fast paths above, this
 * sequence contains no "mb"; ordering, where needed, appears to be
 * the caller's responsibility -- confirm against callers.
 */
static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
	sem->count += val;
#else
	long temp;
	/* SMP: bare ldq_l/stq_c add-retry loop, no barrier */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%2,%0\n"
	" stq_c %0,%1\n"
	" beq %0,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (sem->count)
	:"Ir" (val), "m" (sem->count));
#endif
}
|
|
|
|
/*
 * Atomically add 'val' to sem->count and return the NEW value.
 *
 * NOTE(review): like rwsem_atomic_add(), no "mb" here -- ordering is
 * presumably handled by callers; confirm against call sites.
 */
static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
	sem->count += val;
	return sem->count;
#else
	long ret, temp;
	/*
	 * SMP: ldq_l/stq_c retry loop.  The sum is computed twice: once
	 * into %2 (the value the conditional store publishes -- stq_c
	 * clobbers its source register with the success flag) and once
	 * into %0, preserving the new count as the return value.  On a
	 * failed store we branch back and %0 is reloaded.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" addq %0,%3,%0\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
	:"Ir" (val), "m" (sem->count));

	return ret;
#endif
}
|
|
|
|
static inline int rwsem_is_locked(struct rw_semaphore *sem)
|
|
{
|
|
return (sem->count != 0);
|
|
}
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _ALPHA_RWSEM_H */
|