ia64: lock bitops

Convert ia64 to new bitops.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
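
(Illustrative context, not part of the patch: the lock bitops added below pair an
acquire-semantics test-and-set with a release-semantics clear.  A minimal sketch of
the intended usage pattern follows; the names my_flags, MY_LOCK_BIT, my_lock and
my_unlock are hypothetical.)

/* Sketch only -- a minimal bit lock built on the new primitives. */
#include <linux/bitops.h>
#include <asm/processor.h>	/* cpu_relax() */

static unsigned long my_flags;	/* word holding the lock bit */
#define MY_LOCK_BIT	0

static inline void my_lock(void)
{
	/* acquire: no later access may be reordered before the set */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
		cpu_relax();
}

static inline void my_unlock(void)
{
	/* release: no earlier access may be reordered after the clear */
	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
}

Everything done between my_lock() and my_unlock() is kept inside the critical
section by the acquire/release pair alone; no extra smp_mb() is needed.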

@@ -93,6 +93,38 @@ clear_bit (int nr, volatile void *addr)
 	} while (cmpxchg_acq(m, old, new) != old);
 }
 
+/**
+ * clear_bit_unlock - Clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and may not be reordered.  It does
+ * contain a memory barrier suitable for unlock type operations.
+ */
+static __inline__ void
+clear_bit_unlock (int nr, volatile void *addr)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_rel(m, old, new) != old);
+}
+
+/**
+ * __clear_bit_unlock - Non-atomically clear a bit with release
+ *
+ * This is like clear_bit_unlock, but the implementation may use a non-atomic
+ * store (this one uses an atomic, however).
+ */
+#define __clear_bit_unlock clear_bit_unlock
+
 /**
  * __clear_bit - Clears a bit in memory (non-atomic version)
  */
@@ -169,6 +201,15 @@ test_and_set_bit (int nr, volatile void *addr)
 	return (old & bit) != 0;
 }
 
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on ia64
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
 /**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
@@ -371,8 +412,6 @@ hweight64 (unsigned long x)
 #define hweight16(x)	(unsigned int) hweight64((x) & 0xfffful)
 #define hweight8(x)	(unsigned int) hweight64((x) & 0xfful)
 
-#include <asm-generic/bitops/lock.h>
-
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/find.h>
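
For comparison, and only as an approximation rather than a quote of the removed
header: the generic fallback in asm-generic/bitops/lock.h that the last hunk stops
including builds these operations from the ordinary atomics, roughly as follows.

/* Approximate shape of the generic fallback; reconstructed for illustration. */
#define test_and_set_bit_lock(nr, addr)	test_and_set_bit(nr, addr)

#define clear_bit_unlock(nr, addr)	\
do {					\
	smp_mb__before_clear_bit();	\
	clear_bit(nr, addr);		\
} while (0)

The native ia64 definitions above avoid the extra barrier: cmpxchg_rel already
orders all earlier accesses before the clearing store, and test_and_set_bit is
built on cmpxchg_acq, whose acquire semantics are exactly what taking a lock
requires, so it can serve directly as test_and_set_bit_lock.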