MIPS: cmpxchg: Implement 1 byte & 2 byte cmpxchg()
Implement support for 1 & 2 byte cmpxchg() using read-modify-write atop
a 4 byte cmpxchg(). This allows us to support these atomic operations
despite the MIPS ISA only providing 4 & 8 byte atomic operations.

This is required in order to support queued rwlocks (qrwlock) in a
later patch, since these make use of a 1 byte cmpxchg() in their slow
path.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16355/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
commit 3ba7f44d2b
parent b70eb30056
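The technique itself is portable: locate the naturally aligned 32-bit
word containing the small value, splice the new bytes into it, then
retry a full-word compare-and-swap until it either sticks or the
comparison fails. Below is a rough user-space sketch of the same
scheme, assuming C11 atomics and a little-endian layout; the name
emulate_cmpxchg8 is hypothetical, with atomic_compare_exchange_strong
standing in for the kernel's 4 byte cmpxchg().

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical user-space model of the patch's approach: emulate a
 * 1 byte compare-and-swap atop a naturally aligned 4 byte CAS.
 * Little-endian byte order is assumed for the shift calculation.
 */
static uint8_t emulate_cmpxchg8(_Atomic uint32_t *word, unsigned int off,
				uint8_t old, uint8_t new)
{
	unsigned int shift = off * 8;
	uint32_t mask = 0xffu << shift;
	uint32_t load32 = atomic_load(word);

	for (;;) {
		/* Bail out if the byte no longer holds the expected value. */
		uint8_t load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		uint32_t new32 = (load32 & ~mask) | ((uint32_t)new << shift);
		/* On failure, load32 is refreshed with the current value. */
		if (atomic_compare_exchange_strong(word, &load32, new32))
			return old;
	}
}

int main(void)
{
	_Atomic uint32_t word = 0x44332211;
	uint8_t prev = emulate_cmpxchg8(&word, 1, 0x22, 0xaa);

	printf("prev=0x%02x word=0x%08x\n", prev, atomic_load(&word));
	return 0;
}

Built with e.g. cc -std=c11, this prints prev=0x22 word=0x4433aa11.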
@@ -142,10 +142,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)

		__ret;							\
})

extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
				     unsigned long new, unsigned int size);

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, unsigned int size)
{
	switch (size) {
	case 1:
	case 2:
		return __cmpxchg_small(ptr, old, new, size);

	case 4:
		return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
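With the dispatch above in place, byte and halfword sized values can be
handed to the generic cmpxchg() macro directly. A hypothetical
kernel-style caller, purely illustrative and not part of the patch:

	u8 locked = 0;

	/* sizeof(locked) == 1, so __cmpxchg() routes this through
	 * __cmpxchg_small(); the previous value of 'locked' is returned. */
	if (cmpxchg(&locked, 0, 1) == 0)
		pr_debug("lock acquired\n");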
@@ -50,3 +50,60 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)

	return (load32 & mask) >> shift;
}

unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32;
	volatile u32 *ptr32;
	unsigned int shift;
	u8 load;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));
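	/*
	 * (Illustration: for size == 2 this fires on odd addresses; a
	 * 1 byte access is always naturally aligned.)
	 */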

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
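	/*
	 * Worked example (illustrative): for a 1 byte value at byte
	 * offset 2 within its word, little-endian yields shift = 16
	 * (bits 23:16), while big-endian yields shift = (2 ^ 3) * 8 = 8
	 * (bits 15:8), since the lowest-addressed byte is the most
	 * significant there.
	 */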

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
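		/*
		 * Otherwise the 4 byte cmpxchg() failed: another CPU raced
		 * with us, perhaps only touching a neighbouring byte in the
		 * same word. load32 now holds the freshly observed value,
		 * so loop & retry the comparison against it.
		 */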
	}
}