Commit 42990701f9

When dereferencing the memory address contained in a register and modifying the value at that memory address, the register should not be listed in the inline asm outputs. The value at the memory address is an output (which is taken care of with the "memory" clobber), not the register.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
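
As a rough illustration of that constraint rule (a minimal sketch, not part of this header; the helper name llsc_or_sketch is made up and it simply mirrors set_bit() below): the address and the mask are plain "r" inputs, the only register output is the temporary forced into R0 by the "=&z" constraint, and the store through the pointer is accounted for by the "memory" clobber rather than by listing the address register as an output.

/* Hypothetical sketch: atomically OR a mask into *p with an LL/SC retry loop. */
static inline void llsc_or_sketch(volatile unsigned int *p, unsigned int mask)
{
        unsigned long tmp;

        __asm__ __volatile__ (
                "1:                             \n\t"
                "movli.l @%1, %0                \n\t"   /* load-linked *p into tmp (R0) */
                "or      %2, %0                 \n\t"   /* tmp |= mask */
                "movco.l %0, @%1                \n\t"   /* store-conditional tmp back to *p */
                "bf      1b                     \n\t"   /* retry if the store-conditional failed */
                : "=&z" (tmp)                   /* the temporary is the only register output */
                : "r" (p), "r" (mask)           /* the address and mask are inputs, not outputs */
                : "t", "memory"                 /* T bit and the memory behind p are clobbered */
        );
}
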
#ifndef __ASM_SH_BITOPS_LLSC_H
#define __ASM_SH_BITOPS_LLSC_H

/* Atomically set bit nr in the bitmap at addr using a movli.l/movco.l retry loop. */
static inline void set_bit(int nr, volatile void *addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long tmp;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);

        __asm__ __volatile__ (
                "1:                             \n\t"
                "movli.l @%1, %0 ! set_bit      \n\t"
                "or      %2, %0                 \n\t"
                "movco.l %0, @%1                \n\t"
                "bf      1b                     \n\t"
                : "=&z" (tmp)
                : "r" (a), "r" (mask)
                : "t", "memory"
        );
}

/* Atomically clear bit nr in the bitmap at addr. */
static inline void clear_bit(int nr, volatile void *addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long tmp;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);

        __asm__ __volatile__ (
                "1:                             \n\t"
                "movli.l @%1, %0 ! clear_bit    \n\t"
                "and     %2, %0                 \n\t"
                "movco.l %0, @%1                \n\t"
                "bf      1b                     \n\t"
                : "=&z" (tmp)
                : "r" (a), "r" (~mask)
                : "t", "memory"
        );
}

/* Atomically toggle bit nr in the bitmap at addr. */
static inline void change_bit(int nr, volatile void *addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long tmp;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);

        __asm__ __volatile__ (
                "1:                             \n\t"
                "movli.l @%1, %0 ! change_bit   \n\t"
                "xor     %2, %0                 \n\t"
                "movco.l %0, @%1                \n\t"
                "bf      1b                     \n\t"
                : "=&z" (tmp)
                : "r" (a), "r" (mask)
                : "t", "memory"
        );
}

/* Atomically set bit nr and return whether it was previously set. */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long tmp;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);

        __asm__ __volatile__ (
                "1:                                     \n\t"
                "movli.l @%2, %0 ! test_and_set_bit     \n\t"
                "mov     %0, %1                         \n\t"
                "or      %3, %0                         \n\t"
                "movco.l %0, @%2                        \n\t"
                "bf      1b                             \n\t"
                "and     %3, %1                         \n\t"
                : "=&z" (tmp), "=&r" (retval)
                : "r" (a), "r" (mask)
                : "t", "memory"
        );

        return retval != 0;
}

/* Atomically clear bit nr and return whether it was previously set; the trailing "synco" acts as a completion barrier. */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long tmp;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);

        __asm__ __volatile__ (
                "1:                                     \n\t"
                "movli.l @%2, %0 ! test_and_clear_bit   \n\t"
                "mov     %0, %1                         \n\t"
                "and     %4, %0                         \n\t"
                "movco.l %0, @%2                        \n\t"
                "bf      1b                             \n\t"
                "and     %3, %1                         \n\t"
                "synco                                  \n\t"
                : "=&z" (tmp), "=&r" (retval)
                : "r" (a), "r" (mask), "r" (~mask)
                : "t", "memory"
        );

        return retval != 0;
}

/* Atomically toggle bit nr and return whether it was previously set; the trailing "synco" acts as a completion barrier. */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long tmp;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);

        __asm__ __volatile__ (
                "1:                                     \n\t"
                "movli.l @%2, %0 ! test_and_change_bit  \n\t"
                "mov     %0, %1                         \n\t"
                "xor     %3, %0                         \n\t"
                "movco.l %0, @%2                        \n\t"
                "bf      1b                             \n\t"
                "and     %3, %1                         \n\t"
                "synco                                  \n\t"
                : "=&z" (tmp), "=&r" (retval)
                : "r" (a), "r" (mask)
                : "t", "memory"
        );

        return retval != 0;
}

#include <asm-generic/bitops/non-atomic.h>

#endif /* __ASM_SH_BITOPS_LLSC_H */