linux/include/asm-sh/atomic.h
Christoph Lameter d3cb487149 [PATCH] atomic_long_t & include/asm-generic/atomic.h V2
Several counters already need 64-bit atomic variables on 64-bit
platforms (see mm_counter_t in sched.h).  We have to do ugly ifdefs to fall
back to 32-bit atomics on 32-bit platforms.

The VM statistics patch that I am working on will also make more extensive
use of atomic64.

This patch introduces a new type, atomic_long_t, by providing definitions in
asm-generic/atomic.h that work similarly to the C "long" type: it is 32 bits
on 32-bit platforms and 64 bits on 64-bit platforms.

It also cleans up the determination of mm_counter_t in sched.h.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-01-06 08:33:29 -08:00
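
For reference, the atomic_long_t layer described above boils down to selecting the underlying atomic type by word size and wrapping its operations. The following is a minimal sketch of that idea, not the verbatim patch; the exact wrapper set and the ATOMIC_LONG_INIT name are assumptions here:

#if BITS_PER_LONG == 64

typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)

static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic64_read(l);
}

static inline void atomic_long_add(long i, atomic_long_t *l)
{
	atomic64_add(i, l);
}

#else  /* BITS_PER_LONG == 32 */

typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)

static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic_read(l);
}

static inline void atomic_long_add(long i, atomic_long_t *l)
{
	atomic_add(i, l);
}

#endif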

#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

#include <asm/system.h>

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
}

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v |= mask;
	local_irq_restore(flags);
}

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */
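
As a usage illustration (not part of the header), the "resource counting" case mentioned in the file comment typically combines atomic_inc_not_zero() and atomic_dec_and_test() for refcount-style lifetimes. The struct and helper names below are hypothetical:

/* Hypothetical example, not part of the kernel header. */
struct resource_obj {
	atomic_t refcount;	/* starts at 1 when the object is created */
};

/* Take a reference only if the object has not already dropped to zero. */
static inline int resource_obj_get(struct resource_obj *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}

/* Drop a reference; the caller that sees the count hit zero frees it. */
static inline void resource_obj_put(struct resource_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		kfree(obj);
}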