locking/atomic, arch/frv: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()

Implement FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops -- because with the OP-RETURN form it is impossible to
reconstruct the state prior to modification.
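
Consider atomic_or(): the old value is discarded, so after the OR it is
impossible to tell which bits were already set. The new atomic_fetch_or()
returns that old value. A minimal sketch of the difference (illustrative
only; the helper name test_and_set_mask() is hypothetical and not part
of this patch):

	/* Returns nonzero iff any bit of @mask was already set in @v. */
	static inline int test_and_set_mask(int mask, atomic_t *v)
	{
		/* fetch_or returns the value of *v _before_ the OR */
		int old = atomic_fetch_or(mask, v);

		return (old & mask) != 0;
	}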

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author:    Peter Zijlstra <peterz@infradead.org>
Date:      2016-04-18 01:16:08 +02:00
Committer: Ingo Molnar <mingo@kernel.org>
Commit:    d9c7302816
Parent:    e87fc0ec07

2 changed files with 14 additions and 20 deletions

--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h

@@ -60,16 +60,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	return atomic_add_return(i, v) < 0;
 }
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-	atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	atomic_sub_return(i, v);
-}
-
 static inline void atomic_inc(atomic_t *v)
 {
 	atomic_inc_return(v);
@@ -84,6 +74,8 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
 
+#define atomic_fetch_or atomic_fetch_or
+
 /*
  * 64-bit atomic ops
  */
@@ -136,16 +128,6 @@ static inline long long atomic64_add_negative(long long i, atomic64_t *v)
 	return atomic64_add_return(i, v) < 0;
 }
 
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
-	atomic64_add_return(i, v);
-}
-
-static inline void atomic64_sub(long long i, atomic64_t *v)
-{
-	atomic64_sub_return(i, v);
-}
-
 static inline void atomic64_inc(atomic64_t *v)
 {
	atomic64_inc_return(v);
@@ -182,11 +164,19 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
 #define ATOMIC_OP(op)							\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	return __atomic32_fetch_##op(i, &v->counter);			\
+}									\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
 	(void)__atomic32_fetch_##op(i, &v->counter);			\
 }									\
 									\
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
+{									\
+	return __atomic64_fetch_##op(i, &v->counter);			\
+}									\
 static inline void atomic64_##op(long long i, atomic64_t *v)		\
 {									\
 	(void)__atomic64_fetch_##op(i, &v->counter);			\
@@ -195,6 +185,8 @@ static inline void atomic64_##op(long long i, atomic64_t *v)	\
 ATOMIC_OP(or)
 ATOMIC_OP(and)
 ATOMIC_OP(xor)
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
 
 #undef ATOMIC_OP
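
For reference, a by-hand expansion of ATOMIC_OP(add) after this change
(illustrative only, not text from the patch) shows that the macro now
emits both the fetch and the void variants:

	static inline int atomic_fetch_add(int i, atomic_t *v)
	{
		return __atomic32_fetch_add(i, &v->counter);
	}
	static inline void atomic_add(int i, atomic_t *v)
	{
		(void)__atomic32_fetch_add(i, &v->counter);
	}
	static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
	{
		return __atomic64_fetch_add(i, &v->counter);
	}
	static inline void atomic64_add(long long i, atomic64_t *v)
	{
		(void)__atomic64_fetch_add(i, &v->counter);
	}

This is also why the open-coded atomic_add()/atomic_sub() wrappers in
the first hunks can go away: ATOMIC_OP(add) and ATOMIC_OP(sub) now
regenerate them on top of the same __atomic32_fetch_*() and
__atomic64_fetch_*() primitives.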

--- a/arch/frv/include/asm/atomic_defs.h
+++ b/arch/frv/include/asm/atomic_defs.h

@@ -162,6 +162,8 @@ ATOMIC_EXPORT(__atomic64_fetch_##op);
 ATOMIC_FETCH_OP(or)
 ATOMIC_FETCH_OP(and)
 ATOMIC_FETCH_OP(xor)
+ATOMIC_FETCH_OP(add)
+ATOMIC_FETCH_OP(sub)
 
 ATOMIC_OP_RETURN(add)
 ATOMIC_OP_RETURN(sub)
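
A hypothetical caller (claim_slot() is illustrative, not part of this
patch): because atomic_fetch_add() returns the pre-increment value,
concurrent callers each observe a distinct old value and so receive
unique slot numbers.

	/* Hand out strictly increasing slot numbers to concurrent callers. */
	static inline int claim_slot(atomic_t *next_slot)
	{
		/* returns the value of *next_slot from before the increment */
		return atomic_fetch_add(1, next_slot);
	}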