commit 4331f4d5ad
Fix kernel-doc warnings in arch/x86/include/asm/atomic.h that are caused by
having a #define macro between the kernel-doc notation and the function
name. Fixed by moving the #define macro to after the function
implementation.
Make the same change in atomic64_{32,64}.h for consistency. No kernel-doc
warnings were found in these header files, but they would appear if the
headers were used to generate documentation.
Fixes these kernel-doc warnings:
../arch/x86/include/asm/atomic.h:84: warning: Excess function parameter 'i' description in 'arch_atomic_sub_and_test'
../arch/x86/include/asm/atomic.h:84: warning: Excess function parameter 'v' description in 'arch_atomic_sub_and_test'
../arch/x86/include/asm/atomic.h:96: warning: Excess function parameter 'v' description in 'arch_atomic_inc'
../arch/x86/include/asm/atomic.h:109: warning: Excess function parameter 'v' description in 'arch_atomic_dec'
../arch/x86/include/asm/atomic.h:124: warning: Excess function parameter 'v' description in 'arch_atomic_dec_and_test'
../arch/x86/include/asm/atomic.h:138: warning: Excess function parameter 'v' description in 'arch_atomic_inc_and_test'
../arch/x86/include/asm/atomic.h:153: warning: Excess function parameter 'i' description in 'arch_atomic_add_negative'
../arch/x86/include/asm/atomic.h:153: warning: Excess function parameter 'v' description in 'arch_atomic_add_negative'
Fixes: 18cc1814d4 ("atomics/treewide: Make test ops optional")
Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Mark Rutland <mark.rutland@arm.com>
Link: https://lkml.kernel.org/r/0a1e678d-c8c5-b32c-2640-ed4e94d399d2@infradead.org
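
The shape of the fix, as a minimal illustrative sketch: kernel-doc binds a
/** ... */ comment to the next definition it parses, so a #define sitting
between the comment and the function makes the scripts read the macro
instead and flag the documented parameters as excess. Moving the #define
below the implementation keeps the comment attached to the function. (The
function and parameter names are taken from the warning list above; the
one-line summary and the bodies are illustrative and elided.)

    /* Before: the comment binds to the macro; @i and @v are reported as excess. */
    /**
     * arch_atomic_sub_and_test - subtract value from variable and test result
     * @i: integer value to subtract
     * @v: pointer of type atomic_t
     */
    #define arch_atomic_sub_and_test arch_atomic_sub_and_test
    static inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
    { /* ... */ }

    /* After: the #define follows the implementation. */
    /**
     * arch_atomic_sub_and_test - subtract value from variable and test result
     * @i: integer value to subtract
     * @v: pointer of type atomic_t
     */
    static inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
    { /* ... */ }
    #define arch_atomic_sub_and_test arch_atomic_sub_and_test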
336 lines
8.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */

typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }

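/*
 * Illustrative use of the type and initializer above (the variable name
 * is hypothetical):
 *
 *	static atomic64_t event_count = ATOMIC64_INIT(0);
 */
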
#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
	ATOMIC64_EXPORT(atomic64_##sym)
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
	asm volatile("call %P[func]" \
		     : out : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
	ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif

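/*
 * For example, with CONFIG_X86_CMPXCHG64 enabled, ATOMIC64_DECL(read)
 * below expands via ATOMIC64_DECL_ONE/__ATOMIC64_DECL to the out-of-line
 * declaration
 *
 *	void atomic64_read_cx8(atomic64_t *, ...);
 *
 * which __alternative_atomic64() then invokes with a direct "call".
 */
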
#define alternative_atomic64(f, out, in...) \
	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

/**
 * arch_atomic64_cmpxchg - cmpxchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @o: expected value
 * @n: new value
 *
 * Atomically sets @v to @n if it was equal to @o and returns
 * the old value.
 */
static inline long long arch_atomic64_cmpxchg(atomic64_t *v, long long o,
					      long long n)
{
	return arch_cmpxchg64(&v->counter, o, n);
}

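/*
 * A typical caller-side retry loop built on the primitive above
 * (an illustrative sketch; the bitwise ops at the end of this file
 * use the same pattern):
 *
 *	long long old, c = arch_atomic64_read(v);
 *	for (;;) {
 *		old = arch_atomic64_cmpxchg(v, c, c + 1);
 *		if (old == c)
 *			break;		// update applied
 *		c = old;		// lost the race; retry with the new value
 *	}
 */
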
/**
 * arch_atomic64_xchg - xchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @n: value to assign
 *
 * Atomically xchgs the value of @v to @n and returns
 * the old value.
 */
static inline long long arch_atomic64_xchg(atomic64_t *v, long long n)
{
	long long o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
	alternative_atomic64(xchg, "=&A" (o),
			     "S" (v), "b" (low), "c" (high)
			     : "memory");
	return o;
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: value to assign
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, long long i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	alternative_atomic64(set, /* no output */,
			     "S" (v), "b" (low), "c" (high)
			     : "eax", "edx", "memory");
}

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically reads the value of @v and returns it.
 */
static inline long long arch_atomic64_read(const atomic64_t *v)
{
	long long r;
	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
	return r;
}

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + *@v
 */
static inline long long arch_atomic64_add_return(long long i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

/*
 * Other variants with different arithmetic operators:
 */
static inline long long arch_atomic64_sub_return(long long i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

static inline long long arch_atomic64_inc_return(atomic64_t *v)
{
	long long a;
	alternative_atomic64(inc_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static inline long long arch_atomic64_dec_return(atomic64_t *v)
{
	long long a;
	alternative_atomic64(dec_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline long long arch_atomic64_add(long long i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */
static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
					   long long u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	alternative_atomic64(add_unless,
			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
			     "S" (v) : "memory");
	return (int)a;
}

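/*
 * arch_atomic64_inc_not_zero() below is the common special case of the
 * operation above: add 1 unless the counter is 0, returning non-zero if
 * the increment happened (e.g. for refcounts that must not be revived
 * from zero).
 */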
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero, "=&a" (r),
			     "S" (v) : "ecx", "edx", "memory");
	return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero

/*
 * Atomically decrements @v by 1, but only if the result would not be
 * negative; returns the old value minus 1 whether or not the decrement
 * was applied.
 */
static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
{
	long long r;
	alternative_atomic64(dec_if_positive, "=&A" (r),
			     "S" (v) : "ecx", "memory");
	return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#undef alternative_atomic64
#undef __alternative_atomic64

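/*
 * The remaining operations have no dedicated assembly helpers; each is a
 * compare-and-swap loop that retries with the value returned by a failed
 * cmpxchg until the exchange succeeds.
 */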
static inline void arch_atomic64_and(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;
}

static inline long long arch_atomic64_fetch_and(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;

	return old;
}

static inline void arch_atomic64_or(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;
}

static inline long long arch_atomic64_fetch_or(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;

	return old;
}

static inline void arch_atomic64_xor(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;
}

static inline long long arch_atomic64_fetch_xor(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;

	return old;
}

static inline long long arch_atomic64_fetch_add(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
		c = old;

	return old;
}

#define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))

#endif /* _ASM_X86_ATOMIC64_32_H */