/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
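	/*
	 * Illustration only (assuming ATOMIC_HASH_SIZE == 1 << ATOMIC_HASH_SHIFT):
	 * the masked-merge below computes roughly
	 *   &atomic_locks[((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1)]
	 * in a single instruction, relying on the low bits of the
	 * page-aligned atomic_locks array being zero.
	 */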
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}
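
/*
 * Drop a hashed atomic lock on the fault path: the word is expected to
 * hold the "locked" value (1) and is simply reset to 0.
 */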
void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}
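
/*
 * The _atomic_* routines below are the out-of-line 32-bit helpers.
 * Each one touches the target word to pull it into cache, computes its
 * hashed lock, and hands both pointers to the low-level __atomic32_*
 * routines (implemented in assembly), whose return value carries the
 * old word in its .val field.
 */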

int _atomic_xchg(int *v, int n)
{
	return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
	return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(int *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_or);

unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_and);

unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_andn);

unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_xor);
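
/*
 * 64-bit variants: tilepro is a 32-bit ISA, so these also go through the
 * hashed-lock scheme above. Callers are expected to use cmpxchg64() and
 * the atomic64_* interfaces for 64-bit values rather than plain cmpxchg().
 */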

long long _atomic64_xchg(long long *v, long long n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_fetch_and(long long *v, long long n)
{
	return __atomic64_fetch_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_and);

long long _atomic64_fetch_or(long long *v, long long n)
{
	return __atomic64_fetch_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_or);

long long _atomic64_fetch_xor(long long *v, long long n)
{
	return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_xor);

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called. The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space. So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT. Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}

void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster. You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
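	/*
	 * For example, with a (hypothetical) 64 KB page there are
	 * PAGE_SIZE >> 3 == 8192 such offsets, so the build check below
	 * requires ATOMIC_HASH_SIZE >= 8192.
	 */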
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}