mirror of
https://github.com/torvalds/linux.git
synced 2024-11-29 07:31:29 +00:00
arm64/io: add constant-argument check
In some configurations __const_iowrite32_copy() does not get inlined
and gcc runs into the BUILD_BUG():
In file included from <command-line>:
In function '__const_memcpy_toio_aligned32',
inlined from '__const_iowrite32_copy' at arch/arm64/include/asm/io.h:203:3,
inlined from '__const_iowrite32_copy' at arch/arm64/include/asm/io.h:199:20:
include/linux/compiler_types.h:487:45: error: call to '__compiletime_assert_538' declared with attribute error: BUILD_BUG failed
487 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
| ^
include/linux/compiler_types.h:468:25: note: in definition of macro '__compiletime_assert'
468 | prefix ## suffix(); \
| ^~~~~~
include/linux/compiler_types.h:487:9: note: in expansion of macro '_compiletime_assert'
487 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
| ^~~~~~~~~~~~~~~~~~~
include/linux/build_bug.h:39:37: note: in expansion of macro 'compiletime_assert'
39 | #define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
| ^~~~~~~~~~~~~~~~~~
include/linux/build_bug.h:59:21: note: in expansion of macro 'BUILD_BUG_ON_MSG'
59 | #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
| ^~~~~~~~~~~~~~~~
arch/arm64/include/asm/io.h:193:17: note: in expansion of macro 'BUILD_BUG'
193 | BUILD_BUG();
| ^~~~~~~~~
Move the check for constant arguments into the inline function to ensure
it is still constant if the compiler decides against inlining it, and
mark them as __always_inline to override the logic that sometimes leads
to the compiler not producing the simplified output.
Note that either the __always_inline annotation or the check for a
constant value are sufficient here, but combining the two looks cleaner
as it also avoids the macro. With clang-8 and older, the macro was still
needed, but all versions of gcc and clang can reliably perform constant
folding here.
Fixes: ead79118da ("arm64/io: Provide a WC friendly __iowriteXX_copy()")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20240604210006.668912-1-arnd@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
parent
14951beaec
commit
5c40e428ae
@@ -153,8 +153,9 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
  * emit the large TLP from the CPU.
  */

-static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
-						 const u32 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from,
+			      size_t count)
 {
 	switch (count) {
 	case 8:
@@ -196,24 +197,22 @@ static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,

 void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);

-static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
-					  size_t count)
+static __always_inline void
+__iowrite32_copy(void __iomem *to, const void *from, size_t count)
 {
-	if (count == 8 || count == 4 || count == 2 || count == 1) {
+	if (__builtin_constant_p(count) &&
+	    (count == 8 || count == 4 || count == 2 || count == 1)) {
 		__const_memcpy_toio_aligned32(to, from, count);
 		dgh();
 	} else {
 		__iowrite32_copy_full(to, from, count);
 	}
 }
+#define __iowrite32_copy __iowrite32_copy

-#define __iowrite32_copy(to, from, count)			\
-	(__builtin_constant_p(count) ?				\
-		 __const_iowrite32_copy(to, from, count) :	\
-		 __iowrite32_copy_full(to, from, count))
-
-static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
-						 const u64 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from,
+			      size_t count)
 {
 	switch (count) {
 	case 8:
@@ -255,21 +254,18 @@ static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,

 void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);

-static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
-					  size_t count)
+static __always_inline void
+__iowrite64_copy(void __iomem *to, const void *from, size_t count)
 {
-	if (count == 8 || count == 4 || count == 2 || count == 1) {
+	if (__builtin_constant_p(count) &&
+	    (count == 8 || count == 4 || count == 2 || count == 1)) {
 		__const_memcpy_toio_aligned64(to, from, count);
 		dgh();
 	} else {
 		__iowrite64_copy_full(to, from, count);
 	}
 }
+#define __iowrite64_copy __iowrite64_copy

-#define __iowrite64_copy(to, from, count)			\
-	(__builtin_constant_p(count) ?				\
-		 __const_iowrite64_copy(to, from, count) :	\
-		 __iowrite64_copy_full(to, from, count))
-
 /*
  * I/O memory mapping functions.
|
Loading…
Reference in New Issue
Block a user