mirror of
https://github.com/torvalds/linux.git
synced 2024-11-25 05:32:00 +00:00
1ab33c0314
__put_unaligned_be24() and friends use implicit casts to convert larger-sized
data to bytes, which trips sparse truncation warnings when the argument is a
constant:

  CC [M]  drivers/input/touchscreen/hynitron_cstxxx.o
  CHECK   drivers/input/touchscreen/hynitron_cstxxx.c
  drivers/input/touchscreen/hynitron_cstxxx.c: note: in included file (through arch/x86/include/generated/asm/unaligned.h):
  include/asm-generic/unaligned.h:119:16: warning: cast truncates bits from constant value (aa01a0 becomes a0)
  include/asm-generic/unaligned.h:120:20: warning: cast truncates bits from constant value (aa01 becomes 1)
  include/asm-generic/unaligned.h:119:16: warning: cast truncates bits from constant value (ab00d0 becomes d0)
  include/asm-generic/unaligned.h:120:20: warning: cast truncates bits from constant value (ab00 becomes 0)

To avoid this let's mask off upper bits explicitly; the resulting code should
be exactly the same, but it will keep sparse happy.

Reported-by: kernel test robot <lkp@intel.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Closes: https://lore.kernel.org/oe-kbuild-all/202401070147.gqwVulOn-lkp@intel.com/
Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
156 lines
3.4 KiB
C
156 lines
3.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __ASM_GENERIC_UNALIGNED_H
|
|
#define __ASM_GENERIC_UNALIGNED_H
|
|
|
|
/*
|
|
* This is the most generic implementation of unaligned accesses
|
|
* and should work almost anywhere.
|
|
*/
|
|
#include <linux/unaligned/packed_struct.h>
|
|
#include <asm/byteorder.h>
|
|
|
|
/*
 * Read a value of scalar @type from @ptr without assuming @ptr is aligned.
 * Wrapping the member in a __packed struct forces the compiler to emit an
 * access sequence that is valid at any alignment for the target arch.
 * typeof(__pptr) lets the cast pick up the packed-struct pointer type
 * without repeating it.
 */
#define __get_unaligned_t(type, ptr) ({						\
	const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr);	\
	__pptr->x;								\
})
|
|
|
|
/*
 * Store @val of scalar @type at @ptr without assuming @ptr is aligned.
 * The __packed struct wrapper makes the compiler generate an unaligned-safe
 * store; do/while (0) keeps the macro a single statement.
 */
#define __put_unaligned_t(type, val, ptr) do {					\
	struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr);		\
	__pptr->x = (val);							\
} while (0)
|
|
|
|
/* Convenience forms: infer the access type from the pointee of @ptr. */
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
|
|
|
|
static inline u16 get_unaligned_le16(const void *p)
|
|
{
|
|
return le16_to_cpu(__get_unaligned_t(__le16, p));
|
|
}
|
|
|
|
static inline u32 get_unaligned_le32(const void *p)
|
|
{
|
|
return le32_to_cpu(__get_unaligned_t(__le32, p));
|
|
}
|
|
|
|
static inline u64 get_unaligned_le64(const void *p)
|
|
{
|
|
return le64_to_cpu(__get_unaligned_t(__le64, p));
|
|
}
|
|
|
|
/* Store @val at @p as 16-bit little-endian; @p may be unaligned. */
static inline void put_unaligned_le16(u16 val, void *p)
{
	const __le16 raw = cpu_to_le16(val);

	__put_unaligned_t(__le16, raw, p);
}
|
|
|
|
/* Store @val at @p as 32-bit little-endian; @p may be unaligned. */
static inline void put_unaligned_le32(u32 val, void *p)
{
	const __le32 raw = cpu_to_le32(val);

	__put_unaligned_t(__le32, raw, p);
}
|
|
|
|
/* Store @val at @p as 64-bit little-endian; @p may be unaligned. */
static inline void put_unaligned_le64(u64 val, void *p)
{
	const __le64 raw = cpu_to_le64(val);

	__put_unaligned_t(__le64, raw, p);
}
|
|
|
|
static inline u16 get_unaligned_be16(const void *p)
|
|
{
|
|
return be16_to_cpu(__get_unaligned_t(__be16, p));
|
|
}
|
|
|
|
static inline u32 get_unaligned_be32(const void *p)
|
|
{
|
|
return be32_to_cpu(__get_unaligned_t(__be32, p));
|
|
}
|
|
|
|
static inline u64 get_unaligned_be64(const void *p)
|
|
{
|
|
return be64_to_cpu(__get_unaligned_t(__be64, p));
|
|
}
|
|
|
|
/* Store @val at @p as 16-bit big-endian; @p may be unaligned. */
static inline void put_unaligned_be16(u16 val, void *p)
{
	const __be16 raw = cpu_to_be16(val);

	__put_unaligned_t(__be16, raw, p);
}
|
|
|
|
/* Store @val at @p as 32-bit big-endian; @p may be unaligned. */
static inline void put_unaligned_be32(u32 val, void *p)
{
	const __be32 raw = cpu_to_be32(val);

	__put_unaligned_t(__be32, raw, p);
}
|
|
|
|
/* Store @val at @p as 64-bit big-endian; @p may be unaligned. */
static inline void put_unaligned_be64(u64 val, void *p)
{
	const __be64 raw = cpu_to_be64(val);

	__put_unaligned_t(__be64, raw, p);
}
|
|
|
|
/* Decode three big-endian bytes at @p into a 32-bit value. */
static inline u32 __get_unaligned_be24(const u8 *p)
{
	u32 v = p[0];

	v = (v << 8) | p[1];
	v = (v << 8) | p[2];
	return v;
}
|
|
|
|
static inline u32 get_unaligned_be24(const void *p)
|
|
{
|
|
return __get_unaligned_be24(p);
|
|
}
|
|
|
|
/* Decode three little-endian bytes at @p into a 32-bit value. */
static inline u32 __get_unaligned_le24(const u8 *p)
{
	u32 v = p[2];

	v = (v << 8) | p[1];
	v = (v << 8) | p[0];
	return v;
}
|
|
|
|
static inline u32 get_unaligned_le24(const void *p)
|
|
{
|
|
return __get_unaligned_le24(p);
|
|
}
|
|
|
|
/*
 * Store the low 24 bits of @val at @p in big-endian byte order.
 * The explicit "& 0xff" masks keep sparse from warning about constant
 * truncation; the generated code is identical without them.
 */
static inline void __put_unaligned_be24(const u32 val, u8 *p)
{
	p[0] = (val >> 16) & 0xff;
	p[1] = (val >> 8) & 0xff;
	p[2] = val & 0xff;
}
|
|
|
|
static inline void put_unaligned_be24(const u32 val, void *p)
|
|
{
|
|
__put_unaligned_be24(val, p);
|
|
}
|
|
|
|
/*
 * Store the low 24 bits of @val at @p in little-endian byte order.
 * The explicit "& 0xff" masks keep sparse from warning about constant
 * truncation; the generated code is identical without them.
 */
static inline void __put_unaligned_le24(const u32 val, u8 *p)
{
	p[0] = val & 0xff;
	p[1] = (val >> 8) & 0xff;
	p[2] = (val >> 16) & 0xff;
}
|
|
|
|
static inline void put_unaligned_le24(const u32 val, void *p)
|
|
{
|
|
__put_unaligned_le24(val, p);
|
|
}
|
|
|
|
/*
 * Store the low 48 bits of @val at @p in big-endian byte order,
 * most-significant byte first. The "& 0xff" masks avoid sparse
 * constant-truncation warnings without changing the generated code.
 */
static inline void __put_unaligned_be48(const u64 val, u8 *p)
{
	int i;

	for (i = 0; i < 6; i++)
		p[i] = (val >> (40 - 8 * i)) & 0xff;
}
|
|
|
|
static inline void put_unaligned_be48(const u64 val, void *p)
|
|
{
|
|
__put_unaligned_be48(val, p);
|
|
}
|
|
|
|
/* Decode six big-endian bytes at @p into a 64-bit value. */
static inline u64 __get_unaligned_be48(const u8 *p)
{
	u64 v = 0;
	int i;

	for (i = 0; i < 6; i++)
		v = (v << 8) | p[i];
	return v;
}
|
|
|
|
static inline u64 get_unaligned_be48(const void *p)
|
|
{
|
|
return __get_unaligned_be48(p);
|
|
}
|
|
|
|
#endif /* __ASM_GENERIC_UNALIGNED_H */
|