asm-generic fixes for 6.3
These are minor fixes to address false-positive build warnings: Some of the
less common I/O accessors are missing __force casts and cause sparse warnings
for their implied byteswap, and a recent change to __generic_cmpxchg_local()
causes a warning about constant integer truncation.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEo6/YBQwIrVS28WGKmmx57+YAGNkFAmQufaMACgkQmmx57+YA
GNmuyA//WBjgOgXPNA7kV3/UcScoW1MEq4Ri8NKJANKyOHWYa1TxIwrHehJkE2Zm
B0Pr+DmRv3tYav/eytmXm8KMAGdqjVllHdBM4fe2HDjJspvqNKOEX/Z2UMzvNbLN
uWQneHxFxHK8eZHT+wO4U3062heuBYQ0QQOK0Mk4OaWwsvWz0JVn6dC6uo8z0C4l
20HAwkyQriB4GaFuEE9iVFYbUfjWGdTdRv9hbL8QpQKMGn+gsG9CgXDNgK+LJ/70
Q7oJ8qvocjkKAxnbxtXzpb4iKLcnf1VDvwKmCFtvT6GEE/n4Rd00RIF+LKm6J+mC
vLqAfaDu88mXP/JVRDz/Rpv/lNjGWMd+mR/Y9Rr8jmkA1imJXUKr9cRttJgsDcsT
8KxJdejakLvHzZKIjdjoE4aOwr5HPcPNi3Kge6DVnmW4r88Ma+lz+aOQueheBsA3
4mSSNi+c95AWSp0TznUR944RVKlqJ9FwNsXE6BskthhOBTG/4kOsU5nR1z6P6JlP
De8i5Dd76oYGOXUxf8CAPcqTDligXkx8BBEA+AuLbUyimUBgvFWPqgiBvmLHftrK
jR2mKjUznkC6A/WzHwUq/uwVz76qjor4aHo+WbvlhAJSSqYPlvZcdbOPU/O+hHhK
obEhGHH+iKeGaRAd/Rv8fFHIjzq5hzriB8ls2uRiHe50FL/v10s=
=UzVO
-----END PGP SIGNATURE-----

Merge tag 'asm-generic-fixes-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm-generic fixes from Arnd Bergmann:
 "These are minor fixes to address false-positive build warnings: Some
  of the less common I/O accessors are missing __force casts and cause
  sparse warnings for their implied byteswap, and a recent change to
  __generic_cmpxchg_local() causes a warning about constant integer
  truncation"

* tag 'asm-generic-fixes-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  asm-generic: avoid __generic_cmpxchg_local warnings
  asm-generic/io.h: suppress endianness warnings for relaxed accessors
  asm-generic/io.h: suppress endianness warnings for readq() and writeq()
commit fcff5f99ea
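The sparse half of this pull is easiest to see with the checker's own annotations. The sketch below is a self-contained userspace stand-in (the _stub names and typedefs are illustrative, not the kernel's headers, although the __bitwise/__force macros mirror how the kernel defines them when sparse sets __CHECKER__): __raw_readq() hands back a plain u64, the little-endian conversion helper expects a __le64, and without the (__le64 __force) cast sparse flags the mismatched base types, which is the kind of warning the io.h hunks below silence.

/* Hedged sketch, not kernel code: stand-in types and stubs for illustration. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned long long u64;
typedef u64 __bitwise __le64;

/* Raw accessor: returns whatever bits are on the bus, no byte order implied. */
static inline u64 __raw_readq_stub(const volatile void *addr)
{
	return *(const volatile u64 *)addr;
}

/* Conversion helper: expects a __le64; this sketch assumes a little-endian host. */
static inline u64 le64_to_cpu_stub(__le64 x)
{
	return (u64 __force)x;
}

static inline u64 readq_stub(const volatile void *addr)
{
	/* Without (__le64 __force), sparse reports that a plain u64 is passed
	 * where the restricted __le64 type is expected. */
	return le64_to_cpu_stub((__le64 __force)__raw_readq_stub(addr));
}

Drop the __force cast and sparse reports the restricted-type mismatch; the kernel build surfaces the same class of warning with make C=1.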
include/asm-generic/atomic.h

@@ -130,7 +130,7 @@ ATOMIC_OP(xor, ^)
 #define arch_atomic_read(v)			READ_ONCE((v)->counter)
 #define arch_atomic_set(v, i)			WRITE_ONCE(((v)->counter), (i))
 
-#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (v)))
-#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (u32)(v)))
+#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (u32)(old), (u32)(new)))
 
 #endif /* __ASM_GENERIC_ATOMIC_H */
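The new casts make the value handed to arch_xchg()/arch_cmpxchg() an explicit unsigned 32-bit quantity before it reaches the generic helpers, which take unsigned long. As a purely illustrative aside on C's integer conversion rules (hypothetical code, not a restatement of the changelog): a signed int widened directly to unsigned long is sign-extended, while one that goes through u32 first keeps only its low 32 bits.

/* Illustration only: how a signed int widens with and without a u32 detour.
 * Plain C, hypothetical names, not kernel code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int v = -1;	/* e.g. an atomic_t counter value */

	unsigned long direct  = (unsigned long)v;           /* sign-extended: 0xffffffffffffffff on LP64 */
	unsigned long via_u32 = (unsigned long)(uint32_t)v; /* zero-extended: 0x00000000ffffffff */

	printf("direct=%#lx via_u32=%#lx\n", direct, via_u32);
	return 0;
}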
include/asm-generic/cmpxchg-local.h

@@ -26,16 +26,16 @@ static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
 	raw_local_irq_save(flags);
 	switch (size) {
 	case 1: prev = *(u8 *)ptr;
-		if (prev == (u8)old)
-			*(u8 *)ptr = (u8)new;
+		if (prev == (old & 0xffu))
+			*(u8 *)ptr = (new & 0xffu);
 		break;
 	case 2: prev = *(u16 *)ptr;
-		if (prev == (u16)old)
-			*(u16 *)ptr = (u16)new;
+		if (prev == (old & 0xffffu))
+			*(u16 *)ptr = (new & 0xffffu);
 		break;
 	case 4: prev = *(u32 *)ptr;
-		if (prev == (u32)old)
-			*(u32 *)ptr = (u32)new;
+		if (prev == (old & 0xffffffffu))
+			*(u32 *)ptr = (new & 0xffffffffu);
 		break;
 	case 8: prev = *(u64 *)ptr;
 		if (prev == old)
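The cast-to-mask rewrite above matches the "constant integer truncation" note in the pull message: when a wide constant flows into one of the narrower switch arms, a narrowing cast is the pattern that cast-truncation diagnostics latch onto, while an explicit mask keeps the compared and stored value provably in range. A minimal stand-in with hypothetical names:

/* Minimal stand-in (hypothetical names, not kernel code): two ways to handle
 * the low byte of a wider value. */
#include <stdint.h>

static uint8_t store_cast(volatile uint8_t *p, unsigned long new)
{
	uint8_t prev = *p;

	/* Narrowing cast: when `new` is a constant wider than 8 bits, this is
	 * the shape that cast-truncation diagnostics flag. */
	if (prev == (uint8_t)new)
		*p = (uint8_t)new;
	return prev;
}

static uint8_t store_mask(volatile uint8_t *p, unsigned long new)
{
	uint8_t prev = *p;

	/* Explicit mask: the value is already in 0..255, nothing is silently
	 * dropped by a cast. */
	if (prev == (new & 0xffu))
		*p = (new & 0xffu);
	return prev;
}

The __generic_xchg() hunks that follow apply the same mask-instead-of-cast treatment to the u8/u16/u32 arms.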
include/asm-generic/cmpxchg.h

@@ -32,7 +32,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u8 *)ptr;
-		*(volatile u8 *)ptr = x;
+		*(volatile u8 *)ptr = (x & 0xffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u8 */
@@ -43,7 +43,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u16 *)ptr;
-		*(volatile u16 *)ptr = x;
+		*(volatile u16 *)ptr = (x & 0xffffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u16 */
@@ -54,7 +54,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u32 *)ptr;
-		*(volatile u32 *)ptr = x;
+		*(volatile u32 *)ptr = (x & 0xffffffffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u32 */
include/asm-generic/io.h

@@ -236,7 +236,7 @@ static inline u64 readq(const volatile void __iomem *addr)
 
 	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
 	__io_br();
-	val = __le64_to_cpu(__raw_readq(addr));
+	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
 	__io_ar(val);
 	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
 	return val;
@@ -287,7 +287,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 	__io_bw();
-	__raw_writeq(__cpu_to_le64(value), addr);
+	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
 	__io_aw();
 	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
@@ -319,7 +319,7 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
 	u16 val;
 
 	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
-	val = __le16_to_cpu(__raw_readw(addr));
+	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
 	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
 	return val;
 }
@@ -332,7 +332,7 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
 	u32 val;
 
 	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
-	val = __le32_to_cpu(__raw_readl(addr));
+	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
 	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
 	return val;
 }
@@ -345,7 +345,7 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
 	u64 val;
 
 	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
-	val = __le64_to_cpu(__raw_readq(addr));
+	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
 	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
 	return val;
 }
@@ -366,7 +366,7 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
 static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
-	__raw_writew(cpu_to_le16(value), addr);
+	__raw_writew((u16 __force)cpu_to_le16(value), addr);
 	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
@@ -376,7 +376,7 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
-	__raw_writel(__cpu_to_le32(value), addr);
+	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
 	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
@@ -386,7 +386,7 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
 {
 	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
-	__raw_writeq(__cpu_to_le64(value), addr);
+	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
 	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
 #endif