Mirror of https://github.com/torvalds/linux.git, synced 2024-11-22 04:02:20 +00:00
1beb35ec61
Provide mul_u64_u32_add_u64_shr(), which is a calculation that will be used by
timekeeping and VDSO. Place #include <vdso/math64.h> after
#include <asm/div64.h> to allow architecture-specific overrides, at least for
the kernel.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-6-adrian.hunter@intel.com
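For reference, the include ordering the message describes looks like this in a
consumer header (a sketch of the pattern, not a verbatim quote): the
architecture header gets the first chance to define
mul_u64_u32_add_u64_shr(), and the generic version below only fills in when
it has not.

	#include <asm/div64.h>		/* may provide an arch-optimised override */
	#include <vdso/math64.h>	/* generic fallback via #ifndef guard */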
63 lines
1.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_MATH64_H
#define __VDSO_MATH64_H

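/*
 * Division by repeated subtraction: the cost is proportional to the
 * quotient, so this is only sensible when the quotient is known to be
 * small.
 */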
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

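/*
 * mul_u64_u32_add_u64_shr() computes (a * mul + b) >> shift. With
 * native 128-bit arithmetic it is a single widening multiply, add and
 * shift.
 */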
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_add_u64_shr
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}
#endif /* mul_u64_u32_add_u64_shr */

#else

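/*
 * 32-bit fallback: split 'a' into 32-bit halves, add 'b' to the low
 * partial product with explicit overflow detection, then fold the
 * carry bit and the high partial product into the shifted result.
 */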
#ifndef mul_u64_u32_add_u64_shr
#ifndef mul_u32_u32
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#define mul_u32_u32 mul_u32_u32
#endif
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	bool ovf;
	u64 ret;

	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
	ret >>= shift;
	if (ovf && shift)
		ret += 1ULL << (64 - shift);
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_add_u64_shr */

#endif

#endif /* __VDSO_MATH64_H */
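As a hedged illustration (not part of the file above), here is a minimal
sketch of how a VDSO-style clock read could use mul_u64_u32_add_u64_shr() to
turn a cycle delta into nanoseconds. The struct, its field names, and
cycles_to_ns() are illustrative assumptions, not the real kernel data
structures.

	/* Hypothetical sketch; names are assumptions, not kernel API. */
	#include <linux/types.h>	/* u32, u64 */
	#include <vdso/math64.h>

	struct clk_conv {
		u64 cycle_last;	/* counter value at the last update */
		u64 base_ns;	/* shifted ns accumulator at cycle_last */
		u32 mult;	/* cycles -> ns multiplier */
		u32 shift;	/* cycles -> ns shift */
	};

	static inline u64 cycles_to_ns(const struct clk_conv *c, u64 cycles)
	{
		u64 delta = cycles - c->cycle_last;

		/* Computes (delta * mult + base_ns) >> shift in one call,
		 * without losing the intermediate carry on 32-bit builds. */
		return mul_u64_u32_add_u64_shr(delta, c->mult, c->base_ns, c->shift);
	}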