vdso, math64: Provide mul_u64_u32_add_u64_shr()

Provide mul_u64_u32_add_u64_shr(), a helper that computes
((a * mul) + b) >> shift with a full-width intermediate product. It will
be used by timekeeping and the VDSO.

Place #include <vdso/math64.h> after #include <asm/div64.h> to allow
architecture-specific overrides, at least for the kernel.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-6-adrian.hunter@intel.com
Author:    Adrian Hunter <adrian.hunter@intel.com>
Date:      2024-03-25 08:40:09 +02:00
Committer: Thomas Gleixner
Commit:    1beb35ec61
Parent:    5e5e51422c

2 changed files with 39 additions and 1 deletion

--- a/include/linux/math64.h
+++ b/include/linux/math64.h

@@ -4,8 +4,8 @@
 
 #include <linux/types.h>
 #include <linux/math.h>
-#include <vdso/math64.h>
 #include <asm/div64.h>
+#include <vdso/math64.h>
 
 #if BITS_PER_LONG == 64
 
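The reordering matters because of the #ifndef override convention used by
the new code below: <vdso/math64.h> only provides mul_u64_u32_add_u64_shr()
if the name has not already been defined, so a header included earlier
(here <asm/div64.h>) can supply an architecture-specific version. A minimal
sketch of that pattern, with a hypothetical arch header and a plain-C body
standing in for whatever optimized sequence an architecture would really use:

/* Hypothetical arch/<arch>/include/asm/div64.h override (illustration only) */
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b,
						   unsigned int shift)
{
	/* an architecture would use its own optimized sequence here */
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}
#define mul_u64_u32_add_u64_shr mul_u64_u32_add_u64_shr

/*
 * With <asm/div64.h> now included before <vdso/math64.h>, the
 * #ifndef mul_u64_u32_add_u64_shr guard in the generic header sees the
 * macro above and keeps the architecture's definition.
 */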

--- a/include/vdso/math64.h
+++ b/include/vdso/math64.h

@@ -21,4 +21,42 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_add_u64_shr
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_add_u64_shr
+#ifndef mul_u32_u32
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	u32 ah = a >> 32, al = a;
+	bool ovf;
+	u64 ret;
+
+	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
+	ret >>= shift;
+	if (ovf && shift)
+		ret += 1ULL << (64 - shift);
+	if (ah)
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#endif
+
 #endif /* __VDSO_MATH64_H */
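
For context (not part of the commit): timekeeping converts clocksource
cycles to nanoseconds as roughly (cycles * mult + frac) >> shift, which is
exactly the shape of the new helper; that is the "will be used by
timekeeping and VDSO" part of the changelog. Below is a hedged,
self-contained userspace sketch, with made-up names shr_int128() and
shr_fallback(), that copies both bodies from the hunk above and checks
that the __int128 path and the 32x32 fallback agree on a few
representative inputs. It assumes gcc or clang on a 64-bit host.

/* Hedged userspace sketch, illustration only; not kernel code. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}

/* CONFIG_ARCH_SUPPORTS_INT128 path, copied from the hunk above */
static inline u64 shr_int128(u64 a, u32 mul, u64 b, unsigned int shift)
{
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}

/* 32x32 fallback path, copied from the hunk above */
static inline u64 shr_fallback(u64 a, u32 mul, u64 b, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	bool ovf;
	u64 ret;

	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
	ret >>= shift;
	if (ovf && shift)
		ret += 1ULL << (64 - shift);
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}

int main(void)
{
	/* cycles, mult, frac, shift roughly as a clocksource would use them */
	struct { u64 cycles; u32 mult; u64 frac; unsigned int shift; } t[] = {
		{ 1000000ull,       4194304u,    123ull,          22 },
		{ 0xffffffffull,    0xffffffffu, 0xffffffffull,   32 },
		{ 0x123456789aull,  3579545u,    0x8000000ull,    20 },
	};

	for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
		u64 x = shr_int128(t[i].cycles, t[i].mult, t[i].frac, t[i].shift);
		u64 y = shr_fallback(t[i].cycles, t[i].mult, t[i].frac, t[i].shift);

		printf("int128=%llu fallback=%llu %s\n",
		       (unsigned long long)x, (unsigned long long)y,
		       x == y ? "OK" : "MISMATCH");
	}
	return 0;
}

On the fallback arithmetic: a carry out of the 64-bit addition
mul_u32_u32(al, mul) + b contributes exactly 1ULL << (64 - shift) after the
shift, and the high half of a contributes mul_u32_u32(ah, mul) << (32 - shift);
like the existing mul_u64_u32_shr() fallback, this effectively assumes
shift is at most 32 when the high half of a is non-zero, and that the final
result fits in 64 bits.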