Mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 04:02:20 +00:00)
409ca45526
Remove an unnecessary arch complication: arch/x86/include/asm/arch_hweight.h uses __sw_hweight{32,64} as alternatives, and they are implemented in arch/x86/lib/hweight.S.

x86 does not rely on the generic C implementation lib/hweight.c at all, so CONFIG_GENERIC_HWEIGHT should be disabled. __HAVE_ARCH_SW_HWEIGHT is not necessary either.

No change in functionality intended.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Uros Bizjak <ubizjak@gmail.com>
Link: http://lkml.kernel.org/r/1557665521-17570-1-git-send-email-yamada.masahiro@socionext.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
69 lines
1.9 KiB
C
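The arrangement the commit message refers to can be pictured with a small user-space sketch (hypothetical names, x86 with GCC or Clang assumed): use the hardware population count when the CPU advertises POPCNT, otherwise fall back to software bit counting. The kernel makes this choice once at boot by patching the call site with the ALTERNATIVE mechanism in arch/x86/include/asm/arch_hweight.h, not through a function pointer; the sketch below is only an analogy of that dispatch.

/* dispatch_sketch.c - hypothetical user-space analogy, not kernel code. */
#include <stdio.h>

static unsigned int sw_popcount32(unsigned int w)
{
        /* Same pair/nibble/byte summing as __sw_hweight32 in the file below. */
        w -= (w >> 1) & 0x55555555;
        w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w = (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
}

static unsigned int hw_popcount32(unsigned int w)
{
        /* With -mpopcnt this compiles down to the POPCNT instruction. */
        return (unsigned int)__builtin_popcount(w);
}

static unsigned int (*popcount32)(unsigned int);

int main(void)
{
        /* One-time CPU feature check standing in for boot-time patching. */
        popcount32 = __builtin_cpu_supports("popcnt") ? hw_popcount32
                                                      : sw_popcount32;
        printf("popcount(0xF0F0F0F0) = %u\n", popcount32(0xF0F0F0F0u));
        return 0;
}

The generic C fallback that other architectures use, lib/hweight.c, is the file shown in full below.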
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/bitops.h>
#include <asm/types.h>

/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

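/*
 * The functions below all use the same "SIMD within a register" (SWAR)
 * technique: bits are summed in adjacent pairs, then in nibbles, then in
 * bytes, entirely within an ordinary integer register.  When
 * CONFIG_ARCH_HAS_FAST_MULTIPLIER is set, the per-byte counts are gathered
 * into the top byte with a single multiply and shift instead of the final
 * series of shifts and adds.
 */
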
unsigned int __sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w = (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
#else
        unsigned int res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(__sw_hweight32);

unsigned int __sw_hweight16(unsigned int w)
{
        unsigned int res = w - ((w >> 1) & 0x5555);
        res = (res & 0x3333) + ((res >> 2) & 0x3333);
        res = (res + (res >> 4)) & 0x0F0F;
        return (res + (res >> 8)) & 0x00FF;
}
EXPORT_SYMBOL(__sw_hweight16);

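/*
 * Worked example for the 8-bit case below, w = 0xb7 (1011 0111, six set bits):
 *   w - ((w >> 1) & 0x55)               -> 0x66 (pair counts 1,2,1,2)
 *   (res & 0x33) + ((res >> 2) & 0x33)  -> 0x33 (nibble counts 3,3)
 *   (res + (res >> 4)) & 0x0F           -> 0x06 (six bits set)
 */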
unsigned int __sw_hweight8(unsigned int w)
{
        unsigned int res = w - ((w >> 1) & 0x55);
        res = (res & 0x33) + ((res >> 2) & 0x33);
        return (res + (res >> 4)) & 0x0F;
}
EXPORT_SYMBOL(__sw_hweight8);

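/*
 * On 32-bit kernels the 64-bit count is simply the sum of the two 32-bit
 * halves.  On 64-bit kernels the same SWAR steps are applied to the full
 * word; with a fast multiplier, multiplying the per-byte counts by
 * 0x0101010101010101 accumulates the total into the most significant byte,
 * which the final shift by 56 extracts.
 */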
unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
        return __sw_hweight32((unsigned int)(w >> 32)) +
               __sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x5555555555555555ul;
        w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
        w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
        return (w * 0x0101010101010101ul) >> 56;
#else
        __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
        res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
        res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
        res = res + (res >> 8);
        res = res + (res >> 16);
        return (res + (res >> 32)) & 0x00000000000000FFul;
#endif
#endif
}
EXPORT_SYMBOL(__sw_hweight64);
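For readers who want to experiment with the algorithm outside the kernel, the following stand-alone harness (hypothetical, not part of the kernel tree) copies the generic 32-bit fallback path into plain C and cross-checks it against the compiler's __builtin_popcount.

/* hweight_check.c - hypothetical user-space sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Same steps as the #else branch of __sw_hweight32 above. */
static unsigned int sw_hweight32(uint32_t w)
{
        uint32_t res = w - ((w >> 1) & 0x55555555);            /* pair counts   */
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);  /* nibble counts */
        res = (res + (res >> 4)) & 0x0F0F0F0F;                 /* byte counts   */
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;               /* total         */
}

int main(void)
{
        /* A few hand-picked values plus a sweep of all 16-bit integers. */
        uint32_t samples[] = { 0, 1, 0xb7, 0xF0F0F0F0u, 0xFFFFFFFFu };
        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                uint32_t w = samples[i];
                if (sw_hweight32(w) != (unsigned int)__builtin_popcount(w)) {
                        printf("mismatch at 0x%08x\n", w);
                        return 1;
                }
        }
        for (uint32_t w = 0; w < (1u << 16); w++)
                if (sw_hweight32(w) != (unsigned int)__builtin_popcount(w))
                        return 1;
        printf("sw_hweight32 matches __builtin_popcount\n");
        return 0;
}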