lib/find_bit.c: join _find_next_bit{_le}
_find_next_bit() and _find_next_bit_le() are very similar functions. It is possible to join them by adding one parameter and a couple of simple checks. This simplifies maintenance and makes it possible to shrink the size of .text by un-inlining the unified function (in the following patch).

Link: http://lkml.kernel.org/r/20200103202846.21616-2-yury.norov@gmail.com
Signed-off-by: Yury Norov <yury.norov@gmail.com>
Cc: Allison Randal <allison@lohutok.net>
Cc: Joe Perches <joe@perches.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: William Breathitt Gray <vilhelm.gray@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b78c57135d
parent d5767057c9
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -18,8 +18,8 @@
 #include <linux/kernel.h>
 
 #if !defined(find_next_bit) || !defined(find_next_zero_bit) ||			\
-	!defined(find_next_and_bit)
-
+	!defined(find_next_bit_le) || !defined(find_next_zero_bit_le) ||	\
+	!defined(find_next_and_bit)
 /*
  * This is a common helper function for find_next_bit, find_next_zero_bit, and
  * find_next_and_bit. The differences are:
@@ -29,9 +29,9 @@
  */
 static inline unsigned long _find_next_bit(const unsigned long *addr1,
 		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert)
+		unsigned long start, unsigned long invert, unsigned long le)
 {
-	unsigned long tmp;
+	unsigned long tmp, mask;
 
 	if (unlikely(start >= nbits))
 		return nbits;
@@ -42,7 +42,12 @@ static inline unsigned long _find_next_bit(const unsigned long *addr1,
 	tmp ^= invert;
 
 	/* Handle 1st word. */
-	tmp &= BITMAP_FIRST_WORD_MASK(start);
+	mask = BITMAP_FIRST_WORD_MASK(start);
+	if (le)
+		mask = swab(mask);
+
+	tmp &= mask;
+
 	start = round_down(start, BITS_PER_LONG);
 
 	while (!tmp) {
@@ -56,6 +61,9 @@ static inline unsigned long _find_next_bit(const unsigned long *addr1,
 		tmp ^= invert;
 	}
 
+	if (le)
+		tmp = swab(tmp);
+
 	return min(start + __ffs(tmp), nbits);
 }
 #endif
@@ -67,7 +75,7 @@ static inline unsigned long _find_next_bit(const unsigned long *addr1,
 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
 			    unsigned long offset)
 {
-	return _find_next_bit(addr, NULL, size, offset, 0UL);
+	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
 }
 EXPORT_SYMBOL(find_next_bit);
 #endif
@@ -76,7 +84,7 @@ EXPORT_SYMBOL(find_next_bit);
 unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
 				 unsigned long offset)
 {
-	return _find_next_bit(addr, NULL, size, offset, ~0UL);
+	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
 }
 EXPORT_SYMBOL(find_next_zero_bit);
 #endif
@@ -86,7 +94,7 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
 		const unsigned long *addr2, unsigned long size,
 		unsigned long offset)
 {
-	return _find_next_bit(addr1, addr2, size, offset, 0UL);
+	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
 }
 EXPORT_SYMBOL(find_next_and_bit);
 #endif
@@ -149,45 +157,11 @@ EXPORT_SYMBOL(find_last_bit);
 
 #ifdef __BIG_ENDIAN
 
-#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
-static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
-		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert)
-{
-	unsigned long tmp;
-
-	if (unlikely(start >= nbits))
-		return nbits;
-
-	tmp = addr1[start / BITS_PER_LONG];
-	if (addr2)
-		tmp &= addr2[start / BITS_PER_LONG];
-	tmp ^= invert;
-
-	/* Handle 1st word. */
-	tmp &= swab(BITMAP_FIRST_WORD_MASK(start));
-	start = round_down(start, BITS_PER_LONG);
-
-	while (!tmp) {
-		start += BITS_PER_LONG;
-		if (start >= nbits)
-			return nbits;
-
-		tmp = addr1[start / BITS_PER_LONG];
-		if (addr2)
-			tmp &= addr2[start / BITS_PER_LONG];
-		tmp ^= invert;
-	}
-
-	return min(start + __ffs(swab(tmp)), nbits);
-}
-#endif
-
 #ifndef find_next_zero_bit_le
 unsigned long find_next_zero_bit_le(const void *addr, unsigned
 		long size, unsigned long offset)
 {
-	return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
+	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
 }
 EXPORT_SYMBOL(find_next_zero_bit_le);
 #endif
@@ -196,7 +170,7 @@ EXPORT_SYMBOL(find_next_zero_bit_le);
 unsigned long find_next_bit_le(const void *addr, unsigned
 		long size, unsigned long offset)
 {
-	return _find_next_bit_le(addr, NULL, size, offset, 0UL);
+	return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
 }
 EXPORT_SYMBOL(find_next_bit_le);
 #endif
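For readers who want to experiment with the unified approach outside the kernel, the following is a minimal user-space sketch of the joined helper, not the kernel code: first_word_mask() and swab_long() are hypothetical stand-ins for BITMAP_FIRST_WORD_MASK() and swab(), it assumes a 64-bit unsigned long, and it folds the find_next_bit/find_next_zero_bit behaviour into one function via the same invert and le parameters used by the patch.

/* User-space sketch of the unified helper; names and layout are illustrative. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Mask covering bit "start" and all higher bits within its word. */
static unsigned long first_word_mask(unsigned long start)
{
	return ~0UL << (start % BITS_PER_LONG);
}

/* Stand-in for the kernel's swab(); assumes unsigned long is 64 bits. */
static unsigned long swab_long(unsigned long x)
{
	return __builtin_bswap64(x);
}

/*
 * invert == 0UL  -> search for the next set bit
 * invert == ~0UL -> search for the next zero bit
 * le != 0        -> treat the bitmap as little-endian (byte-swap mask/word)
 */
static unsigned long find_next(const unsigned long *addr, unsigned long nbits,
			       unsigned long start, unsigned long invert, int le)
{
	unsigned long tmp, mask;

	if (start >= nbits)
		return nbits;

	tmp = addr[start / BITS_PER_LONG] ^ invert;

	/* Handle the first (possibly partial) word. */
	mask = first_word_mask(start);
	if (le)
		mask = swab_long(mask);
	tmp &= mask;

	start -= start % BITS_PER_LONG;	/* round down to a word boundary */

	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;
		tmp = addr[start / BITS_PER_LONG] ^ invert;
	}

	if (le)
		tmp = swab_long(tmp);

	start += (unsigned long)__builtin_ctzl(tmp);
	return start < nbits ? start : nbits;
}

int main(void)
{
	unsigned long map[1] = { 0 };

	map[0] |= 1UL << 40;	/* set bit 40 in the native layout */
	printf("next set bit from 0:   %lu\n", find_next(map, 64, 0, 0UL, 0));   /* 40 */
	printf("next zero bit from 40: %lu\n", find_next(map, 64, 40, ~0UL, 0)); /* 41 */
	return 0;
}

As in the kernel patch, the le path only matters on a big-endian host, where byte-swapping the first-word mask and the final candidate word reproduces what the dedicated _find_next_bit_le() used to do; the kernel only compiles the _le wrappers under #ifdef __BIG_ENDIAN.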