lib/find_bit.c, from a mirror of https://github.com/torvalds/linux.git (synced 2024-12-01 00:21:32 +00:00)

Commit 7dfaa98f64:

    It saves 25% of .text for arm64, and more for BE architectures.

    Before:
    $ size lib/find_bit.o
       text    data     bss     dec     hex filename
       1012      56       0    1068     42c lib/find_bit.o

    After:
    $ size lib/find_bit.o
       text    data     bss     dec     hex filename
        776      56       0     832     340 lib/find_bit.o

    Link: http://lkml.kernel.org/r/20200103202846.21616-3-yury.norov@gmail.com
    Signed-off-by: Yury Norov <yury.norov@gmail.com>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: Allison Randal <allison@lohutok.net>
    Cc: William Breathitt Gray <vilhelm.gray@gmail.com>
    Cc: Joe Perches <joe@perches.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

193 lines · 4.5 KiB · C
// SPDX-License-Identifier: GPL-2.0-or-later
/* bit search implementation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Copyright (C) 2008 IBM Corporation
 * 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
 * (Inspired by David Howell's find_next_bit implementation)
 *
 * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
 * size and improve performance, 2015.
 */

#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>

#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
        !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) || \
        !defined(find_next_and_bit)
/*
 * This is a common helper function for find_next_bit, find_next_zero_bit, and
 * find_next_and_bit. The differences are:
 *  - The "invert" argument, which is XORed with each fetched word before
 *    searching it for one bits.
 *  - The optional "addr2", which is anded with "addr1" if present.
 */
static unsigned long _find_next_bit(const unsigned long *addr1,
                const unsigned long *addr2, unsigned long nbits,
                unsigned long start, unsigned long invert, unsigned long le)
{
        unsigned long tmp, mask;

        if (unlikely(start >= nbits))
                return nbits;

        tmp = addr1[start / BITS_PER_LONG];
        if (addr2)
                tmp &= addr2[start / BITS_PER_LONG];
        tmp ^= invert;

        /* Handle 1st word. */
        mask = BITMAP_FIRST_WORD_MASK(start);
        if (le)
                mask = swab(mask);

        tmp &= mask;

        start = round_down(start, BITS_PER_LONG);

        while (!tmp) {
                start += BITS_PER_LONG;
                if (start >= nbits)
                        return nbits;

                tmp = addr1[start / BITS_PER_LONG];
                if (addr2)
                        tmp &= addr2[start / BITS_PER_LONG];
                tmp ^= invert;
        }

        if (le)
                tmp = swab(tmp);

        return min(start + __ffs(tmp), nbits);
}
#endif
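
/*
 * Illustrative sketch (not part of the kernel source): the "invert" argument
 * above works because looking for a clear bit in a word is the same as
 * looking for a set bit in its complement. XORing the fetched word with 0UL
 * leaves it unchanged (set-bit search), while XORing with ~0UL flips every
 * bit (clear-bit search), so one helper body serves both cases. The
 * hypothetical function below spells that out for a single word.
 */
static inline unsigned long example_first_zero_in_word(unsigned long word)
{
        unsigned long inverted = word ^ ~0UL;   /* clear bits become set bits */

        /* __ffs() is undefined for 0, i.e. when the original word is all ones */
        return inverted ? __ffs(inverted) : BITS_PER_LONG;
}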

#ifndef find_next_bit
/*
 * Find the next set bit in a memory region.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                unsigned long offset)
{
        return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
}
EXPORT_SYMBOL(find_next_bit);
#endif
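
/*
 * Illustrative sketch (not part of the kernel source): callers usually consume
 * these helpers as an open-coded scan over every set bit, which is the pattern
 * behind the kernel's for_each_set_bit() macro. "report_bit" is a hypothetical
 * callback standing in for real per-bit work.
 */
static inline void example_walk_set_bits(const unsigned long *bitmap,
                unsigned long nbits, void (*report_bit)(unsigned long))
{
        unsigned long bit;

        for (bit = find_first_bit(bitmap, nbits);
             bit < nbits;
             bit = find_next_bit(bitmap, nbits, bit + 1))
                report_bit(bit);
}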

#ifndef find_next_zero_bit
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                unsigned long offset)
{
        return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif

#if !defined(find_next_and_bit)
unsigned long find_next_and_bit(const unsigned long *addr1,
                const unsigned long *addr2, unsigned long size,
                unsigned long offset)
{
        return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
}
EXPORT_SYMBOL(find_next_and_bit);
#endif
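
/*
 * Illustrative sketch (not part of the kernel source): the optional "addr2"
 * argument lets callers scan the intersection of two bitmaps word by word
 * without first materializing a bitmap_and() result. The hypothetical helper
 * below returns the lowest position set in both maps, or nbits if there is
 * no common bit.
 */
static inline unsigned long example_first_common_bit(const unsigned long *map1,
                const unsigned long *map2, unsigned long nbits)
{
        return find_next_and_bit(map1, map2, nbits, 0);
}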

#ifndef find_first_bit
/*
 * Find the first set bit in a memory region.
 */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
        unsigned long idx;

        for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
                if (addr[idx])
                        return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
        }

        return size;
}
EXPORT_SYMBOL(find_first_bit);
#endif
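
/*
 * Illustrative sketch (not part of the kernel source): because the scan above
 * returns "size" when no bit is set, an "is anything set?" test is just a
 * comparison against the bitmap length, in the same spirit as the kernel's
 * bitmap_empty() helper. The function name below is hypothetical.
 */
static inline bool example_bitmap_has_any_bit(const unsigned long *bitmap,
                unsigned long nbits)
{
        return find_first_bit(bitmap, nbits) < nbits;
}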

#ifndef find_first_zero_bit
/*
 * Find the first cleared bit in a memory region.
 */
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
        unsigned long idx;

        for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
                if (addr[idx] != ~0UL)
                        return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
        }

        return size;
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif
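
/*
 * Illustrative sketch (not part of the kernel source): find_first_zero_bit()
 * is the core of simple "grab the lowest free slot" allocators. The
 * hypothetical helper below claims a free slot in a caller-locked bitmap and
 * returns nbits when the table is full; real users pair this with their own
 * locking or with test_and_set_bit().
 */
static inline unsigned long example_claim_free_slot(unsigned long *slots,
                unsigned long nbits)
{
        unsigned long slot = find_first_zero_bit(slots, nbits);

        if (slot < nbits)
                __set_bit(slot, slots); /* caller is assumed to hold the lock */

        return slot;
}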

#ifndef find_last_bit
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
        if (size) {
                unsigned long val = BITMAP_LAST_WORD_MASK(size);
                unsigned long idx = (size-1) / BITS_PER_LONG;

                do {
                        val &= addr[idx];
                        if (val)
                                return idx * BITS_PER_LONG + __fls(val);

                        val = ~0ul;
                } while (idx--);
        }
        return size;
}
EXPORT_SYMBOL(find_last_bit);
#endif
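
/*
 * Illustrative sketch (not part of the kernel source): the backward scan above
 * first masks the topmost word with BITMAP_LAST_WORD_MASK(size) so bits beyond
 * "size" cannot be reported, then walks toward word 0 with a full ~0ul mask.
 * A hypothetical caller can use the "size means not found" convention to
 * compute how many bit positions a bitmap actually uses.
 */
static inline unsigned long example_bits_in_use(const unsigned long *bitmap,
                unsigned long nbits)
{
        unsigned long last = find_last_bit(bitmap, nbits);

        return (last == nbits) ? 0 : last + 1;  /* 0 when no bit is set */
}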

#ifdef __BIG_ENDIAN

#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned
                long size, unsigned long offset)
{
        return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif

#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned
                long size, unsigned long offset)
{
        return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
}
EXPORT_SYMBOL(find_next_bit_le);
#endif

#endif /* __BIG_ENDIAN */
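
/*
 * Illustrative sketch (not part of the kernel source): on a big-endian host
 * the bytes of a little-endian bitmap sit in reverse order within each word,
 * which is why _find_next_bit() byte-swaps the start mask before applying it
 * and byte-swaps the matching word before taking __ffs(). The hypothetical
 * function below shows the per-word identity being relied on: the first bit
 * in LE numbering of a natively loaded big-endian word is the first set bit
 * of its byte-swapped value.
 */
static inline unsigned long example_le_first_bit_in_word(unsigned long native_word)
{
        unsigned long le_view = swab(native_word);      /* reverse the byte order */

        return le_view ? __ffs(le_view) : BITS_PER_LONG;
}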

unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
                unsigned long size, unsigned long offset)
{
        offset = find_next_bit(addr, size, offset);
        if (offset == size)
                return size;

        offset = round_down(offset, 8);
        *clump = bitmap_get_value8(addr, offset);

        return offset;
}
EXPORT_SYMBOL(find_next_clump8);
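
/*
 * Illustrative sketch (not part of the kernel source): find_next_clump8()
 * reports byte-aligned 8-bit groups rather than single bits, which suits
 * drivers whose hardware exposes eight lines per port; the kernel's
 * for_each_set_clump8() iterator is built on it. The hypothetical loop below
 * visits every 8-bit group that has at least one bit set, with "handle_clump"
 * standing in for real per-group work.
 */
static inline void example_walk_set_clumps(const unsigned long *bitmap,
                unsigned long nbits,
                void (*handle_clump)(unsigned long start, unsigned long clump))
{
        unsigned long clump;
        unsigned long start;

        for (start = find_next_clump8(&clump, bitmap, nbits, 0);
             start < nbits;
             start = find_next_clump8(&clump, bitmap, nbits, start + 8))
                handle_clump(start, clump);
}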