bitmap patches for v6.1-rc1
Merge tag 'bitmap-6.1-rc1' of https://github.com/norov/linux

Pull bitmap updates from Yury Norov:

 - drivers/base: fix unsigned comparison to -1 in CPUMAP_FILE_MAX_BYTES
   (Phil Auld)

 - cpumask: clean up the nr_cpu_ids vs nr_cpumask_bits mess (Yury Norov)

   This series cleans that mess and adds a new config option,
   FORCE_NR_CPUS, that allows the cpumask subsystem to be optimized when
   the number of CPUs is known at compile time.

 - lib: optimize the find_bit() functions (Yury Norov)

   Reworks the find_bit() functions on top of the new
   FIND_{FIRST,NEXT}_BIT() macros.

 - lib/find: add find_nth_bit() (Yury Norov)

   find_nth_bit() is ~70 times faster than bitcounting with a
   for_each() loop:

       for_each_set_bit(bit, mask, size)
               if (n-- == 0)
                       return bit;

   Also adds bitmap_weight_and() to let people replace this pattern:

       tmp = bitmap_alloc(nbits);
       bitmap_and(tmp, map1, map2, nbits);
       weight = bitmap_weight(tmp, nbits);
       bitmap_free(tmp);

   with a single bitmap_weight_and() call.

 - cpumask: repair cpumask_check() (Yury Norov)

   After switching cpumask to use nr_cpu_ids, cpumask_check() started
   generating many false-positive warnings. This series fixes that.

 - bitmap, cpumask: add for_each_cpu_andnot() (Valentin Schneider)

   Extends the API with one more iterator and applies it in sched/core.
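To make the two replacement patterns above concrete, here is a minimal sketch of "before" and "after" callers. It is not taken from the series; the helper names and the GFP_KERNEL allocation context are illustrative assumptions, only bitmap_weight_and(), find_nth_bit() and the older bitmap helpers are real kernel APIs:

    #include <linux/bitmap.h>
    #include <linux/find.h>
    #include <linux/slab.h>

    /* Old way: count bits common to two maps via a temporary bitmap. */
    static unsigned int common_weight_old(const unsigned long *map1,
                                          const unsigned long *map2,
                                          unsigned int nbits)
    {
            unsigned long *tmp = bitmap_alloc(nbits, GFP_KERNEL);
            unsigned int weight = 0;

            if (tmp) {
                    bitmap_and(tmp, map1, map2, nbits);
                    weight = bitmap_weight(tmp, nbits);
                    bitmap_free(tmp);
            }
            return weight;
    }

    /* New way: no temporary bitmap, one pass over both maps. */
    static unsigned int common_weight_new(const unsigned long *map1,
                                          const unsigned long *map2,
                                          unsigned int nbits)
    {
            return bitmap_weight_and(map1, map2, nbits);
    }

    /* Old way: find the n-th set bit by counting down in a loop. */
    static unsigned int nth_bit_old(const unsigned long *map,
                                    unsigned int nbits, unsigned int n)
    {
            unsigned int bit;

            for_each_set_bit(bit, map, nbits)
                    if (n-- == 0)
                            return bit;
            return nbits;
    }

    /* New way: find_nth_bit() returns nbits if fewer than n+1 bits are set. */
    static unsigned int nth_bit_new(const unsigned long *map,
                                    unsigned int nbits, unsigned int n)
    {
            return find_nth_bit(map, nbits, n);
    }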
* tag 'bitmap-6.1-rc1' of https://github.com/norov/linux: (28 commits)
  sched/core: Merge cpumask_andnot()+for_each_cpu() into for_each_cpu_andnot()
  lib/test_cpumask: Add for_each_cpu_and(not) tests
  cpumask: Introduce for_each_cpu_andnot()
  lib/find_bit: Introduce find_next_andnot_bit()
  cpumask: fix checking valid cpu range
  lib/bitmap: add tests for for_each() loops
  lib/find: optimize for_each() macros
  lib/bitmap: introduce for_each_set_bit_wrap() macro
  lib/find_bit: add find_next{,_and}_bit_wrap
  cpumask: switch for_each_cpu{,_not} to use for_each_bit()
  net: fix cpu_max_bits_warn() usage in netif_attrmask_next{,_and}
  cpumask: add cpumask_nth_{,and,andnot}
  lib/bitmap: remove bitmap_ord_to_pos
  lib/bitmap: add tests for find_nth_bit()
  lib: add find_nth{,_and,_andnot}_bit()
  lib/bitmap: add bitmap_weight_and()
  lib/bitmap: don't call __bitmap_weight() in kernel code
  tools: sync find_bit() implementation
  lib/find_bit: optimize find_next_bit() functions
  lib/find_bit: create find_first_zero_bit_le()
  ...
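The first commit in the list is the typical conversion that for_each_cpu_andnot() enables. A hedged before/after sketch of that shape (the function names and the pr_info() action are made up for illustration; the iterator and masks are real kernel APIs) looks like this:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Before: a temporary cpumask is needed to hold the and-not result. */
    static void report_offline_cpus_old(void)
    {
            static struct cpumask tmp;      /* illustrative storage only */
            unsigned int cpu;

            cpumask_andnot(&tmp, cpu_possible_mask, cpu_online_mask);
            for_each_cpu(cpu, &tmp)
                    pr_info("cpu%u is possible but offline\n", cpu);
    }

    /* After: iterate the and-not set directly, no temporary mask. */
    static void report_offline_cpus_new(void)
    {
            unsigned int cpu;

            for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
                    pr_info("cpu%u is possible but offline\n", cpu);
    }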
commit d4013bc4d4

The combined diff of the merge follows.
@ -336,7 +336,7 @@ static void __init prefill_possible_map(void)
|
|||||||
for (; i < NR_CPUS; i++)
|
for (; i < NR_CPUS; i++)
|
||||||
set_cpu_possible(i, false);
|
set_cpu_possible(i, false);
|
||||||
|
|
||||||
nr_cpu_ids = possible;
|
set_nr_cpu_ids(possible);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -751,7 +751,7 @@ static void __init prefill_possible_map(void)
|
|||||||
for (; i < NR_CPUS; i++)
|
for (; i < NR_CPUS; i++)
|
||||||
set_cpu_possible(i, false);
|
set_cpu_possible(i, false);
|
||||||
|
|
||||||
nr_cpu_ids = possible;
|
set_nr_cpu_ids(possible);
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
static inline void prefill_possible_map(void) {}
|
static inline void prefill_possible_map(void) {}
|
||||||
|
@ -393,8 +393,12 @@ generic_secondary_common_init:
|
|||||||
#else
|
#else
|
||||||
LOAD_REG_ADDR(r8, paca_ptrs) /* Load paca_ptrs pointe */
|
LOAD_REG_ADDR(r8, paca_ptrs) /* Load paca_ptrs pointe */
|
||||||
ld r8,0(r8) /* Get base vaddr of array */
|
ld r8,0(r8) /* Get base vaddr of array */
|
||||||
|
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
|
||||||
|
LOAD_REG_IMMEDIATE(r7, NR_CPUS)
|
||||||
|
#else
|
||||||
LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
|
LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
|
||||||
lwz r7,0(r7) /* also the max paca allocated */
|
lwz r7,0(r7) /* also the max paca allocated */
|
||||||
|
#endif
|
||||||
li r5,0 /* logical cpu id */
|
li r5,0 /* logical cpu id */
|
||||||
1:
|
1:
|
||||||
sldi r9,r5,3 /* get paca_ptrs[] index from cpu id */
|
sldi r9,r5,3 /* get paca_ptrs[] index from cpu id */
|
||||||
|
@ -1316,7 +1316,7 @@ static void __init smp_sanity_check(void)
|
|||||||
nr++;
|
nr++;
|
||||||
}
|
}
|
||||||
|
|
||||||
nr_cpu_ids = 8;
|
set_nr_cpu_ids(8);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -1569,7 +1569,7 @@ __init void prefill_possible_map(void)
|
|||||||
possible = i;
|
possible = i;
|
||||||
}
|
}
|
||||||
|
|
||||||
nr_cpu_ids = possible;
|
set_nr_cpu_ids(possible);
|
||||||
|
|
||||||
pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
|
pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
|
||||||
possible, max_t(int, possible - num_processors, 0));
|
possible, max_t(int, possible - num_processors, 0));
|
||||||
|
@ -179,7 +179,7 @@ static void __init _get_smp_config(unsigned int early)
|
|||||||
* hypercall to expand the max number of VCPUs an already
|
* hypercall to expand the max number of VCPUs an already
|
||||||
* running guest has. So cap it up to X. */
|
* running guest has. So cap it up to X. */
|
||||||
if (subtract)
|
if (subtract)
|
||||||
nr_cpu_ids = nr_cpu_ids - subtract;
|
set_nr_cpu_ids(nr_cpu_ids - subtract);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -560,7 +560,7 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
|
|||||||
|
|
||||||
buf = (ulong *)bh->b_data;
|
buf = (ulong *)bh->b_data;
|
||||||
|
|
||||||
used = __bitmap_weight(buf, wbits);
|
used = bitmap_weight(buf, wbits);
|
||||||
if (used < wbits) {
|
if (used < wbits) {
|
||||||
frb = wbits - used;
|
frb = wbits - used;
|
||||||
wnd->free_bits[iw] = frb;
|
wnd->free_bits[iw] = frb;
|
||||||
@ -1364,7 +1364,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
|
|||||||
buf = (ulong *)bh->b_data;
|
buf = (ulong *)bh->b_data;
|
||||||
|
|
||||||
__bitmap_clear(buf, b0, blocksize * 8 - b0);
|
__bitmap_clear(buf, b0, blocksize * 8 - b0);
|
||||||
frb = wbits - __bitmap_weight(buf, wbits);
|
frb = wbits - bitmap_weight(buf, wbits);
|
||||||
wnd->total_zeroes += frb - wnd->free_bits[iw];
|
wnd->total_zeroes += frb - wnd->free_bits[iw];
|
||||||
wnd->free_bits[iw] = frb;
|
wnd->free_bits[iw] = frb;
|
||||||
|
|
||||||
|
@ -51,6 +51,7 @@ struct device;
|
|||||||
* bitmap_empty(src, nbits) Are all bits zero in *src?
|
* bitmap_empty(src, nbits) Are all bits zero in *src?
|
||||||
* bitmap_full(src, nbits) Are all bits set in *src?
|
* bitmap_full(src, nbits) Are all bits set in *src?
|
||||||
* bitmap_weight(src, nbits) Hamming Weight: number set bits
|
* bitmap_weight(src, nbits) Hamming Weight: number set bits
|
||||||
|
* bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
|
||||||
* bitmap_set(dst, pos, nbits) Set specified bit area
|
* bitmap_set(dst, pos, nbits) Set specified bit area
|
||||||
* bitmap_clear(dst, pos, nbits) Clear specified bit area
|
* bitmap_clear(dst, pos, nbits) Clear specified bit area
|
||||||
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
|
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
|
||||||
@ -164,6 +165,8 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
|
|||||||
bool __bitmap_subset(const unsigned long *bitmap1,
|
bool __bitmap_subset(const unsigned long *bitmap1,
|
||||||
const unsigned long *bitmap2, unsigned int nbits);
|
const unsigned long *bitmap2, unsigned int nbits);
|
||||||
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
|
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
|
||||||
|
unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
|
||||||
|
const unsigned long *bitmap2, unsigned int nbits);
|
||||||
void __bitmap_set(unsigned long *map, unsigned int start, int len);
|
void __bitmap_set(unsigned long *map, unsigned int start, int len);
|
||||||
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
|
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
|
||||||
|
|
||||||
@ -222,7 +225,6 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n
|
|||||||
#else
|
#else
|
||||||
#define bitmap_copy_le bitmap_copy
|
#define bitmap_copy_le bitmap_copy
|
||||||
#endif
|
#endif
|
||||||
unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
|
|
||||||
int bitmap_print_to_pagebuf(bool list, char *buf,
|
int bitmap_print_to_pagebuf(bool list, char *buf,
|
||||||
const unsigned long *maskp, int nmaskbits);
|
const unsigned long *maskp, int nmaskbits);
|
||||||
|
|
||||||
@ -439,6 +441,15 @@ unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
|
|||||||
return __bitmap_weight(src, nbits);
|
return __bitmap_weight(src, nbits);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static __always_inline
|
||||||
|
unsigned long bitmap_weight_and(const unsigned long *src1,
|
||||||
|
const unsigned long *src2, unsigned int nbits)
|
||||||
|
{
|
||||||
|
if (small_const_nbits(nbits))
|
||||||
|
return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
|
||||||
|
return __bitmap_weight_and(src1, src2, nbits);
|
||||||
|
}
|
||||||
|
|
||||||
static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
|
static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
|
||||||
unsigned int nbits)
|
unsigned int nbits)
|
||||||
{
|
{
|
||||||
|
@ -247,6 +247,25 @@ static inline unsigned long __ffs64(u64 word)
|
|||||||
return __ffs((unsigned long)word);
|
return __ffs((unsigned long)word);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* fns - find N'th set bit in a word
|
||||||
|
* @word: The word to search
|
||||||
|
* @n: Bit to find
|
||||||
|
*/
|
||||||
|
static inline unsigned long fns(unsigned long word, unsigned int n)
|
||||||
|
{
|
||||||
|
unsigned int bit;
|
||||||
|
|
||||||
|
while (word) {
|
||||||
|
bit = __ffs(word);
|
||||||
|
if (n-- == 0)
|
||||||
|
return bit;
|
||||||
|
__clear_bit(bit, &word);
|
||||||
|
}
|
||||||
|
|
||||||
|
return BITS_PER_LONG;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* assign_bit - Assign value to a bit in memory
|
* assign_bit - Assign value to a bit in memory
|
||||||
* @nr: the bit to set
|
* @nr: the bit to set
|
||||||
|
@ -35,19 +35,23 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
|
|||||||
*/
|
*/
|
||||||
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
|
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
|
||||||
|
|
||||||
#if NR_CPUS == 1
|
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
|
||||||
#define nr_cpu_ids 1U
|
#define nr_cpu_ids ((unsigned int)NR_CPUS)
|
||||||
#else
|
#else
|
||||||
extern unsigned int nr_cpu_ids;
|
extern unsigned int nr_cpu_ids;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_CPUMASK_OFFSTACK
|
static inline void set_nr_cpu_ids(unsigned int nr)
|
||||||
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
|
{
|
||||||
* not all bits may be allocated. */
|
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
|
||||||
#define nr_cpumask_bits nr_cpu_ids
|
WARN_ON(nr != nr_cpu_ids);
|
||||||
#else
|
#else
|
||||||
#define nr_cpumask_bits ((unsigned int)NR_CPUS)
|
nr_cpu_ids = nr;
|
||||||
#endif
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Deprecated. Always use nr_cpu_ids. */
|
||||||
|
#define nr_cpumask_bits nr_cpu_ids
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The following particular system cpumasks and operations manage
|
* The following particular system cpumasks and operations manage
|
||||||
@ -67,10 +71,6 @@ extern unsigned int nr_cpu_ids;
|
|||||||
* cpu_online_mask is the dynamic subset of cpu_present_mask,
|
* cpu_online_mask is the dynamic subset of cpu_present_mask,
|
||||||
* indicating those CPUs available for scheduling.
|
* indicating those CPUs available for scheduling.
|
||||||
*
|
*
|
||||||
* If HOTPLUG is enabled, then cpu_possible_mask is forced to have
|
|
||||||
* all NR_CPUS bits set, otherwise it is just the set of CPUs that
|
|
||||||
* ACPI reports present at boot.
|
|
||||||
*
|
|
||||||
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
|
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
|
||||||
* depending on what ACPI reports as currently plugged in, otherwise
|
* depending on what ACPI reports as currently plugged in, otherwise
|
||||||
* cpu_present_mask is just a copy of cpu_possible_mask.
|
* cpu_present_mask is just a copy of cpu_possible_mask.
|
||||||
@ -174,9 +174,8 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
|
|||||||
static inline
|
static inline
|
||||||
unsigned int cpumask_next(int n, const struct cpumask *srcp)
|
unsigned int cpumask_next(int n, const struct cpumask *srcp)
|
||||||
{
|
{
|
||||||
/* -1 is a legal arg here. */
|
/* n is a prior cpu */
|
||||||
if (n != -1)
|
cpumask_check(n + 1);
|
||||||
cpumask_check(n);
|
|
||||||
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
|
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -189,9 +188,8 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
|
|||||||
*/
|
*/
|
||||||
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
|
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
|
||||||
{
|
{
|
||||||
/* -1 is a legal arg here. */
|
/* n is a prior cpu */
|
||||||
if (n != -1)
|
cpumask_check(n + 1);
|
||||||
cpumask_check(n);
|
|
||||||
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
|
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -231,9 +229,8 @@ static inline
|
|||||||
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
|
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
|
||||||
const struct cpumask *src2p)
|
const struct cpumask *src2p)
|
||||||
{
|
{
|
||||||
/* -1 is a legal arg here. */
|
/* n is a prior cpu */
|
||||||
if (n != -1)
|
cpumask_check(n + 1);
|
||||||
cpumask_check(n);
|
|
||||||
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
|
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
|
||||||
nr_cpumask_bits, n + 1);
|
nr_cpumask_bits, n + 1);
|
||||||
}
|
}
|
||||||
@ -246,9 +243,7 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
|
|||||||
* After the loop, cpu is >= nr_cpu_ids.
|
* After the loop, cpu is >= nr_cpu_ids.
|
||||||
*/
|
*/
|
||||||
#define for_each_cpu(cpu, mask) \
|
#define for_each_cpu(cpu, mask) \
|
||||||
for ((cpu) = -1; \
|
for_each_set_bit(cpu, cpumask_bits(mask), nr_cpumask_bits)
|
||||||
(cpu) = cpumask_next((cpu), (mask)), \
|
|
||||||
(cpu) < nr_cpu_ids;)
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* for_each_cpu_not - iterate over every cpu in a complemented mask
|
* for_each_cpu_not - iterate over every cpu in a complemented mask
|
||||||
@ -258,17 +253,15 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
|
|||||||
* After the loop, cpu is >= nr_cpu_ids.
|
* After the loop, cpu is >= nr_cpu_ids.
|
||||||
*/
|
*/
|
||||||
#define for_each_cpu_not(cpu, mask) \
|
#define for_each_cpu_not(cpu, mask) \
|
||||||
for ((cpu) = -1; \
|
for_each_clear_bit(cpu, cpumask_bits(mask), nr_cpumask_bits)
|
||||||
(cpu) = cpumask_next_zero((cpu), (mask)), \
|
|
||||||
(cpu) < nr_cpu_ids;)
|
|
||||||
|
|
||||||
#if NR_CPUS == 1
|
#if NR_CPUS == 1
|
||||||
static inline
|
static inline
|
||||||
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
|
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
|
||||||
{
|
{
|
||||||
cpumask_check(start);
|
cpumask_check(start);
|
||||||
if (n != -1)
|
/* n is a prior cpu */
|
||||||
cpumask_check(n);
|
cpumask_check(n + 1);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return the first available CPU when wrapping, or when starting before cpu0,
|
* Return the first available CPU when wrapping, or when starting before cpu0,
|
||||||
@ -293,10 +286,8 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
|
|||||||
*
|
*
|
||||||
* After the loop, cpu is >= nr_cpu_ids.
|
* After the loop, cpu is >= nr_cpu_ids.
|
||||||
*/
|
*/
|
||||||
#define for_each_cpu_wrap(cpu, mask, start) \
|
#define for_each_cpu_wrap(cpu, mask, start) \
|
||||||
for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
|
for_each_set_bit_wrap(cpu, cpumask_bits(mask), nr_cpumask_bits, start)
|
||||||
(cpu) < nr_cpumask_bits; \
|
|
||||||
(cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* for_each_cpu_and - iterate over every cpu in both masks
|
* for_each_cpu_and - iterate over every cpu in both masks
|
||||||
@ -313,9 +304,25 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
|
|||||||
* After the loop, cpu is >= nr_cpu_ids.
|
* After the loop, cpu is >= nr_cpu_ids.
|
||||||
*/
|
*/
|
||||||
#define for_each_cpu_and(cpu, mask1, mask2) \
|
#define for_each_cpu_and(cpu, mask1, mask2) \
|
||||||
for ((cpu) = -1; \
|
for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), nr_cpumask_bits)
|
||||||
(cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
|
|
||||||
(cpu) < nr_cpu_ids;)
|
/**
|
||||||
|
* for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
|
||||||
|
* those present in another.
|
||||||
|
* @cpu: the (optionally unsigned) integer iterator
|
||||||
|
* @mask1: the first cpumask pointer
|
||||||
|
* @mask2: the second cpumask pointer
|
||||||
|
*
|
||||||
|
* This saves a temporary CPU mask in many places. It is equivalent to:
|
||||||
|
* struct cpumask tmp;
|
||||||
|
* cpumask_andnot(&tmp, &mask1, &mask2);
|
||||||
|
* for_each_cpu(cpu, &tmp)
|
||||||
|
* ...
|
||||||
|
*
|
||||||
|
* After the loop, cpu is >= nr_cpu_ids.
|
||||||
|
*/
|
||||||
|
#define for_each_cpu_andnot(cpu, mask1, mask2) \
|
||||||
|
for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), nr_cpumask_bits)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* cpumask_any_but - return a "random" in a cpumask, but not this one.
|
* cpumask_any_but - return a "random" in a cpumask, but not this one.
|
||||||
@ -337,6 +344,50 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
|
|||||||
return i;
|
return i;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* cpumask_nth - get the first cpu in a cpumask
|
||||||
|
* @srcp: the cpumask pointer
|
||||||
|
* @cpu: the N'th cpu to find, starting from 0
|
||||||
|
*
|
||||||
|
* Returns >= nr_cpu_ids if such cpu doesn't exist.
|
||||||
|
*/
|
||||||
|
static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
|
||||||
|
{
|
||||||
|
return find_nth_bit(cpumask_bits(srcp), nr_cpumask_bits, cpumask_check(cpu));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* cpumask_nth_and - get the first cpu in 2 cpumasks
|
||||||
|
* @srcp1: the cpumask pointer
|
||||||
|
* @srcp2: the cpumask pointer
|
||||||
|
* @cpu: the N'th cpu to find, starting from 0
|
||||||
|
*
|
||||||
|
* Returns >= nr_cpu_ids if such cpu doesn't exist.
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
|
||||||
|
const struct cpumask *srcp2)
|
||||||
|
{
|
||||||
|
return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
|
||||||
|
nr_cpumask_bits, cpumask_check(cpu));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* cpumask_nth_andnot - get the first cpu set in 1st cpumask, and clear in 2nd.
|
||||||
|
* @srcp1: the cpumask pointer
|
||||||
|
* @srcp2: the cpumask pointer
|
||||||
|
* @cpu: the N'th cpu to find, starting from 0
|
||||||
|
*
|
||||||
|
* Returns >= nr_cpu_ids if such cpu doesn't exist.
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
|
||||||
|
const struct cpumask *srcp2)
|
||||||
|
{
|
||||||
|
return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
|
||||||
|
nr_cpumask_bits, cpumask_check(cpu));
|
||||||
|
}
|
||||||
|
|
||||||
#define CPU_BITS_NONE \
|
#define CPU_BITS_NONE \
|
||||||
{ \
|
{ \
|
||||||
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
|
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
|
||||||
@ -586,6 +637,17 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
|
|||||||
return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
|
return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
|
||||||
|
* @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
|
||||||
|
* @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
|
||||||
|
*/
|
||||||
|
static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
|
||||||
|
const struct cpumask *srcp2)
|
||||||
|
{
|
||||||
|
return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* cpumask_shift_right - *dstp = *srcp >> n
|
* cpumask_shift_right - *dstp = *srcp >> n
|
||||||
* @dstp: the cpumask result
|
* @dstp: the cpumask result
|
||||||
|
@ -8,15 +8,33 @@
|
|||||||
|
|
||||||
#include <linux/bitops.h>
|
#include <linux/bitops.h>
|
||||||
|
|
||||||
extern unsigned long _find_next_bit(const unsigned long *addr1,
|
unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
|
||||||
const unsigned long *addr2, unsigned long nbits,
|
unsigned long start);
|
||||||
unsigned long start, unsigned long invert, unsigned long le);
|
unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
|
||||||
|
unsigned long nbits, unsigned long start);
|
||||||
|
unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
|
||||||
|
unsigned long nbits, unsigned long start);
|
||||||
|
unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
|
||||||
|
unsigned long start);
|
||||||
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
|
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
|
||||||
|
unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
|
||||||
|
unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
|
||||||
|
unsigned long size, unsigned long n);
|
||||||
|
unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
|
||||||
|
unsigned long size, unsigned long n);
|
||||||
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
|
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
|
||||||
const unsigned long *addr2, unsigned long size);
|
const unsigned long *addr2, unsigned long size);
|
||||||
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
|
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
|
||||||
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
|
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
|
||||||
|
|
||||||
|
#ifdef __BIG_ENDIAN
|
||||||
|
unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size);
|
||||||
|
unsigned long _find_next_zero_bit_le(const unsigned long *addr, unsigned
|
||||||
|
long size, unsigned long offset);
|
||||||
|
unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
|
||||||
|
long size, unsigned long offset);
|
||||||
|
#endif
|
||||||
|
|
||||||
#ifndef find_next_bit
|
#ifndef find_next_bit
|
||||||
/**
|
/**
|
||||||
* find_next_bit - find the next set bit in a memory region
|
* find_next_bit - find the next set bit in a memory region
|
||||||
@ -41,7 +59,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
|
|||||||
return val ? __ffs(val) : size;
|
return val ? __ffs(val) : size;
|
||||||
}
|
}
|
||||||
|
|
||||||
return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
|
return _find_next_bit(addr, size, offset);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -71,7 +89,38 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
|
|||||||
return val ? __ffs(val) : size;
|
return val ? __ffs(val) : size;
|
||||||
}
|
}
|
||||||
|
|
||||||
return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
|
return _find_next_and_bit(addr1, addr2, size, offset);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef find_next_andnot_bit
|
||||||
|
/**
|
||||||
|
* find_next_andnot_bit - find the next set bit in *addr1 excluding all the bits
|
||||||
|
* in *addr2
|
||||||
|
* @addr1: The first address to base the search on
|
||||||
|
* @addr2: The second address to base the search on
|
||||||
|
* @size: The bitmap size in bits
|
||||||
|
* @offset: The bitnumber to start searching at
|
||||||
|
*
|
||||||
|
* Returns the bit number for the next set bit
|
||||||
|
* If no bits are set, returns @size.
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
unsigned long find_next_andnot_bit(const unsigned long *addr1,
|
||||||
|
const unsigned long *addr2, unsigned long size,
|
||||||
|
unsigned long offset)
|
||||||
|
{
|
||||||
|
if (small_const_nbits(size)) {
|
||||||
|
unsigned long val;
|
||||||
|
|
||||||
|
if (unlikely(offset >= size))
|
||||||
|
return size;
|
||||||
|
|
||||||
|
val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
|
||||||
|
return val ? __ffs(val) : size;
|
||||||
|
}
|
||||||
|
|
||||||
|
return _find_next_andnot_bit(addr1, addr2, size, offset);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -99,7 +148,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
|
|||||||
return val == ~0UL ? size : ffz(val);
|
return val == ~0UL ? size : ffz(val);
|
||||||
}
|
}
|
||||||
|
|
||||||
return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
|
return _find_next_zero_bit(addr, size, offset);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -125,6 +174,87 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/**
|
||||||
|
* find_nth_bit - find N'th set bit in a memory region
|
||||||
|
* @addr: The address to start the search at
|
||||||
|
* @size: The maximum number of bits to search
|
||||||
|
* @n: The number of set bit, which position is needed, counting from 0
|
||||||
|
*
|
||||||
|
* The following is semantically equivalent:
|
||||||
|
* idx = find_nth_bit(addr, size, 0);
|
||||||
|
* idx = find_first_bit(addr, size);
|
||||||
|
*
|
||||||
|
* Returns the bit number of the N'th set bit.
|
||||||
|
* If no such, returns @size.
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
|
||||||
|
{
|
||||||
|
if (n >= size)
|
||||||
|
return size;
|
||||||
|
|
||||||
|
if (small_const_nbits(size)) {
|
||||||
|
unsigned long val = *addr & GENMASK(size - 1, 0);
|
||||||
|
|
||||||
|
return val ? fns(val, n) : size;
|
||||||
|
}
|
||||||
|
|
||||||
|
return __find_nth_bit(addr, size, n);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* find_nth_and_bit - find N'th set bit in 2 memory regions
|
||||||
|
* @addr1: The 1st address to start the search at
|
||||||
|
* @addr2: The 2nd address to start the search at
|
||||||
|
* @size: The maximum number of bits to search
|
||||||
|
* @n: The number of set bit, which position is needed, counting from 0
|
||||||
|
*
|
||||||
|
* Returns the bit number of the N'th set bit.
|
||||||
|
* If no such, returns @size.
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
|
||||||
|
unsigned long size, unsigned long n)
|
||||||
|
{
|
||||||
|
if (n >= size)
|
||||||
|
return size;
|
||||||
|
|
||||||
|
if (small_const_nbits(size)) {
|
||||||
|
unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
|
||||||
|
|
||||||
|
return val ? fns(val, n) : size;
|
||||||
|
}
|
||||||
|
|
||||||
|
return __find_nth_and_bit(addr1, addr2, size, n);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* find_nth_andnot_bit - find N'th set bit in 2 memory regions,
|
||||||
|
* flipping bits in 2nd region
|
||||||
|
* @addr1: The 1st address to start the search at
|
||||||
|
* @addr2: The 2nd address to start the search at
|
||||||
|
* @size: The maximum number of bits to search
|
||||||
|
* @n: The number of set bit, which position is needed, counting from 0
|
||||||
|
*
|
||||||
|
* Returns the bit number of the N'th set bit.
|
||||||
|
* If no such, returns @size.
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
|
||||||
|
unsigned long size, unsigned long n)
|
||||||
|
{
|
||||||
|
if (n >= size)
|
||||||
|
return size;
|
||||||
|
|
||||||
|
if (small_const_nbits(size)) {
|
||||||
|
unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
|
||||||
|
|
||||||
|
return val ? fns(val, n) : size;
|
||||||
|
}
|
||||||
|
|
||||||
|
return __find_nth_andnot_bit(addr1, addr2, size, n);
|
||||||
|
}
|
||||||
|
|
||||||
#ifndef find_first_and_bit
|
#ifndef find_first_and_bit
|
||||||
/**
|
/**
|
||||||
* find_first_and_bit - find the first set bit in both memory regions
|
* find_first_and_bit - find the first set bit in both memory regions
|
||||||
@ -193,6 +323,78 @@ unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/**
|
||||||
|
* find_next_and_bit_wrap - find the next set bit in both memory regions
|
||||||
|
* @addr1: The first address to base the search on
|
||||||
|
* @addr2: The second address to base the search on
|
||||||
|
* @size: The bitmap size in bits
|
||||||
|
* @offset: The bitnumber to start searching at
|
||||||
|
*
|
||||||
|
* Returns the bit number for the next set bit, or first set bit up to @offset
|
||||||
|
* If no bits are set, returns @size.
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
|
||||||
|
const unsigned long *addr2,
|
||||||
|
unsigned long size, unsigned long offset)
|
||||||
|
{
|
||||||
|
unsigned long bit = find_next_and_bit(addr1, addr2, size, offset);
|
||||||
|
|
||||||
|
if (bit < size)
|
||||||
|
return bit;
|
||||||
|
|
||||||
|
bit = find_first_and_bit(addr1, addr2, offset);
|
||||||
|
return bit < offset ? bit : size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* find_next_bit_wrap - find the next set bit in both memory regions
|
||||||
|
* @addr: The first address to base the search on
|
||||||
|
* @size: The bitmap size in bits
|
||||||
|
* @offset: The bitnumber to start searching at
|
||||||
|
*
|
||||||
|
* Returns the bit number for the next set bit, or first set bit up to @offset
|
||||||
|
* If no bits are set, returns @size.
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
unsigned long find_next_bit_wrap(const unsigned long *addr,
|
||||||
|
unsigned long size, unsigned long offset)
|
||||||
|
{
|
||||||
|
unsigned long bit = find_next_bit(addr, size, offset);
|
||||||
|
|
||||||
|
if (bit < size)
|
||||||
|
return bit;
|
||||||
|
|
||||||
|
bit = find_first_bit(addr, offset);
|
||||||
|
return bit < offset ? bit : size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Helper for for_each_set_bit_wrap(). Make sure you're doing right thing
|
||||||
|
* before using it alone.
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
|
||||||
|
unsigned long start, unsigned long n)
|
||||||
|
{
|
||||||
|
unsigned long bit;
|
||||||
|
|
||||||
|
/* If not wrapped around */
|
||||||
|
if (n > start) {
|
||||||
|
/* and have a bit, just return it. */
|
||||||
|
bit = find_next_bit(bitmap, size, n);
|
||||||
|
if (bit < size)
|
||||||
|
return bit;
|
||||||
|
|
||||||
|
/* Otherwise, wrap around and ... */
|
||||||
|
n = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Search the other part. */
|
||||||
|
bit = find_next_bit(bitmap, start, n);
|
||||||
|
return bit < start ? bit : size;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* find_next_clump8 - find next 8-bit clump with set bits in a memory region
|
* find_next_clump8 - find next 8-bit clump with set bits in a memory region
|
||||||
* @clump: location to store copy of found clump
|
* @clump: location to store copy of found clump
|
||||||
@ -247,7 +449,21 @@ unsigned long find_next_zero_bit_le(const void *addr, unsigned
|
|||||||
return val == ~0UL ? size : ffz(val);
|
return val == ~0UL ? size : ffz(val);
|
||||||
}
|
}
|
||||||
|
|
||||||
return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
|
return _find_next_zero_bit_le(addr, size, offset);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef find_first_zero_bit_le
|
||||||
|
static inline
|
||||||
|
unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
|
||||||
|
{
|
||||||
|
if (small_const_nbits(size)) {
|
||||||
|
unsigned long val = swab(*(const unsigned long *)addr) | ~GENMASK(size - 1, 0);
|
||||||
|
|
||||||
|
return val == ~0UL ? size : ffz(val);
|
||||||
|
}
|
||||||
|
|
||||||
|
return _find_first_zero_bit_le(addr, size);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -266,40 +482,39 @@ unsigned long find_next_bit_le(const void *addr, unsigned
|
|||||||
return val ? __ffs(val) : size;
|
return val ? __ffs(val) : size;
|
||||||
}
|
}
|
||||||
|
|
||||||
return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
|
return _find_next_bit_le(addr, size, offset);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifndef find_first_zero_bit_le
|
|
||||||
#define find_first_zero_bit_le(addr, size) \
|
|
||||||
find_next_zero_bit_le((addr), (size), 0)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#else
|
#else
|
||||||
#error "Please fix <asm/byteorder.h>"
|
#error "Please fix <asm/byteorder.h>"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define for_each_set_bit(bit, addr, size) \
|
#define for_each_set_bit(bit, addr, size) \
|
||||||
for ((bit) = find_next_bit((addr), (size), 0); \
|
for ((bit) = 0; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
|
||||||
(bit) < (size); \
|
|
||||||
(bit) = find_next_bit((addr), (size), (bit) + 1))
|
#define for_each_and_bit(bit, addr1, addr2, size) \
|
||||||
|
for ((bit) = 0; \
|
||||||
|
(bit) = find_next_and_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
|
||||||
|
(bit)++)
|
||||||
|
|
||||||
|
#define for_each_andnot_bit(bit, addr1, addr2, size) \
|
||||||
|
for ((bit) = 0; \
|
||||||
|
(bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
|
||||||
|
(bit)++)
|
||||||
|
|
||||||
/* same as for_each_set_bit() but use bit as value to start with */
|
/* same as for_each_set_bit() but use bit as value to start with */
|
||||||
#define for_each_set_bit_from(bit, addr, size) \
|
#define for_each_set_bit_from(bit, addr, size) \
|
||||||
for ((bit) = find_next_bit((addr), (size), (bit)); \
|
for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
|
||||||
(bit) < (size); \
|
|
||||||
(bit) = find_next_bit((addr), (size), (bit) + 1))
|
|
||||||
|
|
||||||
#define for_each_clear_bit(bit, addr, size) \
|
#define for_each_clear_bit(bit, addr, size) \
|
||||||
for ((bit) = find_next_zero_bit((addr), (size), 0); \
|
for ((bit) = 0; \
|
||||||
(bit) < (size); \
|
(bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); \
|
||||||
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
|
(bit)++)
|
||||||
|
|
||||||
/* same as for_each_clear_bit() but use bit as value to start with */
|
/* same as for_each_clear_bit() but use bit as value to start with */
|
||||||
#define for_each_clear_bit_from(bit, addr, size) \
|
#define for_each_clear_bit_from(bit, addr, size) \
|
||||||
for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
|
for (; (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
|
||||||
(bit) < (size); \
|
|
||||||
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* for_each_set_bitrange - iterate over all set bit ranges [b; e)
|
* for_each_set_bitrange - iterate over all set bit ranges [b; e)
|
||||||
@ -309,11 +524,11 @@ unsigned long find_next_bit_le(const void *addr, unsigned
|
|||||||
* @size: bitmap size in number of bits
|
* @size: bitmap size in number of bits
|
||||||
*/
|
*/
|
||||||
#define for_each_set_bitrange(b, e, addr, size) \
|
#define for_each_set_bitrange(b, e, addr, size) \
|
||||||
for ((b) = find_next_bit((addr), (size), 0), \
|
for ((b) = 0; \
|
||||||
(e) = find_next_zero_bit((addr), (size), (b) + 1); \
|
(b) = find_next_bit((addr), (size), b), \
|
||||||
|
(e) = find_next_zero_bit((addr), (size), (b) + 1), \
|
||||||
(b) < (size); \
|
(b) < (size); \
|
||||||
(b) = find_next_bit((addr), (size), (e) + 1), \
|
(b) = (e) + 1)
|
||||||
(e) = find_next_zero_bit((addr), (size), (b) + 1))
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
|
* for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
|
||||||
@ -323,11 +538,11 @@ unsigned long find_next_bit_le(const void *addr, unsigned
|
|||||||
* @size: bitmap size in number of bits
|
* @size: bitmap size in number of bits
|
||||||
*/
|
*/
|
||||||
#define for_each_set_bitrange_from(b, e, addr, size) \
|
#define for_each_set_bitrange_from(b, e, addr, size) \
|
||||||
for ((b) = find_next_bit((addr), (size), (b)), \
|
for (; \
|
||||||
(e) = find_next_zero_bit((addr), (size), (b) + 1); \
|
(b) = find_next_bit((addr), (size), (b)), \
|
||||||
|
(e) = find_next_zero_bit((addr), (size), (b) + 1), \
|
||||||
(b) < (size); \
|
(b) < (size); \
|
||||||
(b) = find_next_bit((addr), (size), (e) + 1), \
|
(b) = (e) + 1)
|
||||||
(e) = find_next_zero_bit((addr), (size), (b) + 1))
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
|
* for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
|
||||||
@ -337,11 +552,11 @@ unsigned long find_next_bit_le(const void *addr, unsigned
|
|||||||
* @size: bitmap size in number of bits
|
* @size: bitmap size in number of bits
|
||||||
*/
|
*/
|
||||||
#define for_each_clear_bitrange(b, e, addr, size) \
|
#define for_each_clear_bitrange(b, e, addr, size) \
|
||||||
for ((b) = find_next_zero_bit((addr), (size), 0), \
|
for ((b) = 0; \
|
||||||
(e) = find_next_bit((addr), (size), (b) + 1); \
|
(b) = find_next_zero_bit((addr), (size), (b)), \
|
||||||
|
(e) = find_next_bit((addr), (size), (b) + 1), \
|
||||||
(b) < (size); \
|
(b) < (size); \
|
||||||
(b) = find_next_zero_bit((addr), (size), (e) + 1), \
|
(b) = (e) + 1)
|
||||||
(e) = find_next_bit((addr), (size), (b) + 1))
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
|
* for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
|
||||||
@ -351,11 +566,24 @@ unsigned long find_next_bit_le(const void *addr, unsigned
|
|||||||
* @size: bitmap size in number of bits
|
* @size: bitmap size in number of bits
|
||||||
*/
|
*/
|
||||||
#define for_each_clear_bitrange_from(b, e, addr, size) \
|
#define for_each_clear_bitrange_from(b, e, addr, size) \
|
||||||
for ((b) = find_next_zero_bit((addr), (size), (b)), \
|
for (; \
|
||||||
(e) = find_next_bit((addr), (size), (b) + 1); \
|
(b) = find_next_zero_bit((addr), (size), (b)), \
|
||||||
|
(e) = find_next_bit((addr), (size), (b) + 1), \
|
||||||
(b) < (size); \
|
(b) < (size); \
|
||||||
(b) = find_next_zero_bit((addr), (size), (e) + 1), \
|
(b) = (e) + 1)
|
||||||
(e) = find_next_bit((addr), (size), (b) + 1))
|
|
||||||
|
/**
|
||||||
|
* for_each_set_bit_wrap - iterate over all set bits starting from @start, and
|
||||||
|
* wrapping around the end of bitmap.
|
||||||
|
* @bit: offset for current iteration
|
||||||
|
* @addr: bitmap address to base the search on
|
||||||
|
* @size: bitmap size in number of bits
|
||||||
|
* @start: Starting bit for bitmap traversing, wrapping around the bitmap end
|
||||||
|
*/
|
||||||
|
#define for_each_set_bit_wrap(bit, addr, size, start) \
|
||||||
|
for ((bit) = find_next_bit_wrap((addr), (size), (start)); \
|
||||||
|
(bit) < (size); \
|
||||||
|
(bit) = __for_each_wrap((addr), (size), (start), (bit) + 1))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
|
* for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
|
||||||
|
@ -3663,9 +3663,8 @@ static inline bool netif_attr_test_online(unsigned long j,
|
|||||||
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
|
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
|
||||||
unsigned int nr_bits)
|
unsigned int nr_bits)
|
||||||
{
|
{
|
||||||
/* -1 is a legal arg here. */
|
/* n is a prior cpu */
|
||||||
if (n != -1)
|
cpu_max_bits_warn(n + 1, nr_bits);
|
||||||
cpu_max_bits_warn(n, nr_bits);
|
|
||||||
|
|
||||||
if (srcp)
|
if (srcp)
|
||||||
return find_next_bit(srcp, nr_bits, n + 1);
|
return find_next_bit(srcp, nr_bits, n + 1);
|
||||||
@ -3686,9 +3685,8 @@ static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
|
|||||||
const unsigned long *src2p,
|
const unsigned long *src2p,
|
||||||
unsigned int nr_bits)
|
unsigned int nr_bits)
|
||||||
{
|
{
|
||||||
/* -1 is a legal arg here. */
|
/* n is a prior cpu */
|
||||||
if (n != -1)
|
cpu_max_bits_warn(n + 1, nr_bits);
|
||||||
cpu_max_bits_warn(n, nr_bits);
|
|
||||||
|
|
||||||
if (src1p && src2p)
|
if (src1p && src2p)
|
||||||
return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
|
return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
|
||||||
|
@ -508,8 +508,7 @@ static inline int node_random(const nodemask_t *maskp)
|
|||||||
|
|
||||||
w = nodes_weight(*maskp);
|
w = nodes_weight(*maskp);
|
||||||
if (w)
|
if (w)
|
||||||
bit = bitmap_ord_to_pos(maskp->bits,
|
bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_int() % w);
|
||||||
get_random_int() % w, MAX_NUMNODES);
|
|
||||||
return bit;
|
return bit;
|
||||||
#else
|
#else
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -357,10 +357,7 @@ static void __sched_core_flip(bool enabled)
|
|||||||
/*
|
/*
|
||||||
* Toggle the offline CPUs.
|
* Toggle the offline CPUs.
|
||||||
*/
|
*/
|
||||||
cpumask_copy(&sched_core_mask, cpu_possible_mask);
|
for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
|
||||||
cpumask_andnot(&sched_core_mask, &sched_core_mask, cpu_online_mask);
|
|
||||||
|
|
||||||
for_each_cpu(cpu, &sched_core_mask)
|
|
||||||
cpu_rq(cpu)->core_enabled = enabled;
|
cpu_rq(cpu)->core_enabled = enabled;
|
||||||
|
|
||||||
cpus_read_unlock();
|
cpus_read_unlock();
|
||||||
|
@ -1069,7 +1069,7 @@ static int __init nrcpus(char *str)
|
|||||||
int nr_cpus;
|
int nr_cpus;
|
||||||
|
|
||||||
if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
|
if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
|
||||||
nr_cpu_ids = nr_cpus;
|
set_nr_cpu_ids(nr_cpus);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -1087,14 +1087,16 @@ static int __init maxcpus(char *str)
|
|||||||
|
|
||||||
early_param("maxcpus", maxcpus);
|
early_param("maxcpus", maxcpus);
|
||||||
|
|
||||||
|
#if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
|
||||||
/* Setup number of possible processor ids */
|
/* Setup number of possible processor ids */
|
||||||
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
|
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
|
||||||
EXPORT_SYMBOL(nr_cpu_ids);
|
EXPORT_SYMBOL(nr_cpu_ids);
|
||||||
|
#endif
|
||||||
|
|
||||||
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
|
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
|
||||||
void __init setup_nr_cpu_ids(void)
|
void __init setup_nr_cpu_ids(void)
|
||||||
{
|
{
|
||||||
nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
|
set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Called by boot processor to activate the rest. */
|
/* Called by boot processor to activate the rest. */
|
||||||
|
@ -531,6 +531,15 @@ config CPUMASK_OFFSTACK
|
|||||||
them on the stack. This is a bit more expensive, but avoids
|
them on the stack. This is a bit more expensive, but avoids
|
||||||
stack overflow.
|
stack overflow.
|
||||||
|
|
||||||
|
config FORCE_NR_CPUS
|
||||||
|
bool "NR_CPUS is set to an actual number of CPUs"
|
||||||
|
depends on SMP
|
||||||
|
help
|
||||||
|
Say Yes if you have NR_CPUS set to an actual number of possible
|
||||||
|
CPUs in your system, not to a default value. This forces the core
|
||||||
|
code to rely on compile-time value and optimize kernel routines
|
||||||
|
better.
|
||||||
|
|
||||||
config CPU_RMAP
|
config CPU_RMAP
|
||||||
bool
|
bool
|
||||||
depends on SMP
|
depends on SMP
|
||||||
|
68
lib/bitmap.c
68
lib/bitmap.c
@ -333,20 +333,32 @@ bool __bitmap_subset(const unsigned long *bitmap1,
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(__bitmap_subset);
|
EXPORT_SYMBOL(__bitmap_subset);
|
||||||
|
|
||||||
|
#define BITMAP_WEIGHT(FETCH, bits) \
|
||||||
|
({ \
|
||||||
|
unsigned int __bits = (bits), idx, w = 0; \
|
||||||
|
\
|
||||||
|
for (idx = 0; idx < __bits / BITS_PER_LONG; idx++) \
|
||||||
|
w += hweight_long(FETCH); \
|
||||||
|
\
|
||||||
|
if (__bits % BITS_PER_LONG) \
|
||||||
|
w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits)); \
|
||||||
|
\
|
||||||
|
w; \
|
||||||
|
})
|
||||||
|
|
||||||
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
|
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
|
||||||
{
|
{
|
||||||
unsigned int k, lim = bits/BITS_PER_LONG, w = 0;
|
return BITMAP_WEIGHT(bitmap[idx], bits);
|
||||||
|
|
||||||
for (k = 0; k < lim; k++)
|
|
||||||
w += hweight_long(bitmap[k]);
|
|
||||||
|
|
||||||
if (bits % BITS_PER_LONG)
|
|
||||||
w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
|
|
||||||
|
|
||||||
return w;
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(__bitmap_weight);
|
EXPORT_SYMBOL(__bitmap_weight);
|
||||||
|
|
||||||
|
unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
|
||||||
|
const unsigned long *bitmap2, unsigned int bits)
|
||||||
|
{
|
||||||
|
return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(__bitmap_weight_and);
|
||||||
|
|
||||||
void __bitmap_set(unsigned long *map, unsigned int start, int len)
|
void __bitmap_set(unsigned long *map, unsigned int start, int len)
|
||||||
{
|
{
|
||||||
unsigned long *p = map + BIT_WORD(start);
|
unsigned long *p = map + BIT_WORD(start);
|
||||||
@ -953,37 +965,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigne
|
|||||||
if (pos >= nbits || !test_bit(pos, buf))
|
if (pos >= nbits || !test_bit(pos, buf))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
return __bitmap_weight(buf, pos);
|
return bitmap_weight(buf, pos);
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* bitmap_ord_to_pos - find position of n-th set bit in bitmap
|
|
||||||
* @buf: pointer to bitmap
|
|
||||||
* @ord: ordinal bit position (n-th set bit, n >= 0)
|
|
||||||
* @nbits: number of valid bit positions in @buf
|
|
||||||
*
|
|
||||||
* Map the ordinal offset of bit @ord in @buf to its position in @buf.
|
|
||||||
* Value of @ord should be in range 0 <= @ord < weight(buf). If @ord
|
|
||||||
* >= weight(buf), returns @nbits.
|
|
||||||
*
|
|
||||||
* If for example, just bits 4 through 7 are set in @buf, then @ord
|
|
||||||
* values 0 through 3 will get mapped to 4 through 7, respectively,
|
|
||||||
* and all other @ord values returns @nbits. When @ord value 3
|
|
||||||
* gets mapped to (returns) @pos value 7 in this example, that means
|
|
||||||
* that the 3rd set bit (starting with 0th) is at position 7 in @buf.
|
|
||||||
*
|
|
||||||
* The bit positions 0 through @nbits-1 are valid positions in @buf.
|
|
||||||
*/
|
|
||||||
unsigned int bitmap_ord_to_pos(const unsigned long *buf, unsigned int ord, unsigned int nbits)
|
|
||||||
{
|
|
||||||
unsigned int pos;
|
|
||||||
|
|
||||||
for (pos = find_first_bit(buf, nbits);
|
|
||||||
pos < nbits && ord;
|
|
||||||
pos = find_next_bit(buf, nbits, pos + 1))
|
|
||||||
ord--;
|
|
||||||
|
|
||||||
return pos;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -1035,7 +1017,7 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
|
|||||||
if (n < 0 || w == 0)
|
if (n < 0 || w == 0)
|
||||||
set_bit(oldbit, dst); /* identity map */
|
set_bit(oldbit, dst); /* identity map */
|
||||||
else
|
else
|
||||||
set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst);
|
set_bit(find_nth_bit(new, nbits, n % w), dst);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(bitmap_remap);
|
EXPORT_SYMBOL(bitmap_remap);
|
||||||
@ -1074,7 +1056,7 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
|
|||||||
if (n < 0 || w == 0)
|
if (n < 0 || w == 0)
|
||||||
return oldbit;
|
return oldbit;
|
||||||
else
|
else
|
||||||
return bitmap_ord_to_pos(new, n % w, bits);
|
return find_nth_bit(new, bits, n % w);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(bitmap_bitremap);
|
EXPORT_SYMBOL(bitmap_bitremap);
|
||||||
|
|
||||||
@ -1198,7 +1180,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
|
|||||||
* The following code is a more efficient, but less
|
* The following code is a more efficient, but less
|
||||||
* obvious, equivalent to the loop:
|
* obvious, equivalent to the loop:
|
||||||
* for (m = 0; m < bitmap_weight(relmap, bits); m++) {
|
* for (m = 0; m < bitmap_weight(relmap, bits); m++) {
|
||||||
* n = bitmap_ord_to_pos(orig, m, bits);
|
* n = find_nth_bit(orig, bits, m);
|
||||||
* if (test_bit(m, orig))
|
* if (test_bit(m, orig))
|
||||||
* set_bit(n, dst);
|
* set_bit(n, dst);
|
||||||
* }
|
* }
|
||||||
|
@ -128,23 +128,21 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
|
|||||||
i %= num_online_cpus();
|
i %= num_online_cpus();
|
||||||
|
|
||||||
if (node == NUMA_NO_NODE) {
|
if (node == NUMA_NO_NODE) {
|
||||||
for_each_cpu(cpu, cpu_online_mask)
|
cpu = cpumask_nth(i, cpu_online_mask);
|
||||||
if (i-- == 0)
|
if (cpu < nr_cpu_ids)
|
||||||
return cpu;
|
return cpu;
|
||||||
} else {
|
} else {
|
||||||
/* NUMA first. */
|
/* NUMA first. */
|
||||||
for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
|
cpu = cpumask_nth_and(i, cpu_online_mask, cpumask_of_node(node));
|
||||||
if (i-- == 0)
|
if (cpu < nr_cpu_ids)
|
||||||
return cpu;
|
return cpu;
|
||||||
|
|
||||||
for_each_cpu(cpu, cpu_online_mask) {
|
i -= cpumask_weight_and(cpu_online_mask, cpumask_of_node(node));
|
||||||
/* Skip NUMA nodes, done above. */
|
|
||||||
if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (i-- == 0)
|
/* Skip NUMA nodes, done above. */
|
||||||
return cpu;
|
cpu = cpumask_nth_andnot(i, cpu_online_mask, cpumask_of_node(node));
|
||||||
}
|
if (cpu < nr_cpu_ids)
|
||||||
|
return cpu;
|
||||||
}
|
}
|
||||||
BUG();
|
BUG();
|
||||||
}
|
}
|
||||||
@ -168,10 +166,8 @@ unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
|
|||||||
/* NOTE: our first selection will skip 0. */
|
/* NOTE: our first selection will skip 0. */
|
||||||
prev = __this_cpu_read(distribute_cpu_mask_prev);
|
prev = __this_cpu_read(distribute_cpu_mask_prev);
|
||||||
|
|
||||||
next = cpumask_next_and(prev, src1p, src2p);
|
next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
|
||||||
if (next >= nr_cpu_ids)
|
nr_cpumask_bits, prev + 1);
|
||||||
next = cpumask_first_and(src1p, src2p);
|
|
||||||
|
|
||||||
if (next < nr_cpu_ids)
|
if (next < nr_cpu_ids)
|
||||||
__this_cpu_write(distribute_cpu_mask_prev, next);
|
__this_cpu_write(distribute_cpu_mask_prev, next);
|
||||||
|
|
||||||
@ -185,11 +181,7 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp)
|
|||||||
|
|
||||||
/* NOTE: our first selection will skip 0. */
|
/* NOTE: our first selection will skip 0. */
|
||||||
prev = __this_cpu_read(distribute_cpu_mask_prev);
|
prev = __this_cpu_read(distribute_cpu_mask_prev);
|
||||||
|
next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
|
||||||
next = cpumask_next(prev, srcp);
|
|
||||||
if (next >= nr_cpu_ids)
|
|
||||||
next = cpumask_first(srcp);
|
|
||||||
|
|
||||||
if (next < nr_cpu_ids)
|
if (next < nr_cpu_ids)
|
||||||
__this_cpu_write(distribute_cpu_mask_prev, next);
|
__this_cpu_write(distribute_cpu_mask_prev, next);
|
||||||
|
|
||||||
|
@ -33,6 +33,19 @@
|
|||||||
KUNIT_EXPECT_EQ_MSG((test), nr_cpu_ids - mask_weight, iter, MASK_MSG(mask)); \
|
KUNIT_EXPECT_EQ_MSG((test), nr_cpu_ids - mask_weight, iter, MASK_MSG(mask)); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
|
#define EXPECT_FOR_EACH_CPU_OP_EQ(test, op, mask1, mask2) \
|
||||||
|
do { \
|
||||||
|
const cpumask_t *m1 = (mask1); \
|
||||||
|
const cpumask_t *m2 = (mask2); \
|
||||||
|
int weight; \
|
||||||
|
int cpu, iter = 0; \
|
||||||
|
cpumask_##op(&mask_tmp, m1, m2); \
|
||||||
|
weight = cpumask_weight(&mask_tmp); \
|
||||||
|
for_each_cpu_##op(cpu, mask1, mask2) \
|
||||||
|
iter++; \
|
||||||
|
KUNIT_EXPECT_EQ((test), weight, iter); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \
|
#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \
|
||||||
do { \
|
do { \
|
||||||
const cpumask_t *m = (mask); \
|
const cpumask_t *m = (mask); \
|
||||||
@ -54,6 +67,7 @@
|
|||||||
|
|
||||||
static cpumask_t mask_empty;
|
static cpumask_t mask_empty;
|
||||||
static cpumask_t mask_all;
|
static cpumask_t mask_all;
|
||||||
|
static cpumask_t mask_tmp;
|
||||||
|
|
||||||
static void test_cpumask_weight(struct kunit *test)
|
static void test_cpumask_weight(struct kunit *test)
|
||||||
{
|
{
|
||||||
@@ -101,10 +115,15 @@ static void test_cpumask_iterators(struct kunit *test)
 	EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty);
 	EXPECT_FOR_EACH_CPU_NOT_EQ(test, &mask_empty);
 	EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty);
+	EXPECT_FOR_EACH_CPU_OP_EQ(test, and, &mask_empty, &mask_empty);
+	EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, &mask_empty);
+	EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, &mask_empty, &mask_empty);
 
 	EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
 	EXPECT_FOR_EACH_CPU_NOT_EQ(test, cpu_possible_mask);
 	EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
+	EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, cpu_possible_mask);
+	EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, cpu_possible_mask, &mask_empty);
 }
 
 static void test_cpumask_iterators_builtin(struct kunit *test)
lib/find_bit.c (233 changed lines)
@@ -19,57 +19,78 @@
 #include <linux/minmax.h>
 #include <linux/swab.h>
 
-#if !defined(find_next_bit) || !defined(find_next_zero_bit) ||			\
-	!defined(find_next_bit_le) || !defined(find_next_zero_bit_le) ||	\
-	!defined(find_next_and_bit)
 /*
- * This is a common helper function for find_next_bit, find_next_zero_bit, and
- * find_next_and_bit. The differences are:
- *  - The "invert" argument, which is XORed with each fetched word before
- *    searching it for one bits.
- *  - The optional "addr2", which is anded with "addr1" if present.
+ * Common helper for find_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
  */
-unsigned long _find_next_bit(const unsigned long *addr1,
-		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert, unsigned long le)
-{
-	unsigned long tmp, mask;
-
-	if (unlikely(start >= nbits))
-		return nbits;
-
-	tmp = addr1[start / BITS_PER_LONG];
-	if (addr2)
-		tmp &= addr2[start / BITS_PER_LONG];
-	tmp ^= invert;
-
-	/* Handle 1st word. */
-	mask = BITMAP_FIRST_WORD_MASK(start);
-	if (le)
-		mask = swab(mask);
-
-	tmp &= mask;
-
-	start = round_down(start, BITS_PER_LONG);
-
-	while (!tmp) {
-		start += BITS_PER_LONG;
-		if (start >= nbits)
-			return nbits;
-
-		tmp = addr1[start / BITS_PER_LONG];
-		if (addr2)
-			tmp &= addr2[start / BITS_PER_LONG];
-		tmp ^= invert;
-	}
-
-	if (le)
-		tmp = swab(tmp);
-
-	return min(start + __ffs(tmp), nbits);
-}
-EXPORT_SYMBOL(_find_next_bit);
-#endif
+#define FIND_FIRST_BIT(FETCH, MUNGE, size)					\
+({										\
+	unsigned long idx, val, sz = (size);					\
+										\
+	for (idx = 0; idx * BITS_PER_LONG < sz; idx++) {			\
+		val = (FETCH);							\
+		if (val) {							\
+			sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz);	\
+			break;							\
+		}								\
+	}									\
+										\
+	sz;									\
+})
+
+/*
+ * Common helper for find_next_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ */
+#define FIND_NEXT_BIT(FETCH, MUNGE, size, start)				\
+({										\
+	unsigned long mask, idx, tmp, sz = (size), __start = (start);		\
+										\
+	if (unlikely(__start >= sz))						\
+		goto out;							\
+										\
+	mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start));				\
+	idx = __start / BITS_PER_LONG;						\
+										\
+	for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) {			\
+		if ((idx + 1) * BITS_PER_LONG >= sz)				\
+			goto out;						\
+		idx++;								\
+	}									\
+										\
+	sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz);			\
+out:										\
+	sz;									\
+})
+
+#define FIND_NTH_BIT(FETCH, size, num)						\
+({										\
+	unsigned long sz = (size), nr = (num), idx, w, tmp;			\
+										\
+	for (idx = 0; (idx + 1) * BITS_PER_LONG <= sz; idx++) {		\
+		if (idx * BITS_PER_LONG + nr >= sz)				\
+			goto out;						\
+										\
+		tmp = (FETCH);							\
+		w = hweight_long(tmp);						\
+		if (w > nr)							\
+			goto found;						\
+										\
+		nr -= w;							\
+	}									\
+										\
+	if (sz % BITS_PER_LONG)							\
+		tmp = (FETCH) & BITMAP_LAST_WORD_MASK(sz);			\
+found:										\
+	sz = min(idx * BITS_PER_LONG + fns(tmp, nr), sz);			\
+out:										\
+	sz;									\
+})
 
 #ifndef find_first_bit
 /*
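To see how FIND_NEXT_BIT() gets instantiated, here is roughly what a call with the FETCH expression "addr1[idx] & addr2[idx]" boils down to, rewritten as an ordinary userspace function (illustrative only; __builtin_ctzl stands in for __ffs and the function name is made up):

#include <stdio.h>

#define LONG_BITS (8 * sizeof(unsigned long))

/*
 * Sketch of FIND_NEXT_BIT() for the two-bitmap AND case: fetch one word at a
 * time, mask off the bits below 'start' in the first word, and stop at the
 * first non-zero word.
 */
static unsigned long next_and_bit_sketch(const unsigned long *a, const unsigned long *b,
					 unsigned long nbits, unsigned long start)
{
	unsigned long idx, tmp, found;

	if (start >= nbits)
		return nbits;

	idx = start / LONG_BITS;
	tmp = (a[idx] & b[idx]) & (~0UL << (start % LONG_BITS));

	while (!tmp) {
		if ((idx + 1) * LONG_BITS >= nbits)
			return nbits;
		idx++;
		tmp = a[idx] & b[idx];
	}

	found = idx * LONG_BITS + __builtin_ctzl(tmp);
	return found < nbits ? found : nbits;
}

int main(void)
{
	unsigned long a[2] = { 0x0, 0x30 };	/* bits 68 and 69 set */
	unsigned long b[2] = { 0xff, 0x10 };	/* bits 0..7 and 68 set */

	printf("%lu\n", next_and_bit_sketch(a, b, 128, 10));	/* prints 68 */
	return 0;
}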
@@ -77,14 +98,7 @@ EXPORT_SYMBOL(_find_next_bit);
  */
 unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
 {
-	unsigned long idx;
-
-	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
-		if (addr[idx])
-			return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
-	}
-
-	return size;
+	return FIND_FIRST_BIT(addr[idx], /* nop */, size);
 }
 EXPORT_SYMBOL(_find_first_bit);
 #endif
@@ -97,15 +111,7 @@ unsigned long _find_first_and_bit(const unsigned long *addr1,
 					 const unsigned long *addr2,
 					 unsigned long size)
 {
-	unsigned long idx, val;
-
-	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
-		val = addr1[idx] & addr2[idx];
-		if (val)
-			return min(idx * BITS_PER_LONG + __ffs(val), size);
-	}
-
-	return size;
+	return FIND_FIRST_BIT(addr1[idx] & addr2[idx], /* nop */, size);
 }
 EXPORT_SYMBOL(_find_first_and_bit);
 #endif
@@ -116,18 +122,66 @@ EXPORT_SYMBOL(_find_first_and_bit);
  */
 unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
 {
-	unsigned long idx;
-
-	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
-		if (addr[idx] != ~0UL)
-			return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
-	}
-
-	return size;
+	return FIND_FIRST_BIT(~addr[idx], /* nop */, size);
 }
 EXPORT_SYMBOL(_find_first_zero_bit);
 #endif
 
+#ifndef find_next_bit
+unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
+{
+	return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_bit);
+#endif
+
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
+{
+	return FIND_NTH_BIT(addr[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_bit);
+
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+				 unsigned long size, unsigned long n)
+{
+	return FIND_NTH_BIT(addr1[idx] & addr2[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_and_bit);
+
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+				 unsigned long size, unsigned long n)
+{
+	return FIND_NTH_BIT(addr1[idx] & ~addr2[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_andnot_bit);
+
+#ifndef find_next_and_bit
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+					unsigned long nbits, unsigned long start)
+{
+	return FIND_NEXT_BIT(addr1[idx] & addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_and_bit);
+#endif
+
+#ifndef find_next_andnot_bit
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+					unsigned long nbits, unsigned long start)
+{
+	return FIND_NEXT_BIT(addr1[idx] & ~addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_andnot_bit);
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+					 unsigned long start)
+{
+	return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_zero_bit);
+#endif
+
 #ifndef find_last_bit
 unsigned long _find_last_bit(const unsigned long *addr, unsigned long size)
 {
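All the __find_nth_*() entry points funnel per-word popcounts through FIND_NTH_BIT(), which uses fns() to pick the n-th set bit inside the final word. A userspace sketch of the same word-at-a-time idea (illustrative names only; __builtin_popcountl and __builtin_ctzl stand in for hweight_long() and the in-word search):

#include <stdio.h>

#define LONG_BITS (8 * sizeof(unsigned long))

/* Return the index of the n-th (0-based) set bit of w; w must have more than n set bits. */
static unsigned long nth_bit_in_word(unsigned long w, unsigned long n)
{
	while (n--)
		w &= w - 1;		/* clear the lowest set bit */
	return __builtin_ctzl(w);
}

/* Sketch of find_nth_bit() semantics: returns 'size' if fewer than n+1 bits are set. */
static unsigned long find_nth_bit_sketch(const unsigned long *addr, unsigned long size,
					 unsigned long n)
{
	unsigned long idx, w;

	for (idx = 0; idx * LONG_BITS < size; idx++) {
		unsigned long word = addr[idx];

		/* Mask the tail of a partial last word, as the real code does. */
		if (size - idx * LONG_BITS < LONG_BITS)
			word &= (1UL << (size % LONG_BITS)) - 1;

		w = __builtin_popcountl(word);
		if (n < w) {
			unsigned long bit = idx * LONG_BITS + nth_bit_in_word(word, n);

			return bit < size ? bit : size;
		}
		n -= w;
	}
	return size;
}

int main(void)
{
	unsigned long map[2] = { 1UL << 10 | 1UL << 20, 1UL << 3 };	/* bits 10, 20, 67 */

	printf("%lu %lu %lu\n",
	       find_nth_bit_sketch(map, 128, 0),	/* 10 */
	       find_nth_bit_sketch(map, 128, 2),	/* 67 */
	       find_nth_bit_sketch(map, 128, 3));	/* 128: only three bits are set */
	return 0;
}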
@@ -161,3 +215,38 @@ unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
 	return offset;
 }
 EXPORT_SYMBOL(find_next_clump8);
+
+#ifdef __BIG_ENDIAN
+
+#ifndef find_first_zero_bit_le
+/*
+ * Find the first cleared bit in an LE memory region.
+ */
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size)
+{
+	return FIND_FIRST_BIT(~addr[idx], swab, size);
+}
+EXPORT_SYMBOL(_find_first_zero_bit_le);
+
+#endif
+
+#ifndef find_next_zero_bit_le
+unsigned long _find_next_zero_bit_le(const unsigned long *addr,
+					unsigned long size, unsigned long offset)
+{
+	return FIND_NEXT_BIT(~addr[idx], swab, size, offset);
+}
+EXPORT_SYMBOL(_find_next_zero_bit_le);
+#endif
+
+#ifndef find_next_bit_le
+unsigned long _find_next_bit_le(const unsigned long *addr,
+				unsigned long size, unsigned long offset)
+{
+	return FIND_NEXT_BIT(addr[idx], swab, size, offset);
+}
+EXPORT_SYMBOL(_find_next_bit_le);
+
+#endif
+
+#endif /* __BIG_ENDIAN */
@@ -115,6 +115,22 @@ static int __init test_find_last_bit(const void *bitmap, unsigned long len)
 	return 0;
 }
 
+static int __init test_find_nth_bit(const unsigned long *bitmap, unsigned long len)
+{
+	unsigned long l, n, w = bitmap_weight(bitmap, len);
+	ktime_t time;
+
+	time = ktime_get();
+	for (n = 0; n < w; n++) {
+		l = find_nth_bit(bitmap, len, n);
+		WARN_ON(l >= len);
+	}
+	time = ktime_get() - time;
+	pr_err("find_nth_bit: %18llu ns, %6ld iterations\n", time, w);
+
+	return 0;
+}
+
 static int __init test_find_next_and_bit(const void *bitmap,
 		const void *bitmap2, unsigned long len)
 {
@@ -142,6 +158,7 @@ static int __init find_bit_test(void)
 	test_find_next_bit(bitmap, BITMAP_LEN);
 	test_find_next_zero_bit(bitmap, BITMAP_LEN);
 	test_find_last_bit(bitmap, BITMAP_LEN);
+	test_find_nth_bit(bitmap, BITMAP_LEN / 10);
 
 	/*
 	 * test_find_first_bit() may take some time, so
@@ -164,6 +181,7 @@ static int __init find_bit_test(void)
 	test_find_next_bit(bitmap, BITMAP_LEN);
 	test_find_next_zero_bit(bitmap, BITMAP_LEN);
 	test_find_last_bit(bitmap, BITMAP_LEN);
+	test_find_nth_bit(bitmap, BITMAP_LEN);
 	test_find_first_bit(bitmap, BITMAP_LEN);
 	test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN);
 	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
@@ -16,6 +16,8 @@
 
 #include "../tools/testing/selftests/kselftest_module.h"
 
+#define EXP1_IN_BITS	(sizeof(exp1) * 8)
+
 KSTM_MODULE_GLOBALS();
 
 static char pbl_buffer[PAGE_SIZE] __initdata;
@@ -219,6 +221,47 @@ static void __init test_zero_clear(void)
 	expect_eq_pbl("", bmap, 1024);
 }
 
+static void __init test_find_nth_bit(void)
+{
+	unsigned long b, bit, cnt = 0;
+	DECLARE_BITMAP(bmap, 64 * 3);
+
+	bitmap_zero(bmap, 64 * 3);
+	__set_bit(10, bmap);
+	__set_bit(20, bmap);
+	__set_bit(30, bmap);
+	__set_bit(40, bmap);
+	__set_bit(50, bmap);
+	__set_bit(60, bmap);
+	__set_bit(80, bmap);
+	__set_bit(123, bmap);
+
+	expect_eq_uint(10, find_nth_bit(bmap, 64 * 3, 0));
+	expect_eq_uint(20, find_nth_bit(bmap, 64 * 3, 1));
+	expect_eq_uint(30, find_nth_bit(bmap, 64 * 3, 2));
+	expect_eq_uint(40, find_nth_bit(bmap, 64 * 3, 3));
+	expect_eq_uint(50, find_nth_bit(bmap, 64 * 3, 4));
+	expect_eq_uint(60, find_nth_bit(bmap, 64 * 3, 5));
+	expect_eq_uint(80, find_nth_bit(bmap, 64 * 3, 6));
+	expect_eq_uint(123, find_nth_bit(bmap, 64 * 3, 7));
+	expect_eq_uint(64 * 3, find_nth_bit(bmap, 64 * 3, 8));
+
+	expect_eq_uint(10, find_nth_bit(bmap, 64 * 3 - 1, 0));
+	expect_eq_uint(20, find_nth_bit(bmap, 64 * 3 - 1, 1));
+	expect_eq_uint(30, find_nth_bit(bmap, 64 * 3 - 1, 2));
+	expect_eq_uint(40, find_nth_bit(bmap, 64 * 3 - 1, 3));
+	expect_eq_uint(50, find_nth_bit(bmap, 64 * 3 - 1, 4));
+	expect_eq_uint(60, find_nth_bit(bmap, 64 * 3 - 1, 5));
+	expect_eq_uint(80, find_nth_bit(bmap, 64 * 3 - 1, 6));
+	expect_eq_uint(123, find_nth_bit(bmap, 64 * 3 - 1, 7));
+	expect_eq_uint(64 * 3 - 1, find_nth_bit(bmap, 64 * 3 - 1, 8));
+
+	for_each_set_bit(bit, exp1, EXP1_IN_BITS) {
+		b = find_nth_bit(exp1, EXP1_IN_BITS, cnt++);
+		expect_eq_uint(b, bit);
+	}
+}
+
 static void __init test_fill_set(void)
 {
 	DECLARE_BITMAP(bmap, 1024);
@@ -557,8 +600,6 @@ static void __init test_bitmap_parse(void)
 	}
 }
 
-#define EXP1_IN_BITS	(sizeof(exp1) * 8)
-
 static void __init test_bitmap_arr32(void)
 {
 	unsigned int nbits, next_bit;
@@ -685,6 +726,239 @@ static void __init test_for_each_set_clump8(void)
 		expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump);
 	}
 }
 
+static void __init test_for_each_set_bit_wrap(void)
+{
+	DECLARE_BITMAP(orig, 500);
+	DECLARE_BITMAP(copy, 500);
+	unsigned int wr, bit;
+
+	bitmap_zero(orig, 500);
+
+	/* Set individual bits */
+	for (bit = 0; bit < 500; bit += 10)
+		bitmap_set(orig, bit, 1);
+
+	/* Set range of bits */
+	bitmap_set(orig, 100, 50);
+
+	for (wr = 0; wr < 500; wr++) {
+		bitmap_zero(copy, 500);
+
+		for_each_set_bit_wrap(bit, orig, 500, wr)
+			bitmap_set(copy, bit, 1);
+
+		expect_eq_bitmap(orig, copy, 500);
+	}
+}
+
+static void __init test_for_each_set_bit(void)
+{
+	DECLARE_BITMAP(orig, 500);
+	DECLARE_BITMAP(copy, 500);
+	unsigned int bit;
+
+	bitmap_zero(orig, 500);
+	bitmap_zero(copy, 500);
+
+	/* Set individual bits */
+	for (bit = 0; bit < 500; bit += 10)
+		bitmap_set(orig, bit, 1);
+
+	/* Set range of bits */
+	bitmap_set(orig, 100, 50);
+
+	for_each_set_bit(bit, orig, 500)
+		bitmap_set(copy, bit, 1);
+
+	expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_set_bit_from(void)
+{
+	DECLARE_BITMAP(orig, 500);
+	DECLARE_BITMAP(copy, 500);
+	unsigned int wr, bit;
+
+	bitmap_zero(orig, 500);
+
+	/* Set individual bits */
+	for (bit = 0; bit < 500; bit += 10)
+		bitmap_set(orig, bit, 1);
+
+	/* Set range of bits */
+	bitmap_set(orig, 100, 50);
+
+	for (wr = 0; wr < 500; wr++) {
+		DECLARE_BITMAP(tmp, 500);
+
+		bitmap_zero(copy, 500);
+		bit = wr;
+
+		for_each_set_bit_from(bit, orig, 500)
+			bitmap_set(copy, bit, 1);
+
+		bitmap_copy(tmp, orig, 500);
+		bitmap_clear(tmp, 0, wr);
+		expect_eq_bitmap(tmp, copy, 500);
+	}
+}
+
+static void __init test_for_each_clear_bit(void)
+{
+	DECLARE_BITMAP(orig, 500);
+	DECLARE_BITMAP(copy, 500);
+	unsigned int bit;
+
+	bitmap_fill(orig, 500);
+	bitmap_fill(copy, 500);
+
+	/* Set individual bits */
+	for (bit = 0; bit < 500; bit += 10)
+		bitmap_clear(orig, bit, 1);
+
+	/* Set range of bits */
+	bitmap_clear(orig, 100, 50);
+
+	for_each_clear_bit(bit, orig, 500)
+		bitmap_clear(copy, bit, 1);
+
+	expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_clear_bit_from(void)
+{
+	DECLARE_BITMAP(orig, 500);
+	DECLARE_BITMAP(copy, 500);
+	unsigned int wr, bit;
+
+	bitmap_fill(orig, 500);
+
+	/* Set individual bits */
+	for (bit = 0; bit < 500; bit += 10)
+		bitmap_clear(orig, bit, 1);
+
+	/* Set range of bits */
+	bitmap_clear(orig, 100, 50);
+
+	for (wr = 0; wr < 500; wr++) {
+		DECLARE_BITMAP(tmp, 500);
+
+		bitmap_fill(copy, 500);
+		bit = wr;
+
+		for_each_clear_bit_from(bit, orig, 500)
+			bitmap_clear(copy, bit, 1);
+
+		bitmap_copy(tmp, orig, 500);
+		bitmap_set(tmp, 0, wr);
+		expect_eq_bitmap(tmp, copy, 500);
+	}
+}
+
+static void __init test_for_each_set_bitrange(void)
+{
+	DECLARE_BITMAP(orig, 500);
+	DECLARE_BITMAP(copy, 500);
+	unsigned int s, e;
+
+	bitmap_zero(orig, 500);
+	bitmap_zero(copy, 500);
+
+	/* Set individual bits */
+	for (s = 0; s < 500; s += 10)
+		bitmap_set(orig, s, 1);
+
+	/* Set range of bits */
+	bitmap_set(orig, 100, 50);
+
+	for_each_set_bitrange(s, e, orig, 500)
+		bitmap_set(copy, s, e-s);
+
+	expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_clear_bitrange(void)
+{
+	DECLARE_BITMAP(orig, 500);
+	DECLARE_BITMAP(copy, 500);
+	unsigned int s, e;
+
+	bitmap_fill(orig, 500);
+	bitmap_fill(copy, 500);
+
+	/* Set individual bits */
+	for (s = 0; s < 500; s += 10)
+		bitmap_clear(orig, s, 1);
+
+	/* Set range of bits */
+	bitmap_clear(orig, 100, 50);
+
+	for_each_clear_bitrange(s, e, orig, 500)
+		bitmap_clear(copy, s, e-s);
+
+	expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_set_bitrange_from(void)
+{
+	DECLARE_BITMAP(orig, 500);
+	DECLARE_BITMAP(copy, 500);
+	unsigned int wr, s, e;
+
+	bitmap_zero(orig, 500);
+
+	/* Set individual bits */
+	for (s = 0; s < 500; s += 10)
+		bitmap_set(orig, s, 1);
+
+	/* Set range of bits */
+	bitmap_set(orig, 100, 50);
+
+	for (wr = 0; wr < 500; wr++) {
+		DECLARE_BITMAP(tmp, 500);
+
+		bitmap_zero(copy, 500);
+		s = wr;
+
+		for_each_set_bitrange_from(s, e, orig, 500)
+			bitmap_set(copy, s, e - s);
+
+		bitmap_copy(tmp, orig, 500);
+		bitmap_clear(tmp, 0, wr);
+		expect_eq_bitmap(tmp, copy, 500);
+	}
+}
+
+static void __init test_for_each_clear_bitrange_from(void)
+{
+	DECLARE_BITMAP(orig, 500);
+	DECLARE_BITMAP(copy, 500);
+	unsigned int wr, s, e;
+
+	bitmap_fill(orig, 500);
+
+	/* Set individual bits */
+	for (s = 0; s < 500; s += 10)
+		bitmap_clear(orig, s, 1);
+
+	/* Set range of bits */
+	bitmap_set(orig, 100, 50);
+
+	for (wr = 0; wr < 500; wr++) {
+		DECLARE_BITMAP(tmp, 500);
+
+		bitmap_fill(copy, 500);
+		s = wr;
+
+		for_each_clear_bitrange_from(s, e, orig, 500)
+			bitmap_clear(copy, s, e - s);
+
+		bitmap_copy(tmp, orig, 500);
+		bitmap_set(tmp, 0, wr);
+		expect_eq_bitmap(tmp, copy, 500);
+	}
+}
+
 struct test_bitmap_cut {
 	unsigned int first;
 	unsigned int cut;
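Each of these tests rebuilds a copy of the bitmap purely by iterating and then compares it with the original; for the _wrap and _from variants the comparison is repeated for every possible starting offset. A condensed userspace version of the same round-trip check for a wrap-style walk (illustrative only, using a plain modular loop instead of the kernel iterator):

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define NBITS 500
#define LONG_BITS (8 * sizeof(unsigned long))
#define WORDS ((NBITS + LONG_BITS - 1) / LONG_BITS)

static int bit_is_set(const unsigned long *map, unsigned int bit)
{
	return (map[bit / LONG_BITS] >> (bit % LONG_BITS)) & 1UL;
}

static void set_bit_in(unsigned long *map, unsigned int bit)
{
	map[bit / LONG_BITS] |= 1UL << (bit % LONG_BITS);
}

int main(void)
{
	unsigned long orig[WORDS] = { 0 }, copy[WORDS];
	unsigned int wr, i, bit;

	for (bit = 0; bit < NBITS; bit += 10)	/* individual bits */
		set_bit_in(orig, bit);
	for (bit = 100; bit < 150; bit++)	/* a range of bits */
		set_bit_in(orig, bit);

	/* For every start offset, a wrapping walk must visit each set bit exactly once. */
	for (wr = 0; wr < NBITS; wr++) {
		memset(copy, 0, sizeof(copy));
		for (i = 0; i < NBITS; i++) {
			bit = (wr + i) % NBITS;
			if (bit_is_set(orig, bit))
				set_bit_in(copy, bit);
		}
		assert(memcmp(orig, copy, sizeof(orig)) == 0);
	}
	printf("round-trip OK for all start offsets\n");
	return 0;
}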
@@ -948,10 +1222,21 @@ static void __init selftest(void)
 	test_bitmap_parselist();
 	test_bitmap_printlist();
 	test_mem_optimisations();
-	test_for_each_set_clump8();
 	test_bitmap_cut();
 	test_bitmap_print_buf();
 	test_bitmap_const_eval();
+
+	test_find_nth_bit();
+	test_for_each_set_bit();
+	test_for_each_set_bit_from();
+	test_for_each_clear_bit();
+	test_for_each_clear_bit_from();
+	test_for_each_set_bitrange();
+	test_for_each_clear_bitrange();
+	test_for_each_set_bitrange_from();
+	test_for_each_clear_bitrange_from();
+	test_for_each_set_clump8();
+	test_for_each_set_bit_wrap();
 }
 
 KSTM_MODULE_LOADERS(test_bitmap);
@@ -8,21 +8,23 @@
 
 #include <linux/bitops.h>
 
-extern unsigned long _find_next_bit(const unsigned long *addr1,
-		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert, unsigned long le);
+unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
+				unsigned long start);
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+					unsigned long nbits, unsigned long start);
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+					 unsigned long start);
 extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
 extern unsigned long _find_first_and_bit(const unsigned long *addr1,
 					 const unsigned long *addr2, unsigned long size);
 extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
-extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
 
 #ifndef find_next_bit
 /**
  * find_next_bit - find the next set bit in a memory region
  * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
  *
  * Returns the bit number for the next set bit
 * If no bits are set, returns @size.
@@ -41,7 +43,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
 		return val ? __ffs(val) : size;
 	}
 
-	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
+	return _find_next_bit(addr, size, offset);
 }
 #endif
 
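The inline wrappers keep their single-word fast path (the "return val ? __ffs(val) : size;" line above) and only fall back to the out-of-line helpers for multi-word bitmaps; what changed is that each wrapper now calls a dedicated helper instead of the old catch-all _find_next_bit(..., invert, le). A minimal sketch of that small-bitmap fast path (userspace C, with the GENMASK-style masking written out by hand; names are illustrative):

#include <stdio.h>

#define LONG_BITS (8 * sizeof(unsigned long))

/*
 * Single-word fast path: when the whole bitmap fits in one long and its size
 * is a compile-time constant, finding the next set bit is one mask plus one
 * count-trailing-zeros.
 */
static unsigned long next_bit_small(unsigned long word, unsigned long size,
				    unsigned long offset)
{
	unsigned long val;

	if (offset >= size)
		return size;

	/* Keep only bits in [offset, size). */
	val = word & (~0UL << offset);
	if (size < LONG_BITS)
		val &= (1UL << size) - 1;

	return val ? (unsigned long)__builtin_ctzl(val) : size;
}

int main(void)
{
	unsigned long w = 0x90;		/* bits 4 and 7 set */

	printf("%lu %lu %lu\n",
	       next_bit_small(w, 8, 0),		/* 4 */
	       next_bit_small(w, 8, 5),		/* 7 */
	       next_bit_small(w, 6, 5));	/* 6: bit 7 is out of range */
	return 0;
}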
@@ -50,8 +52,8 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
  * find_next_and_bit - find the next set bit in both memory regions
  * @addr1: The first address to base the search on
  * @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
  *
  * Returns the bit number for the next set bit
 * If no bits are set, returns @size.
@@ -71,7 +73,7 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
 		return val ? __ffs(val) : size;
 	}
 
-	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
+	return _find_next_and_bit(addr1, addr2, size, offset);
 }
 #endif
 
@@ -79,8 +81,8 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
 /**
  * find_next_zero_bit - find the next cleared bit in a memory region
  * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
  *
  * Returns the bit number of the next zero bit
 * If no bits are zero, returns @size.
@@ -99,7 +101,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
 		return val == ~0UL ? size : ffz(val);
 	}
 
-	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
+	return _find_next_zero_bit(addr, size, offset);
 }
 #endif
 
@@ -172,43 +174,4 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
 }
 #endif
 
-#ifndef find_last_bit
-/**
- * find_last_bit - find the last set bit in a memory region
- * @addr: The address to start the search at
- * @size: The number of bits to search
- *
- * Returns the bit number of the last set bit, or size.
- */
-static inline
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
-{
-	if (small_const_nbits(size)) {
-		unsigned long val = *addr & GENMASK(size - 1, 0);
-
-		return val ? __fls(val) : size;
-	}
-
-	return _find_last_bit(addr, size);
-}
-#endif
-
-/**
- * find_next_clump8 - find next 8-bit clump with set bits in a memory region
- * @clump: location to store copy of found clump
- * @addr: address to base the search on
- * @size: bitmap size in number of bits
- * @offset: bit offset at which to start searching
- *
- * Returns the bit offset for the next set clump; the found clump value is
- * copied to the location pointed by @clump. If no bits are set, returns @size.
- */
-extern unsigned long find_next_clump8(unsigned long *clump,
-				      const unsigned long *addr,
-				      unsigned long size, unsigned long offset);
-
-#define find_first_clump8(clump, bits, size) \
-	find_next_clump8((clump), (bits), (size), 0)
-
-
 #endif /*__LINUX_FIND_H_ */
@@ -18,66 +18,54 @@
 #include <linux/bitmap.h>
 #include <linux/kernel.h>
 
-#if !defined(find_next_bit) || !defined(find_next_zero_bit) ||			\
-	!defined(find_next_and_bit)
+/*
+ * Common helper for find_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
+ */
+#define FIND_FIRST_BIT(FETCH, MUNGE, size)					\
+({										\
+	unsigned long idx, val, sz = (size);					\
+										\
+	for (idx = 0; idx * BITS_PER_LONG < sz; idx++) {			\
+		val = (FETCH);							\
+		if (val) {							\
+			sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz);	\
+			break;							\
+		}								\
+	}									\
+										\
+	sz;									\
+})
+
 /*
- * This is a common helper function for find_next_bit, find_next_zero_bit, and
- * find_next_and_bit. The differences are:
- *  - The "invert" argument, which is XORed with each fetched word before
- *    searching it for one bits.
- *  - The optional "addr2", which is anded with "addr1" if present.
+ * Common helper for find_next_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
  */
-unsigned long _find_next_bit(const unsigned long *addr1,
-		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert, unsigned long le)
-{
-	unsigned long tmp, mask;
-	(void) le;
-
-	if (unlikely(start >= nbits))
-		return nbits;
-
-	tmp = addr1[start / BITS_PER_LONG];
-	if (addr2)
-		tmp &= addr2[start / BITS_PER_LONG];
-	tmp ^= invert;
-
-	/* Handle 1st word. */
-	mask = BITMAP_FIRST_WORD_MASK(start);
-
-	/*
-	 * Due to the lack of swab() in tools, and the fact that it doesn't
-	 * need little-endian support, just comment it out
-	 */
-#if (0)
-	if (le)
-		mask = swab(mask);
-#endif
-
-	tmp &= mask;
-
-	start = round_down(start, BITS_PER_LONG);
-
-	while (!tmp) {
-		start += BITS_PER_LONG;
-		if (start >= nbits)
-			return nbits;
-
-		tmp = addr1[start / BITS_PER_LONG];
-		if (addr2)
-			tmp &= addr2[start / BITS_PER_LONG];
-		tmp ^= invert;
-	}
-
-#if (0)
-	if (le)
-		tmp = swab(tmp);
-#endif
-
-	return min(start + __ffs(tmp), nbits);
-}
-#endif
+#define FIND_NEXT_BIT(FETCH, MUNGE, size, start)				\
+({										\
+	unsigned long mask, idx, tmp, sz = (size), __start = (start);		\
+										\
+	if (unlikely(__start >= sz))						\
+		goto out;							\
+										\
+	mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start));				\
+	idx = __start / BITS_PER_LONG;						\
+										\
+	for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) {			\
+		if ((idx + 1) * BITS_PER_LONG >= sz)				\
+			goto out;						\
+		idx++;								\
+	}									\
+										\
+	sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz);			\
+out:										\
+	sz;									\
+})
 
 #ifndef find_first_bit
 /*
@@ -85,14 +73,7 @@ unsigned long _find_next_bit(const unsigned long *addr1,
  */
 unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
 {
-	unsigned long idx;
-
-	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
-		if (addr[idx])
-			return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
-	}
-
-	return size;
+	return FIND_FIRST_BIT(addr[idx], /* nop */, size);
 }
 #endif
 
@@ -104,15 +85,7 @@ unsigned long _find_first_and_bit(const unsigned long *addr1,
 					 const unsigned long *addr2,
 					 unsigned long size)
 {
-	unsigned long idx, val;
-
-	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
-		val = addr1[idx] & addr2[idx];
-		if (val)
-			return min(idx * BITS_PER_LONG + __ffs(val), size);
-	}
-
-	return size;
+	return FIND_FIRST_BIT(addr1[idx] & addr2[idx], /* nop */, size);
 }
 #endif
 
@@ -122,13 +95,29 @@ unsigned long _find_first_and_bit(const unsigned long *addr1,
  */
 unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
 {
-	unsigned long idx;
-
-	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
-		if (addr[idx] != ~0UL)
-			return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
-	}
-
-	return size;
+	return FIND_FIRST_BIT(~addr[idx], /* nop */, size);
+}
+#endif
+
+#ifndef find_next_bit
+unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
+{
+	return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
+}
+#endif
+
+#ifndef find_next_and_bit
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+					unsigned long nbits, unsigned long start)
+{
+	return FIND_NEXT_BIT(addr1[idx] & addr2[idx], /* nop */, nbits, start);
+}
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+					 unsigned long start)
+{
+	return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
 }
 #endif