sbitmap: add sbitmap_get_shallow() operation
This operation supports the use case of limiting the number of bits that
can be allocated for a given operation. Rather than setting aside some
bits at the end of the bitmap, we can set aside bits in each word of the
bitmap. This means we can keep the allocation hints spread out and
support sbitmap_resize() nicely at the cost of lower granularity for the
allowed depth.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit c05e667337 (parent 8425339492)
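As a rough usage sketch (mine, not part of the patch; the example_* names, the depth of 128, and the init flags are illustrative), the split described in the commit message could look like this: a high-priority class uses sbitmap_get() with the full depth, while a low-priority class is capped at half of each word via sbitmap_get_shallow().

    #include <linux/sbitmap.h>

    static struct sbitmap example_tags;

    static int example_setup(void)
    {
            /* 128 bits; a shift of -1 lets sbitmap pick the bits-per-word. */
            return sbitmap_init_node(&example_tags, 128, -1, GFP_KERNEL,
                                     NUMA_NO_NODE);
    }

    static int example_get_tag(bool high_prio)
    {
            if (high_prio)
                    return sbitmap_get(&example_tags, 0, false);

            /* Low-priority callers may take at most half of each word. */
            return sbitmap_get_shallow(&example_tags, 0,
                                       1UL << (example_tags.shift - 1));
    }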
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -175,6 +175,25 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
  */
 int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
 
+/**
+ * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
+ * limiting the depth used from each word.
+ * @sb: Bitmap to allocate from.
+ * @alloc_hint: Hint for where to start searching for a free bit.
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ *
+ * This rather specific operation allows for having multiple users with
+ * different allocation limits. E.g., there can be a high-priority class that
+ * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
+ * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
+ * class can only allocate half of the total bits in the bitmap, preventing it
+ * from starving out the high-priority class.
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+                        unsigned long shallow_depth);
+
 /**
  * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
  * @sb: Bitmap to check.
@@ -325,6 +344,19 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
  */
 int __sbitmap_queue_get(struct sbitmap_queue *sbq);
 
+/**
+ * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * sbitmap_queue, limiting the depth used from each word, with preemption
+ * already disabled.
+ * @sbq: Bitmap queue to allocate from.
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * See sbitmap_get_shallow().
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+                                unsigned int shallow_depth);
+
 /**
  * sbitmap_queue_get() - Try to allocate a free bit from a &struct
  * sbitmap_queue.
@@ -345,6 +377,29 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
 	return nr;
 }
 
+/**
+ * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * sbitmap_queue, limiting the depth used from each word.
+ * @sbq: Bitmap queue to allocate from.
+ * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
+ *       sbitmap_queue_clear()).
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * See sbitmap_get_shallow().
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+                                            unsigned int *cpu,
+                                            unsigned int shallow_depth)
+{
+	int nr;
+
+	*cpu = get_cpu();
+	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
+	put_cpu();
+	return nr;
+}
+
 /**
  * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
  * &struct sbitmap_queue.
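For the queue-side helpers above, here is a minimal sketch (mine, not part of the patch; the sbq setup and the example_* name are assumed) of allocating a shallow bit and later freeing it, passing back the CPU the allocation hint was taken on:

    #include <linux/sbitmap.h>

    /* Assumes "sbq" was initialized elsewhere, e.g. with sbitmap_queue_init_node(). */
    static int example_use_shallow(struct sbitmap_queue *sbq)
    {
            unsigned int cpu;
            int nr;

            /* Allocate from at most half of each word. */
            nr = sbitmap_queue_get_shallow(sbq, &cpu, 1U << (sbq->sb.shift - 1));
            if (nr < 0)
                    return nr;      /* -1: no bit free right now */

            /* ... use bit "nr" as a tag ... */

            /* Free it on the CPU recorded at allocation time. */
            sbitmap_queue_clear(sbq, nr, cpu);
            return 0;
    }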
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -79,15 +79,15 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
 }
 EXPORT_SYMBOL_GPL(sbitmap_resize);
 
-static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
-                              bool wrap)
+static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
+                              unsigned int hint, bool wrap)
 {
 	unsigned int orig_hint = hint;
 	int nr;
 
 	while (1) {
-		nr = find_next_zero_bit(&word->word, word->depth, hint);
-		if (unlikely(nr >= word->depth)) {
+		nr = find_next_zero_bit(word, depth, hint);
+		if (unlikely(nr >= depth)) {
 			/*
 			 * We started with an offset, and we didn't reset the
 			 * offset to 0 in a failure case, so start from 0 to
@@ -100,11 +100,11 @@ static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
 			return -1;
 		}
 
-		if (!test_and_set_bit(nr, &word->word))
+		if (!test_and_set_bit(nr, word))
 			break;
 
 		hint = nr + 1;
-		if (hint >= word->depth - 1)
+		if (hint >= depth - 1)
 			hint = 0;
 	}
 
@@ -119,7 +119,8 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 	index = SB_NR_TO_INDEX(sb, alloc_hint);
 
 	for (i = 0; i < sb->map_nr; i++) {
-		nr = __sbitmap_get_word(&sb->map[index],
+		nr = __sbitmap_get_word(&sb->map[index].word,
+					sb->map[index].depth,
 					SB_NR_TO_BIT(sb, alloc_hint),
 					!round_robin);
 		if (nr != -1) {
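A brief note (mine, not from the patch text): the signature change above hands __sbitmap_get_word() a raw word pointer and an explicit depth rather than a struct sbitmap_word, which is what lets the sbitmap_get_shallow() implementation that follows reuse the same search loop with a clamped per-word depth.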
@@ -141,6 +142,37 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 }
 EXPORT_SYMBOL_GPL(sbitmap_get);
 
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+                        unsigned long shallow_depth)
+{
+	unsigned int i, index;
+	int nr = -1;
+
+	index = SB_NR_TO_INDEX(sb, alloc_hint);
+
+	for (i = 0; i < sb->map_nr; i++) {
+		nr = __sbitmap_get_word(&sb->map[index].word,
+					min(sb->map[index].depth, shallow_depth),
+					SB_NR_TO_BIT(sb, alloc_hint), true);
+		if (nr != -1) {
+			nr += index << sb->shift;
+			break;
+		}
+
+		/* Jump to next index. */
+		index++;
+		alloc_hint = index << sb->shift;
+
+		if (index >= sb->map_nr) {
+			index = 0;
+			alloc_hint = 0;
+		}
+	}
+
+	return nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
+
 bool sbitmap_any_bit_set(const struct sbitmap *sb)
 {
 	unsigned int i;
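One detail worth noting about the min() in the loop above (my note, not from the patch): the last word of a bitmap can hold fewer than (1 << shift) bits when the total depth is not a multiple of the word size, so the effective per-word limit is the smaller of that word's own depth and @shallow_depth. For example, with shift = 6 and depth = 100, the second word holds only 36 bits; a shallow_depth of 32 caps it at 32, while a shallow_depth of 48 is bounded by the word's 36 bits.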
@@ -342,6 +374,35 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 }
 EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
 
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+                                unsigned int shallow_depth)
+{
+	unsigned int hint, depth;
+	int nr;
+
+	hint = this_cpu_read(*sbq->alloc_hint);
+	depth = READ_ONCE(sbq->sb.depth);
+	if (unlikely(hint >= depth)) {
+		hint = depth ? prandom_u32() % depth : 0;
+		this_cpu_write(*sbq->alloc_hint, hint);
+	}
+	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
+
+	if (nr == -1) {
+		/* If the map is full, a hint won't do us much good. */
+		this_cpu_write(*sbq->alloc_hint, 0);
+	} else if (nr == hint || unlikely(sbq->round_robin)) {
+		/* Only update the hint if we used it. */
+		hint = nr + 1;
+		if (hint >= depth - 1)
+			hint = 0;
+		this_cpu_write(*sbq->alloc_hint, hint);
+	}
+
+	return nr;
+}
+EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+
 static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 {
 	int i, wake_index;