forked from Minki/linux
RDMA/rxe: Fix bug in rxe_alloc()
A recent patch which added an 'unlocked' version of rxe_alloc introduced a
bug causing kzalloc(..., GFP_KERNEL) to be called while holding a spin
lock. This patch corrects that error.
rxe_alloc_nl() should always be called while holding the pool->pool_lock
so the 2nd argument to kzalloc there should be GFP_ATOMIC.
rxe_alloc() prior to the change only locked the code around checking that
pool->state is RXE_POOL_STATE_VALID to avoid races between working threads
and a thread shutting down the rxe driver. This patch reverts rxe_alloc()
to this behavior so the lock is not held when kzalloc() is called.
Link: https://lore.kernel.org/r/20210125211641.2694-2-rpearson@hpe.com
Reported-by: syzbot+ec2fd72374785d0e558e@syzkaller.appspotmail.com
Fixes: 3853c35e24 ("RDMA/rxe: Add unlocked versions of pool APIs")
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
def4cd43f5
commit
c4369575b2
@@ -343,13 +343,54 @@ void *rxe_alloc_nl(struct rxe_pool *pool)
|
||||
struct rxe_pool_entry *elem;
|
||||
u8 *obj;
|
||||
|
||||
might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
|
||||
|
||||
if (pool->state != RXE_POOL_STATE_VALID)
|
||||
return NULL;
|
||||
|
||||
kref_get(&pool->ref_cnt);
|
||||
|
||||
if (!ib_device_try_get(&pool->rxe->ib_dev))
|
||||
goto out_put_pool;
|
||||
|
||||
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
|
||||
goto out_cnt;
|
||||
|
||||
obj = kzalloc(info->size, GFP_ATOMIC);
|
||||
if (!obj)
|
||||
goto out_cnt;
|
||||
|
||||
elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
|
||||
|
||||
elem->pool = pool;
|
||||
kref_init(&elem->ref_cnt);
|
||||
|
||||
return obj;
|
||||
|
||||
out_cnt:
|
||||
atomic_dec(&pool->num_elem);
|
||||
ib_device_put(&pool->rxe->ib_dev);
|
||||
out_put_pool:
|
||||
rxe_pool_put(pool);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *rxe_alloc(struct rxe_pool *pool)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct rxe_type_info *info = &rxe_type_info[pool->type];
|
||||
struct rxe_pool_entry *elem;
|
||||
u8 *obj;
|
||||
|
||||
might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
|
||||
|
||||
read_lock_irqsave(&pool->pool_lock, flags);
|
||||
if (pool->state != RXE_POOL_STATE_VALID) {
|
||||
read_unlock_irqrestore(&pool->pool_lock, flags);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
kref_get(&pool->ref_cnt);
|
||||
read_unlock_irqrestore(&pool->pool_lock, flags);
|
||||
|
||||
if (!ib_device_try_get(&pool->rxe->ib_dev))
|
||||
goto out_put_pool;
|
||||
|
||||
@@ -376,18 +417,6 @@ out_put_pool:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *rxe_alloc(struct rxe_pool *pool)
|
||||
{
|
||||
u8 *obj;
|
||||
unsigned long flags;
|
||||
|
||||
read_lock_irqsave(&pool->pool_lock, flags);
|
||||
obj = rxe_alloc_nl(pool);
|
||||
read_unlock_irqrestore(&pool->pool_lock, flags);
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
Loading…
Reference in New Issue
Block a user