linux/drivers/infiniband/sw/rxe/rxe_pool.h
Bob Pearson eae5f0642e RDMA/rxe: Remove unneeded pool->state
rxe_pool.c uses the field pool->state to mark a pool as invalid when it is
shut down and checks it in several pool APIs to verify that the pool has
not been shut down. This is unneeded because the pools are not marked
invalid unless the entire driver is being removed, at which point no
functional APIs should or could be executing. This patch removes this
field and the associated code.

Link: https://lore.kernel.org/r/20210125211641.2694-6-rpearson@hpe.com
Suggested-by: zyjzyj2000@gmail.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
2021-01-28 15:29:56 -04:00
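The removed pattern, roughly: every allocation and lookup path re-checked pool validity under the pool_lock before proceeding. Below is a minimal sketch of that check, assuming the pre-patch pool->state field and RXE_POOL_STATE_VALID value (names as in the old driver; illustrative, not the literal removed hunk):

        read_lock_irqsave(&pool->pool_lock, flags);
        if (pool->state != RXE_POOL_STATE_VALID) {
                /* pool already torn down; per the commit message this can
                 * only happen while the whole driver is being removed
                 */
                read_unlock_irqrestore(&pool->pool_lock, flags);
                return NULL;
        }
        read_unlock_irqrestore(&pool->pool_lock, flags);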


/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_POOL_H
#define RXE_POOL_H

#define RXE_POOL_ALIGN (16)
#define RXE_POOL_CACHE_FLAGS (0)

enum rxe_pool_flags {
        RXE_POOL_INDEX = BIT(1),
        RXE_POOL_KEY = BIT(2),
        RXE_POOL_NO_ALLOC = BIT(4),
};

enum rxe_elem_type {
        RXE_TYPE_UC,
        RXE_TYPE_PD,
        RXE_TYPE_AH,
        RXE_TYPE_SRQ,
        RXE_TYPE_QP,
        RXE_TYPE_CQ,
        RXE_TYPE_MR,
        RXE_TYPE_MW,
        RXE_TYPE_MC_GRP,
        RXE_TYPE_MC_ELEM,
        RXE_NUM_TYPES, /* keep me last */
};

struct rxe_pool_entry;

struct rxe_type_info {
        const char *name;
        size_t size;
        size_t elem_offset;
        void (*cleanup)(struct rxe_pool_entry *obj);
        enum rxe_pool_flags flags;
        u32 max_index;
        u32 min_index;
        size_t key_offset;
        size_t key_size;
};

extern struct rxe_type_info rxe_type_info[];

struct rxe_pool_entry {
        struct rxe_pool *pool;
        struct kref ref_cnt;
        struct list_head list;

        /* only used if keyed */
        struct rb_node key_node;

        /* only used if indexed */
        struct rb_node index_node;
        u32 index;
};

struct rxe_pool {
        struct rxe_dev *rxe;
        rwlock_t pool_lock; /* protects pool add/del/search */
        size_t elem_size;
        void (*cleanup)(struct rxe_pool_entry *obj);
        enum rxe_pool_flags flags;
        enum rxe_elem_type type;

        unsigned int max_elem;
        atomic_t num_elem;

        /* only used if indexed */
        struct {
                struct rb_root tree;
                unsigned long *table;
                size_t table_size;
                u32 last;
                u32 max_index;
                u32 min_index;
        } index;

        /* only used if keyed */
        struct {
                struct rb_root tree;
                size_t key_offset;
                size_t key_size;
        } key;
};

/* initialize a pool of objects with a given limit on the number of
 * elements. Gets its parameters from rxe_type_info; pool elements will
 * be allocated out of a slab cache.
 */
int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
                  enum rxe_elem_type type, u32 max_elem);
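
/* Illustrative call (sketch, not part of the original header): during device
 * init the driver is expected to set up one pool per object type, along the
 * lines of (field names such as qp_pool and attr.max_qp are assumptions):
 *
 *      err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
 *                          rxe->attr.max_qp);
 *      if (err)
 *              goto err_cleanup;
 */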

/* free resources from object pool */
void rxe_pool_cleanup(struct rxe_pool *pool);

/* allocate an object from pool holding and not holding the pool lock */
void *rxe_alloc_locked(struct rxe_pool *pool);
void *rxe_alloc(struct rxe_pool *pool);

/* connect already allocated object to pool */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
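
/* Illustrative use (sketch, not part of the original header): objects whose
 * pool has RXE_POOL_NO_ALLOC set are allocated by the core and only attached
 * here, e.g. a PD in an alloc_pd verb might simply do
 *
 *      return rxe_add_to_pool(&rxe->pd_pool, pd);
 *
 * The obj argument must embed a struct rxe_pool_entry member named pelem,
 * since the wrapper macro expands to &(obj)->pelem.
 */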

/* assign an index to an indexed object and insert object into
 * pool's rb tree holding and not holding the pool_lock
 */
void __rxe_add_index_locked(struct rxe_pool_entry *elem);
#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)

void __rxe_add_index(struct rxe_pool_entry *elem);
#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)

/* drop an index and remove object from rb tree
 * holding and not holding the pool_lock
 */
void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)

void __rxe_drop_index(struct rxe_pool_entry *elem);
#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
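
/* Illustrative lifecycle of an indexed object (sketch, not part of the
 * original header), e.g. a QP looked up by its qpn:
 *
 *      qp = rxe_alloc(&rxe->qp_pool);  // object starts with one reference
 *      if (!qp)
 *              return -ENOMEM;
 *      rxe_add_index(qp);              // make it reachable by index
 *      ...
 *      rxe_drop_index(qp);             // remove from the index rb tree
 *      rxe_drop_ref(qp);               // freed when the last ref is dropped
 */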

/* assign a key to a keyed object and insert object into
 * pool's rb tree holding and not holding pool_lock
 */
void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)

void __rxe_add_key(struct rxe_pool_entry *elem, void *key);
#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)

/* remove elem from rb tree holding and not holding the pool_lock */
void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)

void __rxe_drop_key(struct rxe_pool_entry *elem);
#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
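
/* Illustrative keyed usage (sketch, not part of the original header):
 * multicast groups, for example, are looked up by GID rather than index,
 * so insertion and removal go through the key variants, roughly:
 *
 *      rxe_add_key(grp, mgid);         // mgid: pointer to the group's GID
 *      ...
 *      rxe_drop_key(grp);              // remove it from the key rb tree
 *
 * The key must match key_offset/key_size from rxe_type_info for the pool.
 */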

/* lookup an indexed object from index holding and not holding the pool_lock.
 * takes a reference on object
 */
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index);
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);

/* lookup keyed object from key holding and not holding the pool_lock.
 * takes a reference on the object
 */
void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key);
void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
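
/* Illustrative lookup (sketch, not part of the original header): both lookup
 * flavors return the object with a reference already taken, so the caller
 * owns a ref it must release, roughly:
 *
 *      qp = rxe_pool_get_index(&rxe->qp_pool, qpn);
 *      if (!qp)
 *              return;                 // no object at that index
 *      ...
 *      rxe_drop_ref(qp);               // release the lookup reference
 */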

/* cleanup an object when all references are dropped */
void rxe_elem_release(struct kref *kref);

/* take a reference on an object */
#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt)

/* drop a reference on an object */
#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release)

#endif /* RXE_POOL_H */