RDMA/cxgb4: Use refcount_t instead of atomic_t for reference counting
The refcount_t API will WARN on underflow and overflow of a reference
counter, and avoid use-after-free risks.

Link: https://lore.kernel.org/r/1622194663-2383-12-git-send-email-liweihang@huawei.com
Cc: Potnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 8f9513d89f
commit 7183451f84
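The mechanics of the conversion are uniform: every atomic_* call on the
counter becomes the corresponding refcount_* call. As a minimal
illustration of the semantics being adopted (hypothetical demo code, not
part of this patch), refcount_t WARNs and saturates where atomic_t would
silently wrap:

#include <linux/printk.h>
#include <linux/refcount.h>

/* Hypothetical demo, not from this patch: basic refcount_t usage. */
static void refcount_demo(void)
{
	refcount_t ref;

	refcount_set(&ref, 1);			/* creation reference */
	refcount_inc(&ref);			/* WARNs if the count was 0 (use-after-free) */

	if (refcount_dec_and_test(&ref))	/* 2 -> 1: not the last reference */
		pr_info("not reached on this path\n");
	if (refcount_dec_and_test(&ref))	/* 1 -> 0: last reference dropped */
		pr_info("safe to tear down\n");

	/* A further refcount_dec() here would WARN on underflow; on overflow
	 * the counter saturates at REFCOUNT_SATURATED instead of wrapping,
	 * so a doubly-dropped reference cannot silently corrupt the count
	 * the way an atomic_t can.
	 */
}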
drivers/infiniband/hw/cxgb4/cq.c
@@ -976,8 +976,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 
 	chp = to_c4iw_cq(ib_cq);
 
 	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
-	atomic_dec(&chp->refcnt);
-	wait_event(chp->wait, !atomic_read(&chp->refcnt));
+	refcount_dec(&chp->refcnt);
+	wait_event(chp->wait, !refcount_read(&chp->refcnt));
 
 	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
 					     ibucontext);
@@ -1080,7 +1080,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	chp->ibcq.cqe = entries - 2;
 	spin_lock_init(&chp->lock);
 	spin_lock_init(&chp->comp_handler_lock);
-	atomic_set(&chp->refcnt, 1);
+	refcount_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
 	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
 	if (ret)
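Taken together, the two cq.c hunks preserve the driver's existing
lifecycle: the creator holds one reference from c4iw_create_cq(), and
c4iw_destroy_cq() drops it and then sleeps until any event handlers have
dropped theirs. A sketch of that hand-off with hypothetical names (note
the final drop here uses refcount_dec_and_test(), whereas the driver
pairs a plain refcount_dec() with the waitqueue condition):

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/wait.h>

/* Hypothetical object mirroring the c4iw_cq pattern. */
struct obj {
	refcount_t refcnt;
	wait_queue_head_t wait;
};

/* Drop one reference; the last dropper unblocks a waiting destroyer. */
static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcnt))
		wake_up(&o->wait);
}

/* Event side: hold a temporary reference across the callback. */
static void obj_event(struct obj *o)
{
	refcount_inc(&o->refcnt);
	/* ... run the completion handler ... */
	obj_put(o);
}

/* Destroy side: drop the creation-time reference, then wait until all
 * in-flight obj_event() callers have dropped theirs before freeing.
 */
static void obj_destroy(struct obj *o)
{
	obj_put(o);
	wait_event(o->wait, !refcount_read(&o->refcnt));
	kfree(o);
}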
drivers/infiniband/hw/cxgb4/ev.c
@@ -151,7 +151,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	}
 
 	c4iw_qp_add_ref(&qhp->ibqp);
-	atomic_inc(&chp->refcnt);
+	refcount_inc(&chp->refcnt);
 	xa_unlock_irq(&dev->qps);
 
 	/* Bad incoming write */
@@ -213,7 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		break;
 	}
 done:
-	if (atomic_dec_and_test(&chp->refcnt))
+	if (refcount_dec_and_test(&chp->refcnt))
 		wake_up(&chp->wait);
 	c4iw_qp_rem_ref(&qhp->ibqp);
 out:
@@ -228,13 +228,13 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 	xa_lock_irqsave(&dev->cqs, flag);
 	chp = xa_load(&dev->cqs, qid);
 	if (chp) {
-		atomic_inc(&chp->refcnt);
+		refcount_inc(&chp->refcnt);
 		xa_unlock_irqrestore(&dev->cqs, flag);
 		t4_clear_cq_armed(&chp->cq);
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-		if (atomic_dec_and_test(&chp->refcnt))
+		if (refcount_dec_and_test(&chp->refcnt))
 			wake_up(&chp->wait);
 	} else {
 		pr_debug("unknown cqid 0x%x\n", qid);
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -427,7 +427,7 @@ struct c4iw_cq {
 	struct t4_cq cq;
 	spinlock_t lock;
 	spinlock_t comp_handler_lock;
-	atomic_t refcnt;
+	refcount_t refcnt;
 	wait_queue_head_t wait;
 	struct c4iw_wr_wait *wr_waitp;
 };
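The header change is the whole type swap: refcount_t is a struct wrapping
a single atomic_t, so the size and layout of struct c4iw_cq are unchanged
and only the permitted operations differ. Its definition from
<linux/refcount.h>:

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;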