IB/rxe: Disable completion upcalls when a CQ is destroyed

This prevents the stack from accessing userspace objects while they
are being torn down.

One possible sequence of events (see the sketch after this list):
 - Userspace program exits
 - ib_uverbs_cleanup_ucontext() runs, calling ib_destroy_qp(),
   ib_destroy_cq(), etc. and releasing/freeing the UCQ
   - The QP still has tasklets running, so it isn't destroyed yet
   - The CQ is referenced by the QP, so the CQ isn't destroyed yet
   - The UCQ is kfree()'d anyway
 - A send work request completes
 - rxe_send_complete() calls cq->ibcq.comp_handler()
 - ib_uverbs_comp_handler() runs and crashes; the handler checks the
   event queue's is_closed flag, but it has no way to validate the
   ib_ucq_object before accessing it
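
The guard added by this patch is straightforward to model outside the
kernel. Below is a minimal, self-contained userspace sketch of the same
pattern (check a "dying" flag under the object's lock before making the
completion upcall), using a pthread mutex in place of the kernel spinlock.
The names here (struct cq, send_complete(), cq_disable(), handler()) are
hypothetical stand-ins for the rxe objects, not kernel API:

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct cq {
          pthread_mutex_t lock;                /* stands in for cq->cq_lock */
          bool is_dying;                       /* set once teardown begins */
          void (*comp_handler)(struct cq *cq); /* stands in for the uverbs upcall */
  };

  /* Completion path: test the flag under the lock before the upcall. */
  static void send_complete(struct cq *cq)
  {
          pthread_mutex_lock(&cq->lock);
          if (cq->is_dying) {
                  pthread_mutex_unlock(&cq->lock);
                  return; /* consumer is tearing down; drop the upcall */
          }
          pthread_mutex_unlock(&cq->lock);

          cq->comp_handler(cq);
  }

  /* Teardown path: after this returns, no new upcall can begin. */
  static void cq_disable(struct cq *cq)
  {
          pthread_mutex_lock(&cq->lock);
          cq->is_dying = true;
          pthread_mutex_unlock(&cq->lock);
  }

  static void handler(struct cq *cq)
  {
          (void)cq;
          printf("completion delivered\n");
  }

  int main(void)
  {
          struct cq cq = {
                  .lock = PTHREAD_MUTEX_INITIALIZER,
                  .comp_handler = handler,
          };

          send_complete(&cq); /* delivered: is_dying is still false */
          cq_disable(&cq);    /* teardown begins */
          send_complete(&cq); /* dropped instead of touching dead state */
          return 0;
  }

As in the patch, the handler is invoked with the lock released, so the
flag only guarantees that no new upcall starts after cq_disable()
returns; it does not wait for an upcall already in flight.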

The CQ's reference counting doesn't protect against this, since the CQ
hasn't been destroyed yet. There is no existing interface for
deregistering the UCQ from the CQ, and adding reference counting to the
UCQ did not look like a good approach when this solution is so much
simpler.

Fixes: 8700e3e7c4 ("Soft RoCE driver")
Signed-off-by: Andrew Boyer <andrew.boyer@dell.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
---
 drivers/infiniband/sw/rxe/rxe_cq.c    | 19 +++++++++++++++++++
 drivers/infiniband/sw/rxe/rxe_loc.h   |  2 ++
 drivers/infiniband/sw/rxe/rxe_verbs.c |  2 ++
 drivers/infiniband/sw/rxe/rxe_verbs.h |  1 +
 4 files changed, 24 insertions(+), 0 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -69,6 +69,14 @@ err1:
 static void rxe_send_complete(unsigned long data)
 {
 	struct rxe_cq *cq = (struct rxe_cq *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cq->cq_lock, flags);
+	if (cq->is_dying) {
+		spin_unlock_irqrestore(&cq->cq_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
 
 	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
@@ -97,6 +105,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 	if (udata)
 		cq->is_user = 1;
 
+	cq->is_dying = false;
+
 	tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);
 
 	spin_lock_init(&cq->cq_lock);
@@ -156,6 +166,15 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 	return 0;
 }
 
+void rxe_cq_disable(struct rxe_cq *cq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cq->cq_lock, flags);
+	cq->is_dying = true;
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+}
+
 void rxe_cq_cleanup(struct rxe_pool_entry *arg)
 {
 	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -64,6 +64,8 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);
 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
 
+void rxe_cq_disable(struct rxe_cq *cq);
+
 void rxe_cq_cleanup(struct rxe_pool_entry *arg);
 
 /* rxe_mcast.c */

diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -919,6 +919,8 @@ static int rxe_destroy_cq(struct ib_cq *ibcq)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
 
+	rxe_cq_disable(cq);
+
 	rxe_drop_ref(cq);
 	return 0;
 }

diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -89,6 +89,7 @@ struct rxe_cq {
 	struct rxe_queue	*queue;
 	spinlock_t		cq_lock;
 	u8			notify;
+	bool			is_dying;
 	int			is_user;
 	struct tasklet_struct	comp_task;
 };