mirror of
https://github.com/torvalds/linux.git
synced 2024-11-21 19:41:42 +00:00
IB/core: add support for draining Shared receive queues
IB/core: add support for draining Shared receive queues
To avoid leakage for QPs associated with SRQ, according to IB spec (section 10.3.1): "Note, for QPs that are associated with an SRQ, the Consumer should take the QP through the Error State before invoking a Destroy QP or a Modify QP to the Reset State. The Consumer may invoke the Destroy QP without first performing a Modify QP to the Error State and waiting for the Affiliated Asynchronous Last WQE Reached Event. However, if the Consumer does not wait for the Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment leakage may occur. Therefore, it is good programming practice to tear down a QP that is associated with an SRQ by using the following process: - Put the QP in the Error State; - wait for the Affiliated Asynchronous Last WQE Reached Event; - either: - drain the CQ by invoking the Poll CQ verb and either wait for CQ to be empty or the number of Poll CQ operations has exceeded CQ capacity size; or - post another WR that completes on the same CQ and wait for this WR to return as a WC; - and then invoke a Destroy QP or Reset QP." Catch the Last WQE Reached Event in the core layer during drain QP flow. Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com> Link: https://lore.kernel.org/r/20240619171153.34631-2-mgurtovoy@nvidia.com Reviewed-by: Sagi Grimberg <sagi@grimberg.me> Signed-off-by: Leon Romanovsky <leon@kernel.org> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
0c5275bf75
commit
844bc12e6d
@ -1101,6 +1101,16 @@ EXPORT_SYMBOL(ib_destroy_srq_user);
|
||||
|
||||
/* Queue pairs */
|
||||
|
||||
static void __ib_qp_event_handler(struct ib_event *event, void *context)
|
||||
{
|
||||
struct ib_qp *qp = event->element.qp;
|
||||
|
||||
if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
|
||||
complete(&qp->srq_completion);
|
||||
if (qp->registered_event_handler)
|
||||
qp->registered_event_handler(event, qp->qp_context);
|
||||
}
|
||||
|
||||
static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
|
||||
{
|
||||
struct ib_qp *qp = context;
|
||||
@ -1221,13 +1231,15 @@ static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
|
||||
qp->qp_type = attr->qp_type;
|
||||
qp->rwq_ind_tbl = attr->rwq_ind_tbl;
|
||||
qp->srq = attr->srq;
|
||||
qp->event_handler = attr->event_handler;
|
||||
qp->event_handler = __ib_qp_event_handler;
|
||||
qp->registered_event_handler = attr->event_handler;
|
||||
qp->port = attr->port_num;
|
||||
qp->qp_context = attr->qp_context;
|
||||
|
||||
spin_lock_init(&qp->mr_lock);
|
||||
INIT_LIST_HEAD(&qp->rdma_mrs);
|
||||
INIT_LIST_HEAD(&qp->sig_mrs);
|
||||
init_completion(&qp->srq_completion);
|
||||
|
||||
qp->send_cq = attr->send_cq;
|
||||
qp->recv_cq = attr->recv_cq;
|
||||
@ -2884,6 +2896,72 @@ static void __ib_drain_rq(struct ib_qp *qp)
|
||||
wait_for_completion(&rdrain.done);
|
||||
}
|
||||
|
||||
/*
|
||||
* __ib_drain_srq() - Block until Last WQE Reached event arrives, or timeout
|
||||
* expires.
|
||||
* @qp: queue pair associated with SRQ to drain
|
||||
*
|
||||
* Quoting 10.3.1 Queue Pair and EE Context States:
|
||||
*
|
||||
* Note, for QPs that are associated with an SRQ, the Consumer should take the
|
||||
* QP through the Error State before invoking a Destroy QP or a Modify QP to the
|
||||
* Reset State. The Consumer may invoke the Destroy QP without first performing
|
||||
* a Modify QP to the Error State and waiting for the Affiliated Asynchronous
|
||||
* Last WQE Reached Event. However, if the Consumer does not wait for the
|
||||
* Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
|
||||
* leakage may occur. Therefore, it is good programming practice to tear down a
|
||||
* QP that is associated with an SRQ by using the following process:
|
||||
*
|
||||
* - Put the QP in the Error State
|
||||
* - Wait for the Affiliated Asynchronous Last WQE Reached Event;
|
||||
* - either:
|
||||
* drain the CQ by invoking the Poll CQ verb and either wait for CQ
|
||||
* to be empty or the number of Poll CQ operations has exceeded
|
||||
* CQ capacity size;
|
||||
* - or
|
||||
* post another WR that completes on the same CQ and wait for this
|
||||
* WR to return as a WC;
|
||||
* - and then invoke a Destroy QP or Reset QP.
|
||||
*
|
||||
* We use the first option.
|
||||
*/
|
||||
static void __ib_drain_srq(struct ib_qp *qp)
|
||||
{
|
||||
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
|
||||
struct ib_cq *cq;
|
||||
int n, polled = 0;
|
||||
int ret;
|
||||
|
||||
if (!qp->srq) {
|
||||
WARN_ONCE(1, "QP 0x%p is not associated with SRQ\n", qp);
|
||||
return;
|
||||
}
|
||||
|
||||
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
|
||||
if (ret) {
|
||||
WARN_ONCE(ret, "failed to drain shared recv queue: %d\n", ret);
|
||||
return;
|
||||
}
|
||||
|
||||
if (ib_srq_has_cq(qp->srq->srq_type)) {
|
||||
cq = qp->srq->ext.cq;
|
||||
} else if (qp->recv_cq) {
|
||||
cq = qp->recv_cq;
|
||||
} else {
|
||||
WARN_ONCE(1, "QP 0x%p has no CQ associated with SRQ\n", qp);
|
||||
return;
|
||||
}
|
||||
|
||||
if (wait_for_completion_timeout(&qp->srq_completion, 60 * HZ) > 0) {
|
||||
while (polled != cq->cqe) {
|
||||
n = ib_process_cq_direct(cq, cq->cqe - polled);
|
||||
if (!n)
|
||||
return;
|
||||
polled += n;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ib_drain_sq() - Block until all SQ CQEs have been consumed by the
|
||||
* application.
|
||||
@ -2962,6 +3040,8 @@ void ib_drain_qp(struct ib_qp *qp)
|
||||
ib_drain_sq(qp);
|
||||
if (!qp->srq)
|
||||
ib_drain_rq(qp);
|
||||
else
|
||||
__ib_drain_srq(qp);
|
||||
}
|
||||
EXPORT_SYMBOL(ib_drain_qp);
|
||||
|
||||
|
@ -1788,6 +1788,7 @@ struct ib_qp {
|
||||
struct list_head rdma_mrs;
|
||||
struct list_head sig_mrs;
|
||||
struct ib_srq *srq;
|
||||
struct completion srq_completion;
|
||||
struct ib_xrcd *xrcd; /* XRC TGT QPs only */
|
||||
struct list_head xrcd_list;
|
||||
|
||||
@ -1797,6 +1798,7 @@ struct ib_qp {
|
||||
struct ib_qp *real_qp;
|
||||
struct ib_uqp_object *uobject;
|
||||
void (*event_handler)(struct ib_event *, void *);
|
||||
void (*registered_event_handler)(struct ib_event *, void *);
|
||||
void *qp_context;
|
||||
/* sgid_attrs associated with the AV's */
|
||||
const struct ib_gid_attr *av_sgid_attr;
|
||||
|
Loading…
Reference in New Issue
Block a user