staging/rdma/hfi1: Ensure last cursor is updated prior to complete
This patch is a prerequisite for adding a separate lock for post send. qp->s_last must be updated before any send completion is returned, to avoid a race between a poll of the CQ seeing the completion and the post send path checking for a full queue.

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 20658661bc
commit 6c2ab0b857
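To make the ordering concrete, here is a minimal sketch of the race the commit message describes. Every name below (toy_sq, toy_barrier, toy_complete_one, toy_sq_full) is invented for illustration and is not part of hfi1 or rdmavt; it only mirrors the advance-s_last-then-publish pattern that the hunks below apply in three places.

#include <stdint.h>

/* Hypothetical, simplified send queue: only the fields needed here. */
struct toy_sq {
        uint32_t s_size;        /* number of slots in the ring */
        uint32_t s_head;        /* next slot the poster would fill */
        uint32_t s_last;        /* oldest slot not yet completed */
};

/* Compiler barrier, standing in for the kernel's barrier(). */
#define toy_barrier() __asm__ __volatile__("" ::: "memory")

/*
 * Completion side: advance s_last *before* the completion is published.
 * If the completion became visible first, a poster that saw it via a
 * poll of the CQ and retried could still find the ring "full" because
 * s_last had not yet moved.
 */
static void toy_complete_one(struct toy_sq *sq, void (*post_cqe)(void))
{
        uint32_t last = sq->s_last;

        if (++last >= sq->s_size)
                last = 0;
        sq->s_last = last;      /* free the slot ...                    */
        toy_barrier();          /* ... before the completion is emitted */
        post_cqe();             /* stand-in for the CQ entry            */
}

/* Post side: the full-ring check that races with the update above. */
static int toy_sq_full(const struct toy_sq *sq)
{
        uint32_t next = sq->s_head + 1;

        if (next >= sq->s_size)
                next = 0;
        return next == sq->s_last;
}

In the real driver the two sides run under the QP locks; the sketch only shows why the store to s_last has to precede the completion, which is exactly the reordering the hunks below perform.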
@@ -1123,10 +1123,18 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
                 hfi1_add_retry_timer(qp);
 
         while (qp->s_last != qp->s_acked) {
+                u32 s_last;
+
                 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
                 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
                     cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
                         break;
+                s_last = qp->s_last;
+                if (++s_last >= qp->s_size)
+                        s_last = 0;
+                qp->s_last = s_last;
+                /* see post_send() */
+                barrier();
                 for (i = 0; i < wqe->wr.num_sge; i++) {
                         struct rvt_sge *sge = &wqe->sg_list[i];
 
@@ -1143,8 +1151,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
                         wc.qp = &qp->ibqp;
                         rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
                 }
-                if (++qp->s_last >= qp->s_size)
-                        qp->s_last = 0;
         }
         /*
          * If we were waiting for sends to complete before re-sending,
@@ -1184,11 +1190,19 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
          */
         if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
             cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+                u32 s_last;
+
                 for (i = 0; i < wqe->wr.num_sge; i++) {
                         struct rvt_sge *sge = &wqe->sg_list[i];
 
                         rvt_put_mr(sge->mr);
                 }
+                s_last = qp->s_last;
+                if (++s_last >= qp->s_size)
+                        s_last = 0;
+                qp->s_last = s_last;
+                /* see post_send() */
+                barrier();
                 /* Post a send completion queue entry if requested. */
                 if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
                     (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
@@ -1200,8 +1214,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
                         wc.qp = &qp->ibqp;
                         rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
                 }
-                if (++qp->s_last >= qp->s_size)
-                        qp->s_last = 0;
         } else {
                 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 
@@ -921,6 +921,13 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
                 return;
 
+        last = qp->s_last;
+        old_last = last;
+        if (++last >= qp->s_size)
+                last = 0;
+        qp->s_last = last;
+        /* See post_send() */
+        barrier();
         for (i = 0; i < wqe->wr.num_sge; i++) {
                 struct rvt_sge *sge = &wqe->sg_list[i];
 
@@ -948,11 +955,6 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                              status != IB_WC_SUCCESS);
         }
 
-        last = qp->s_last;
-        old_last = last;
-        if (++last >= qp->s_size)
-                last = 0;
-        qp->s_last = last;
         if (qp->s_acked == old_last)
                 qp->s_acked = last;
         if (qp->s_cur == old_last)
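All three functions now open-code the same advance-then-publish step. Purely as an illustration of that shared shape (the patch does not add such a helper), it could be factored as below, assuming the rdmavt context of the files above (struct rvt_qp, u32, barrier()); the name advance_s_last is hypothetical.

/* Hypothetical helper, not part of the patch: the sequence that
 * hfi1_rc_send_complete(), do_rc_completion() and hfi1_send_complete()
 * each now perform before generating a completion. */
static inline u32 advance_s_last(struct rvt_qp *qp)
{
        u32 last = qp->s_last;

        if (++last >= qp->s_size)
                last = 0;
        qp->s_last = last;
        /* see post_send() */
        barrier();
        return last;
}

Note that hfi1_send_complete() still records old_last before advancing, so the fix-ups of qp->s_acked and qp->s_cur at the end of the function keep working after the move.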