IB/ipath: Fix RNR NAK handling
This patch fixes a few minor problems with RNR NAK handling:
 - The insertion sort was causing extra delay when inserting ahead
   vs. behind an existing entry on the list.
 - A resend of the first packet of a message which is still not ready
   needs another RNR NAK (i.e., it was suppressed when it shouldn't
   have been).
 - The resend tasklet doesn't need to be woken up unless the ACK/NAK
   actually indicates progress has been made.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent e57d62a147
commit cc65edcf0c
@@ -647,6 +647,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 
 queue_ack:
         spin_lock_irqsave(&qp->s_lock, flags);
+        dev->n_rc_qacks++;
         qp->s_flags |= IPATH_S_ACK_PENDING;
         qp->s_nak_state = qp->r_nak_state;
         qp->s_ack_psn = qp->r_ack_psn;
@@ -798,11 +799,13 @@ bail:
 
 static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
 {
-        if (qp->s_wait_credit) {
-                qp->s_wait_credit = 0;
-                tasklet_hi_schedule(&qp->s_task);
+        if (qp->s_last_psn != psn) {
+                qp->s_last_psn = psn;
+                if (qp->s_wait_credit) {
+                        qp->s_wait_credit = 0;
+                        tasklet_hi_schedule(&qp->s_task);
+                }
         }
-        qp->s_last_psn = psn;
 }
 
 /**
@@ -1653,13 +1656,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
         case OP(SEND_FIRST):
                 if (!ipath_get_rwqe(qp, 0)) {
                 rnr_nak:
-                        /*
-                         * A RNR NAK will ACK earlier sends and RDMA writes.
-                         * Don't queue the NAK if a RDMA read or atomic
-                         * is pending though.
-                         */
-                        if (qp->r_nak_state)
-                                goto done;
                         qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
                         qp->r_ack_psn = qp->r_psn;
                         goto send_ack;
@@ -98,11 +98,15 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
                 while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
                         qp->s_rnr_timeout -= nqp->s_rnr_timeout;
                         l = l->next;
-                        if (l->next == &dev->rnrwait)
+                        if (l->next == &dev->rnrwait) {
+                                nqp = NULL;
                                 break;
+                        }
                         nqp = list_entry(l->next, struct ipath_qp,
                                          timerwait);
                 }
+                if (nqp)
+                        nqp->s_rnr_timeout -= qp->s_rnr_timeout;
                 list_add(&qp->timerwait, l);
         }
         spin_unlock_irqrestore(&dev->pending_lock, flags);
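
For reference, below is a minimal standalone sketch of the delta-encoded timeout list that the ipath_insert_rnr_queue() hunk above adjusts: each queued entry stores its timeout relative to the entry ahead of it, so inserting in front of an existing entry must shrink that entry's stored delta by the newcomer's delta. The names used here (rnr_entry, rnr_insert) are hypothetical illustrations, not the driver's actual types or functions.

/*
 * Sketch of a delta-encoded timeout list (hypothetical names, not the
 * driver's real types): each entry's 'timeout' is relative to the entry
 * in front of it, so the list stays sorted by absolute expiration time.
 */
#include <stdio.h>

struct rnr_entry {
        unsigned int timeout;           /* delta vs. the previous entry */
        struct rnr_entry *next;
};

/* Insert 'qp' (timeout given as an absolute value) into the delta list. */
static void rnr_insert(struct rnr_entry **head, struct rnr_entry *qp)
{
        struct rnr_entry **link = head;
        struct rnr_entry *nqp = *head;

        /* Walk past entries that expire before (or with) the new one. */
        while (nqp && qp->timeout >= nqp->timeout) {
                qp->timeout -= nqp->timeout;    /* make qp's delta relative */
                link = &nqp->next;
                nqp = nqp->next;
        }

        /*
         * If we stopped in front of an existing entry, its delta was
         * relative to our predecessor and is now relative to us, so it
         * must shrink by our delta.  Skipping this step is the "extra
         * delay" the commit message describes.
         */
        if (nqp)
                nqp->timeout -= qp->timeout;

        qp->next = nqp;
        *link = qp;
}

int main(void)
{
        struct rnr_entry a = { 30, NULL }, b = { 10, NULL }, c = { 20, NULL };
        struct rnr_entry *head = NULL, *e;
        unsigned int total = 0;

        rnr_insert(&head, &a);
        rnr_insert(&head, &b);
        rnr_insert(&head, &c);

        /* Expected cumulative expirations: 10, 20, 30. */
        for (e = head; e; e = e->next) {
                total += e->timeout;
                printf("expires at %u\n", total);
        }
        return 0;
}

The usual payoff of delta encoding is that a periodic tick only needs to touch the head of the list; the cost is exactly the rebalancing step that the added "if (nqp)" branch performs when a new entry lands ahead of an existing one.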