xprtrdma: Schedule reply tasklet once per upcall

Minor optimization: grab rpcrdma_tk_lock_g and disable hard IRQs
just once after clearing the receive completion queue.
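To see the pattern in isolation: the old code took rpcrdma_tk_lock_g (an
IRQ-disabling spinlock) once per received completion, while the new code
collects completions on a stack-local list and splices the whole batch into
rpcrdma_tasklets_g under a single lock acquisition. Below is a minimal
userspace sketch of that batching idea, not xprtrdma code: the names (item,
schedule_one, schedule_batch) are hypothetical, and a pthread mutex stands in
for the IRQ-disabling spinlock.

/*
 * Hypothetical sketch of the batching pattern; a pthread mutex
 * stands in for the IRQ-disabling spinlock rpcrdma_tk_lock_g.
 */
#include <pthread.h>
#include <stdio.h>

struct item {
	int id;
	struct item *next;
};

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *global_list;	/* plays the role of rpcrdma_tasklets_g */

/* Old pattern: one lock/unlock round trip per completion. */
static void schedule_one(struct item *it)
{
	pthread_mutex_lock(&global_lock);
	it->next = global_list;
	global_list = it;
	pthread_mutex_unlock(&global_lock);
}

/* New pattern: build a private batch locklessly, then splice it in once. */
static void schedule_batch(struct item **batch, int n)
{
	struct item *head = NULL;

	for (int i = 0; i < n; i++) {	/* no lock needed for a local list */
		batch[i]->next = head;
		head = batch[i];
	}

	pthread_mutex_lock(&global_lock);	/* single critical section */
	while (head) {
		struct item *next = head->next;

		head->next = global_list;
		global_list = head;
		head = next;
	}
	pthread_mutex_unlock(&global_lock);
}

int main(void)
{
	struct item items[4] = { { 1 }, { 2 }, { 3 }, { 4 } };
	struct item *batch[] = { &items[1], &items[2], &items[3] };

	schedule_one(&items[0]);	/* old: lock taken per item */
	schedule_batch(batch, 3);	/* new: lock taken once for the batch */

	for (struct item *it = global_list; it; it = it->next)
		printf("queued item %d\n", it->id);
	return 0;
}

As in the patch, the cost of the lock (and, in the kernel, the hard-IRQ
disable) is paid once per upcall rather than once per reply.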

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit bb96193d91
parent 2e84522c2e
Author: Chuck Lever, 2014-07-29 17:25:46 -04:00
Committer: Anna Schumaker


@@ -105,17 +105,6 @@ rpcrdma_run_tasklet(unsigned long data)
 
 static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);
 
-static inline void
-rpcrdma_schedule_tasklet(struct rpcrdma_rep *rep)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
-	list_add_tail(&rep->rr_list, &rpcrdma_tasklets_g);
-	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
-	tasklet_schedule(&rpcrdma_tasklet_g);
-}
-
 static void
 rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
 {
@@ -214,7 +203,7 @@ rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
 }
 
 static void
-rpcrdma_recvcq_process_wc(struct ib_wc *wc)
+rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
 {
 	struct rpcrdma_rep *rep =
 			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;
@@ -245,28 +234,38 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc)
 	}
 
 out_schedule:
-	rpcrdma_schedule_tasklet(rep);
+	list_add_tail(&rep->rr_list, sched_list);
 }
 
 static int
 rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
 {
+	struct list_head sched_list;
 	struct ib_wc *wcs;
 	int budget, count, rc;
+	unsigned long flags;
 
+	INIT_LIST_HEAD(&sched_list);
 	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
 	do {
 		wcs = ep->rep_recv_wcs;
 
 		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
 		if (rc <= 0)
-			return rc;
+			goto out_schedule;
 
 		count = rc;
 		while (count-- > 0)
-			rpcrdma_recvcq_process_wc(wcs++);
+			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
 	} while (rc == RPCRDMA_POLLSIZE && --budget);
-	return 0;
+	rc = 0;
+
+out_schedule:
+	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
+	list_splice_tail(&sched_list, &rpcrdma_tasklets_g);
+	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
+	tasklet_schedule(&rpcrdma_tasklet_g);
+	return rc;
 }
 
 /*