IB/srp: Split send and receive CQs to reduce number of interrupts
We can reduce the number of IB interrupts from two interrupts per srp_queuecommand() call to one by using separate CQs for send and receive completions and processing send completions by polling every time a TX IU is allocated. Receive completion events still trigger an interrupt. Signed-off-by: Bart Van Assche <bart.vanassche@gmail.com> Signed-off-by: Roland Dreier <rolandd@cisco.com>
This commit is contained in:
parent
676ad58553
commit
9c03dc9f19
@ -80,7 +80,8 @@ MODULE_PARM_DESC(mellanox_workarounds,
|
|||||||
|
|
||||||
static void srp_add_one(struct ib_device *device);
|
static void srp_add_one(struct ib_device *device);
|
||||||
static void srp_remove_one(struct ib_device *device);
|
static void srp_remove_one(struct ib_device *device);
|
||||||
static void srp_completion(struct ib_cq *cq, void *target_ptr);
|
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
|
||||||
|
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
|
||||||
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
|
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
|
||||||
|
|
||||||
static struct scsi_transport_template *ib_srp_transport_template;
|
static struct scsi_transport_template *ib_srp_transport_template;
|
||||||
@ -227,14 +228,22 @@ static int srp_create_target_ib(struct srp_target_port *target)
|
|||||||
if (!init_attr)
|
if (!init_attr)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
target->cq = ib_create_cq(target->srp_host->srp_dev->dev,
|
target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
|
||||||
srp_completion, NULL, target, SRP_CQ_SIZE, 0);
|
srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
|
||||||
if (IS_ERR(target->cq)) {
|
if (IS_ERR(target->recv_cq)) {
|
||||||
ret = PTR_ERR(target->cq);
|
ret = PTR_ERR(target->recv_cq);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
|
target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
|
||||||
|
srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
|
||||||
|
if (IS_ERR(target->send_cq)) {
|
||||||
|
ret = PTR_ERR(target->send_cq);
|
||||||
|
ib_destroy_cq(target->recv_cq);
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
|
||||||
|
|
||||||
init_attr->event_handler = srp_qp_event;
|
init_attr->event_handler = srp_qp_event;
|
||||||
init_attr->cap.max_send_wr = SRP_SQ_SIZE;
|
init_attr->cap.max_send_wr = SRP_SQ_SIZE;
|
||||||
@ -243,20 +252,22 @@ static int srp_create_target_ib(struct srp_target_port *target)
|
|||||||
init_attr->cap.max_send_sge = 1;
|
init_attr->cap.max_send_sge = 1;
|
||||||
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
|
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
|
||||||
init_attr->qp_type = IB_QPT_RC;
|
init_attr->qp_type = IB_QPT_RC;
|
||||||
init_attr->send_cq = target->cq;
|
init_attr->send_cq = target->send_cq;
|
||||||
init_attr->recv_cq = target->cq;
|
init_attr->recv_cq = target->recv_cq;
|
||||||
|
|
||||||
target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
|
target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
|
||||||
if (IS_ERR(target->qp)) {
|
if (IS_ERR(target->qp)) {
|
||||||
ret = PTR_ERR(target->qp);
|
ret = PTR_ERR(target->qp);
|
||||||
ib_destroy_cq(target->cq);
|
ib_destroy_cq(target->send_cq);
|
||||||
|
ib_destroy_cq(target->recv_cq);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = srp_init_qp(target, target->qp);
|
ret = srp_init_qp(target, target->qp);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
ib_destroy_qp(target->qp);
|
ib_destroy_qp(target->qp);
|
||||||
ib_destroy_cq(target->cq);
|
ib_destroy_cq(target->send_cq);
|
||||||
|
ib_destroy_cq(target->recv_cq);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -270,7 +281,8 @@ static void srp_free_target_ib(struct srp_target_port *target)
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
ib_destroy_qp(target->qp);
|
ib_destroy_qp(target->qp);
|
||||||
ib_destroy_cq(target->cq);
|
ib_destroy_cq(target->send_cq);
|
||||||
|
ib_destroy_cq(target->recv_cq);
|
||||||
|
|
||||||
for (i = 0; i < SRP_RQ_SIZE; ++i)
|
for (i = 0; i < SRP_RQ_SIZE; ++i)
|
||||||
srp_free_iu(target->srp_host, target->rx_ring[i]);
|
srp_free_iu(target->srp_host, target->rx_ring[i]);
|
||||||
@ -568,7 +580,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
while (ib_poll_cq(target->cq, 1, &wc) > 0)
|
while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
|
||||||
|
; /* nothing */
|
||||||
|
while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
|
||||||
; /* nothing */
|
; /* nothing */
|
||||||
|
|
||||||
spin_lock_irq(target->scsi_host->host_lock);
|
spin_lock_irq(target->scsi_host->host_lock);
|
||||||
@ -851,7 +865,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
|
|||||||
struct srp_iu *iu;
|
struct srp_iu *iu;
|
||||||
u8 opcode;
|
u8 opcode;
|
||||||
|
|
||||||
iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
|
iu = target->rx_ring[wc->wr_id];
|
||||||
|
|
||||||
dev = target->srp_host->srp_dev->dev;
|
dev = target->srp_host->srp_dev->dev;
|
||||||
ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
|
ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
|
||||||
@ -898,7 +912,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
|
|||||||
DMA_FROM_DEVICE);
|
DMA_FROM_DEVICE);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void srp_completion(struct ib_cq *cq, void *target_ptr)
|
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
|
||||||
{
|
{
|
||||||
struct srp_target_port *target = target_ptr;
|
struct srp_target_port *target = target_ptr;
|
||||||
struct ib_wc wc;
|
struct ib_wc wc;
|
||||||
@ -907,17 +921,31 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
|
|||||||
while (ib_poll_cq(cq, 1, &wc) > 0) {
|
while (ib_poll_cq(cq, 1, &wc) > 0) {
|
||||||
if (wc.status) {
|
if (wc.status) {
|
||||||
shost_printk(KERN_ERR, target->scsi_host,
|
shost_printk(KERN_ERR, target->scsi_host,
|
||||||
PFX "failed %s status %d\n",
|
PFX "failed receive status %d\n",
|
||||||
wc.wr_id & SRP_OP_RECV ? "receive" : "send",
|
|
||||||
wc.status);
|
wc.status);
|
||||||
target->qp_in_error = 1;
|
target->qp_in_error = 1;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (wc.wr_id & SRP_OP_RECV)
|
srp_handle_recv(target, &wc);
|
||||||
srp_handle_recv(target, &wc);
|
}
|
||||||
else
|
}
|
||||||
++target->tx_tail;
|
|
||||||
|
static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
|
||||||
|
{
|
||||||
|
struct srp_target_port *target = target_ptr;
|
||||||
|
struct ib_wc wc;
|
||||||
|
|
||||||
|
while (ib_poll_cq(cq, 1, &wc) > 0) {
|
||||||
|
if (wc.status) {
|
||||||
|
shost_printk(KERN_ERR, target->scsi_host,
|
||||||
|
PFX "failed send status %d\n",
|
||||||
|
wc.status);
|
||||||
|
target->qp_in_error = 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
++target->tx_tail;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -930,7 +958,7 @@ static int __srp_post_recv(struct srp_target_port *target)
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
next = target->rx_head & (SRP_RQ_SIZE - 1);
|
next = target->rx_head & (SRP_RQ_SIZE - 1);
|
||||||
wr.wr_id = next | SRP_OP_RECV;
|
wr.wr_id = next;
|
||||||
iu = target->rx_ring[next];
|
iu = target->rx_ring[next];
|
||||||
|
|
||||||
list.addr = iu->dma;
|
list.addr = iu->dma;
|
||||||
@ -970,6 +998,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
|
|||||||
{
|
{
|
||||||
s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
|
s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
|
||||||
|
|
||||||
|
srp_send_completion(target->send_cq, target);
|
||||||
|
|
||||||
if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
|
if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
@ -60,7 +60,6 @@ enum {
|
|||||||
SRP_RQ_SHIFT = 6,
|
SRP_RQ_SHIFT = 6,
|
||||||
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
|
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
|
||||||
SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
|
SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
|
||||||
SRP_CQ_SIZE = SRP_SQ_SIZE + SRP_RQ_SIZE,
|
|
||||||
|
|
||||||
SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
|
SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
|
||||||
|
|
||||||
@ -69,8 +68,6 @@ enum {
|
|||||||
SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
|
SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
|
||||||
};
|
};
|
||||||
|
|
||||||
#define SRP_OP_RECV (1 << 31)
|
|
||||||
|
|
||||||
enum srp_target_state {
|
enum srp_target_state {
|
||||||
SRP_TARGET_LIVE,
|
SRP_TARGET_LIVE,
|
||||||
SRP_TARGET_CONNECTING,
|
SRP_TARGET_CONNECTING,
|
||||||
@ -133,7 +130,8 @@ struct srp_target_port {
|
|||||||
int path_query_id;
|
int path_query_id;
|
||||||
|
|
||||||
struct ib_cm_id *cm_id;
|
struct ib_cm_id *cm_id;
|
||||||
struct ib_cq *cq;
|
struct ib_cq *recv_cq;
|
||||||
|
struct ib_cq *send_cq;
|
||||||
struct ib_qp *qp;
|
struct ib_qp *qp;
|
||||||
|
|
||||||
int max_ti_iu_len;
|
int max_ti_iu_len;
|
||||||
|
Loading…
Reference in New Issue
Block a user