IB/rdmavt, IB/hfi1, IB/qib: Make wc opcode translation driver dependent

The work to create a completion helper moved the translation of send
wqe operations to completion opcodes into rdmavt.

This precludes having driver-dependent operations.  Make the translation
driver-dependent by doing it in the driver, prior to the
rvt_qp_swqe_complete() call, using restored per-driver translation tables.

Fixes: f2dc9cdce8 ("IB/rdmavt: Add a send completion helper")
Fixes: 0771da5a6e ("IB/hfi1,IB/qib: Use new send completion helper")
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 43a474aadb
parent 5a52a7acf7
Author: Mike Marciniszyn, 2017-03-20 17:25:04 -07:00 (committed by Doug Ledford)
8 changed files with 55 additions and 24 deletions
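For illustration only (not part of the patch): the technique being restored is a per-driver, designated-initializer lookup table indexed by send-wr opcode, which the driver consults before calling the shared completion helper. The standalone, compilable C sketch below uses abbreviated stand-in enums rather than the kernel's full ib_wr_opcode/ib_wc_opcode, and a hypothetical driver prefix foo_:

#include <stdio.h>

/* Abbreviated stand-ins for the kernel's ib_wr_opcode/ib_wc_opcode. */
enum wr_opcode {
	WR_RDMA_WRITE,
	WR_SEND,
	WR_RDMA_READ,
	WR_REG_MR,		/* supported by some drivers only */
	WR_OPCODE_MAX
};

enum wc_opcode {
	WC_RDMA_WRITE,
	WC_SEND,
	WC_RDMA_READ,
	WC_REG_MR
};

/* Driver-private translation table, analogous to ib_hfi1_wc_opcode[]
 * below; a second driver may define a shorter or different table. */
static const enum wc_opcode foo_wc_opcode[WR_OPCODE_MAX] = {
	[WR_RDMA_WRITE] = WC_RDMA_WRITE,
	[WR_SEND]       = WC_SEND,
	[WR_RDMA_READ]  = WC_RDMA_READ,
	[WR_REG_MR]     = WC_REG_MR,
};

/* Stand-in for rvt_qp_swqe_complete() after this patch: the caller,
 * not the shared core, supplies the completion opcode. */
static void qp_swqe_complete(enum wc_opcode opcode, int status)
{
	printf("completion: wc opcode %d, status %d\n", opcode, status);
}

int main(void)
{
	enum wr_opcode wr = WR_SEND;

	/* The driver translates before calling the shared helper. */
	qp_swqe_complete(foo_wc_opcode[wr], 0);
	return 0;
}

The motivation is visible in the hunks below: hfi1's restored table covers IB_WR_SEND_WITH_INV, IB_WR_LOCAL_INV, and IB_WR_REG_MR, while qib's table stops at IB_WR_ATOMIC_FETCH_AND_ADD; a single table in rdmavt could not express that difference.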

drivers/infiniband/hw/hfi1/rc.c

@@ -1034,7 +1034,10 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 		/* see post_send() */
 		barrier();
 		rvt_put_swqe(wqe);
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_hfi1_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	}
 	/*
 	 * If we were waiting for sends to complete before re-sending,
@@ -1081,7 +1084,10 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_hfi1_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	} else {
 		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

drivers/infiniband/hw/hfi1/ruc.c

@@ -920,7 +920,10 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

-	rvt_qp_swqe_complete(qp, wqe, status);
+	rvt_qp_swqe_complete(qp,
+			     wqe,
+			     ib_hfi1_wc_opcode[wqe->wr.opcode],
+			     status);

 	if (qp->s_acked == old_last)
 		qp->s_acked = last;

drivers/infiniband/hw/hfi1/verbs.c

@@ -296,6 +296,22 @@ static inline bool wss_exceeds_threshold(void)
 	return atomic_read(&wss.total_count) >= wss.threshold;
 }

+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
+	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+	[IB_WR_SEND] = IB_WC_SEND,
+	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+	[IB_WR_REG_MR] = IB_WC_REG_MR
+};
+
 /*
  * Length of header by opcode, 0 --> not supported
  */

drivers/infiniband/hw/qib/qib_rc.c

@@ -938,7 +938,10 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 		/* see post_send() */
 		barrier();
 		rvt_put_swqe(wqe);
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_qib_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	}
 	/*
 	 * If we were waiting for sends to complete before resending,
@@ -983,7 +986,10 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_qib_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	} else
 		this_cpu_inc(*ibp->rvp.rc_delayed_comp);

drivers/infiniband/hw/qib/qib_ruc.c

@@ -769,7 +769,10 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

-	rvt_qp_swqe_complete(qp, wqe, status);
+	rvt_qp_swqe_complete(qp,
+			     wqe,
+			     ib_qib_wc_opcode[wqe->wr.opcode],
+			     status);

 	if (qp->s_acked == old_last)
 		qp->s_acked = last;

drivers/infiniband/hw/qib/qib_verbs.c

@@ -113,6 +113,19 @@ static unsigned int ib_qib_disable_sma;
 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(disable_sma, "Disable the SMA");

+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_qib_wc_opcode[] = {
+	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+	[IB_WR_SEND] = IB_WC_SEND,
+	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
 /*
  * System image GUID.
  */

drivers/infiniband/sw/rdmavt/qp.c

@@ -117,23 +117,6 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
 };
 EXPORT_SYMBOL(ib_rvt_state_ops);

-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_rvt_wc_opcode[] = {
-	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-	[IB_WR_SEND] = IB_WC_SEND,
-	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
-	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
-	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
-	[IB_WR_REG_MR] = IB_WC_REG_MR
-};
-EXPORT_SYMBOL(ib_rvt_wc_opcode);
-
 static void get_map_page(struct rvt_qpn_table *qpt,
 			 struct rvt_qpn_map *map,
 			 gfp_t gfp)

include/rdma/rdmavt_qp.h

@@ -574,6 +574,7 @@ extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
 static inline void rvt_qp_swqe_complete(
 	struct rvt_qp *qp,
 	struct rvt_swqe *wqe,
+	enum ib_wc_opcode opcode,
 	enum ib_wc_status status)
 {
 	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
@@ -586,7 +587,7 @@ static inline void rvt_qp_swqe_complete(
 		memset(&wc, 0, sizeof(wc));
 		wc.wr_id = wqe->wr.wr_id;
 		wc.status = status;
-		wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
+		wc.opcode = opcode;
 		wc.qp = &qp->ibqp;
 		wc.byte_len = wqe->length;
 		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,