linux/drivers/infiniband/sw/siw/siw_cq.c
Bernard Metzler 58fb0b5625 RDMA/siw: Simplify QP representation
Change siw_qp to contain ib_qp. Use rdma_is_kernel_res() on the contained
ib_qp to distinguish kernel-level from user-level application
resources. Apply the same kernel/user-level application detection
mechanism to completion queues.

Link: https://lore.kernel.org/r/20191210161729.31598-1-bmt@zurich.ibm.com
Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
2020-01-03 15:54:09 -04:00
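The mechanism in brief: with struct ib_qp embedded in struct siw_qp (the
field is named base_qp, matching the base_cq usage visible below), the
restrack entry of the contained ib_qp tells kernel resources from user
ones. A minimal sketch of the pattern; siw_qp_is_kernel() is a
hypothetical helper for illustration only, not part of the driver:

	/* Sketch only; siw_qp is abridged, siw_qp_is_kernel() is hypothetical. */
	struct siw_qp {
		struct ib_qp base_qp;	/* embedded, not referenced by pointer */
		/* ... siw private QP state ... */
	};

	static inline bool siw_qp_is_kernel(struct siw_qp *qp)
	{
		/* restrack marks resources created on behalf of user space */
		return rdma_is_kernel_res(&qp->base_qp.res);
	}

The same test is applied to CQs below via cq->base_cq.res.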

// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>

#include <rdma/ib_verbs.h>

#include "siw.h"
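/* Map siw opcodes to the ib_verbs WC opcodes reported to clients. */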
static int map_wc_opcode[SIW_NUM_OPCODES] = {
	[SIW_OP_WRITE] = IB_WC_RDMA_WRITE,
	[SIW_OP_SEND] = IB_WC_SEND,
	[SIW_OP_SEND_WITH_IMM] = IB_WC_SEND,
	[SIW_OP_READ] = IB_WC_RDMA_READ,
	[SIW_OP_READ_LOCAL_INV] = IB_WC_RDMA_READ,
	[SIW_OP_COMP_AND_SWAP] = IB_WC_COMP_SWAP,
	[SIW_OP_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
	[SIW_OP_INVAL_STAG] = IB_WC_LOCAL_INV,
	[SIW_OP_REG_MR] = IB_WC_REG_MR,
	[SIW_OP_RECEIVE] = IB_WC_RECV,
	[SIW_OP_READ_RESPONSE] = -1 /* not used */
};
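/*
 * Map siw completion status to ib_verbs status. The table is indexed
 * by enum siw_wc_status, so entry order must match the enum.
 */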
static struct {
	enum siw_wc_status siw;
	enum ib_wc_status ib;
} map_cqe_status[SIW_NUM_WC_STATUS] = {
	{ SIW_WC_SUCCESS, IB_WC_SUCCESS },
	{ SIW_WC_LOC_LEN_ERR, IB_WC_LOC_LEN_ERR },
	{ SIW_WC_LOC_PROT_ERR, IB_WC_LOC_PROT_ERR },
	{ SIW_WC_LOC_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
	{ SIW_WC_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
	{ SIW_WC_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
	{ SIW_WC_LOC_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
	{ SIW_WC_REM_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
	{ SIW_WC_REM_INV_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
	{ SIW_WC_GENERAL_ERR, IB_WC_GENERAL_ERR }
};
/*
 * Reap one CQE from the CQ. Only used by kernel clients
 * during normal CQ operation. May also be called during CQ
 * flush for a user-mapped CQE array.
 */
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
{
	struct siw_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);

	cqe = &cq->queue[cq->cq_get % cq->num_cqe];
	if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
		memset(wc, 0, sizeof(*wc));
		wc->wr_id = cqe->id;
		wc->status = map_cqe_status[cqe->status].ib;
		wc->opcode = map_wc_opcode[cqe->opcode];
		wc->byte_len = cqe->bytes;
		/*
		 * During CQ flush, CQEs from user-space QPs may also be
		 * reaped here; they hold no QP reference and do not
		 * qualify for memory extension verbs.
		 */
		if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
			if (cqe->flags & SIW_WQE_REM_INVAL) {
				wc->ex.invalidate_rkey = cqe->inval_stag;
				wc->wc_flags = IB_WC_WITH_INVALIDATE;
			}
			wc->qp = cqe->base_qp;
			siw_dbg_cq(cq,
				   "idx %u, type %d, flags %2x, id 0x%pK\n",
				   cq->cq_get % cq->num_cqe, cqe->opcode,
				   cqe->flags, (void *)(uintptr_t)cqe->id);
		}
		WRITE_ONCE(cqe->flags, 0);
		cq->cq_get++;

		spin_unlock_irqrestore(&cq->lock, flags);

		return 1;
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}
/*
* siw_cq_flush()
*
* Flush all CQ elements.
*/
void siw_cq_flush(struct siw_cq *cq)
{
	struct ib_wc wc;

	while (siw_reap_cqe(cq, &wc))
		;
}
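For context, the driver's ib_poll_cq entry point is essentially a
bounded loop around siw_reap_cqe(). A sketch modeled on siw_poll_cq()
in siw_verbs.c (not part of this file; to_siw_cq() is assumed to be the
driver's container_of helper):

	int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
	{
		struct siw_cq *cq = to_siw_cq(base_cq);
		int i;

		for (i = 0; i < num_cqe; i++) {
			/* stop at the first CQE not yet marked valid */
			if (!siw_reap_cqe(cq, wc))
				break;
			wc++;
		}
		return i;
	}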