RDMA/rxe: Add a type flag to rxe_queue structs
To generate optimal code we only want to use smp_load_acquire() and smp_store_release() for user indices in the rxe_queue APIs; kernel indices are protected by locks, which also act as memory barriers. By adding a type to the queues we can determine which indices need to be protected.

Link: https://lore.kernel.org/r/20210527194748.662636-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 50971e3915
commit 59daff49f2
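This patch only introduces the queue type and threads it through rxe_queue_init(); the accessors that act on the new flag are not part of this diff. As a rough illustration of the intent stated above, a type-aware read of the consumer index could look like the sketch below. This is a sketch only, assuming the index fields of struct rxe_queue_buf; the helper name is hypothetical and not from the patch.

/*
 * Illustrative sketch, not code from this commit.  Kernel-owned indices
 * are already serialized by the queue locks, so a plain load is enough;
 * an index shared with user space needs acquire semantics to pair with
 * an smp_store_release() on the other side.
 */
static u32 example_queue_get_consumer(struct rxe_queue *q)
{
	if (q->type == QUEUE_TYPE_KERNEL)
		return q->buf->consumer_index;

	return smp_load_acquire(&q->buf->consumer_index);
}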
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -59,9 +59,11 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 		     struct rxe_create_cq_resp __user *uresp)
 {
 	int err;
+	enum queue_type type;
 
+	type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
 	cq->queue = rxe_queue_init(rxe, &cqe,
-				   sizeof(struct rxe_cqe));
+				   sizeof(struct rxe_cqe), type);
 	if (!cq->queue) {
 		pr_warn("unable to create cq\n");
 		return -ENOMEM;
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -206,6 +206,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 {
 	int err;
 	int wqe_size;
+	enum queue_type type;
 
 	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
 	if (err < 0)
@@ -231,7 +232,9 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
 	wqe_size += sizeof(struct rxe_send_wqe);
 
-	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
+	type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
+				      wqe_size, type);
 	if (!qp->sq.queue)
 		return -ENOMEM;
 
@@ -273,6 +276,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 {
 	int err;
 	int wqe_size;
+	enum queue_type type;
 
 	if (!qp->srq) {
 		qp->rq.max_wr = init->cap.max_recv_wr;
@@ -283,9 +287,9 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
 			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
 
-		qp->rq.queue = rxe_queue_init(rxe,
-					      &qp->rq.max_wr,
-					      wqe_size);
+		type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
+					      wqe_size, type);
 		if (!qp->rq.queue)
 			return -ENOMEM;
 
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -52,9 +52,8 @@ inline void rxe_queue_reset(struct rxe_queue *q)
 	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
 }
 
-struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
-				 int *num_elem,
-				 unsigned int elem_size)
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
+			unsigned int elem_size, enum queue_type type)
 {
 	struct rxe_queue *q;
 	size_t buf_size;
@@ -69,6 +68,7 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 		goto err1;
 
 	q->rxe = rxe;
+	q->type = type;
 
 	/* used in resize, only need to copy used part of queue */
 	q->elem_size = elem_size;
@@ -136,7 +136,7 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
 	int err;
 	unsigned long flags = 0, flags1;
 
-	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
+	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
 	if (!new_q)
 		return -ENOMEM;
 
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -19,6 +19,13 @@
  * of the queue is one less than the number of element slots
  */
 
+/* type of queue */
+enum queue_type {
+	QUEUE_TYPE_KERNEL,
+	QUEUE_TYPE_TO_USER,
+	QUEUE_TYPE_FROM_USER,
+};
+
 struct rxe_queue {
 	struct rxe_dev		*rxe;
 	struct rxe_queue_buf	*buf;
@@ -27,6 +34,7 @@ struct rxe_queue {
 	size_t			elem_size;
 	unsigned int		log2_elem_size;
 	u32			index_mask;
+	enum queue_type		type;
 };
 
 int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
@@ -35,9 +43,8 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
 
 void rxe_queue_reset(struct rxe_queue *q);
 
-struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
-				 int *num_elem,
-				 unsigned int elem_size);
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
+		unsigned int elem_size, enum queue_type type);
 
 int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
 		     unsigned int elem_size, struct ib_udata *udata,
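Taken together with the callers in this commit, the three values encode which side of the queue lives in user space: completion queues are created with QUEUE_TYPE_TO_USER (the kernel produces, user space consumes), while send, receive and shared receive queues use QUEUE_TYPE_FROM_USER (user space produces, the kernel consumes); QUEUE_TYPE_KERNEL means both ends stay in the kernel. A hedged sketch of that reading follows; the predicate name is hypothetical, not from the patch.

/*
 * Sketch only -- an interpretation of the enum, not code from this commit.
 * For a FROM_USER queue the producer index is advanced by user space; for
 * a TO_USER queue the consumer index is.  These are the indices that need
 * smp_load_acquire()/smp_store_release() rather than the queue locks.
 */
static inline bool example_queue_index_is_user_owned(const struct rxe_queue *q,
						     bool producer)
{
	if (q->type == QUEUE_TYPE_FROM_USER)
		return producer;	/* WQEs posted by user space */
	if (q->type == QUEUE_TYPE_TO_USER)
		return !producer;	/* CQEs consumed by user space */
	return false;			/* kernel-only queue */
}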
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -78,6 +78,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	int err;
 	int srq_wqe_size;
 	struct rxe_queue *q;
+	enum queue_type type;
 
 	srq->ibsrq.event_handler	= init->event_handler;
 	srq->ibsrq.srq_context		= init->srq_context;
@@ -91,8 +92,9 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	spin_lock_init(&srq->rq.producer_lock);
 	spin_lock_init(&srq->rq.consumer_lock);
 
-	q = rxe_queue_init(rxe, &srq->rq.max_wr,
-			   srq_wqe_size);
+	type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+	q = rxe_queue_init(rxe, &srq->rq.max_wr,
+			srq_wqe_size, type);
 	if (!q) {
 		pr_warn("unable to allocate queue for srq\n");
 		return -ENOMEM;