RDMA: Hold the sgid_attr inside the struct ib_ah/qp
If the AH has a GRH then hold a reference to the sgid_attr inside the common struct.

If the QP is modified with an AV that includes a GRH then also hold a reference to the sgid_attr inside the common struct.

This informs the cache that the sgid_index is in-use so long as the AH or QP using it exists.

This also means that all drivers can access the sgid_attr directly from the ah_attr instead of querying the cache during their UD post-send paths.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
commit 1a1f460ff1
parent 7492052a18
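As context for the diff below, here is a minimal, hypothetical sketch of what the last point enables in a driver's UD post-send path. The driver-side names (struct drv_ud_wqe, drv_build_ud_segment) and the placeholder UDP source port are invented for illustration; only ib_ah::sgid_attr and the ib_gid_attr fields come from this series.

#include <rdma/ib_verbs.h>

struct drv_ud_wqe {                     /* hypothetical WQE layout */
        u16 udp_sport;
};

static void drv_build_ud_segment(struct ib_ah *ibah, struct drv_ud_wqe *wqe)
{
        /* Pinned at AH creation time; no per-WR GID cache lookup needed. */
        const struct ib_gid_attr *sgid_attr = ibah->sgid_attr;

        /* sgid_attr is only non-NULL when the AH was created with a GRH. */
        if (sgid_attr && sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
                wqe->udp_sport = 0xC000;        /* arbitrary placeholder */
}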
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -459,6 +459,19 @@ static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
                 rdma_destroy_ah_attr(ah_attr);
 }
 
+static const struct ib_gid_attr *
+rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
+                      const struct ib_gid_attr *old_attr)
+{
+        if (old_attr)
+                rdma_put_gid_attr(old_attr);
+        if (ah_attr->ah_flags & IB_AH_GRH) {
+                rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
+                return ah_attr->grh.sgid_attr;
+        }
+        return NULL;
+}
+
 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
                                      struct rdma_ah_attr *ah_attr,
                                      struct ib_udata *udata)
@@ -472,6 +485,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
                 ah->pd = pd;
                 ah->uobject = NULL;
                 ah->type = ah_attr->type;
+                ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
+
                 atomic_inc(&pd->usecnt);
         }
 
@@ -871,6 +886,7 @@ int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
                ah->device->modify_ah(ah, ah_attr) :
                -EOPNOTSUPP;
 
+        ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
         rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
         return ret;
 }
@@ -888,13 +904,17 @@ EXPORT_SYMBOL(rdma_query_ah);
 
 int rdma_destroy_ah(struct ib_ah *ah)
 {
+        const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
         struct ib_pd *pd;
         int ret;
 
         pd = ah->pd;
         ret = ah->device->destroy_ah(ah);
-        if (!ret)
+        if (!ret) {
                 atomic_dec(&pd->usecnt);
+                if (sgid_attr)
+                        rdma_put_gid_attr(sgid_attr);
+        }
 
         return ret;
 }
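Taken together with the _rdma_create_ah() and rdma_destroy_ah() hunks above, the AH-side lifetime looks like the sketch below. It assumes a valid PD and an rdma_ah_attr with IB_AH_GRH set and grh.sgid_attr already resolved (the core does that before reaching this code); the function name is illustrative and rdma_create_ah() is used in its two-argument form from this kernel.

#include <rdma/ib_verbs.h>

static int example_ah_lifetime(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
        struct ib_ah *ah = rdma_create_ah(pd, ah_attr);

        if (IS_ERR(ah))
                return PTR_ERR(ah);

        /*
         * ah->sgid_attr now holds a reference to ah_attr->grh.sgid_attr,
         * so the cache treats that sgid_index as in-use for as long as
         * the AH exists.
         */
        return rdma_destroy_ah(ah);     /* drops the reference again */
}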
@@ -1573,6 +1593,13 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
                         return ret;
         }
         if (attr_mask & IB_QP_ALT_PATH) {
+                /*
+                 * FIXME: This does not track the migration state, so if the
+                 * user loads a new alternate path after the HW has migrated
+                 * from primary->alternate we will keep the wrong
+                 * references. This is OK for IB because the reference
+                 * counting does not serve any functional purpose.
+                 */
                 ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
                                           &old_sgid_attr_alt_av);
                 if (ret)
@@ -1606,8 +1633,17 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
         }
 
         ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
-        if (!ret && (attr_mask & IB_QP_PORT))
+        if (ret)
+                goto out;
+
+        if (attr_mask & IB_QP_PORT)
                 qp->port = attr->port_num;
+        if (attr_mask & IB_QP_AV)
+                qp->av_sgid_attr =
+                        rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
+        if (attr_mask & IB_QP_ALT_PATH)
+                qp->alt_path_sgid_attr = rdma_update_sgid_attr(
+                        &attr->alt_ah_attr, qp->alt_path_sgid_attr);
 
 out:
         if (attr_mask & IB_QP_ALT_PATH)
@@ -1765,6 +1801,8 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp)
 
 int ib_destroy_qp(struct ib_qp *qp)
 {
+        const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
+        const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
         struct ib_pd *pd;
         struct ib_cq *scq, *rcq;
         struct ib_srq *srq;
@@ -1795,6 +1833,10 @@ int ib_destroy_qp(struct ib_qp *qp)
         rdma_restrack_del(&qp->res);
         ret = qp->device->destroy_qp(qp);
         if (!ret) {
+                if (alt_path_sgid_attr)
+                        rdma_put_gid_attr(alt_path_sgid_attr);
+                if (av_sgid_attr)
+                        rdma_put_gid_attr(av_sgid_attr);
                 if (pd)
                         atomic_dec(&pd->usecnt);
                 if (scq)
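For the QP side, a rough sketch of how the new references are taken in practice. The helper name and attribute values are made up, and a real transition to RTR needs more mask bits than shown; the point is only that a successful modify with IB_QP_AV leaves qp->av_sgid_attr holding the entry until ib_destroy_qp() or a later modify replaces it.

#include <rdma/ib_verbs.h>

static int example_qp_av(struct ib_qp *qp, const struct rdma_ah_attr *av)
{
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_RTR,
                .path_mtu = IB_MTU_1024,
                .ah_attr  = *av,        /* carries grh.sgid_attr when GRH is set */
        };

        /* On success the core stores attr.ah_attr's sgid_attr in the QP. */
        return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU);
}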
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1580,6 +1580,7 @@ struct ib_ah {
         struct ib_device        *device;
         struct ib_pd            *pd;
         struct ib_uobject       *uobject;
+        const struct ib_gid_attr *sgid_attr;
         enum rdma_ah_attr_type  type;
 };
 
@@ -1778,6 +1779,9 @@ struct ib_qp {
         struct ib_uobject      *uobject;
         void                  (*event_handler)(struct ib_event *, void *);
         void                   *qp_context;
+        /* sgid_attrs associated with the AV's */
+        const struct ib_gid_attr *av_sgid_attr;
+        const struct ib_gid_attr *alt_path_sgid_attr;
         u32                     qp_num;
         u32                     max_write_sge;
         u32                     max_read_sge;