IB/core: Guarantee that a local_dma_lkey is available
Every single ULP requires a local_dma_lkey to do anything with a QP, so let us ensure one exists for every PD created. If the driver can supply a global local_dma_lkey then use that; otherwise, ask the driver to create a local-use, all-physical-memory MR associated with the new PD. Signed-off-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com> Reviewed-by: Sagi Grimberg <sagig@dev.mellanox.co.il> Acked-by: Christoph Hellwig <hch@infradead.org> Reviewed-by: Steve Wise <swise@opengridcomputing.com> Reviewed-by: Ira Weiny <ira.weiny@intel.com> Tested-by: Ira Weiny <ira.weiny@intel.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
parent
7332bed085
commit
96249d70dd
@ -562,6 +562,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
|
|||||||
|
|
||||||
pd->device = file->device->ib_dev;
|
pd->device = file->device->ib_dev;
|
||||||
pd->uobject = uobj;
|
pd->uobject = uobj;
|
||||||
|
pd->local_mr = NULL;
|
||||||
atomic_set(&pd->usecnt, 0);
|
atomic_set(&pd->usecnt, 0);
|
||||||
|
|
||||||
uobj->object = pd;
|
uobj->object = pd;
|
||||||
|
@ -213,24 +213,61 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
|
|||||||
|
|
||||||
/* Protection domains */
|
/* Protection domains */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ib_alloc_pd - Allocates an unused protection domain.
|
||||||
|
* @device: The device on which to allocate the protection domain.
|
||||||
|
*
|
||||||
|
* A protection domain object provides an association between QPs, shared
|
||||||
|
* receive queues, address handles, memory regions, and memory windows.
|
||||||
|
*
|
||||||
|
* Every PD has a local_dma_lkey which can be used as the lkey value for local
|
||||||
|
* memory operations.
|
||||||
|
*/
|
||||||
struct ib_pd *ib_alloc_pd(struct ib_device *device)
|
struct ib_pd *ib_alloc_pd(struct ib_device *device)
|
||||||
{
|
{
|
||||||
struct ib_pd *pd;
|
struct ib_pd *pd;
|
||||||
|
struct ib_device_attr devattr;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
rc = ib_query_device(device, &devattr);
|
||||||
|
if (rc)
|
||||||
|
return ERR_PTR(rc);
|
||||||
|
|
||||||
pd = device->alloc_pd(device, NULL, NULL);
|
pd = device->alloc_pd(device, NULL, NULL);
|
||||||
|
if (IS_ERR(pd))
|
||||||
|
return pd;
|
||||||
|
|
||||||
if (!IS_ERR(pd)) {
|
pd->device = device;
|
||||||
pd->device = device;
|
pd->uobject = NULL;
|
||||||
pd->uobject = NULL;
|
pd->local_mr = NULL;
|
||||||
atomic_set(&pd->usecnt, 0);
|
atomic_set(&pd->usecnt, 0);
|
||||||
|
|
||||||
|
if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
|
||||||
|
pd->local_dma_lkey = device->local_dma_lkey;
|
||||||
|
else {
|
||||||
|
struct ib_mr *mr;
|
||||||
|
|
||||||
|
mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
|
||||||
|
if (IS_ERR(mr)) {
|
||||||
|
ib_dealloc_pd(pd);
|
||||||
|
return (struct ib_pd *)mr;
|
||||||
|
}
|
||||||
|
|
||||||
|
pd->local_mr = mr;
|
||||||
|
pd->local_dma_lkey = pd->local_mr->lkey;
|
||||||
}
|
}
|
||||||
|
|
||||||
return pd;
|
return pd;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(ib_alloc_pd);
|
EXPORT_SYMBOL(ib_alloc_pd);
|
||||||
|
|
||||||
int ib_dealloc_pd(struct ib_pd *pd)
|
int ib_dealloc_pd(struct ib_pd *pd)
|
||||||
{
|
{
|
||||||
|
if (pd->local_mr) {
|
||||||
|
if (ib_dereg_mr(pd->local_mr))
|
||||||
|
return -EBUSY;
|
||||||
|
pd->local_mr = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
if (atomic_read(&pd->usecnt))
|
if (atomic_read(&pd->usecnt))
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
|
|
||||||
|
@ -1257,9 +1257,11 @@ struct ib_udata {
|
|||||||
};
|
};
|
||||||
|
|
||||||
/*
 * A protection domain associates QPs, SRQs, AHs, MRs, and MWs with a
 * consumer; ib_alloc_pd() guarantees local_dma_lkey is always usable.
 */
struct ib_pd {
	/*
	 * lkey for local memory operations: either the device-wide
	 * local_dma_lkey, or the lkey of this PD's own local_mr when the
	 * device lacks IB_DEVICE_LOCAL_DMA_LKEY.
	 */
	u32			local_dma_lkey;
	struct ib_device       *device;
	struct ib_uobject      *uobject; /* NULL for kernel-allocated PDs */
	atomic_t		usecnt; /* count all resources */
	/*
	 * All-physical-memory MR backing local_dma_lkey on devices without
	 * a global lkey; NULL otherwise. Deregistered in ib_dealloc_pd().
	 */
	struct ib_mr	       *local_mr;
};
||||||
|
|
||||||
struct ib_xrcd {
|
struct ib_xrcd {
|
||||||
@ -2192,13 +2194,6 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
|
|||||||
int ib_find_pkey(struct ib_device *device,
|
int ib_find_pkey(struct ib_device *device,
|
||||||
u8 port_num, u16 pkey, u16 *index);
|
u8 port_num, u16 pkey, u16 *index);
|
||||||
|
|
||||||
/**
|
|
||||||
* ib_alloc_pd - Allocates an unused protection domain.
|
|
||||||
* @device: The device on which to allocate the protection domain.
|
|
||||||
*
|
|
||||||
* A protection domain object provides an association between QPs, shared
|
|
||||||
* receive queues, address handles, memory regions, and memory windows.
|
|
||||||
*/
|
|
||||||
struct ib_pd *ib_alloc_pd(struct ib_device *device);
|
struct ib_pd *ib_alloc_pd(struct ib_device *device);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
Loading…
Reference in New Issue
Block a user