RDMA/odp: remove ib_ucontext from ib_umem
At this point the ucontext is only being stored to access the ib_device,
so just store the ib_device directly instead. This is more natural and
logical as the umem has nothing to do with the ucontext.

Link: https://lore.kernel.org/r/20190806231548.25242-8-jgg@ziepe.ca
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 47f725ee7b
parent c571feca2d
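The shape of the change, as a minimal standalone sketch: the struct names mirror the kernel ones but are trimmed to the fields relevant here, and the helper umem_get_device() plus the device name "mlx5_0" are hypothetical, added only for illustration.

/* Simplified model of the refactoring; not the full kernel definitions. */
#include <stdio.h>

struct ib_device {
	const char *name;
};

struct ib_ucontext {
	struct ib_device *device;
};

/*
 * Before the patch, struct ib_umem carried a struct ib_ucontext pointer
 * that was only ever dereferenced to reach the ib_device.  After the
 * patch it stores the ib_device directly.
 */
struct ib_umem {
	struct ib_device *ibdev;	/* was: struct ib_ucontext *context; */
	size_t length;
	unsigned long address;
};

/*
 * Hypothetical helper: callers that used to follow umem->context->device
 * now read umem->ibdev in one step.
 */
static struct ib_device *umem_get_device(const struct ib_umem *umem)
{
	return umem->ibdev;
}

int main(void)
{
	struct ib_device dev = { .name = "mlx5_0" };
	struct ib_umem umem = { .ibdev = &dev, .length = 4096, .address = 0x1000 };

	printf("umem of %zu bytes at 0x%lx maps through device %s\n",
	       umem.length, umem.address, umem_get_device(&umem)->name);
	return 0;
}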
drivers/infiniband/core/umem.c
@@ -234,7 +234,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
 	if (!umem)
 		return ERR_PTR(-ENOMEM);
-	umem->context = context;
+	umem->ibdev = context->device;
 	umem->length = size;
 	umem->address = addr;
 	umem->writable = ib_access_writable(access);
@@ -337,7 +337,7 @@ void ib_umem_release(struct ib_umem *umem)
 	if (umem->is_odp)
 		return ib_umem_odp_release(to_ib_umem_odp(umem));
 
-	__ib_umem_release(umem->context->device, umem, 1);
+	__ib_umem_release(umem->ibdev, umem, 1);
 
 	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
 	mmdrop(umem->owning_mm);
drivers/infiniband/core/umem_odp.c
@@ -96,7 +96,7 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
 		 */
 		ib_umem_notifier_start_account(umem_odp);
 		complete_all(&umem_odp->notifier_completion);
-		umem_odp->umem.context->device->ops.invalidate_range(
+		umem_odp->umem.ibdev->ops.invalidate_range(
 			umem_odp, ib_umem_start(umem_odp),
 			ib_umem_end(umem_odp));
 	}
@@ -109,7 +109,7 @@ static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
 					     u64 start, u64 end, void *cookie)
 {
 	ib_umem_notifier_start_account(item);
-	item->umem.context->device->ops.invalidate_range(item, start, end);
+	item->umem.ibdev->ops.invalidate_range(item, start, end);
 	return 0;
 }
 
@@ -312,7 +312,7 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
 	if (!umem_odp)
 		return ERR_PTR(-ENOMEM);
 	umem = &umem_odp->umem;
-	umem->context = context;
+	umem->ibdev = context->device;
 	umem->writable = ib_access_writable(access);
 	umem->owning_mm = current->mm;
 	umem_odp->is_implicit_odp = 1;
@@ -354,7 +354,7 @@ struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root,
 	if (!odp_data)
 		return ERR_PTR(-ENOMEM);
 	umem = &odp_data->umem;
-	umem->context = root->umem.context;
+	umem->ibdev = root->umem.ibdev;
 	umem->length = size;
 	umem->address = addr;
 	umem->writable = root->umem.writable;
@@ -406,7 +406,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
 	if (!umem_odp)
 		return ERR_PTR(-ENOMEM);
 
-	umem_odp->umem.context = context;
+	umem_odp->umem.ibdev = context->device;
 	umem_odp->umem.length = size;
 	umem_odp->umem.address = addr;
 	umem_odp->umem.writable = ib_access_writable(access);
@@ -504,8 +504,7 @@ static int ib_umem_odp_map_dma_single_page(
 		u64 access_mask,
 		unsigned long current_seq)
 {
-	struct ib_ucontext *context = umem_odp->umem.context;
-	struct ib_device *dev = context->device;
+	struct ib_device *dev = umem_odp->umem.ibdev;
 	dma_addr_t dma_addr;
 	int remove_existing_mapping = 0;
 	int ret = 0;
@@ -718,7 +717,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
 {
 	int idx;
 	u64 addr;
-	struct ib_device *dev = umem_odp->umem.context->device;
+	struct ib_device *dev = umem_odp->umem.ibdev;
 
 	virt = max_t(u64, virt, ib_umem_start(umem_odp));
 	bound = min_t(u64, bound, ib_umem_end(umem_odp));
include/rdma/ib_umem.h
@@ -42,7 +42,7 @@ struct ib_ucontext;
 struct ib_umem_odp;
 
 struct ib_umem {
-	struct ib_ucontext *context;
+	struct ib_device *ibdev;
 	struct mm_struct *owning_mm;
 	size_t length;
 	unsigned long address;