RDMA/uverbs: Check ODP in ib_check_mr_access() as well
No reason only one caller checks this. This properly blocks ODP from the
rereg flow if the device does not support ODP.

Link: https://lore.kernel.org/r/20201130075839.278575-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit adac4cb3c1
parent b9653b31d7
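For context, here is roughly what the ib_check_mr_access() helper looks like once the hunks below are applied; this is a reconstruction from the diff, not a quote of the tree. The remote-write/remote-atomic check at the top predates this patch and is only hinted at by the comment visible in the diff context, so its exact form here is an assumption.

/* Reconstructed sketch of ib_check_mr_access() after this patch; the first
 * check is assumed from the comment shown in the diff context and is not
 * touched by this change. */
static inline int ib_check_mr_access(struct ib_device *ib_dev,
                                     unsigned int flags)
{
        /*
         * Local write permission is required if remote write or
         * remote atomic permission is also requested.
         */
        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
            !(flags & IB_ACCESS_LOCAL_WRITE))
                return -EINVAL;

        if (flags & ~IB_ACCESS_SUPPORTED)
                return -EINVAL;

        /* New with this patch: any caller that passes the device now gets the
         * ODP capability check, instead of only ib_uverbs_reg_mr(). */
        if (flags & IB_ACCESS_ON_DEMAND &&
            !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
                return -EINVAL;
        return 0;
}

With the check centralized like this, the rereg path simply calls ib_check_mr_access(mr->device, cmd.access_flags), and the open-coded ODP test in ib_uverbs_reg_mr() can be dropped, as the first two hunks show.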
@@ -709,29 +709,20 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
 	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
 		return -EINVAL;
 
-	ret = ib_check_mr_access(cmd.access_flags);
-	if (ret)
-		return ret;
-
 	uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
+	ret = ib_check_mr_access(ib_dev, cmd.access_flags);
+	if (ret)
+		goto err_free;
+
 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
 	if (!pd) {
 		ret = -EINVAL;
 		goto err_free;
 	}
 
-	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
-		if (!(pd->device->attrs.device_cap_flags &
-		      IB_DEVICE_ON_DEMAND_PAGING)) {
-			pr_debug("ODP support not available\n");
-			ret = -EINVAL;
-			goto err_put;
-		}
-	}
-
 	mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
 					 cmd.access_flags,
 					 &attrs->driver_udata);
@@ -805,7 +796,7 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
 	}
 
 	if (cmd.flags & IB_MR_REREG_ACCESS) {
-		ret = ib_check_mr_access(cmd.access_flags);
+		ret = ib_check_mr_access(mr->device, cmd.access_flags);
 		if (ret)
 			goto put_uobjs;
 	}
@@ -115,7 +115,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
 	if (!(attr.access_flags & IB_ZERO_BASED))
 		return -EINVAL;
 
-	ret = ib_check_mr_access(attr.access_flags);
+	ret = ib_check_mr_access(ib_dev, attr.access_flags);
 	if (ret)
 		return ret;
 
@@ -2068,7 +2068,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
 	if (err)
 		return err;
 
-	err = ib_check_mr_access(access);
+	err = ib_check_mr_access(&dev->ib_dev, access);
 	if (err)
 		return err;
 
@@ -4183,7 +4183,8 @@ struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
 			       struct inode *inode, struct ib_udata *udata);
 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
 
-static inline int ib_check_mr_access(int flags)
+static inline int ib_check_mr_access(struct ib_device *ib_dev,
+				     unsigned int flags)
 {
 	/*
 	 * Local write permission is required if remote write or
@@ -4196,6 +4197,9 @@ static inline int ib_check_mr_access(int flags)
 	if (flags & ~IB_ACCESS_SUPPORTED)
 		return -EINVAL;
 
+	if (flags & IB_ACCESS_ON_DEMAND &&
+	    !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
+		return -EINVAL;
 	return 0;
 }