RDMA/irdma: Validate udata inlen and outlen
Currently ib_copy_from_udata() and ib_copy_to_udata() may underfill the
request and response buffers if user-space passes an undersized value
for udata->inlen or udata->outlen, respectively [1]. This can lead to
undesirable behavior; zero-initializing the buffers only goes as far as
preventing use of uninitialized memory.

Validate the udata->inlen and udata->outlen passed from user-space to
ensure they are at least the required minimum size.
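
Each verb derives its minimum from the corresponding uapi struct with
offsetofend() and rejects undersized buffers before any copy takes
place. A minimal sketch of the check, using the
irdma_alloc_ucontext_req struct from the diff below:

	#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN \
		offsetofend(struct irdma_alloc_ucontext_req, rsvd8)

	/* Fail early rather than copy from an undersized buffer. */
	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN)
		return -EINVAL;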
[1] https://lore.kernel.org/linux-rdma/MWHPR11MB0029F37D40D9D4A993F8F549E9D79@MWHPR11MB0029.namprd11.prod.outlook.com/
Fixes: b48c24c2d7 ("RDMA/irdma: Implement device supported verb APIs")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Link: https://lore.kernel.org/r/20220907191324.1173-3-shiraz.saleem@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
commit 34acb833cc
parent 7f51a961f8
@@ -296,13 +296,19 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp)
 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
 				struct ib_udata *udata)
 {
+#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
+#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
 	struct ib_device *ibdev = uctx->device;
 	struct irdma_device *iwdev = to_iwdev(ibdev);
-	struct irdma_alloc_ucontext_req req;
+	struct irdma_alloc_ucontext_req req = {};
 	struct irdma_alloc_ucontext_resp uresp = {};
 	struct irdma_ucontext *ucontext = to_ucontext(uctx);
 	struct irdma_uk_attrs *uk_attrs;
 
+	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+		return -EINVAL;
+
 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
 		return -EINVAL;
 
@@ -314,7 +320,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
 
 	uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
 	/* GEN_1 legacy support with libi40iw */
-	if (udata->outlen < sizeof(uresp)) {
+	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
 		if (uk_attrs->hw_rev != IRDMA_GEN_1)
 			return -EOPNOTSUPP;
 
@@ -386,6 +392,7 @@ static void irdma_dealloc_ucontext(struct ib_ucontext *context)
  */
 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
+#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
 	struct irdma_pd *iwpd = to_iwpd(pd);
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -395,6 +402,9 @@ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 	u32 pd_id = 0;
 	int err;
 
+	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
+		return -EINVAL;
+
 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
 			       &rf->next_pd);
 	if (err)
@@ -811,12 +821,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 			   struct ib_qp_init_attr *init_attr,
 			   struct ib_udata *udata)
 {
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
 	struct ib_pd *ibpd = ibqp->pd;
 	struct irdma_pd *iwpd = to_iwpd(ibpd);
 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
 	struct irdma_pci_f *rf = iwdev->rf;
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
-	struct irdma_create_qp_req req;
+	struct irdma_create_qp_req req = {};
 	struct irdma_create_qp_resp uresp = {};
 	u32 qp_num = 0;
 	int err_code;
@@ -833,6 +845,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 	if (err_code)
 		return err_code;
 
+	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+		return -EINVAL;
+
 	sq_size = init_attr->cap.max_send_wr;
 	rq_size = init_attr->cap.max_recv_wr;
 
@@ -1117,6 +1133,8 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			 int attr_mask, struct ib_udata *udata)
 {
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
 	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_device *iwdev = iwqp->iwdev;
@@ -1135,6 +1153,13 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	roce_info = &iwqp->roce_info;
 	udp_info = &iwqp->udp_info;
 
+	if (udata) {
+		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+			return -EINVAL;
+	}
+
 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 		return -EOPNOTSUPP;
 
@@ -1371,7 +1396,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 		if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 			spin_unlock_irqrestore(&iwqp->lock, flags);
-			if (udata) {
+			if (udata && udata->inlen) {
 				if (ib_copy_from_udata(&ureq, udata,
 						       min(sizeof(ureq), udata->inlen)))
 					return -EINVAL;
@@ -1423,7 +1448,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	} else {
 		iwqp->ibqp_state = attr->qp_state;
 	}
-	if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+	if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 		struct irdma_ucontext *ucontext;
 
 		ucontext = rdma_udata_to_drv_context(udata,
@@ -1463,6 +1488,8 @@ exit:
 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		    struct ib_udata *udata)
 {
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_device *iwdev = iwqp->iwdev;
 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -1477,6 +1504,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	int err;
 	unsigned long flags;
 
+	if (udata) {
+		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+			return -EINVAL;
+	}
+
 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 		return -EOPNOTSUPP;
 
@@ -1562,7 +1596,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	case IB_QPS_RESET:
 		if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 			spin_unlock_irqrestore(&iwqp->lock, flags);
-			if (udata) {
+			if (udata && udata->inlen) {
 				if (ib_copy_from_udata(&ureq, udata,
 						       min(sizeof(ureq), udata->inlen)))
 					return -EINVAL;
@@ -1659,7 +1693,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 			}
 		}
 	}
-	if (attr_mask & IB_QP_STATE && udata &&
+	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 		struct irdma_ucontext *ucontext;
 
@@ -1794,6 +1828,7 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 			   struct ib_udata *udata)
 {
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
 	struct irdma_cq *iwcq = to_iwcq(ibcq);
 	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
 	struct irdma_cqp_request *cqp_request;
@@ -1816,6 +1851,9 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 			    IRDMA_FEATURE_CQ_RESIZE))
 		return -EOPNOTSUPP;
 
+	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+		return -EINVAL;
+
 	if (entries > rf->max_cqe)
 		return -EINVAL;
 
@@ -1948,6 +1986,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 			   const struct ib_cq_init_attr *attr,
 			   struct ib_udata *udata)
 {
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
 	struct ib_device *ibdev = ibcq->device;
 	struct irdma_device *iwdev = to_iwdev(ibdev);
 	struct irdma_pci_f *rf = iwdev->rf;
@@ -1966,6 +2006,11 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
 	if (err_code)
 		return err_code;
+
+	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+		return -EINVAL;
+
 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
 				    &rf->next_cq);
 	if (err_code)
@@ -2743,6 +2788,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 				       u64 virt, int access,
 				       struct ib_udata *udata)
 {
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_ucontext *ucontext;
 	struct irdma_pble_alloc *palloc;
@@ -2760,6 +2806,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 		return ERR_PTR(-EINVAL);
 
+	if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+		return ERR_PTR(-EINVAL);
+
 	region = ib_umem_get(pd->device, start, len, access);
 
 	if (IS_ERR(region)) {
@@ -4291,12 +4340,16 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
 				struct rdma_ah_init_attr *attr,
 				struct ib_udata *udata)
 {
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
 	struct irdma_create_ah_resp uresp;
 	struct irdma_ah *parent_ah;
 	int err;
 
+	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+		return -EINVAL;
+
 	err = irdma_setup_ah(ibah, attr);
 	if (err)
 		return err;