RDMA: Split kernel-only global device caps from uverbs device caps

Split out flags from ib_device::device_cap_flags that are only used
internally to the kernel into kernel_cap_flags that is not part of the
uapi. This limits the device_cap_flags to being the same bitmap that will
be copied to userspace.

This cleanly splits out the uverbs flags from the kernel flags to avoid
confusion in the flags bitmap.

Add some short comments describing what each of the kernel flags is
connected to. Remove unused kernel flags.

Link: https://lore.kernel.org/r/0-v2-22c19e565eef+139a-kern_caps_jgg@nvidia.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
Jason Gunthorpe 2022-04-04 12:26:42 -03:00
parent 22cbc6c268
commit e945c653c8
33 changed files with 100 additions and 116 deletions

View File

@ -1739,7 +1739,7 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!device) if (!device)
return -EINVAL; return -EINVAL;
if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) { if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) {
ib_device_put(device); ib_device_put(device);
return -EINVAL; return -EINVAL;
} }

View File

@ -337,8 +337,7 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext,
resp->hw_ver = attr->hw_ver; resp->hw_ver = attr->hw_ver;
resp->max_qp = attr->max_qp; resp->max_qp = attr->max_qp;
resp->max_qp_wr = attr->max_qp_wr; resp->max_qp_wr = attr->max_qp_wr;
resp->device_cap_flags = lower_32_bits(attr->device_cap_flags & resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
IB_UVERBS_DEVICE_CAP_FLAGS_MASK);
resp->max_sge = min(attr->max_send_sge, attr->max_recv_sge); resp->max_sge = min(attr->max_send_sge, attr->max_recv_sge);
resp->max_sge_rd = attr->max_sge_rd; resp->max_sge_rd = attr->max_sge_rd;
resp->max_cq = attr->max_cq; resp->max_cq = attr->max_cq;
@ -3619,8 +3618,7 @@ static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
resp.timestamp_mask = attr.timestamp_mask; resp.timestamp_mask = attr.timestamp_mask;
resp.hca_core_clock = attr.hca_core_clock; resp.hca_core_clock = attr.hca_core_clock;
resp.device_cap_flags_ex = attr.device_cap_flags & resp.device_cap_flags_ex = attr.device_cap_flags;
IB_UVERBS_DEVICE_CAP_FLAGS_MASK;
resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts; resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
resp.rss_caps.max_rwq_indirection_tables = resp.rss_caps.max_rwq_indirection_tables =
attr.rss_caps.max_rwq_indirection_tables; attr.rss_caps.max_rwq_indirection_tables;

View File

@ -281,7 +281,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
} }
rdma_restrack_add(&pd->res); rdma_restrack_add(&pd->res);
if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)
pd->local_dma_lkey = device->local_dma_lkey; pd->local_dma_lkey = device->local_dma_lkey;
else else
mr_access_flags |= IB_ACCESS_LOCAL_WRITE; mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
@ -308,7 +308,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
pd->__internal_mr = mr; pd->__internal_mr = mr;
if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY))
pd->local_dma_lkey = pd->__internal_mr->lkey; pd->local_dma_lkey = pd->__internal_mr->lkey;
if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@ -2131,8 +2131,8 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_mr *mr; struct ib_mr *mr;
if (access_flags & IB_ACCESS_ON_DEMAND) { if (access_flags & IB_ACCESS_ON_DEMAND) {
if (!(pd->device->attrs.device_cap_flags & if (!(pd->device->attrs.kernel_cap_flags &
IB_DEVICE_ON_DEMAND_PAGING)) { IBK_ON_DEMAND_PAGING)) {
pr_debug("ODP support not available\n"); pr_debug("ODP support not available\n");
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }

View File

@ -146,13 +146,13 @@ int bnxt_re_query_device(struct ib_device *ibdev,
| IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_RC_RNR_NAK_GEN
| IB_DEVICE_SHUTDOWN_PORT | IB_DEVICE_SHUTDOWN_PORT
| IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_SYS_IMAGE_GUID
| IB_DEVICE_LOCAL_DMA_LKEY
| IB_DEVICE_RESIZE_MAX_WR | IB_DEVICE_RESIZE_MAX_WR
| IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_PORT_ACTIVE_EVENT
| IB_DEVICE_N_NOTIFY_CQ | IB_DEVICE_N_NOTIFY_CQ
| IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_WINDOW
| IB_DEVICE_MEM_WINDOW_TYPE_2B | IB_DEVICE_MEM_WINDOW_TYPE_2B
| IB_DEVICE_MEM_MGT_EXTENSIONS; | IB_DEVICE_MEM_MGT_EXTENSIONS;
ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
ib_attr->max_send_sge = dev_attr->max_qp_sges; ib_attr->max_send_sge = dev_attr->max_qp_sges;
ib_attr->max_recv_sge = dev_attr->max_qp_sges; ib_attr->max_recv_sge = dev_attr->max_qp_sges;
ib_attr->max_sge_rd = dev_attr->max_qp_sges; ib_attr->max_sge_rd = dev_attr->max_qp_sges;

View File

@ -314,7 +314,6 @@ enum db_state {
struct c4iw_dev { struct c4iw_dev {
struct ib_device ibdev; struct ib_device ibdev;
struct c4iw_rdev rdev; struct c4iw_rdev rdev;
u32 device_cap_flags;
struct xarray cqs; struct xarray cqs;
struct xarray qps; struct xarray qps;
struct xarray mrs; struct xarray mrs;

View File

@ -269,7 +269,10 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
dev->rdev.lldi.ports[0]->dev_addr); dev->rdev.lldi.ports[0]->dev_addr);
props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type); props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers; props->fw_ver = dev->rdev.lldi.fw_vers;
props->device_cap_flags = dev->device_cap_flags; props->device_cap_flags = IB_DEVICE_MEM_WINDOW;
props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
if (fastreg_support)
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
props->page_size_cap = T4_PAGESIZE_MASK; props->page_size_cap = T4_PAGESIZE_MASK;
props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor; props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device; props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
@ -529,9 +532,6 @@ void c4iw_register_device(struct work_struct *work)
pr_debug("c4iw_dev %p\n", dev); pr_debug("c4iw_dev %p\n", dev);
addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
dev->rdev.lldi.ports[0]->dev_addr); dev->rdev.lldi.ports[0]->dev_addr);
dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
if (fastreg_support)
dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
dev->ibdev.local_dma_lkey = 0; dev->ibdev.local_dma_lkey = 0;
dev->ibdev.node_type = RDMA_NODE_RNIC; dev->ibdev.node_type = RDMA_NODE_RNIC;
BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX); BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);

View File

@ -1300,8 +1300,8 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_MEM_MGT_EXTENSIONS;
IB_DEVICE_RDMA_NETDEV_OPA; rdi->dparms.props.kernel_cap_flags = IBK_RDMA_NETDEV_OPA;
rdi->dparms.props.page_size_cap = PAGE_SIZE; rdi->dparms.props.page_size_cap = PAGE_SIZE;
rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3; rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
rdi->dparms.props.vendor_part_id = dd->pcidev->device; rdi->dparms.props.vendor_part_id = dd->pcidev->device;

View File

@ -1827,10 +1827,6 @@ int irdma_rt_init_hw(struct irdma_device *iwdev,
rf->rsrc_created = true; rf->rsrc_created = true;
} }
iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_WINDOW |
IB_DEVICE_MEM_MGT_EXTENSIONS;
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
irdma_alloc_set_mac(iwdev); irdma_alloc_set_mac(iwdev);
irdma_add_ip(iwdev); irdma_add_ip(iwdev);

View File

@ -338,7 +338,6 @@ struct irdma_device {
u32 roce_ackcreds; u32 roce_ackcreds;
u32 vendor_id; u32 vendor_id;
u32 vendor_part_id; u32 vendor_part_id;
u32 device_cap_flags;
u32 push_mode; u32 push_mode;
u32 rcv_wnd; u32 rcv_wnd;
u16 mac_ip_table_idx; u16 mac_ip_table_idx;

View File

@ -25,7 +25,9 @@ static int irdma_query_device(struct ib_device *ibdev,
iwdev->netdev->dev_addr); iwdev->netdev->dev_addr);
props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
irdma_fw_minor_ver(&rf->sc_dev); irdma_fw_minor_ver(&rf->sc_dev);
props->device_cap_flags = iwdev->device_cap_flags; props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
IB_DEVICE_MEM_MGT_EXTENSIONS;
props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
props->vendor_id = pcidev->vendor; props->vendor_id = pcidev->vendor;
props->vendor_part_id = pcidev->device; props->vendor_part_id = pcidev->device;

View File

@ -479,8 +479,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_RC_RNR_NAK_GEN;
IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR) if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@ -494,9 +494,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
if (dev->dev->caps.max_gso_sz && if (dev->dev->caps.max_gso_sz &&
(dev->dev->rev_id != MLX4_IB_CARD_REV_A0) && (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)) (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
props->device_cap_flags |= IB_DEVICE_UD_TSO; props->kernel_cap_flags |= IBK_UD_TSO;
if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY;
if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) && if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
(dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) && (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
(dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR)) (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))

View File

@ -855,13 +855,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
IB_DEVICE_MEM_WINDOW_TYPE_2B; IB_DEVICE_MEM_WINDOW_TYPE_2B;
props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
/* We support 'Gappy' memory registration too */ /* We support 'Gappy' memory registration too */
props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG; props->kernel_cap_flags |= IBK_SG_GAPS_REG;
} }
/* IB_WR_REG_MR always requires changing the entity size with UMR */ /* IB_WR_REG_MR always requires changing the entity size with UMR */
if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
if (MLX5_CAP_GEN(mdev, sho)) { if (MLX5_CAP_GEN(mdev, sho)) {
props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER; props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER;
/* At this stage no support for signature handover */ /* At this stage no support for signature handover */
props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
IB_PROT_T10DIF_TYPE_2 | IB_PROT_T10DIF_TYPE_2 |
@ -870,7 +870,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
IB_GUARD_T10DIF_CSUM; IB_GUARD_T10DIF_CSUM;
} }
if (MLX5_CAP_GEN(mdev, block_lb_mc)) if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) { if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
if (MLX5_CAP_ETH(mdev, csum_cap)) { if (MLX5_CAP_ETH(mdev, csum_cap)) {
@ -921,7 +921,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) { if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
props->device_cap_flags |= IB_DEVICE_UD_TSO; props->kernel_cap_flags |= IBK_UD_TSO;
} }
if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) && if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
@ -997,7 +997,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
props->odp_caps = dev->odp_caps; props->odp_caps = dev->odp_caps;
if (!uhw) { if (!uhw) {
/* ODP for kernel QPs is not implemented for receive /* ODP for kernel QPs is not implemented for receive
@ -1018,11 +1018,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
} }
} }
if (MLX5_CAP_GEN(mdev, cd))
props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
if (mlx5_core_is_vf(mdev)) if (mlx5_core_is_vf(mdev))
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;
if (mlx5_ib_port_link_layer(ibdev, 1) == if (mlx5_ib_port_link_layer(ibdev, 1) ==
IB_LINK_LAYER_ETHERNET && raw_support) { IB_LINK_LAYER_ETHERNET && raw_support) {

View File

@ -90,8 +90,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_SHUTDOWN_PORT | IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_MGT_EXTENSIONS; IB_DEVICE_MEM_MGT_EXTENSIONS;
attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
attr->max_send_sge = dev->attr.max_send_sge; attr->max_send_sge = dev->attr.max_send_sge;
attr->max_recv_sge = dev->attr.max_recv_sge; attr->max_recv_sge = dev->attr.max_recv_sge;
attr->max_sge_rd = dev->attr.max_rdma_sge; attr->max_sge_rd = dev->attr.max_rdma_sge;

View File

@ -134,7 +134,8 @@ int qedr_query_device(struct ib_device *ibdev,
attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe); attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS; IB_DEVICE_MEM_MGT_EXTENSIONS;
attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
if (!rdma_protocol_iwarp(&dev->ibdev, 1)) if (!rdma_protocol_iwarp(&dev->ibdev, 1))
attr->device_cap_flags |= IB_DEVICE_XRC; attr->device_cap_flags |= IB_DEVICE_XRC;

View File

@ -305,7 +305,8 @@ int usnic_ib_query_device(struct ib_device *ibdev,
props->max_qp = qp_per_vf * props->max_qp = qp_per_vf *
kref_read(&us_ibdev->vf_cnt); kref_read(&us_ibdev->vf_cnt);
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; IB_DEVICE_SYS_IMAGE_GUID;
props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK;
props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] * props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
kref_read(&us_ibdev->vf_cnt); kref_read(&us_ibdev->vf_cnt);
props->max_pd = USNIC_UIOM_MAX_PD_CNT; props->max_pd = USNIC_UIOM_MAX_PD_CNT;

View File

@ -46,6 +46,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_qp = RXE_MAX_QP; rxe->attr.max_qp = RXE_MAX_QP;
rxe->attr.max_qp_wr = RXE_MAX_QP_WR; rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS; rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
rxe->attr.kernel_cap_flags = IBK_ALLOW_USER_UNREG;
rxe->attr.max_send_sge = RXE_MAX_SGE; rxe->attr.max_send_sge = RXE_MAX_SGE;
rxe->attr.max_recv_sge = RXE_MAX_SGE; rxe->attr.max_recv_sge = RXE_MAX_SGE;
rxe->attr.max_sge_rd = RXE_MAX_SGE_RD; rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;

View File

@ -50,7 +50,6 @@ enum rxe_device_param {
| IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_RC_RNR_NAK_GEN
| IB_DEVICE_SRQ_RESIZE | IB_DEVICE_SRQ_RESIZE
| IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_MEM_MGT_EXTENSIONS
| IB_DEVICE_ALLOW_USER_UNREG
| IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_WINDOW
| IB_DEVICE_MEM_WINDOW_TYPE_2A | IB_DEVICE_MEM_WINDOW_TYPE_2A
| IB_DEVICE_MEM_WINDOW_TYPE_2B, | IB_DEVICE_MEM_WINDOW_TYPE_2B,

View File

@ -132,8 +132,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
/* Revisit atomic caps if RFC 7306 gets supported */ /* Revisit atomic caps if RFC 7306 gets supported */
attr->atomic_cap = 0; attr->atomic_cap = 0;
attr->device_cap_flags = attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_ALLOW_USER_UNREG; attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG;
attr->max_cq = sdev->attrs.max_cq; attr->max_cq = sdev->attrs.max_cq;
attr->max_cqe = sdev->attrs.max_cqe; attr->max_cqe = sdev->attrs.max_cqe;
attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL; attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;

View File

@ -411,6 +411,7 @@ struct ipoib_dev_priv {
struct dentry *path_dentry; struct dentry *path_dentry;
#endif #endif
u64 hca_caps; u64 hca_caps;
u64 kernel_caps;
struct ipoib_ethtool_st ethtool; struct ipoib_ethtool_st ethtool;
unsigned int max_send_sge; unsigned int max_send_sge;
const struct net_device_ops *rn_ops; const struct net_device_ops *rn_ops;

View File

@ -1850,11 +1850,12 @@ static void ipoib_parent_unregister_pre(struct net_device *ndev)
static void ipoib_set_dev_features(struct ipoib_dev_priv *priv) static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
{ {
priv->hca_caps = priv->ca->attrs.device_cap_flags; priv->hca_caps = priv->ca->attrs.device_cap_flags;
priv->kernel_caps = priv->ca->attrs.kernel_cap_flags;
if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
if (priv->hca_caps & IB_DEVICE_UD_TSO) if (priv->kernel_caps & IBK_UD_TSO)
priv->dev->hw_features |= NETIF_F_TSO; priv->dev->hw_features |= NETIF_F_TSO;
priv->dev->features |= priv->dev->hw_features; priv->dev->features |= priv->dev->hw_features;
@ -2201,7 +2202,7 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
priv->rn_ops = dev->netdev_ops; priv->rn_ops = dev->netdev_ops;
if (hca->attrs.device_cap_flags & IB_DEVICE_VIRTUAL_FUNCTION) if (hca->attrs.kernel_cap_flags & IBK_VIRTUAL_FUNCTION)
dev->netdev_ops = &ipoib_netdev_ops_vf; dev->netdev_ops = &ipoib_netdev_ops_vf;
else else
dev->netdev_ops = &ipoib_netdev_ops_pf; dev->netdev_ops = &ipoib_netdev_ops_pf;

View File

@ -197,16 +197,16 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
init_attr.send_cq = priv->send_cq; init_attr.send_cq = priv->send_cq;
init_attr.recv_cq = priv->recv_cq; init_attr.recv_cq = priv->recv_cq;
if (priv->hca_caps & IB_DEVICE_UD_TSO) if (priv->kernel_caps & IBK_UD_TSO)
init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO; init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK) if (priv->kernel_caps & IBK_BLOCK_MULTICAST_LOOPBACK)
init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING) if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP; init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
if (priv->hca_caps & IB_DEVICE_RDMA_NETDEV_OPA) if (priv->kernel_caps & IBK_RDMA_NETDEV_OPA)
init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE; init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
priv->qp = ib_create_qp(priv->pd, &init_attr); priv->qp = ib_create_qp(priv->pd, &init_attr);

View File

@ -650,7 +650,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
SHOST_DIX_GUARD_CRC); SHOST_DIX_GUARD_CRC);
} }
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) if (!(ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
shost->virt_boundary_mask = SZ_4K - 1; shost->virt_boundary_mask = SZ_4K - 1;
if (iscsi_host_add(shost, ib_dev->dev.parent)) { if (iscsi_host_add(shost, ib_dev->dev.parent)) {

View File

@ -115,7 +115,7 @@ iser_create_fastreg_desc(struct iser_device *device,
if (!desc) if (!desc)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
mr_type = IB_MR_TYPE_SG_GAPS; mr_type = IB_MR_TYPE_SG_GAPS;
else else
mr_type = IB_MR_TYPE_MEM_REG; mr_type = IB_MR_TYPE_MEM_REG;
@ -517,7 +517,7 @@ static void iser_calc_scsi_params(struct iser_conn *iser_conn,
* (head and tail) for a single page worth data, so one additional * (head and tail) for a single page worth data, so one additional
* entry is required. * entry is required.
*/ */
if (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG) if (attr->kernel_cap_flags & IBK_SG_GAPS_REG)
reserved_mr_pages = 0; reserved_mr_pages = 0;
else else
reserved_mr_pages = 1; reserved_mr_pages = 1;
@ -562,8 +562,8 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
/* connection T10-PI support */ /* connection T10-PI support */
if (iser_pi_enable) { if (iser_pi_enable) {
if (!(device->ib_device->attrs.device_cap_flags & if (!(device->ib_device->attrs.kernel_cap_flags &
IB_DEVICE_INTEGRITY_HANDOVER)) { IBK_INTEGRITY_HANDOVER)) {
iser_warn("T10-PI requested but not supported on %s, " iser_warn("T10-PI requested but not supported on %s, "
"continue without T10-PI\n", "continue without T10-PI\n",
dev_name(&ib_conn->device->ib_device->dev)); dev_name(&ib_conn->device->ib_device->dev));

View File

@ -230,7 +230,7 @@ isert_create_device_ib_res(struct isert_device *device)
} }
/* Check signature cap */ /* Check signature cap */
if (ib_dev->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER) if (ib_dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)
device->pi_capable = true; device->pi_capable = true;
else else
device->pi_capable = false; device->pi_capable = false;

View File

@ -430,7 +430,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
spin_lock_init(&pool->lock); spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->free_list); INIT_LIST_HEAD(&pool->free_list);
if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
mr_type = IB_MR_TYPE_SG_GAPS; mr_type = IB_MR_TYPE_SG_GAPS;
else else
mr_type = IB_MR_TYPE_MEM_REG; mr_type = IB_MR_TYPE_MEM_REG;
@ -3650,7 +3650,7 @@ static ssize_t add_target_store(struct device *dev,
target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
target_host->max_segment_size = ib_dma_max_seg_size(ibdev); target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
target_host->virt_boundary_mask = ~srp_dev->mr_page_mask; target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
target = host_to_target(target_host); target = host_to_target(target_host);
@ -3706,8 +3706,8 @@ static ssize_t add_target_store(struct device *dev,
} }
if (srp_dev->use_fast_reg) { if (srp_dev->use_fast_reg) {
bool gaps_reg = (ibdev->attrs.device_cap_flags & bool gaps_reg = ibdev->attrs.kernel_cap_flags &
IB_DEVICE_SG_GAPS_REG); IBK_SG_GAPS_REG;
max_sectors_per_mr = srp_dev->max_pages_per_mr << max_sectors_per_mr = srp_dev->max_pages_per_mr <<
(ilog2(srp_dev->mr_page_size) - 9); (ilog2(srp_dev->mr_page_size) - 9);

View File

@ -867,8 +867,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev); ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
/* T10-PI support */ /* T10-PI support */
if (ctrl->device->dev->attrs.device_cap_flags & if (ctrl->device->dev->attrs.kernel_cap_flags &
IB_DEVICE_INTEGRITY_HANDOVER) IBK_INTEGRITY_HANDOVER)
pi_capable = true; pi_capable = true;
ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev, ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,

View File

@ -1221,8 +1221,8 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
ndev->inline_data_size = nport->inline_data_size; ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count; ndev->inline_page_count = inline_page_count;
if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags & if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
IB_DEVICE_INTEGRITY_HANDOVER)) { IBK_INTEGRITY_HANDOVER)) {
pr_warn("T10-PI is not supported by device %s. Disabling it\n", pr_warn("T10-PI is not supported by device %s. Disabling it\n",
cm_id->device->name); cm_id->device->name);
nport->pi_enable = false; nport->pi_enable = false;

View File

@ -649,7 +649,7 @@ static int smbd_ia_open(
smbd_max_frmr_depth, smbd_max_frmr_depth,
info->id->device->attrs.max_fast_reg_page_list_len); info->id->device->attrs.max_fast_reg_page_list_len);
info->mr_type = IB_MR_TYPE_MEM_REG; info->mr_type = IB_MR_TYPE_MEM_REG;
if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
info->mr_type = IB_MR_TYPE_SG_GAPS; info->mr_type = IB_MR_TYPE_SG_GAPS;
info->pd = ib_alloc_pd(info->id->device, 0); info->pd = ib_alloc_pd(info->id->device, 0);

View File

@ -236,14 +236,6 @@ enum ib_device_cap_flags {
IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE, IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ, IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
/*
* This device supports a per-device lkey or stag that can be
* used without performing a memory registration for the local
* memory. Note that ULPs should never check this flag, but
* instead of use the local_dma_lkey flag in the ib_pd structure,
* which will always contain a usable lkey.
*/
IB_DEVICE_LOCAL_DMA_LKEY = 1 << 15,
/* Reserved, old SEND_W_INV = 1 << 16,*/ /* Reserved, old SEND_W_INV = 1 << 16,*/
IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW, IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
/* /*
@ -254,7 +246,6 @@ enum ib_device_cap_flags {
* IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
*/ */
IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM, IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
IB_DEVICE_UD_TSO = 1 << 19,
IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC, IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
/* /*
@ -267,59 +258,53 @@ enum ib_device_cap_flags {
* stag. * stag.
*/ */
IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS, IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = 1 << 22,
IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A, IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B, IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM, IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */ /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM, IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
/*
* Devices should set IB_DEVICE_CROSS_CHANNEL if they
* support execution of WQEs that involve synchronization
* of I/O operations with single completion queue managed
* by hardware.
*/
IB_DEVICE_CROSS_CHANNEL = 1 << 27,
IB_DEVICE_MANAGED_FLOW_STEERING = IB_DEVICE_MANAGED_FLOW_STEERING =
IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING, IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
IB_DEVICE_INTEGRITY_HANDOVER = 1 << 30,
IB_DEVICE_ON_DEMAND_PAGING = 1ULL << 31,
IB_DEVICE_SG_GAPS_REG = 1ULL << 32,
IB_DEVICE_VIRTUAL_FUNCTION = 1ULL << 33,
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */ /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS, IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
IB_DEVICE_RDMA_NETDEV_OPA = 1ULL << 35,
/* The device supports padding incoming writes to cacheline. */ /* The device supports padding incoming writes to cacheline. */
IB_DEVICE_PCI_WRITE_END_PADDING = IB_DEVICE_PCI_WRITE_END_PADDING =
IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING, IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
IB_DEVICE_ALLOW_USER_UNREG = 1ULL << 37,
}; };
#define IB_UVERBS_DEVICE_CAP_FLAGS_MASK (IB_UVERBS_DEVICE_RESIZE_MAX_WR | \ enum ib_kernel_cap_flags {
IB_UVERBS_DEVICE_BAD_PKEY_CNTR | \ /*
IB_UVERBS_DEVICE_BAD_QKEY_CNTR | \ * This device supports a per-device lkey or stag that can be
IB_UVERBS_DEVICE_RAW_MULTI | \ * used without performing a memory registration for the local
IB_UVERBS_DEVICE_AUTO_PATH_MIG | \ * memory. Note that ULPs should never check this flag, but
IB_UVERBS_DEVICE_CHANGE_PHY_PORT | \ * instead use the local_dma_lkey flag in the ib_pd structure,
IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE | \ * which will always contain a usable lkey.
IB_UVERBS_DEVICE_CURR_QP_STATE_MOD | \ */
IB_UVERBS_DEVICE_SHUTDOWN_PORT | \ IBK_LOCAL_DMA_LKEY = 1 << 0,
IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT | \ /* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
IB_UVERBS_DEVICE_SYS_IMAGE_GUID | \ IBK_INTEGRITY_HANDOVER = 1 << 1,
IB_UVERBS_DEVICE_RC_RNR_NAK_GEN | \ /* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
IB_UVERBS_DEVICE_SRQ_RESIZE | \ IBK_ON_DEMAND_PAGING = 1 << 2,
IB_UVERBS_DEVICE_N_NOTIFY_CQ | \ /* IB_MR_TYPE_SG_GAPS is supported */
IB_UVERBS_DEVICE_MEM_WINDOW | \ IBK_SG_GAPS_REG = 1 << 3,
IB_UVERBS_DEVICE_UD_IP_CSUM | \ /* Driver supports RDMA_NLDEV_CMD_DELLINK */
IB_UVERBS_DEVICE_XRC | \ IBK_ALLOW_USER_UNREG = 1 << 4,
IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS | \
IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A | \ /* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B | \ IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
IB_UVERBS_DEVICE_RC_IP_CSUM | \ /* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
IB_UVERBS_DEVICE_RAW_IP_CSUM | \ IBK_UD_TSO = 1 << 6,
IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING | \ /* ipoib will use the device ops:
IB_UVERBS_DEVICE_RAW_SCATTER_FCS | \ * get_vf_config
IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING) * get_vf_guid
* get_vf_stats
* set_vf_guid
* set_vf_link_state
*/
IBK_VIRTUAL_FUNCTION = 1 << 7,
/* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
IBK_RDMA_NETDEV_OPA = 1 << 8,
};
enum ib_atomic_cap { enum ib_atomic_cap {
IB_ATOMIC_NONE, IB_ATOMIC_NONE,
@ -417,6 +402,7 @@ struct ib_device_attr {
int max_qp; int max_qp;
int max_qp_wr; int max_qp_wr;
u64 device_cap_flags; u64 device_cap_flags;
u64 kernel_cap_flags;
int max_send_sge; int max_send_sge;
int max_recv_sge; int max_recv_sge;
int max_sge_rd; int max_sge_rd;
@ -4344,7 +4330,7 @@ static inline int ib_check_mr_access(struct ib_device *ib_dev,
return -EINVAL; return -EINVAL;
if (flags & IB_ACCESS_ON_DEMAND && if (flags & IB_ACCESS_ON_DEMAND &&
!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)) !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
return -EINVAL; return -EINVAL;
return 0; return 0;
} }

View File

@ -90,8 +90,7 @@ struct opa_vnic_stats {
static inline bool rdma_cap_opa_vnic(struct ib_device *device) static inline bool rdma_cap_opa_vnic(struct ib_device *device)
{ {
return !!(device->attrs.device_cap_flags & return !!(device->attrs.kernel_cap_flags & IBK_RDMA_NETDEV_OPA);
IB_DEVICE_RDMA_NETDEV_OPA);
} }
#endif /* _OPA_VNIC_H */ #endif /* _OPA_VNIC_H */

View File

@ -1298,6 +1298,10 @@ struct ib_uverbs_ex_modify_cq {
#define IB_DEVICE_NAME_MAX 64 #define IB_DEVICE_NAME_MAX 64
/*
* bits 9, 15, 16, 19, 22, 27, 30, 31, 32, 33, 35 and 37 may be set by old
* kernels and should not be used.
*/
enum ib_uverbs_device_cap_flags { enum ib_uverbs_device_cap_flags {
IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1 << 0, IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1 << 0,
IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 1 << 1, IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 1 << 1,

View File

@ -154,8 +154,8 @@ static int rds_ib_add_one(struct ib_device *device)
rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE); rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
rds_ibdev->odp_capable = rds_ibdev->odp_capable =
!!(device->attrs.device_cap_flags & !!(device->attrs.kernel_cap_flags &
IB_DEVICE_ON_DEMAND_PAGING) && IBK_ON_DEMAND_PAGING) &&
!!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps & !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &
IB_ODP_SUPPORT_WRITE) && IB_ODP_SUPPORT_WRITE) &&
!!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps & !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &

View File

@ -195,7 +195,7 @@ int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
ep->re_attr.cap.max_recv_sge = 1; ep->re_attr.cap.max_recv_sge = 1;
ep->re_mrtype = IB_MR_TYPE_MEM_REG; ep->re_mrtype = IB_MR_TYPE_MEM_REG;
if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG) if (attrs->kernel_cap_flags & IBK_SG_GAPS_REG)
ep->re_mrtype = IB_MR_TYPE_SG_GAPS; ep->re_mrtype = IB_MR_TYPE_SG_GAPS;
/* Quirk: Some devices advertise a large max_fast_reg_page_list_len /* Quirk: Some devices advertise a large max_fast_reg_page_list_len