mirror of
https://github.com/torvalds/linux.git
synced 2025-01-01 07:42:07 +00:00
a50243b1dd
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
"This has been a slightly more active cycle than normal with ongoing core
changes and quite a lot of collected driver updates.

 - Various driver fixes for bnxt_re, cxgb4, hns, mlx5, pvrdma, rxe
 - A new data transfer mode for HFI1 giving higher performance
 - Significant functional and bug fix update to the mlx5
   On-Demand-Paging MR feature
 - A chip hang reset recovery system for hns
 - Change mm->pinned_vm to an atomic64
 - Update bnxt_re to support a new 57500 chip
 - A sane netlink 'rdma link add' method for creating rxe devices and
   fixing the various unregistration race conditions in rxe's
   unregister flow
 - Allow looking up objects by an ID over netlink
 - Various reworking of the core to driver interface:
   - drivers should not assume umem SGLs are in PAGE_SIZE chunks
   - ucontext is accessed via udata, not other means
   - start to make the core code responsible for object memory
     allocation
   - drivers should convert struct device to struct ib_device via a
     helper
   - drivers have more tools to avoid use-after-unregister problems"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (280 commits)
  net/mlx5: ODP support for XRC transport is not enabled by default in FW
  IB/hfi1: Close race condition on user context disable and close
  RDMA/umem: Revert broken 'off by one' fix
  RDMA/umem: minor bug fix in error handling path
  RDMA/hns: Use GFP_ATOMIC in hns_roce_v2_modify_qp
  cxgb4: kfree mhp after the debug print
  IB/rdmavt: Fix concurrency panics in QP post_send and modify to error
  IB/rdmavt: Fix loopback send with invalidate ordering
  IB/iser: Fix dma_nents type definition
  IB/mlx5: Set correct write permissions for implicit ODP MR
  bnxt_re: Clean cq for kernel consumers only
  RDMA/uverbs: Don't do double free of allocated PD
  RDMA: Handle ucontext allocations by IB/core
  RDMA/core: Fix a WARN() message
  bnxt_re: fix the regression due to changes in alloc_pbl
  IB/mlx4: Increase the timeout for CM cache
  IB/core: Abort page fault handler silently during owning process exit
  IB/mlx5: Validate correct PD before prefetch MR
  IB/mlx5: Protect against prefetch of invalid MR
  RDMA/uverbs: Store PR pointer before it is overwritten
  ...
162 lines
4.0 KiB
C
162 lines
4.0 KiB
C
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"

/*
 * Stage table used to build an IB device instance for a VF e-switch
 * representor.  Each STAGE_CREATE() entry pairs an init callback with its
 * matching cleanup callback; NULL means the stage needs no work in that
 * direction.  __mlx5_ib_add()/__mlx5_ib_remove() walk this profile to
 * bring the device up and tear it down (see mlx5_ib_vport_rep_load/unload
 * below).  Rep-specific stages (flow_db, non_default_cb, roce) use the
 * *_rep_* variants instead of the regular PF callbacks.
 */
static const struct mlx5_ib_profile vf_rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_rep_flow_db_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	/* UMR resources are torn down before IB unregistration ... */
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	/* ... and set up only after IB registration succeeded. */
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
};

static int
|
|
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
|
|
{
|
|
const struct mlx5_ib_profile *profile;
|
|
struct mlx5_ib_dev *ibdev;
|
|
|
|
if (rep->vport == MLX5_VPORT_UPLINK)
|
|
profile = &uplink_rep_profile;
|
|
else
|
|
profile = &vf_rep_profile;
|
|
|
|
ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
|
|
if (!ibdev)
|
|
return -ENOMEM;
|
|
|
|
ibdev->rep = rep;
|
|
ibdev->mdev = dev;
|
|
ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
|
|
MLX5_CAP_GEN(dev, num_vhca_ports));
|
|
if (!__mlx5_ib_add(ibdev, profile)) {
|
|
ib_dealloc_device(&ibdev->ib_dev);
|
|
return -EINVAL;
|
|
}
|
|
|
|
rep->rep_if[REP_IB].priv = ibdev;
|
|
|
|
return 0;
|
|
}
|
|
|
|
static void
|
|
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
|
|
{
|
|
struct mlx5_ib_dev *dev;
|
|
|
|
if (!rep->rep_if[REP_IB].priv)
|
|
return;
|
|
|
|
dev = mlx5_ib_rep_to_dev(rep);
|
|
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
|
|
rep->rep_if[REP_IB].priv = NULL;
|
|
ib_dealloc_device(&dev->ib_dev);
|
|
}
|
|
|
|
/*
 * e-switch .get_proto_dev callback for REP_IB: expose the mlx5_ib_dev
 * stored in the rep's REP_IB private pointer as an opaque protocol device.
 */
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *ibdev = mlx5_ib_rep_to_dev(rep);

	return ibdev;
}

void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
|
|
{
|
|
struct mlx5_eswitch *esw = mdev->priv.eswitch;
|
|
struct mlx5_eswitch_rep_if rep_if = {};
|
|
|
|
rep_if.load = mlx5_ib_vport_rep_load;
|
|
rep_if.unload = mlx5_ib_vport_rep_unload;
|
|
rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
|
|
|
|
mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
|
|
}
|
|
|
|
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
|
|
{
|
|
struct mlx5_eswitch *esw = mdev->priv.eswitch;
|
|
|
|
mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
|
|
}
|
|
|
|
/* Thin IB-side wrapper returning the e-switch's current mode. */
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
	u8 mode = mlx5_eswitch_mode(esw);

	return mode;
}

struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
|
|
int vport_index)
|
|
{
|
|
return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
|
|
}
|
|
|
|
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
|
|
int vport_index)
|
|
{
|
|
return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
|
|
}
|
|
|
|
struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
|
|
{
|
|
return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
|
|
}
|
|
|
|
/* Thin wrapper fetching the e-switch representor object for @vport. */
struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
{
	struct mlx5_eswitch_rep *rep = mlx5_eswitch_vport_rep(esw, vport);

	return rep;
}

int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
|
|
struct mlx5_ib_sq *sq)
|
|
{
|
|
struct mlx5_flow_handle *flow_rule;
|
|
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
|
|
|
|
if (!dev->rep)
|
|
return 0;
|
|
|
|
flow_rule =
|
|
mlx5_eswitch_add_send_to_vport_rule(esw,
|
|
dev->rep->vport,
|
|
sq->base.mqp.qpn);
|
|
if (IS_ERR(flow_rule))
|
|
return PTR_ERR(flow_rule);
|
|
sq->flow_rule = flow_rule;
|
|
|
|
return 0;
|
|
}
|