Mirror of https://github.com/torvalds/linux.git
Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
This series provides low-level updates to the mlx5 driver needed by both the rdma and netdev trees:

1) Termination flow steering table bits and hardware definitions.
2) Introduce the core dump HW access register definitions.
3) Refactor and clean up the VF representor function handlers.
4) Rename the host_params bits to function_changed bits and add support for the eswitch functions change event in the general eswitch case (both legacy and switchdev modes).
5) Fix a potential error pointer dereference in error handling.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
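Note (editor's sketch, not part of the patch): the central refactor in this series replaces per-representor copies of the load/unload callbacks with one shared const ops table per rep type. A minimal illustration of the new registration pattern, with hypothetical my_* callbacks standing in for a driver's real ones:

	/* Callbacks are now shared via a const table; mutable per-rep state
	 * (priv pointer + atomic load state) lives in mlx5_eswitch_rep_data.
	 */
	static const struct mlx5_eswitch_rep_ops my_rep_ops = {
		.load = my_rep_load,		/* REP_REGISTERED -> REP_LOADED */
		.unload = my_rep_unload,	/* REP_LOADED -> REP_REGISTERED */
		.get_proto_dev = my_rep_get_dev,
	};

	void my_register_reps(struct mlx5_core_dev *mdev)
	{
		struct mlx5_eswitch *esw = mdev->priv.eswitch;

		mlx5_eswitch_register_vport_reps(esw, &my_rep_ops, REP_ETH);
	}

The eswitch stores the table in esw->offloads.rep_ops[rep_type] and invokes it on state transitions, as the eswitch_offloads.c hunks below show.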
commit 7fe4d43ecc
@@ -60,7 +60,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	if (!__mlx5_ib_add(ibdev, profile))
 		return -EINVAL;
 
-	rep->rep_if[REP_IB].priv = ibdev;
+	rep->rep_data[REP_IB].priv = ibdev;
 
 	return 0;
 }
@@ -70,13 +70,13 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
 	struct mlx5_ib_dev *dev;
 
-	if (!rep->rep_if[REP_IB].priv ||
+	if (!rep->rep_data[REP_IB].priv ||
 	    rep->vport != MLX5_VPORT_UPLINK)
 		return;
 
 	dev = mlx5_ib_rep_to_dev(rep);
 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
-	rep->rep_if[REP_IB].priv = NULL;
+	rep->rep_data[REP_IB].priv = NULL;
 }
 
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -84,16 +84,17 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return mlx5_ib_rep_to_dev(rep);
 }
 
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+	.load = mlx5_ib_vport_rep_load,
+	.unload = mlx5_ib_vport_rep_unload,
+	.get_proto_dev = mlx5_ib_vport_get_proto_dev,
+};
+
 void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
 
-	rep_if.load = mlx5_ib_vport_rep_load;
-	rep_if.unload = mlx5_ib_vport_rep_unload;
-	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
-
-	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
+	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
 }
 
 void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
@@ -72,6 +72,6 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 static inline
 struct mlx5_ib_dev *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
 {
-	return (struct mlx5_ib_dev *)rep->rep_if[REP_IB].priv;
+	return rep->rep_data[REP_IB].priv;
 }
 #endif /* __MLX5_IB_REP_H__ */
@@ -316,7 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
 	case MLX5_CMD_OP_DEALLOC_MEMIC:
 	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
-	case MLX5_CMD_OP_QUERY_HOST_PARAMS:
+	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 		return MLX5_CMD_STAT_OK;
 
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -628,7 +628,7 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
 	MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
 	MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
-	MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS);
+	MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
 	default: return "unknown command opcode";
 	}
 }
@@ -83,30 +83,3 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
 
 	mlx5_peer_pf_cleanup(dev);
 }
-
-static int mlx5_query_host_params_context(struct mlx5_core_dev *dev,
-					  u32 *out, int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
-
-	MLX5_SET(query_host_params_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_HOST_PARAMS);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{
-	u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
-	int err;
-
-	err = mlx5_query_host_params_context(dev, out, sizeof(out));
-	if (err)
-		return err;
-
-	*num_vf = MLX5_GET(query_host_params_out, out,
-			   host_params_context.host_num_of_vfs);
-	mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf);
-
-	return 0;
-}
@@ -16,7 +16,6 @@ enum {
 bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
 int mlx5_ec_init(struct mlx5_core_dev *dev);
 void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf);
 
 #else /* CONFIG_MLX5_ESWITCH */
 
@@ -24,9 +23,6 @@ static inline bool
 mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; }
 static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {}
-static inline int
-mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{ return -EOPNOTSUPP; }
 
 #endif /* CONFIG_MLX5_ESWITCH */
 
@@ -1769,7 +1769,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	}
 
 	rpriv->netdev = netdev;
-	rep->rep_if[REP_ETH].priv = rpriv;
+	rep->rep_data[REP_ETH].priv = rpriv;
 	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
 
 	if (rep->vport == MLX5_VPORT_UPLINK) {
@@ -1843,16 +1843,17 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return rpriv->netdev;
 }
 
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+	.load = mlx5e_vport_rep_load,
+	.unload = mlx5e_vport_rep_unload,
+	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
+};
+
 void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
 
-	rep_if.load = mlx5e_vport_rep_load;
-	rep_if.unload = mlx5e_vport_rep_unload;
-	rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
-
-	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH);
+	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
 }
 
 void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
@@ -91,7 +91,7 @@ struct mlx5e_rep_priv {
 static inline
 struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
 {
-	return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+	return rep->rep_data[REP_ETH].priv;
 }
 
 struct mlx5e_neigh {
@@ -533,8 +533,9 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
 	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
 
-	if (mlx5_core_is_ecpf_esw_manager(dev))
-		async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE);
+	if (mlx5_eswitch_is_funcs_handler(dev))
+		async_event_mask |=
+			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
 
 	return async_event_mask;
 }
@@ -1686,13 +1686,41 @@ static int eswitch_vport_event(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
+static int query_esw_functions(struct mlx5_core_dev *dev,
+			       u32 *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {0};
+
+	MLX5_SET(query_esw_functions_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u16 *num_vfs)
+{
+	u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {0};
+	int err;
+
+	err = query_esw_functions(dev, out, sizeof(out));
+	if (err)
+		return err;
+
+	*num_vfs = MLX5_GET(query_esw_functions_out, out,
+			    host_params_context.host_num_of_vfs);
+	esw_debug(dev, "host_num_of_vfs=%d\n", *num_vfs);
+
+	return 0;
+}
+
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
 
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
-	int vf_nvports = 0, total_nvports = 0;
 	struct mlx5_vport *vport;
+	int total_nvports = 0;
+	u16 vf_nvports = 0;
 	int err;
 	int i, enabled_events;
 
@@ -1712,7 +1740,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 
 	if (mode == SRIOV_OFFLOADS) {
 		if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-			err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports);
+			err = mlx5_esw_query_functions(esw->dev, &vf_nvports);
 			if (err)
 				return err;
 			total_nvports = esw->total_vports;
@@ -173,6 +173,7 @@ struct mlx5_esw_offload {
 	struct mutex peer_mutex;
 	DECLARE_HASHTABLE(encap_tbl, 8);
 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
 	u8 inline_mode;
 	u64 num_flows;
 	u8 encap;
@@ -190,7 +191,7 @@ struct mlx5_host_work {
 	struct mlx5_eswitch *esw;
 };
 
-struct mlx5_host_info {
+struct mlx5_esw_functions {
 	struct mlx5_nb nb;
 	u16 num_vfs;
 };
@@ -219,7 +220,7 @@ struct mlx5_eswitch {
 	int mode;
 	int nvports;
 	u16 manager_vport;
-	struct mlx5_host_info host_info;
+	struct mlx5_esw_functions esw_funcs;
 };
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw);
@@ -386,6 +387,8 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
 			       struct mlx5_core_dev *dev1);
 
+int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u16 *num_vfs);
+
 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
 
 #define esw_info(__dev, format, ...) \
@@ -404,6 +407,18 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
 		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
 }
 
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
+{
+	/* Ideally device should have the functions changed supported
+	 * capability regardless of it being ECPF or PF wherever such
+	 * event should be processed such as on eswitch manager device.
+	 * However, some ECPF based device might not have this capability
+	 * set. Hence OR for ECPF check to cover such device.
+	 */
+	return MLX5_CAP_ESW(dev, esw_functions_changed) ||
+	       mlx5_core_is_ecpf_esw_manager(dev);
+}
+
 static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
 {
 	/* Uplink always locate at the last element of the array.*/
@@ -498,6 +513,7 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
 static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 
 #define FDB_MAX_CHAIN 1
 #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
@@ -41,7 +41,6 @@
 #include "en.h"
 #include "fs_core.h"
 #include "lib/devcom.h"
-#include "ecpf.h"
 #include "lib/eq.h"
 
 /* There are two match-all miss flows, one for unicast dst mac and
@@ -333,7 +332,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
 	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
 		rep = &esw->offloads.vport_reps[vf_vport];
-		if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
+		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
 			continue;
 
 		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -1278,7 +1277,7 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
 		ether_addr_copy(rep->hw_id, hw_id);
 
 		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
-			atomic_set(&rep->rep_if[rep_type].state,
+			atomic_set(&rep->rep_data[rep_type].state,
 				   REP_UNREGISTERED);
 	}
 
@@ -1288,9 +1287,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
 				      struct mlx5_eswitch_rep *rep, u8 rep_type)
 {
-	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
 			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
-		rep->rep_if[rep_type].unload(rep);
+		esw->offloads.rep_ops[rep_type]->unload(rep);
 }
 
 static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -1351,11 +1350,11 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
 {
 	int err = 0;
 
-	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
 			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
-		err = rep->rep_if[rep_type].load(esw->dev, rep);
+		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
 		if (err)
-			atomic_set(&rep->rep_if[rep_type].state,
+			atomic_set(&rep->rep_data[rep_type].state,
 				   REP_REGISTERED);
 	}
 
@@ -1784,57 +1783,79 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 	esw_prio_tag_acls_cleanup(esw);
 }
 
-static void esw_host_params_event_handler(struct work_struct *work)
+static void esw_functions_changed_event_handler(struct work_struct *work)
 {
 	struct mlx5_host_work *host_work;
 	struct mlx5_eswitch *esw;
-	int err, num_vf = 0;
+	u16 num_vfs = 0;
+	int err;
 
 	host_work = container_of(work, struct mlx5_host_work, work);
 	esw = host_work->esw;
 
-	err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
-	if (err || num_vf == esw->host_info.num_vfs)
+	err = mlx5_esw_query_functions(esw->dev, &num_vfs);
+	if (err || num_vfs == esw->esw_funcs.num_vfs)
 		goto out;
 
 	/* Number of VFs can only change from "0 to x" or "x to 0". */
-	if (esw->host_info.num_vfs > 0) {
-		esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
+	if (esw->esw_funcs.num_vfs > 0) {
+		esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
 	} else {
-		err = esw_offloads_load_vf_reps(esw, num_vf);
+		err = esw_offloads_load_vf_reps(esw, num_vfs);
 
 		if (err)
 			goto out;
 	}
 
-	esw->host_info.num_vfs = num_vf;
+	esw->esw_funcs.num_vfs = num_vfs;
 
 out:
 	kfree(host_work);
 }
 
-static int esw_host_params_event(struct notifier_block *nb,
-				 unsigned long type, void *data)
+static int esw_functions_changed_event(struct notifier_block *nb,
+				       unsigned long type, void *data)
 {
+	struct mlx5_esw_functions *esw_funcs;
 	struct mlx5_host_work *host_work;
-	struct mlx5_host_info *host_info;
 	struct mlx5_eswitch *esw;
 
 	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
 	if (!host_work)
 		return NOTIFY_DONE;
 
-	host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
-	esw = container_of(host_info, struct mlx5_eswitch, host_info);
+	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
+	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
 
 	host_work->esw = esw;
 
-	INIT_WORK(&host_work->work, esw_host_params_event_handler);
+	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
 	queue_work(esw->work_queue, &host_work->work);
 
 	return NOTIFY_OK;
 }
 
+static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
+					     u16 vf_nvports)
+{
+	if (!mlx5_eswitch_is_funcs_handler(esw->dev))
+		return;
+
+	MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
+		     ESW_FUNCTIONS_CHANGED);
+	mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
+	esw->esw_funcs.num_vfs = vf_nvports;
+}
+
+static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw)
+{
+	if (!mlx5_eswitch_is_funcs_handler(esw->dev))
+		return;
+
+	mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
+	flush_workqueue(esw->work_queue);
+}
+
 int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
 		      int total_nvports)
 {
@@ -1850,12 +1871,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
 
 	esw_offloads_devcom_init(esw);
 
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
-			     HOST_PARAMS_CHANGE);
-		mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
-		esw->host_info.num_vfs = vf_nvports;
-	}
+	esw_functions_changed_event_init(esw, vf_nvports);
 
 	mlx5_rdma_enable_roce(esw->dev);
 
@@ -1889,13 +1905,12 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
 {
 	u16 num_vfs;
 
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
-		flush_workqueue(esw->work_queue);
-		num_vfs = esw->host_info.num_vfs;
-	} else {
-		num_vfs = esw->dev->priv.sriov.num_vfs;
-	}
+	esw_functions_changed_event_cleanup(esw);
+
+	if (mlx5_eswitch_is_funcs_handler(esw->dev))
+		num_vfs = esw->esw_funcs.num_vfs;
+	else
+		num_vfs = esw->dev->priv.sriov.num_vfs;
 
 	mlx5_rdma_disable_roce(esw->dev);
 	esw_offloads_devcom_cleanup(esw);
@@ -2203,21 +2218,17 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 }
 
 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
-				      struct mlx5_eswitch_rep_if *__rep_if,
+				      const struct mlx5_eswitch_rep_ops *ops,
 				      u8 rep_type)
 {
-	struct mlx5_eswitch_rep_if *rep_if;
+	struct mlx5_eswitch_rep_data *rep_data;
 	struct mlx5_eswitch_rep *rep;
 	int i;
 
+	esw->offloads.rep_ops[rep_type] = ops;
 	mlx5_esw_for_all_reps(esw, i, rep) {
-		rep_if = &rep->rep_if[rep_type];
-		rep_if->load = __rep_if->load;
-		rep_if->unload = __rep_if->unload;
-		rep_if->get_proto_dev = __rep_if->get_proto_dev;
-		rep_if->priv = __rep_if->priv;
-
-		atomic_set(&rep_if->state, REP_REGISTERED);
+		rep_data = &rep->rep_data[rep_type];
+		atomic_set(&rep_data->state, REP_REGISTERED);
 	}
 }
 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
@@ -2232,7 +2243,7 @@ void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
 	__unload_reps_all_vport(esw, max_vf, rep_type);
 
 	mlx5_esw_for_all_reps(esw, i, rep)
-		atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
+		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
 }
 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
 
@@ -2241,7 +2252,7 @@ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
 	struct mlx5_eswitch_rep *rep;
 
 	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
-	return rep->rep_if[rep_type].priv;
+	return rep->rep_data[rep_type].priv;
 }
 
 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
@@ -2252,9 +2263,9 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
 
 	rep = mlx5_eswitch_get_rep(esw, vport);
 
-	if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
-	    rep->rep_if[rep_type].get_proto_dev)
-		return rep->rep_if[rep_type].get_proto_dev(rep);
+	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
+	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
+		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
 	return NULL;
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
@@ -108,8 +108,8 @@ static const char *eqe_type_str(u8 type)
 		return "MLX5_EVENT_TYPE_STALL_EVENT";
 	case MLX5_EVENT_TYPE_CMD:
 		return "MLX5_EVENT_TYPE_CMD";
-	case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE:
-		return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE";
+	case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
+		return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
 	case MLX5_EVENT_TYPE_PAGE_REQUEST:
 		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
 	case MLX5_EVENT_TYPE_PAGE_FAULT:
@@ -147,6 +147,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 {
 	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
 	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
 	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
 	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
 	struct mlx5_core_dev *dev = ns->dev;
@@ -167,6 +168,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 		 en_decap);
 	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
 		 en_encap);
+	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
+		 term);
 
 	switch (ft->op_mod) {
 	case FS_FT_OP_MOD_NORMAL:
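Note (editor's sketch, not part of the patch): with the termination bit plumbed through as above, a flow-steering user would request a termination table by setting the new flag in the table attributes; a hedged sketch assuming the mlx5_flow_table_attr/mlx5_create_flow_table API of this tree, with ns being an already-acquired flow namespace:

	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ft_attr.max_fte = 1;	/* assumption: size chosen for illustration */
	ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;

	/* fs_core carries ft->flags into mlx5_cmd_create_flow_table(),
	 * which now sets flow_table_context.termination_table for firmware.
	 */
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);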
@@ -106,10 +106,10 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
 
 	return 0;
 
-destroy_flow_table:
-	mlx5_destroy_flow_table(ft);
 destroy_flow_group:
 	mlx5_destroy_flow_group(fg);
+destroy_flow_table:
+	mlx5_destroy_flow_table(ft);
 free:
 	kvfree(spec);
 	kvfree(flow_group_in);
@@ -342,7 +342,7 @@ enum mlx5_event {
 	MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
 	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
 
-	MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe,
+	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
 
 	MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
 
@@ -107,6 +107,7 @@ enum {
 	MLX5_REG_FPGA_CAP = 0x4022,
 	MLX5_REG_FPGA_CTRL = 0x4023,
 	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
+	MLX5_REG_CORE_DUMP = 0x402e,
 	MLX5_REG_PCAP = 0x5001,
 	MLX5_REG_PMTU = 0x5003,
 	MLX5_REG_PTYS = 0x5004,
@@ -29,17 +29,19 @@ enum {
 };
 
 struct mlx5_eswitch_rep;
-struct mlx5_eswitch_rep_if {
-	int (*load)(struct mlx5_core_dev *dev,
-		    struct mlx5_eswitch_rep *rep);
-	void (*unload)(struct mlx5_eswitch_rep *rep);
-	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
-	void *priv;
-	atomic_t state;
+struct mlx5_eswitch_rep_ops {
+	int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
+	void (*unload)(struct mlx5_eswitch_rep *rep);
+	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+};
+
+struct mlx5_eswitch_rep_data {
+	void *priv;
+	atomic_t state;
 };
 
 struct mlx5_eswitch_rep {
-	struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
+	struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES];
 	u16 vport;
 	u8 hw_id[ETH_ALEN];
 	u16 vlan;
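Note (editor's sketch, not part of the patch): after this split, the callbacks are shared per rep type while only priv and the atomic state remain per representor. A consumer retrieves the per-type private data through the exported accessors seen earlier in this diff; esw, vport and the cast target below are assumptions for illustration:

	struct mlx5e_rep_priv *uplink_rpriv;
	struct net_device *ndev;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);	/* rep_data[REP_ETH].priv */
	ndev = mlx5_eswitch_get_proto_dev(esw, vport, REP_ETH);		/* rep_ops[REP_ETH]->get_proto_dev() */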
@@ -47,7 +49,7 @@ struct mlx5_eswitch_rep {
 };
 
 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
-				      struct mlx5_eswitch_rep_if *rep_if,
+				      const struct mlx5_eswitch_rep_ops *ops,
 				      u8 rep_type);
 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
@@ -47,6 +47,7 @@ enum {
 enum {
 	MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
 	MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
+	MLX5_FLOW_TABLE_TERMINATION = BIT(2),
 };
 
 #define LEFTOVERS_RULE_NUM 2
@@ -155,7 +155,7 @@ enum {
 	MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
 	MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
 	MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
-	MLX5_CMD_OP_QUERY_HOST_PARAMS = 0x740,
+	MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740,
 	MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
 	MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
 	MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
@@ -382,7 +382,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8 reformat_and_modify_action[0x1];
 	u8 reserved_at_15[0x2];
 	u8 table_miss_action_domain[0x1];
-	u8 reserved_at_18[0x8];
+	u8 termination_table[0x1];
+	u8 reserved_at_19[0x7];
 	u8 reserved_at_20[0x2];
 	u8 log_max_ft_size[0x6];
 	u8 log_max_modify_header_context[0x8];
@@ -664,7 +665,9 @@ struct mlx5_ifc_e_switch_cap_bits {
 	u8 vport_svlan_insert[0x1];
 	u8 vport_cvlan_insert_if_not_exist[0x1];
 	u8 vport_cvlan_insert_overwrite[0x1];
-	u8 reserved_at_5[0x16];
+	u8 reserved_at_5[0x14];
+	u8 esw_functions_changed[0x1];
+	u8 reserved_at_1a[0x1];
 	u8 ecpf_vport_exists[0x1];
 	u8 counter_eswitch_affinity[0x1];
 	u8 merged_eswitch[0x1];
@@ -715,7 +718,9 @@ struct mlx5_ifc_qos_cap_bits {
 };
 
 struct mlx5_ifc_debug_cap_bits {
-	u8 reserved_at_0[0x20];
+	u8 core_dump_general[0x1];
+	u8 core_dump_qp[0x1];
+	u8 reserved_at_2[0x1e];
 
 	u8 reserved_at_20[0x2];
 	u8 stall_detect[0x1];
@@ -2531,6 +2536,7 @@ union mlx5_ifc_hca_cap_union_bits {
 	struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
 	struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
 	struct mlx5_ifc_qos_cap_bits qos_cap;
+	struct mlx5_ifc_debug_cap_bits debug_cap;
 	struct mlx5_ifc_fpga_cap_bits fpga_cap;
 	u8 reserved_at_0[0x8000];
 };
@@ -7236,7 +7242,8 @@ struct mlx5_ifc_create_flow_table_out_bits {
 struct mlx5_ifc_flow_table_context_bits {
 	u8 reformat_en[0x1];
 	u8 decap_en[0x1];
-	u8 reserved_at_2[0x2];
+	u8 reserved_at_2[0x1];
+	u8 termination_table[0x1];
 	u8 table_miss_action[0x4];
 	u8 level[0x8];
 	u8 reserved_at_10[0x8];
@@ -8546,6 +8553,18 @@ struct mlx5_ifc_qcam_reg_bits {
 	u8 reserved_at_1c0[0x80];
 };
 
+struct mlx5_ifc_core_dump_reg_bits {
+	u8 reserved_at_0[0x18];
+	u8 core_dump_type[0x8];
+
+	u8 reserved_at_20[0x30];
+	u8 vhca_id[0x10];
+
+	u8 reserved_at_60[0x8];
+	u8 qpn[0x18];
+	u8 reserved_at_80[0x180];
+};
+
 struct mlx5_ifc_pcap_reg_bits {
 	u8 reserved_at_0[0x8];
 	u8 local_port[0x8];
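Note (editor's sketch, not part of the patch): this series only adds the MLX5_REG_CORE_DUMP register id and its mlx5_ifc_core_dump_reg_bits layout; nothing in the diff actually issues the access. A hedged sketch of how a later user of these definitions might trigger a dump through the existing generic register interface, assuming mlx5_core_access_reg() and that core_dump_type 0 means a general dump (see the core_dump_general capability bit above):

	u32 in[MLX5_ST_SZ_DW(core_dump_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(core_dump_reg)];

	/* assumption: type 0 = general core dump */
	MLX5_SET(core_dump_reg, in, core_dump_type, 0);

	return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				    MLX5_REG_CORE_DUMP, 0, 1);	/* arg=0, write */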
@@ -9704,7 +9723,7 @@ struct mlx5_ifc_host_params_context_bits {
 	u8 reserved_at_80[0x180];
 };
 
-struct mlx5_ifc_query_host_params_in_bits {
+struct mlx5_ifc_query_esw_functions_in_bits {
 	u8 opcode[0x10];
 	u8 reserved_at_10[0x10];
 
@@ -9714,7 +9733,7 @@ struct mlx5_ifc_query_host_params_in_bits {
 	u8 reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_query_host_params_out_bits {
+struct mlx5_ifc_query_esw_functions_out_bits {
 	u8 status[0x8];
 	u8 reserved_at_8[0x18];
 