Merge mlx5-next into rdma for-next
From git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Required for dependencies in the next patches.

Resolved the conflicts:
 - esw_destroy_offloads_acl_tables() use the newer
   mlx5_esw_for_all_vports() version
 - esw_offloads_steering_init() drop the cap test
 - esw_offloads_init() drop the extra function arguments

* branch 'mlx5-next': (39 commits)
  net/mlx5: Expose device definitions for object events
  net/mlx5: Report EQE data upon CQ completion
  net/mlx5: Report a CQ error event only when a handler was set
  net/mlx5: mlx5_core_create_cq() enhancements
  net/mlx5: Expose the API to register for ANY event
  net/mlx5: Use event mask based on device capabilities
  net/mlx5: Fix mlx5_core_destroy_cq() error flow
  net/mlx5: E-Switch, Handle UC address change in switchdev mode
  net/mlx5: E-Switch, Consider host PF for inline mode and vlan pop
  net/mlx5: E-Switch, Use iterator for vlan and min-inline setups
  net/mlx5: E-Switch, Reg/unreg function changed event at correct stage
  net/mlx5: E-Switch, Consolidate eswitch function number of VFs
  net/mlx5: E-Switch, Refactor eswitch SR-IOV interface
  net/mlx5: Handle host PF vport mac/guid for ECPF
  net/mlx5: E-Switch, Use correct flags when configuring vlan
  net/mlx5: Reduce dependency on enabled_vfs counter and num_vfs
  net/mlx5: Don't handle VF func change if host PF is disabled
  net/mlx5: Limit scope of mlx5_get_next_phys_dev() to PCI PF devices
  net/mlx5: Move pci status reg access mutex to mlx5_pci_init
  net/mlx5: Rename mlx5_pci_dev_type to mlx5_coredev_type
  ...

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 69ea0582f3
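Most of the churn below comes from one mlx5-next series: priv.mkey_table changes from an rwlock-protected radix tree to an XArray. As a reading aid, here is a minimal, self-contained sketch of the three XArray calls the converted code relies on; the xarray and key names are illustrative stand-ins, not the driver's:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_mkeys);	/* stand-in for mdev->priv.mkey_table */

/* Lookup: xa_load() is safe under RCU, so no external read_lock() is needed. */
static void *example_lookup(unsigned long base_key)
{
	return xa_load(&example_mkeys, base_key);
}

/* Insert: xa_store() returns the previous entry or an errno encoded in the
 * returned pointer; xa_err() turns that into 0 or -errno. */
static int example_insert(unsigned long base_key, void *mkey)
{
	return xa_err(xa_store(&example_mkeys, base_key, mkey, GFP_KERNEL));
}

/* Erase replaces write_lock_irqsave() + radix_tree_delete() + unlock. */
static void example_erase(unsigned long base_key)
{
	xa_erase(&example_mkeys, base_key);
}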
@@ -37,7 +37,7 @@
 #include "mlx5_ib.h"
 #include "srq.h"
 
-static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
 {
 	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
 
@@ -522,9 +522,9 @@ repoll:
 	case MLX5_CQE_SIG_ERR:
 		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
 
-		read_lock(&dev->mdev->priv.mkey_table.lock);
-		mmkey = __mlx5_mr_lookup(dev->mdev,
+		xa_lock(&dev->mdev->priv.mkey_table);
+		mmkey = xa_load(&dev->mdev->priv.mkey_table,
 					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
 		mr = to_mibmr(mmkey);
 		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
 		mr->sig->sig_err_exists = true;
@@ -537,7 +537,7 @@ repoll:
 			     mr->sig->err_item.expected,
 			     mr->sig->err_item.actual);
 
-		read_unlock(&dev->mdev->priv.mkey_table.lock);
+		xa_unlock(&dev->mdev->priv.mkey_table);
 		goto repoll;
 	}
 
@@ -892,6 +892,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	int vector = attr->comp_vector;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
+	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 	int uninitialized_var(index);
 	int uninitialized_var(inlen);
 	u32 *cqb = NULL;
@@ -954,7 +955,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
 		MLX5_SET(cqc, cqc, oi, 1);
 
-	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
+	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
 	if (err)
 		goto err_cqb;
 
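mlx5_core_create_cq() now takes the CREATE_CQ output mailbox from its caller instead of hiding it on the stack, so callers can read response fields themselves. A hedged sketch of the new calling convention (variable names are illustrative):

u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};
int err;

err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
if (!err)
	/* the response is now visible to the caller, e.g. the assigned CQN */
	mlx5_core_dbg(mdev, "created cqn %d\n", MLX5_GET(create_cq_out, out, cqn));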
@@ -1043,13 +1043,10 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
 				     struct mlx5_ib_dev *dev,
 				     void *in, void *out)
 {
-	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
 	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
-	unsigned long flags;
 	struct mlx5_core_mkey *mkey;
 	void *mkc;
 	u8 key;
-	int err;
 
 	mkey = &devx_mr->mmkey;
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -1062,11 +1059,8 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
 	mkey->pd = MLX5_GET(mkc, mkc, pd);
 	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
 
-	write_lock_irqsave(&table->lock, flags);
-	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key),
-				mkey);
-	write_unlock_irqrestore(&table->lock, flags);
-	return err;
+	return xa_err(xa_store(&dev->mdev->priv.mkey_table,
+			       mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
 }
 
 static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1117,12 +1111,8 @@ static void devx_free_indirect_mkey(struct rcu_head *rcu)
  */
 static void devx_cleanup_mkey(struct devx_obj *obj)
 {
-	struct mlx5_mkey_table *table = &obj->mdev->priv.mkey_table;
-	unsigned long flags;
-
-	write_lock_irqsave(&table->lock, flags);
-	radix_tree_delete(&table->tree, mlx5_base_mkey(obj->devx_mr.mmkey.key));
-	write_unlock_irqrestore(&table->lock, flags);
+	xa_erase(&obj->mdev->priv.mkey_table,
+		 mlx5_base_mkey(obj->devx_mr.mmkey.key));
 }
 
 static int devx_obj_cleanup(struct ib_uobject *uobject,
@@ -65,11 +65,12 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
 static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
 	struct uverbs_attr_bundle *attrs)
 {
-	struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+	struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
 	struct mlx5_ib_flow_handler *flow_handler;
 	struct mlx5_ib_flow_matcher *fs_matcher;
 	struct ib_uobject **arr_flow_actions;
 	struct ib_uflow_resources *uflow_res;
+	struct mlx5_flow_act flow_act = {};
 	void *devx_obj;
 	int dest_id, dest_type;
 	void *cmd_in;
@@ -172,17 +173,19 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
 					   arr_flow_actions[i]->object);
 	}
 
-	ret = uverbs_copy_from(&flow_act.flow_tag, attrs,
+	ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
 			       MLX5_IB_ATTR_CREATE_FLOW_TAG);
 	if (!ret) {
-		if (flow_act.flow_tag >= BIT(24)) {
+		if (flow_context.flow_tag >= BIT(24)) {
 			ret = -EINVAL;
 			goto err_out;
 		}
-		flow_act.flags |= FLOW_ACT_HAS_TAG;
+		flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
 	}
 
-	flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, &flow_act,
+	flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher,
+					       &flow_context,
+					       &flow_act,
 					       counter_id,
 					       cmd_in, inlen,
 					       dest_id, dest_type);
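The theme of these hunks: the flow tag describes the packet/flow context, not the action, so it moves from struct mlx5_flow_act into struct mlx5_flow_context (embedded in struct mlx5_flow_spec). A minimal sketch of the new rule-creation pattern; ft, dst and my_tag are hypothetical:

struct mlx5_flow_act flow_act = {};	/* no .flow_tag initializer any more */
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;

spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
	return -ENOMEM;

spec->flow_context.flow_tag = my_tag;			/* was flow_act.flow_tag */
spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;	/* was FLOW_ACT_HAS_TAG */

rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);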
@@ -14,9 +14,10 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	int vport_index;
 
 	ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);
-	vport_index = ibdev->free_port++;
+	vport_index = rep->vport_index;
 
 	ibdev->port[vport_index].rep = rep;
+	rep->rep_data[REP_IB].priv = ibdev;
 	write_lock(&ibdev->port[vport_index].roce.netdev_lock);
 	ibdev->port[vport_index].roce.netdev =
 		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -50,7 +51,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	}
 
 	ibdev->is_rep = true;
-	vport_index = ibdev->free_port++;
+	vport_index = rep->vport_index;
 	ibdev->port[vport_index].rep = rep;
 	ibdev->port[vport_index].roce.netdev =
 		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -68,15 +69,18 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 static void
 mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5_ib_dev *dev;
+	struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+	struct mlx5_ib_port *port;
 
-	if (!rep->rep_data[REP_IB].priv ||
-	    rep->vport != MLX5_VPORT_UPLINK)
-		return;
-
-	dev = mlx5_ib_rep_to_dev(rep);
-	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+	port = &dev->port[rep->vport_index];
+	write_lock(&port->roce.netdev_lock);
+	port->roce.netdev = NULL;
+	write_unlock(&port->roce.netdev_lock);
 	rep->rep_data[REP_IB].priv = NULL;
+	port->rep = NULL;
 
+	if (rep->vport == MLX5_VPORT_UPLINK)
+		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 }
 
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -28,7 +28,7 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 #else /* CONFIG_MLX5_ESWITCH */
 static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
 {
-	return SRIOV_NONE;
+	return MLX5_ESWITCH_NONE;
 }
 
 static inline
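The SRIOV_* eswitch mode constants were renamed in mlx5-next; every hunk below that touches esw->mode follows this mechanical mapping:

/* old name        -> new name */
SRIOV_NONE      -> MLX5_ESWITCH_NONE
SRIOV_LEGACY    -> MLX5_ESWITCH_LEGACY
SRIOV_OFFLOADS  -> MLX5_ESWITCH_OFFLOADS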
@@ -2669,11 +2669,15 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
 	}
 }
 
-static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
-			   u32 *match_v, const union ib_flow_spec *ib_spec,
+static int parse_flow_attr(struct mlx5_core_dev *mdev,
+			   struct mlx5_flow_spec *spec,
+			   const union ib_flow_spec *ib_spec,
 			   const struct ib_flow_attr *flow_attr,
 			   struct mlx5_flow_act *action, u32 prev_type)
 {
+	struct mlx5_flow_context *flow_context = &spec->flow_context;
+	u32 *match_c = spec->match_criteria;
+	u32 *match_v = spec->match_value;
 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
 					   misc_parameters);
 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
@@ -2992,8 +2996,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
 		if (ib_spec->flow_tag.tag_id >= BIT(24))
 			return -EINVAL;
 
-		action->flow_tag = ib_spec->flow_tag.tag_id;
-		action->flags |= FLOW_ACT_HAS_TAG;
+		flow_context->flow_tag = ib_spec->flow_tag.tag_id;
+		flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
 		break;
 	case IB_FLOW_SPEC_ACTION_DROP:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
@@ -3087,7 +3091,8 @@ is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
 		return VALID_SPEC_NA;
 
 	return is_crypto && is_ipsec &&
-	       (!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
+	       (!egress || (!is_drop &&
+			    !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
 	       VALID_SPEC_VALID : VALID_SPEC_INVALID;
 }
 
@@ -3470,6 +3475,37 @@ free:
 	return ret;
 }
 
+static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
+					 struct mlx5_flow_spec *spec,
+					 struct mlx5_eswitch_rep *rep)
+{
+	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+	void *misc;
+
+	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				    misc_parameters_2);
+
+		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+			 mlx5_eswitch_get_vport_metadata_for_match(esw,
+								   rep->vport));
+		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				    misc_parameters_2);
+
+		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+	} else {
+		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				    misc_parameters);
+
+		MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
+
+		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				    misc_parameters);
+
+		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+	}
+}
+
 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 						      struct mlx5_ib_flow_prio *ft_prio,
 						      const struct ib_flow_attr *flow_attr,
@@ -3479,7 +3515,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 {
 	struct mlx5_flow_table *ft = ft_prio->flow_table;
 	struct mlx5_ib_flow_handler *handler;
-	struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_spec *spec;
 	struct mlx5_flow_destination dest_arr[2] = {};
 	struct mlx5_flow_destination *rule_dst = dest_arr;
@@ -3510,8 +3546,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 	}
 
 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
-		err = parse_flow_attr(dev->mdev, spec->match_criteria,
-				      spec->match_value,
+		err = parse_flow_attr(dev->mdev, spec,
 				      ib_flow, flow_attr, &flow_act,
 				      prev_type);
 		if (err < 0)
@@ -3525,19 +3560,15 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 		set_underlay_qp(dev, spec, underlay_qpn);
 
 	if (dev->is_rep) {
-		void *misc;
+		struct mlx5_eswitch_rep *rep;
 
-		if (!dev->port[flow_attr->port - 1].rep) {
+		rep = dev->port[flow_attr->port - 1].rep;
+		if (!rep) {
 			err = -EINVAL;
 			goto free;
 		}
-		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    misc_parameters);
-		MLX5_SET(fte_match_set_misc, misc, source_port,
-			 dev->port[flow_attr->port - 1].rep->vport);
-		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				    misc_parameters);
-		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+		mlx5_ib_set_rule_source_port(dev, spec, rep);
 	}
 
 	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
@@ -3578,11 +3609,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 			MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
 	}
 
-	if ((flow_act.flags & FLOW_ACT_HAS_TAG) &&
+	if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
 	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
 		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
-			     flow_act.flow_tag, flow_attr->type);
+			     spec->flow_context.flow_tag, flow_attr->type);
 		err = -EINVAL;
 		goto free;
 	}
@@ -3962,6 +3993,7 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
 		      struct mlx5_ib_flow_prio *ft_prio,
 		      struct mlx5_flow_destination *dst,
 		      struct mlx5_ib_flow_matcher *fs_matcher,
+		      struct mlx5_flow_context *flow_context,
 		      struct mlx5_flow_act *flow_act,
 		      void *cmd_in, int inlen,
 		      int dst_num)
@@ -3984,6 +4016,7 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
 	memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
 	       fs_matcher->mask_len);
 	spec->match_criteria_enable = fs_matcher->match_criteria_enable;
+	spec->flow_context = *flow_context;
 
 	handler->rule = mlx5_add_flow_rules(ft, spec,
 					    flow_act, dst, dst_num);
@@ -4048,6 +4081,7 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
 struct mlx5_ib_flow_handler *
 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
 			struct mlx5_ib_flow_matcher *fs_matcher,
+			struct mlx5_flow_context *flow_context,
 			struct mlx5_flow_act *flow_act,
 			u32 counter_id,
 			void *cmd_in, int inlen, int dest_id,
@@ -4100,7 +4134,8 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
 		dst_num++;
 	}
 
-	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
+	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
+					flow_context, flow_act,
 					cmd_in, inlen, dst_num);
 
 	if (IS_ERR(handler)) {
@@ -4472,7 +4507,7 @@ static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
 	 * lock/unlock above locks Now need to arm all involved CQs.
 	 */
 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
-		mcq->comp(mcq);
+		mcq->comp(mcq, NULL);
 	}
 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
 }
@@ -6802,7 +6837,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	printk_once(KERN_INFO "%s", mlx5_version);
 
 	if (MLX5_ESWITCH_MANAGER(mdev) &&
-	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
 		if (!mlx5_core_mp_enabled(mdev))
 			mlx5_ib_register_vport_reps(mdev);
 		return mdev;
@@ -985,7 +985,6 @@ struct mlx5_ib_dev {
 	u16 devx_whitelist_uid;
 	struct mlx5_srq_table srq_table;
 	struct mlx5_async_ctx async_ctx;
-	int free_port;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1330,6 +1329,7 @@ extern const struct uapi_definition mlx5_ib_devx_defs[];
 extern const struct uapi_definition mlx5_ib_flow_defs[];
 struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
 	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
+	struct mlx5_flow_context *flow_context,
 	struct mlx5_flow_act *flow_act, u32 counter_id,
 	void *cmd_in, int inlen, int dest_id, int dest_type);
 bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
@@ -130,7 +130,7 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u8 key;
 	unsigned long flags;
-	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
+	struct xarray *mkeys = &dev->mdev->priv.mkey_table;
 	int err;
 
 	spin_lock_irqsave(&ent->lock, flags);
@@ -158,12 +158,12 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
 	ent->size++;
 	spin_unlock_irqrestore(&ent->lock, flags);
 
-	write_lock_irqsave(&table->lock, flags);
-	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
-				&mr->mmkey);
+	xa_lock_irqsave(mkeys, flags);
+	err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
+				&mr->mmkey, GFP_ATOMIC));
+	xa_unlock_irqrestore(mkeys, flags);
 	if (err)
 		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
-	write_unlock_irqrestore(&table->lock, flags);
 
 	if (!completion_done(&ent->compl))
 		complete(&ent->compl);
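reg_mr_callback() runs in a context that must not sleep, which is why it takes the XArray's internal spinlock explicitly and uses the locked __xa_store() variant with GFP_ATOMIC rather than plain xa_store(). The idiom, sketched against the same illustrative xarray as above:

unsigned long flags;
int err;

xa_lock_irqsave(&example_mkeys, flags);
/* __xa_store() expects the xa_lock to already be held; GFP_ATOMIC because
 * sleeping is not allowed here */
err = xa_err(__xa_store(&example_mkeys, base_key, mkey, GFP_ATOMIC));
xa_unlock_irqrestore(&example_mkeys, flags);
if (err)
	pr_err("mkey insert failed: %d\n", err);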
@@ -765,7 +765,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 	bcnt -= *bytes_committed;
 
 next_mr:
-	mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
+	mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
 	if (!mkey_is_eq(mmkey, key)) {
 		mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
 		ret = -EFAULT;
@@ -1555,9 +1555,9 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
 	param = (struct mlx5_eq_param) {
 		.irq_index = 0,
-		.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
 		.nent = MLX5_IB_NUM_PF_EQE,
 	};
+	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
 	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
 	if (IS_ERR(eq->core)) {
 		err = PTR_ERR(eq->core);
@@ -1683,8 +1683,8 @@ static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
 	struct mlx5_core_mkey *mmkey;
 	struct mlx5_ib_mr *mr;
 
-	mmkey = __mlx5_mr_lookup(dev->mdev,
+	mmkey = xa_load(&dev->mdev->priv.mkey_table,
 				 mlx5_base_mkey(sg_list[i].lkey));
 	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
 	atomic_dec(&mr->num_pending_prefetch);
 }
@@ -1703,8 +1703,8 @@ static bool num_pending_prefetch_inc(struct ib_pd *pd,
 	struct mlx5_core_mkey *mmkey;
 	struct mlx5_ib_mr *mr;
 
-	mmkey = __mlx5_mr_lookup(dev->mdev,
+	mmkey = xa_load(&dev->mdev->priv.mkey_table,
 				 mlx5_base_mkey(sg_list[i].lkey));
 	if (!mmkey || mmkey->key != sg_list[i].lkey) {
 		ret = false;
 		break;
@@ -6365,7 +6365,7 @@ static void handle_drain_completion(struct ib_cq *cq,
 		/* Run the CQ handler - this makes sure that the drain WR will
 		 * be processed if wasn't processed yet.
 		 */
-		mcq->mcq.comp(&mcq->mcq);
+		mcq->mcq.comp(&mcq->mcq, NULL);
 	}
 
 	wait_for_completion(&sdrain->done);
@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
 	list_for_each_entry_safe(mcq, temp, &ctx->process_list,
 				 tasklet_ctx.list) {
 		list_del_init(&mcq->tasklet_ctx.list);
-		mcq->tasklet_ctx.comp(mcq);
+		mcq->tasklet_ctx.comp(mcq, NULL);
 		mlx5_cq_put(mcq);
 		if (time_after(jiffies, end))
 			break;
@@ -68,7 +68,8 @@ void mlx5_cq_tasklet_cb(unsigned long data)
 		tasklet_schedule(&ctx->task);
 }
 
-static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
+static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
+				   struct mlx5_eqe *eqe)
 {
 	unsigned long flags;
 	struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
@@ -87,11 +88,10 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
 }
 
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			u32 *in, int inlen)
+			u32 *in, int inlen, u32 *out, int outlen)
 {
 	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
 	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
-	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
 	struct mlx5_eq_comp *eq;
 	int err;
@@ -100,9 +100,9 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 	if (IS_ERR(eq))
 		return PTR_ERR(eq);
 
-	memset(out, 0, sizeof(out));
+	memset(out, 0, outlen);
 	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
 	if (err)
 		return err;
 
@@ -158,13 +158,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
 	int err;
 
-	err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
-	if (err)
-		return err;
-
-	err = mlx5_eq_del_cq(&cq->eq->core, cq);
-	if (err)
-		return err;
+	mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
+	mlx5_eq_del_cq(&cq->eq->core, cq);
 
 	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
 	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
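With "Report EQE data upon CQ completion", every mcq->comp() implementation gains the triggering EQE as a second argument, and call sites that re-run a handler by hand (the drain and reset paths above) pass NULL. Handlers must therefore treat the EQE as optional; a sketch:

static void example_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	/* eqe is NULL when invoked from drain/reset paths rather than from a
	 * hardware completion event */
	if (eqe) {
		/* event-specific data may be inspected here */
	}
	/* ... existing completion handling, e.g. scheduling NAPI ... */
}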
@@ -311,13 +311,20 @@ static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-	u32 pci_id = mlx5_gen_pci_id(dev);
 	struct mlx5_core_dev *res = NULL;
 	struct mlx5_core_dev *tmp_dev;
 	struct mlx5_priv *priv;
+	u32 pci_id;
 
+	if (!mlx5_core_is_pf(dev))
+		return NULL;
+
+	pci_id = mlx5_gen_pci_id(dev);
 	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
 		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
+		if (!mlx5_core_is_pf(tmp_dev))
+			continue;
+
 		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
 			res = tmp_dev;
 			break;
|
|||||||
__field(u32, index)
|
__field(u32, index)
|
||||||
__field(u32, action)
|
__field(u32, action)
|
||||||
__field(u32, flow_tag)
|
__field(u32, flow_tag)
|
||||||
|
__field(u32, flow_source)
|
||||||
__field(u8, mask_enable)
|
__field(u8, mask_enable)
|
||||||
__field(int, new_fte)
|
__field(int, new_fte)
|
||||||
__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
|
__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
|
||||||
@ -204,7 +205,8 @@ TRACE_EVENT(mlx5_fs_set_fte,
|
|||||||
__entry->index = fte->index;
|
__entry->index = fte->index;
|
||||||
__entry->action = fte->action.action;
|
__entry->action = fte->action.action;
|
||||||
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
|
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
|
||||||
__entry->flow_tag = fte->action.flow_tag;
|
__entry->flow_tag = fte->flow_context.flow_tag;
|
||||||
|
__entry->flow_source = fte->flow_context.flow_source;
|
||||||
memcpy(__entry->mask_outer,
|
memcpy(__entry->mask_outer,
|
||||||
MLX5_ADDR_OF(fte_match_param,
|
MLX5_ADDR_OF(fte_match_param,
|
||||||
&__entry->fg->mask.match_criteria,
|
&__entry->fg->mask.match_criteria,
|
||||||
|
@ -781,7 +781,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
|||||||
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
|
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
|
||||||
|
|
||||||
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
|
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
|
||||||
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
|
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
|
||||||
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
|
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
|
||||||
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
|
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
|
||||||
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
|
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
|
||||||
|
@ -680,7 +680,7 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
|
|||||||
|
|
||||||
memset(perm_addr, 0xff, MAX_ADDR_LEN);
|
memset(perm_addr, 0xff, MAX_ADDR_LEN);
|
||||||
|
|
||||||
mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
|
mlx5_query_mac_address(priv->mdev, perm_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
|
static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
|
||||||
|
@@ -426,7 +426,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
 	}
 
 	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
-	flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+	spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
@@ -1518,6 +1518,7 @@ static void mlx5e_free_cq(struct mlx5e_cq *cq)
 
 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 {
+	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 	struct mlx5_core_dev *mdev = cq->mdev;
 	struct mlx5_core_cq *mcq = &cq->mcq;
 
@@ -1552,7 +1553,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 					    MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
 
-	err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
 
 	kvfree(in);
 
@@ -4590,7 +4591,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
-	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
+	mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
 	if (is_zero_ether_addr(netdev->dev_addr) &&
 	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
 		eth_hw_addr_random(netdev);
@@ -5133,7 +5134,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 
 #ifdef CONFIG_MLX5_ESWITCH
 	if (MLX5_ESWITCH_MANAGER(mdev) &&
-	    mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+	    mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
 		mlx5e_rep_register_vport_reps(mdev);
 		return mdev;
 	}
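mlx5_query_nic_vport_mac_address() grew an "other vport" flag in mlx5-next, and callers that only ever query their own function's MAC switch to the mlx5_query_mac_address() convenience helper. Presumably the helper reduces to something like the following (a hedged reconstruction, not copied from the merged tree):

static inline int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
	/* query vport 0, i.e. this function itself, not another vport */
	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}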
@@ -408,7 +408,7 @@ static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
 	struct mlx5e_priv *uplink_priv = NULL;
 	struct net_device *uplink_dev;
 
-	if (esw->mode == SRIOV_NONE)
+	if (esw->mode == MLX5_ESWITCH_NONE)
 		return -EOPNOTSUPP;
 
 	uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
@@ -436,7 +436,7 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
 	struct mlx5e_rep_sq *rep_sq, *tmp;
 	struct mlx5e_rep_priv *rpriv;
 
-	if (esw->mode != SRIOV_OFFLOADS)
+	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return;
 
 	rpriv = mlx5e_rep_to_rep_priv(rep);
@@ -457,7 +457,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	int err;
 	int i;
 
-	if (esw->mode != SRIOV_OFFLOADS)
+	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return 0;
 
 	rpriv = mlx5e_rep_to_rep_priv(rep);
@@ -1412,7 +1412,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
 	SET_NETDEV_DEV(netdev, mdev->device);
 	netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
 	/* we want a persistent mac for the uplink rep */
-	mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
+	mlx5_query_mac_address(mdev, netdev->dev_addr);
 	netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	if (MLX5_CAP_GEN(mdev, qos))
@@ -716,19 +716,22 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 		      struct mlx5e_tc_flow *flow,
 		      struct netlink_ext_ack *extack)
 {
+	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
 	struct mlx5_core_dev *dev = priv->mdev;
 	struct mlx5_flow_destination dest[2] = {};
 	struct mlx5_flow_act flow_act = {
 		.action = attr->action,
-		.flow_tag = attr->flow_tag,
 		.reformat_id = 0,
-		.flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
+		.flags = FLOW_ACT_NO_APPEND,
 	};
 	struct mlx5_fc *counter = NULL;
 	bool table_created = false;
 	int err, dest_ix = 0;
 
+	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
+	flow_context->flow_tag = attr->flow_tag;
+
 	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
 		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
 		if (err) {
@@ -3349,7 +3352,7 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
 	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
 		return -EOPNOTSUPP;
 
-	if (esw && esw->mode == SRIOV_OFFLOADS)
+	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
 		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
 					 filter_dev, flow);
 	else
@@ -136,7 +136,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
 {
 	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
 
@@ -153,7 +153,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
 		cq = mlx5_eq_cq_get(eq, cqn);
 		if (likely(cq)) {
 			++cq->arm_sn;
-			cq->comp(cq);
+			cq->comp(cq, eqe);
 			mlx5_cq_put(cq);
 		} else {
 			mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
@@ -256,6 +256,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	int inlen;
 	u32 *in;
 	int err;
+	int i;
 
 	/* Init CQ table */
 	memset(cq_table, 0, sizeof(*cq_table));
@@ -283,10 +284,12 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	mlx5_fill_page_array(&eq->buf, pas);
 
 	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
-	if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx))
+	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
 		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);
 
-	MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);
+	for (i = 0; i < 4; i++)
+		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
+				 param->mask[i]);
 
 	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
 	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
@@ -389,7 +392,7 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
 	return err;
 }
 
-int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
+void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
 {
 	struct mlx5_cq_table *table = &eq->cq_table;
 	struct mlx5_core_cq *tmp;
@@ -399,16 +402,14 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
 	spin_unlock(&table->lock);
 
 	if (!tmp) {
-		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
-		return -ENOENT;
+		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
+			      eq->eqn, cq->cqn);
+		return;
 	}
 
-	if (tmp != cq) {
-		mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn);
-		return -EINVAL;
-	}
-
-	return 0;
+	if (tmp != cq)
+		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
+			      eq->eqn, cq->cqn);
 }
 
 int mlx5_eq_table_init(struct mlx5_core_dev *dev)
@@ -502,14 +503,31 @@ static int cq_err_event_notifier(struct notifier_block *nb,
 		return NOTIFY_OK;
 	}
 
-	cq->event(cq, type);
+	if (cq->event)
+		cq->event(cq, type);
 
 	mlx5_cq_put(cq);
 
 	return NOTIFY_OK;
 }
 
-static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
+static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
+{
+	__be64 *user_unaffiliated_events;
+	__be64 *user_affiliated_events;
+	int i;
+
+	user_affiliated_events =
+		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
+	user_unaffiliated_events =
+		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);
+
+	for (i = 0; i < 4; i++)
+		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
+				       user_unaffiliated_events[i]);
+}
+
+static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
 {
 	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
 
@@ -546,7 +564,10 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
 		async_event_mask |=
 			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
 
-	return async_event_mask;
+	mask[0] = async_event_mask;
+
+	if (MLX5_CAP_GEN(dev, event_cap))
+		gather_user_async_events(dev, mask);
 }
 
 static int create_async_eqs(struct mlx5_core_dev *dev)
@@ -561,9 +582,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
 		.irq_index = 0,
-		.mask = 1ull << MLX5_EVENT_TYPE_CMD,
 		.nent = MLX5_NUM_CMD_EQE,
 	};
+
+	param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
 	err = create_async_eq(dev, &table->cmd_eq.core, &param);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
@@ -579,9 +601,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
 		.irq_index = 0,
-		.mask = gather_async_events_mask(dev),
 		.nent = MLX5_NUM_ASYNC_EQE,
 	};
+
+	gather_async_events_mask(dev, param.mask);
 	err = create_async_eq(dev, &table->async_eq.core, &param);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
@@ -597,9 +620,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
 		.irq_index = 0,
-		.mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
 		.nent = /* TODO: sriov max_vf + */ 1,
 	};
+
+	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
 	err = create_async_eq(dev, &table->pages_eq.core, &param);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
@@ -791,7 +815,6 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
 		param = (struct mlx5_eq_param) {
 			.irq_index = vecidx,
-			.mask = 0,
 			.nent = nent,
 		};
 		err = create_map_eq(dev, &eq->core, &param);
@@ -927,6 +950,7 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
 
 	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
 }
+EXPORT_SYMBOL(mlx5_eq_notifier_register);
 
 int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
 {
@@ -937,3 +961,4 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
 
 	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
 }
+EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
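struct mlx5_eq_param's event mask widens from one u64 to u64 mask[4] (256 event bits), which is why the .mask designated initializers disappear above and callers assign mask[0] after zero-initializing the struct. The new convention, sketched:

struct mlx5_eq_param param = (struct mlx5_eq_param) {
	.irq_index = 0,
	.nent = MLX5_NUM_CMD_EQE,
};

/* event types 0..63 live in mask[0]; higher ones would go in mask[1..3] */
param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
eq = mlx5_eq_create_generic(dev, &param);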
@@ -134,6 +134,30 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
 	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+					  void *in, int inlen)
+{
+	return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
+}
+
+static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
+				       void *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
+
+	MLX5_SET(query_esw_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
+	MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+					 void *out, int outlen)
+{
+	return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
+}
+
 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
 				  u16 vlan, u8 qos, u8 set_flags)
 {
@@ -473,7 +497,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 
 fdb_add:
 	/* SRIOV is enabled: Forward UC MAC to vport */
-	if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
+	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
 		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
 
 	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
@@ -873,7 +897,7 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	u8 mac[ETH_ALEN];
 
-	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
+	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
 	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
 		  vport->vport, mac);
 
@@ -939,7 +963,7 @@ int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
 
 	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
-						    vport->vport);
+			mlx5_eswitch_vport_num_to_index(esw, vport->vport));
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
@@ -1057,7 +1081,7 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
 
 	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
-						    vport->vport);
+			mlx5_eswitch_vport_num_to_index(esw, vport->vport));
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
@@ -1168,6 +1192,8 @@ void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
 
 	vport->ingress.drop_rule = NULL;
 	vport->ingress.allow_rule = NULL;
+
+	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 }
 
 void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
@@ -1527,6 +1553,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
 				 struct mlx5_vport *vport)
 {
 	u16 vport_num = vport->vport;
+	int flags;
 
 	if (esw->manager_vport == vport_num)
 		return;
@@ -1544,11 +1571,13 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
 					  vport->info.node_guid);
 	}
 
+	flags = (vport->info.vlan || vport->info.qos) ?
+		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
 	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
-			       (vport->info.vlan || vport->info.qos));
+			       flags);
 
 	/* Only legacy mode needs ACLs */
-	if (esw->mode == SRIOV_LEGACY) {
+	if (esw->mode == MLX5_ESWITCH_LEGACY) {
 		esw_vport_ingress_config(esw, vport);
 		esw_vport_egress_config(esw, vport);
 	}
@@ -1600,7 +1629,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
 	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
 
 	/* Create steering drop counters for ingress and egress ACLs */
-	if (vport_num && esw->mode == SRIOV_LEGACY)
+	if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
 		esw_vport_create_drop_counters(vport);
 
 	/* Restore old vport configuration */
@@ -1654,7 +1683,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
 	vport->enabled_events = 0;
 	esw_vport_disable_qos(esw, vport);
 	if (esw->manager_vport != vport_num &&
-	    esw->mode == SRIOV_LEGACY) {
+	    esw->mode == MLX5_ESWITCH_LEGACY) {
 		mlx5_modify_vport_admin_state(esw->dev,
 					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
 					      vport_num, 1,
@@ -1696,49 +1725,61 @@ int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 
+static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
+{
+	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+	mlx5_eq_notifier_register(esw->dev, &esw->nb);
+
+	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
+		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
+			     ESW_FUNCTIONS_CHANGED);
+		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
+	}
+}
+
+static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
+{
+	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
+		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
+
+	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
||||||
|
flush_workqueue(esw->work_queue);
|
||||||
|
}
|
||||||
|
|
||||||
/* Public E-Switch API */
|
/* Public E-Switch API */
|
||||||
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
|
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
|
||||||
|
|
||||||
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
|
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
|
||||||
{
|
{
|
||||||
struct mlx5_vport *vport;
|
struct mlx5_vport *vport;
|
||||||
int total_nvports = 0;
|
|
||||||
int err;
|
int err;
|
||||||
int i, enabled_events;
|
int i, enabled_events;
|
||||||
|
|
||||||
if (!ESW_ALLOWED(esw) ||
|
if (!ESW_ALLOWED(esw) ||
|
||||||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
|
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
|
||||||
esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
|
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
|
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
|
||||||
esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
|
esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
|
||||||
|
|
||||||
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
|
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
|
||||||
esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
|
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
|
||||||
|
|
||||||
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
|
|
||||||
|
|
||||||
if (mode == SRIOV_OFFLOADS) {
|
|
||||||
if (mlx5_core_is_ecpf_esw_manager(esw->dev))
|
|
||||||
total_nvports = esw->total_vports;
|
|
||||||
else
|
|
||||||
total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
|
|
||||||
}
|
|
||||||
|
|
||||||
esw->mode = mode;
|
esw->mode = mode;
|
||||||
|
|
||||||
mlx5_lag_update(esw->dev);
|
mlx5_lag_update(esw->dev);
|
||||||
|
|
||||||
if (mode == SRIOV_LEGACY) {
|
if (mode == MLX5_ESWITCH_LEGACY) {
|
||||||
err = esw_create_legacy_table(esw);
|
err = esw_create_legacy_table(esw);
|
||||||
if (err)
|
if (err)
|
||||||
goto abort;
|
goto abort;
|
||||||
} else {
|
} else {
|
||||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
||||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||||
err = esw_offloads_init(esw, nvfs, total_nvports);
|
err = esw_offloads_init(esw);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (err)
|
if (err)
|
||||||
@ -1748,11 +1789,8 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
|
|||||||
if (err)
|
if (err)
|
||||||
esw_warn(esw->dev, "Failed to create eswitch TSAR");
|
esw_warn(esw->dev, "Failed to create eswitch TSAR");
|
||||||
|
|
||||||
/* Don't enable vport events when in SRIOV_OFFLOADS mode, since:
|
enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
|
||||||
* 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode
|
UC_ADDR_CHANGE;
|
||||||
* 2. FDB/Eswitch is programmed by user space tools
|
|
||||||
*/
|
|
||||||
enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
|
|
||||||
|
|
||||||
/* Enable PF vport */
|
/* Enable PF vport */
|
||||||
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
|
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
|
||||||
@ -1765,22 +1803,21 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Enable VF vports */
|
/* Enable VF vports */
|
||||||
mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
|
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
|
||||||
esw_enable_vport(esw, vport, enabled_events);
|
esw_enable_vport(esw, vport, enabled_events);
|
||||||
|
|
||||||
if (mode == SRIOV_LEGACY) {
|
mlx5_eswitch_event_handlers_register(esw);
|
||||||
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
|
|
||||||
mlx5_eq_notifier_register(esw->dev, &esw->nb);
|
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
|
||||||
}
|
mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
|
||||||
|
esw->esw_funcs.num_vfs, esw->enabled_vports);
|
||||||
|
|
||||||
esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
|
|
||||||
esw->enabled_vports);
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
abort:
|
abort:
|
||||||
esw->mode = SRIOV_NONE;
|
esw->mode = MLX5_ESWITCH_NONE;
|
||||||
|
|
||||||
if (mode == SRIOV_OFFLOADS) {
|
if (mode == MLX5_ESWITCH_OFFLOADS) {
|
||||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
||||||
}
|
}
|
||||||
@ -1788,23 +1825,22 @@ abort:
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
|
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
|
||||||
{
|
{
|
||||||
struct esw_mc_addr *mc_promisc;
|
struct esw_mc_addr *mc_promisc;
|
||||||
struct mlx5_vport *vport;
|
struct mlx5_vport *vport;
|
||||||
int old_mode;
|
int old_mode;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
|
if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
|
esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
|
||||||
esw->enabled_vports, esw->mode);
|
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
|
||||||
|
esw->esw_funcs.num_vfs, esw->enabled_vports);
|
||||||
|
|
||||||
mc_promisc = &esw->mc_promisc;
|
mc_promisc = &esw->mc_promisc;
|
||||||
|
mlx5_eswitch_event_handlers_unregister(esw);
|
||||||
if (esw->mode == SRIOV_LEGACY)
|
|
||||||
mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
|
|
||||||
|
|
||||||
mlx5_esw_for_all_vports(esw, i, vport)
|
mlx5_esw_for_all_vports(esw, i, vport)
|
||||||
esw_disable_vport(esw, vport);
|
esw_disable_vport(esw, vport);
|
||||||
@ -1814,17 +1850,17 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
|
|||||||
|
|
||||||
esw_destroy_tsar(esw);
|
esw_destroy_tsar(esw);
|
||||||
|
|
||||||
if (esw->mode == SRIOV_LEGACY)
|
if (esw->mode == MLX5_ESWITCH_LEGACY)
|
||||||
esw_destroy_legacy_table(esw);
|
esw_destroy_legacy_table(esw);
|
||||||
else if (esw->mode == SRIOV_OFFLOADS)
|
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
|
||||||
esw_offloads_cleanup(esw);
|
esw_offloads_cleanup(esw);
|
||||||
|
|
||||||
old_mode = esw->mode;
|
old_mode = esw->mode;
|
||||||
esw->mode = SRIOV_NONE;
|
esw->mode = MLX5_ESWITCH_NONE;
|
||||||
|
|
||||||
mlx5_lag_update(esw->dev);
|
mlx5_lag_update(esw->dev);
|
||||||
|
|
||||||
if (old_mode == SRIOV_OFFLOADS) {
|
if (old_mode == MLX5_ESWITCH_OFFLOADS) {
|
||||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
||||||
}
|
}
|
||||||
@ -1852,6 +1888,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
|
|||||||
|
|
||||||
esw->dev = dev;
|
esw->dev = dev;
|
||||||
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
|
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
|
||||||
|
esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
|
||||||
|
|
||||||
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
|
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
|
||||||
if (!esw->work_queue) {
|
if (!esw->work_queue) {
|
||||||
@ -1885,7 +1922,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
esw->enabled_vports = 0;
|
esw->enabled_vports = 0;
|
||||||
esw->mode = SRIOV_NONE;
|
esw->mode = MLX5_ESWITCH_NONE;
|
||||||
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
|
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
|
||||||
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
|
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
|
||||||
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
|
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
|
||||||
@ -1955,7 +1992,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
|
|||||||
|
|
||||||
ether_addr_copy(evport->info.mac, mac);
|
ether_addr_copy(evport->info.mac, mac);
|
||||||
evport->info.node_guid = node_guid;
|
evport->info.node_guid = node_guid;
|
||||||
if (evport->enabled && esw->mode == SRIOV_LEGACY)
|
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
|
||||||
err = esw_vport_ingress_config(esw, evport);
|
err = esw_vport_ingress_config(esw, evport);
|
||||||
|
|
||||||
unlock:
|
unlock:
|
||||||
@ -2039,7 +2076,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
|
|||||||
|
|
||||||
evport->info.vlan = vlan;
|
evport->info.vlan = vlan;
|
||||||
evport->info.qos = qos;
|
evport->info.qos = qos;
|
||||||
if (evport->enabled && esw->mode == SRIOV_LEGACY) {
|
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
|
||||||
err = esw_vport_ingress_config(esw, evport);
|
err = esw_vport_ingress_config(esw, evport);
|
||||||
if (err)
|
if (err)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
@ -2081,7 +2118,7 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
|
|||||||
mlx5_core_warn(esw->dev,
|
mlx5_core_warn(esw->dev,
|
||||||
"Spoofchk in set while MAC is invalid, vport(%d)\n",
|
"Spoofchk in set while MAC is invalid, vport(%d)\n",
|
||||||
evport->vport);
|
evport->vport);
|
||||||
if (evport->enabled && esw->mode == SRIOV_LEGACY)
|
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
|
||||||
err = esw_vport_ingress_config(esw, evport);
|
err = esw_vport_ingress_config(esw, evport);
|
||||||
if (err)
|
if (err)
|
||||||
evport->info.spoofchk = pschk;
|
evport->info.spoofchk = pschk;
|
||||||
@ -2177,7 +2214,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
|
|||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
mutex_lock(&esw->state_lock);
|
mutex_lock(&esw->state_lock);
|
||||||
if (esw->mode != SRIOV_LEGACY) {
|
if (esw->mode != MLX5_ESWITCH_LEGACY) {
|
||||||
err = -EOPNOTSUPP;
|
err = -EOPNOTSUPP;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
@ -2200,7 +2237,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
|
|||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
mutex_lock(&esw->state_lock);
|
mutex_lock(&esw->state_lock);
|
||||||
if (esw->mode != SRIOV_LEGACY) {
|
if (esw->mode != MLX5_ESWITCH_LEGACY) {
|
||||||
err = -EOPNOTSUPP;
|
err = -EOPNOTSUPP;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
@ -2343,7 +2380,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
|
|||||||
u64 bytes = 0;
|
u64 bytes = 0;
|
||||||
int err = 0;
|
int err = 0;
|
||||||
|
|
||||||
if (!vport->enabled || esw->mode != SRIOV_LEGACY)
|
if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (vport->egress.drop_counter)
|
if (vport->egress.drop_counter)
|
||||||
@ -2453,7 +2490,7 @@ free_out:
|
|||||||
|
|
||||||
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
|
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
|
||||||
{
|
{
|
||||||
return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
|
return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
|
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
|
||||||
|
|
||||||
@ -2470,10 +2507,10 @@ EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
|
|||||||
|
|
||||||
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
|
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
|
||||||
{
|
{
|
||||||
if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
|
if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
|
||||||
dev1->priv.eswitch->mode == SRIOV_NONE) ||
|
dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
|
||||||
(dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
|
(dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
|
||||||
dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
|
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
@ -2482,6 +2519,24 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
|
|||||||
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
|
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
|
||||||
struct mlx5_core_dev *dev1)
|
struct mlx5_core_dev *dev1)
|
||||||
{
|
{
|
||||||
return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
|
return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
|
||||||
dev1->priv.eswitch->mode == SRIOV_OFFLOADS);
|
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
|
||||||
|
}
|
||||||
|
|
||||||
|
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
|
||||||
|
{
|
||||||
|
u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
|
||||||
|
int err;
|
||||||
|
|
||||||
|
WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
|
||||||
|
|
||||||
|
if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
|
||||||
|
esw->esw_funcs.num_vfs = num_vfs;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
err = mlx5_esw_query_functions(esw->dev, out, sizeof(out));
|
||||||
|
if (!err)
|
||||||
|
esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
|
||||||
|
host_params_context.host_num_of_vfs);
|
||||||
}
|
}
|
||||||
|
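Note on the eswitch.c rework above: enabling is now decoupled from the VF count, which is published up front through mlx5_eswitch_update_num_of_vfs(). A minimal in-tree sketch of the intended calling sequence follows; it only compiles against this series (the function names come from this diff, the helper name example_enable_legacy_eswitch is ours).

static int example_enable_legacy_eswitch(struct mlx5_core_dev *dev, int num_vfs)
{
	/* Publish the VF count first; on ECPF eswitch managers the count is
	 * taken from QUERY_ESW_FUNCTIONS instead of the argument. */
	mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);

	/* Enable no longer takes an nvfs argument. */
	return mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
}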
@@ -68,6 +68,8 @@ struct vport_ingress {
 	struct mlx5_flow_group *allow_spoofchk_only_grp;
 	struct mlx5_flow_group *allow_untagged_only_grp;
 	struct mlx5_flow_group *drop_grp;
+	int modify_metadata_id;
+	struct mlx5_flow_handle *modify_metadata_rule;
 	struct mlx5_flow_handle *allow_rule;
 	struct mlx5_flow_handle *drop_rule;
 	struct mlx5_fc *drop_counter;
@@ -196,6 +198,10 @@ struct mlx5_esw_functions {
 	u16 num_vfs;
 };
 
+enum {
+	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
+};
+
 struct mlx5_eswitch {
 	struct mlx5_core_dev *dev;
 	struct mlx5_nb nb;
@@ -203,6 +209,7 @@ struct mlx5_eswitch {
 	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
 	struct workqueue_struct *work_queue;
 	struct mlx5_vport *vports;
+	u32 flags;
 	int total_vports;
 	int enabled_vports;
 	/* Synchronize between vport change events
@@ -220,12 +227,12 @@ struct mlx5_eswitch {
 	int mode;
 	int nvports;
 	u16 manager_vport;
+	u16 first_host_vport;
 	struct mlx5_esw_functions esw_funcs;
 };
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
-		      int total_nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw);
 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -240,12 +247,14 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
 				  struct mlx5_vport *vport);
 void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
 				   struct mlx5_vport *vport);
+void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
+					       struct mlx5_vport *vport);
 
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
-void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       u16 vport, u8 mac[ETH_ALEN]);
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -267,6 +276,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 struct ifla_vf_stats *vf_stats);
 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
 
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+					  void *in, int inlen);
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+					 void *out, int outlen);
+
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;
 
@@ -356,7 +370,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
 					 struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
 					enum devlink_eswitch_encap_mode encap,
 					struct netlink_ext_ack *extack);
@@ -409,6 +423,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
 		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
 }
 
+static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
+{
+	return mlx5_core_is_ecpf_esw_manager(dev) ?
+		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
+}
+
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
 {
 	/* Ideally device should have the functions changed supported
@@ -505,15 +525,39 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
 #define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)	\
 	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
 
+/* Includes host PF (vport 0) if it's not esw manager. */
+#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs)	\
+	for ((i) = (esw)->first_host_vport;			\
+	     (rep) = &(esw)->offloads.vport_reps[i],		\
+	     (i) <= (nvfs); (i)++)
+
+#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs)	\
+	for ((i) = (nvfs);						\
+	     (rep) = &(esw)->offloads.vport_reps[i],			\
+	     (i) >= (esw)->first_host_vport; (i)--)
+
+#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)	\
+	for ((vport) = (esw)->first_host_vport;			\
+	     (vport) <= (nvfs); (vport)++)
+
+#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs)	\
+	for ((vport) = (nvfs);						\
+	     (vport) >= (esw)->first_host_vport; (vport)--)
+
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
 
+bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
+
+void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
+int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline int  mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
-static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
+static inline int  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 static inline int
@@ -522,6 +566,8 @@ mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
 	return -EOPNOTSUPP;
 }
 
+static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
+
 #define FDB_MAX_CHAIN 1
 #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
 #define FDB_MAX_PRIO 1
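The new host-func iterators above start from esw->first_host_vport, so on an ECPF-managed eswitch the walk includes the host PF (vport 0) and otherwise begins at the first VF. A hedged in-tree sketch of the intended use; the function name is ours and "nvfs" is the last VF vport number to visit:

static void example_walk_host_func_vports(struct mlx5_eswitch *esw, u16 nvfs)
{
	u16 vport;

	/* Visits MLX5_VPORT_PF..nvfs on ECPF managers, else
	 * MLX5_VPORT_FIRST_VF..nvfs. */
	mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)
		esw_debug(esw->dev, "host function vport %u\n", vport);
}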
File diff suppressed because it is too large
@@ -414,7 +414,8 @@ static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
 		mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
 }
 
-static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
+static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq,
+				       struct mlx5_eqe *eqe)
 {
 	struct mlx5_fpga_conn *conn;
 
@@ -429,6 +430,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	struct mlx5_fpga_device *fdev = conn->fdev;
 	struct mlx5_core_dev *mdev = fdev->mdev;
 	u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
+	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 	struct mlx5_wq_param wqp;
 	struct mlx5_cqe64 *cqe;
 	int inlen, err, eqn;
@@ -476,7 +478,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
 	mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);
 
-	err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
+	err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out));
 	kvfree(in);
 
 	if (err)
@@ -867,7 +869,7 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
 	conn->cb_arg = attr->cb_arg;
 
 	remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
-	err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
+	err = mlx5_query_mac_address(fdev->mdev, remote_mac);
 	if (err) {
 		mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
 		ret = ERR_PTR(err);
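With the mlx5_core_create_cq() enhancement above, the CREATE_CQ output mailbox is supplied by the caller (see the matching prototype change in cq.h further down). A hedged sketch of the new call shape; the wrapper name is ours:

static int example_create_cq(struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq,
			     u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};

	/* The output buffer is now owned by the caller, so wrappers can parse
	 * CREATE_CQ result fields themselves instead of relying on the core. */
	return mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
}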
@@ -636,7 +636,8 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
 					   u8 match_criteria_enable,
 					   const u32 *match_c,
 					   const u32 *match_v,
-					   struct mlx5_flow_act *flow_act)
+					   struct mlx5_flow_act *flow_act,
+					   struct mlx5_flow_context *flow_context)
 {
 	const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
 					   outer_headers);
@@ -655,7 +656,7 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
 	    (match_criteria_enable &
 	     ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
 	    (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
-	    (flow_act->flags & FLOW_ACT_HAS_TAG))
+	    (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
 		return false;
 
 	return true;
@@ -767,7 +768,8 @@ mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
 					      fg->mask.match_criteria_enable,
 					      fg->mask.match_criteria,
 					      fte->val,
-					      &fte->action))
+					      &fte->action,
+					      &fte->flow_context))
 		return ERR_PTR(-EINVAL);
 	else if (!mlx5_is_fpga_ipsec_rule(mdev,
 					  fg->mask.match_criteria_enable,
@@ -396,7 +396,11 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
 
-	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
+	MLX5_SET(flow_context, in_flow_context, flow_tag,
+		 fte->flow_context.flow_tag);
+	MLX5_SET(flow_context, in_flow_context, flow_source,
+		 fte->flow_context.flow_source);
+
 	MLX5_SET(flow_context, in_flow_context, extended_destination,
 		 extended_dest);
 	if (extended_dest) {
@@ -771,6 +775,10 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
 		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
 		table_type = FS_FT_NIC_TX;
 		break;
+	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
+		table_type = FS_FT_ESW_INGRESS_ACL;
+		break;
 	default:
 		return -EOPNOTSUPP;
 	}
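As the hunk above shows, flow_tag (and the new flow_source) are now programmed from fte->flow_context rather than fte->action. A hedged sketch of how a rule author sets a tag under the new layout; the field and flag names are taken from this diff, the helper name is ours:

static void example_set_flow_tag(struct mlx5_flow_spec *spec, u32 tag)
{
	/* The flow context travels on the spec, so per-rule identity no
	 * longer widens mlx5_flow_act. */
	spec->flow_context.flow_tag = tag;
	spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
}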
@@ -584,7 +584,7 @@ err_ida_remove:
 }
 
 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
-				u32 *match_value,
+				struct mlx5_flow_spec *spec,
 				struct mlx5_flow_act *flow_act)
 {
 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
@@ -594,9 +594,10 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
 	if (!fte)
 		return ERR_PTR(-ENOMEM);
 
-	memcpy(fte->val, match_value, sizeof(fte->val));
+	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
 	fte->node.type =  FS_TYPE_FLOW_ENTRY;
 	fte->action = *flow_act;
+	fte->flow_context = spec->flow_context;
 
 	tree_init_node(&fte->node, NULL, del_sw_fte);
 
@@ -1430,7 +1431,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
 	return false;
 }
 
-static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
+static int check_conflicting_ftes(struct fs_fte *fte,
+				  const struct mlx5_flow_context *flow_context,
+				  const struct mlx5_flow_act *flow_act)
 {
 	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
 		mlx5_core_warn(get_dev(&fte->node),
@@ -1438,12 +1441,12 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act
 		return -EEXIST;
 	}
 
-	if ((flow_act->flags & FLOW_ACT_HAS_TAG) &&
-	    fte->action.flow_tag != flow_act->flow_tag) {
+	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
+	    fte->flow_context.flow_tag != flow_context->flow_tag) {
 		mlx5_core_warn(get_dev(&fte->node),
 			       "FTE flow tag %u already exists with different flow tag %u\n",
-			       fte->action.flow_tag,
-			       flow_act->flow_tag);
+			       fte->flow_context.flow_tag,
+			       flow_context->flow_tag);
 		return -EEXIST;
 	}
 
@@ -1451,7 +1454,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act
 }
 
 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
-					    u32 *match_value,
+					    struct mlx5_flow_spec *spec,
 					    struct mlx5_flow_act *flow_act,
 					    struct mlx5_flow_destination *dest,
 					    int dest_num,
@@ -1462,7 +1465,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
 	int i;
 	int ret;
 
-	ret = check_conflicting_ftes(fte, flow_act);
+	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -1637,7 +1640,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 	u64 version;
 	int err;
 
-	fte = alloc_fte(ft, spec->match_value, flow_act);
+	fte = alloc_fte(ft, spec, flow_act);
 	if (IS_ERR(fte))
 		return ERR_PTR(-ENOMEM);
 
@@ -1653,8 +1656,7 @@ search_again_locked:
 		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
 		if (!fte_tmp)
 			continue;
-		rule = add_rule_fg(g, spec->match_value,
-				   flow_act, dest, dest_num, fte_tmp);
+		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
 		up_write_ref_node(&fte_tmp->node, false);
 		tree_put_node(&fte_tmp->node, false);
 		kmem_cache_free(steering->ftes_cache, fte);
@@ -1701,8 +1703,7 @@ skip_search:
 
 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
 	up_write_ref_node(&g->node, false);
-	rule = add_rule_fg(g, spec->match_value,
-			   flow_act, dest, dest_num, fte);
+	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
 	up_write_ref_node(&fte->node, false);
 	tree_put_node(&fte->node, false);
 	return rule;
@@ -1788,7 +1789,7 @@ search_again_locked:
 	if (err)
 		goto err_release_fg;
 
-	fte = alloc_fte(ft, spec->match_value, flow_act);
+	fte = alloc_fte(ft, spec, flow_act);
 	if (IS_ERR(fte)) {
 		err = PTR_ERR(fte);
 		goto err_release_fg;
@@ -1802,8 +1803,7 @@ search_again_locked:
 
 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
 	up_write_ref_node(&g->node, false);
-	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
-			   dest_num, fte);
+	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
 	up_write_ref_node(&fte->node, false);
 	tree_put_node(&fte->node, false);
 	tree_put_node(&g->node, false);
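The reworked check_conflicting_ftes() above enforces that two rules sharing an FTE may not request different flow tags, now comparing flow contexts rather than flow actions. A standalone C model of that predicate, hedged: the flag value below is an assumption for illustration only.

#include <stdbool.h>
#include <stdint.h>

#define FLOW_CONTEXT_HAS_TAG (1u << 0)	/* assumed bit value */

struct flow_context {
	uint32_t flags;
	uint32_t flow_tag;
};

/* Mirrors the kernel check: a conflict exists only when the new rule
 * actually carries a tag and it differs from the existing FTE's tag. */
static bool tags_conflict(const struct flow_context *existing,
			  const struct flow_context *requested)
{
	return (requested->flags & FLOW_CONTEXT_HAS_TAG) &&
	       existing->flow_tag != requested->flow_tag;
}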
@@ -170,6 +170,7 @@ struct fs_fte {
 	u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
 	u32 dests_size;
 	u32 index;
+	struct mlx5_flow_context flow_context;
 	struct mlx5_flow_act action;
 	enum fs_fte_status status;
 	struct mlx5_fc *counter;
@@ -202,6 +202,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 			return err;
 	}
 
+	if (MLX5_CAP_GEN(dev, event_cap)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
 
@@ -305,8 +305,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 		   !mlx5_sriov_is_enabled(dev1);
 
 #ifdef CONFIG_MLX5_ESWITCH
-	roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE &&
-		    dev1->priv.eswitch->mode == SRIOV_NONE;
+	roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
+		    dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
 #endif
 
 	if (roce_lag)
@@ -75,7 +75,7 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev);
 void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);
 
 int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
-int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
 struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
 struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
 void mlx5_cq_tasklet_cb(unsigned long data);
@@ -97,7 +97,4 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
 #endif
 
-int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
-int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
-
 #endif
@@ -731,8 +731,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
 	struct mlx5_priv *priv = &dev->priv;
 	int err = 0;
 
-	priv->pci_dev_data = id->driver_data;
-
+	mutex_init(&dev->pci_status_mutex);
 	pci_set_drvdata(dev->pdev, dev);
 
 	dev->bar_addr = pci_resource_start(pdev, 0);
@@ -1258,7 +1257,6 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
 
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
-	mutex_init(&dev->pci_status_mutex);
 	mutex_init(&dev->intf_state_mutex);
 
 	mutex_init(&priv->bfregs.reg_head.lock);
@@ -1320,6 +1318,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev->device = &pdev->dev;
 	dev->pdev = pdev;
 
+	dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
+			 MLX5_COREDEV_VF : MLX5_COREDEV_PF;
+
 	err = mlx5_mdev_init(dev, prof_sel);
 	if (err)
 		goto mdev_init_err;
@@ -38,15 +38,12 @@
 
 void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
 {
-	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
-
-	memset(table, 0, sizeof(*table));
-	rwlock_init(&table->lock);
-	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+	xa_init_flags(&dev->priv.mkey_table, XA_FLAGS_LOCK_IRQ);
 }
 
 void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
 {
+	WARN_ON(!xa_empty(&dev->priv.mkey_table));
 }
 
 int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
@@ -56,8 +53,8 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
 			     mlx5_async_cbk_t callback,
 			     struct mlx5_async_work *context)
 {
-	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
 	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+	struct xarray *mkeys = &dev->priv.mkey_table;
 	u32 mkey_index;
 	void *mkc;
 	int err;
@@ -88,12 +85,10 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
 	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
 		      mkey_index, key, mkey->key);
 
-	/* connect to mkey tree */
-	write_lock_irq(&table->lock);
-	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key), mkey);
-	write_unlock_irq(&table->lock);
+	err = xa_err(xa_store_irq(mkeys, mlx5_base_mkey(mkey->key), mkey,
+				  GFP_KERNEL));
 	if (err) {
-		mlx5_core_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n",
+		mlx5_core_warn(dev, "failed xarray insert of mkey 0x%x, %d\n",
 			       mlx5_base_mkey(mkey->key), err);
 		mlx5_core_destroy_mkey(dev, mkey);
 	}
@@ -114,17 +109,17 @@ EXPORT_SYMBOL(mlx5_core_create_mkey);
 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
 			   struct mlx5_core_mkey *mkey)
 {
-	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
 	u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
 	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
+	struct xarray *mkeys = &dev->priv.mkey_table;
 	struct mlx5_core_mkey *deleted_mkey;
 	unsigned long flags;
 
-	write_lock_irqsave(&table->lock, flags);
-	deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
-	write_unlock_irqrestore(&table->lock, flags);
+	xa_lock_irqsave(mkeys, flags);
+	deleted_mkey = __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
+	xa_unlock_irqrestore(mkeys, flags);
 	if (!deleted_mkey) {
-		mlx5_core_dbg(dev, "failed radix tree delete of mkey 0x%x\n",
+		mlx5_core_dbg(dev, "failed xarray delete of mkey 0x%x\n",
 			      mlx5_base_mkey(mkey->key));
 		return -ENOENT;
 	}
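The mkey table conversion above swaps an rwlock-protected radix tree for an xarray whose internal spinlock is taken IRQ-safely (hence XA_FLAGS_LOCK_IRQ at init). A hedged in-tree sketch of the writer-side pattern this enables; the wrapper name is ours:

static int example_insert_mkey(struct mlx5_core_dev *dev,
			       struct mlx5_core_mkey *mkey)
{
	struct xarray *mkeys = &dev->priv.mkey_table;

	/* xa_store_irq() disables IRQs around the xarray's own lock, matching
	 * the init flag above; xa_err() folds a failed store into an errno,
	 * replacing radix_tree_insert()'s return code. */
	return xa_err(xa_store_irq(mkeys, mlx5_base_mkey(mkey->key), mkey,
				   GFP_KERNEL));
}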
@@ -126,7 +126,7 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
 {
 	u8 hw_id[ETH_ALEN];
 
-	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+	mlx5_query_mac_address(dev, hw_id);
 	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
 	addrconf_addr_eui48(&gid->raw[8], hw_id);
 }
@@ -74,17 +74,11 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 	int err;
 	int vf;
 
-	if (sriov->enabled_vfs) {
-		mlx5_core_warn(dev,
-			       "failed to enable SRIOV on device, already enabled with %d vfs\n",
-			       sriov->enabled_vfs);
-		return -EBUSY;
-	}
-
 	if (!MLX5_ESWITCH_MANAGER(dev))
 		goto enable_vfs_hca;
 
-	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+	mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
+	err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
 	if (err) {
 		mlx5_core_warn(dev,
 			       "failed to enable eswitch SRIOV (%d)\n", err);
@@ -99,7 +93,6 @@ enable_vfs_hca:
 			continue;
 		}
 		sriov->vfs_ctx[vf].enabled = 1;
-		sriov->enabled_vfs++;
 		if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
 			err = sriov_restore_guids(dev, vf);
 			if (err) {
@@ -118,13 +111,11 @@ enable_vfs_hca:
 static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+	int num_vfs = pci_num_vf(dev->pdev);
 	int err;
 	int vf;
 
-	if (!sriov->enabled_vfs)
-		goto out;
-
-	for (vf = 0; vf < sriov->num_vfs; vf++) {
+	for (vf = num_vfs - 1; vf >= 0; vf--) {
 		if (!sriov->vfs_ctx[vf].enabled)
 			continue;
 		err = mlx5_core_disable_hca(dev, vf + 1);
@@ -133,12 +124,10 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 			continue;
 		}
 		sriov->vfs_ctx[vf].enabled = 0;
-		sriov->enabled_vfs--;
 	}
 
-out:
 	if (MLX5_ESWITCH_MANAGER(dev))
-		mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+		mlx5_eswitch_disable(dev->priv.eswitch);
 
 	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -191,13 +180,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 
 int mlx5_sriov_attach(struct mlx5_core_dev *dev)
 {
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
-	if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
+	if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
 		return 0;
 
 	/* If sriov VFs exist in PCI level, enable them in device level */
-	return mlx5_device_enable_sriov(dev, sriov->num_vfs);
+	return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev));
 }
 
 void mlx5_sriov_detach(struct mlx5_core_dev *dev)
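The sriov.c hunks above drop the driver-private enabled_vfs counter; the PCI core's pci_num_vf() becomes the single source of truth, and VFs are torn down in reverse order of enablement. A hedged sketch condensing the new attach flow (the function name is ours):

static int example_sriov_attach(struct mlx5_core_dev *dev)
{
	int num_vfs = pci_num_vf(dev->pdev);	/* PCI core tracks the count now */

	if (!mlx5_core_is_pf(dev) || !num_vfs)
		return 0;

	/* VFs already exist at the PCI level, so mirror them at device level. */
	return mlx5_device_enable_sriov(dev, num_vfs);
}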
@@ -155,11 +155,12 @@ int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
 }
 
 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
-				     u16 vport, u8 *addr)
+				     u16 vport, bool other, u8 *addr)
 {
-	u32 *out;
 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
 	u8 *out_addr;
+	u32 *out;
 	int err;
 
 	out = kvzalloc(outlen, GFP_KERNEL);
@@ -169,7 +170,12 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
 				nic_vport_context.permanent_address);
 
-	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
+	MLX5_SET(query_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+	MLX5_SET(query_nic_vport_context_in, in, other_vport, other);
+
+	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 	if (!err)
 		ether_addr_copy(addr, &out_addr[2]);
 
@@ -178,6 +184,12 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
 
+int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
+
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 				      u16 vport, u8 *addr)
 {
@@ -194,9 +206,7 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 	MLX5_SET(modify_nic_vport_context_in, in,
 		 field_select.permanent_address, 1);
 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-
-	if (vport)
-		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
 
 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
 				     in, nic_vport_context);
@@ -291,9 +301,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
 		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
 	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
 	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-
-	if (vport)
-		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+	MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
 
 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
 	if (err)
@@ -483,7 +491,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 	MLX5_SET(modify_nic_vport_context_in, in,
 		 field_select.node_guid, 1);
 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
 
 	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
 					 in, nic_vport_context);
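mlx5_query_nic_vport_mac_address() now takes an explicit "other" flag instead of inferring other_vport from a nonzero vport number, and the common local-MAC case gets a dedicated wrapper. A hedged sketch of the callers' view (the function name is ours):

static int example_query_own_mac(struct mlx5_core_dev *mdev, u8 *mac)
{
	/* Equivalent to
	 * mlx5_query_nic_vport_mac_address(mdev, 0, false, mac):
	 * queries this function's own vport with other_vport cleared. */
	return mlx5_query_mac_address(mdev, mac);
}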
@@ -47,7 +47,7 @@ struct mlx5_core_cq {
 	struct completion	free;
 	unsigned		vector;
 	unsigned int		irqn;
-	void (*comp)		(struct mlx5_core_cq *);
+	void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
 	void (*event)		(struct mlx5_core_cq *, enum mlx5_event);
 	u32			cons_index;
 	unsigned		arm_sn;
@@ -55,7 +55,7 @@ struct mlx5_core_cq {
 	int			pid;
 	struct {
 		struct list_head list;
-		void (*comp)(struct mlx5_core_cq *);
+		void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
 		void		*priv;
 	} tasklet_ctx;
 	int			reset_notify_added;
@@ -185,7 +185,7 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
 }
 
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			u32 *in, int inlen);
+			u32 *in, int inlen, u32 *out, int outlen);
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 		       u32 *out, int outlen);
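Two related API changes above: the CQ completion callback now receives the EQE that triggered it, and mlx5_core_create_cq() hands the firmware's create_cq output back to the caller. A hedged sketch of both at a call site; the example_ names are illustrative, not from this series:

/* Sketch: a completion handler under the new two-argument signature. */
static void example_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
	/* The triggering EQE is now passed in, so handlers can read
	 * per-event data directly instead of losing it. */
}

static int example_create_cq(struct mlx5_core_dev *dev,
			     struct mlx5_core_cq *cq, u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};

	cq->comp = example_cq_comp;
	/* The command output buffer is now supplied by the caller. */
	return mlx5_core_create_cq(dev, cq, in, inlen, out, sizeof(out));
}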
@@ -351,7 +351,7 @@ enum mlx5_event {
 
 	MLX5_EVENT_TYPE_DEVICE_TRACER      = 0x26,
 
-	MLX5_EVENT_TYPE_MAX                = MLX5_EVENT_TYPE_DEVICE_TRACER + 1,
+	MLX5_EVENT_TYPE_MAX                = 0x100,
 };
 
 enum {
@@ -1077,6 +1077,7 @@ enum mlx5_cap_type {
 	MLX5_CAP_DEBUG,
 	MLX5_CAP_RESERVED_14,
 	MLX5_CAP_DEV_MEM,
+	MLX5_CAP_DEV_EVENT = 0x14,
 	/* NUM OF CAP Types */
 	MLX5_CAP_NUM
 };
@@ -1255,6 +1256,9 @@ enum mlx5_qcam_feature_groups {
 #define MLX5_CAP64_DEV_MEM(mdev, cap)\
 	MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
 
+#define MLX5_CAP_DEV_EVENT(mdev, cap)\
+	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
+
 enum {
 	MLX5_CMD_STAT_OK			= 0x0,
 	MLX5_CMD_STAT_INT_ERR			= 0x1,
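MLX5_CAP_DEV_EVENT follows the existing per-capability accessor pattern, but it yields an address (MLX5_ADDR_OF) rather than a scalar, since the event masks are 4x64-bit arrays (see the device_event_cap hunk further down). A hedged usage sketch; the event_cap general-capability bit is the one added later in this diff:

/* Sketch: locate the device's user-affiliated event mask, if supported. */
static void *example_affiliated_events(struct mlx5_core_dev *dev)
{
	if (!MLX5_CAP_GEN(dev, event_cap))
		return NULL;

	/* Pointer into the cached device_event_cap capability page. */
	return MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
}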
@@ -41,7 +41,7 @@
 #include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 #include <linux/workqueue.h>
 #include <linux/mempool.h>
 #include <linux/interrupt.h>
@@ -138,6 +138,7 @@ enum {
 	MLX5_REG_MTPPS		 = 0x9053,
 	MLX5_REG_MTPPSE		 = 0x9054,
 	MLX5_REG_MPEGC		 = 0x9056,
+	MLX5_REG_MCQS		 = 0x9060,
 	MLX5_REG_MCQI		 = 0x9061,
 	MLX5_REG_MCC		 = 0x9062,
 	MLX5_REG_MCDA		 = 0x9063,
@@ -181,6 +182,11 @@ enum port_state_policy {
 	MLX5_POLICY_INVALID	= 0xffffffff
 };
 
+enum mlx5_coredev_type {
+	MLX5_COREDEV_PF,
+	MLX5_COREDEV_VF
+};
+
 struct mlx5_field_desc {
 	struct dentry	       *dent;
 	int			i;
@@ -452,13 +458,6 @@ struct mlx5_qp_table {
 	struct radix_tree_root	tree;
 };
 
-struct mlx5_mkey_table {
-	/* protect radix tree
-	 */
-	rwlock_t		lock;
-	struct radix_tree_root	tree;
-};
-
 struct mlx5_vf_context {
 	int	enabled;
 	u64	port_guid;
@@ -469,7 +468,6 @@ struct mlx5_vf_context {
 struct mlx5_core_sriov {
 	struct mlx5_vf_context	*vfs_ctx;
 	int			num_vfs;
-	int			enabled_vfs;
 	u16			max_vfs;
 };
 
@@ -546,9 +544,7 @@ struct mlx5_priv {
 	struct dentry	       *cmdif_debugfs;
 	/* end: qp staff */
 
-	/* start: mkey staff */
-	struct mlx5_mkey_table	mkey_table;
-	/* end: mkey staff */
+	struct xarray		mkey_table;
 
 	/* start: alloc staff */
 	/* protect buffer alocation according to numa node */
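The mkey table drops its dedicated rwlock-plus-radix-tree wrapper for a plain XArray, which carries its own lock. The cq.c hunk at the top of this page shows the resulting lookup pattern; a minimal standalone sketch, with an illustrative function name:

/* Sketch: mkey lookup against the XArray-backed table. */
static struct mlx5_core_mkey *example_mkey_lookup(struct mlx5_core_dev *dev,
						  u32 key)
{
	/* xa_load() is safe on its own; callers that dereference the
	 * mkey take xa_lock() around the load and the use, as the
	 * signature-error path in cq.c above does. */
	return xa_load(&dev->priv.mkey_table, mlx5_base_mkey(key));
}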
@@ -575,7 +571,6 @@ struct mlx5_priv {
 	struct mlx5_core_sriov	sriov;
 	struct mlx5_lag		*lag;
 	struct mlx5_devcom	*devcom;
-	unsigned long		pci_dev_data;
 	struct mlx5_core_roce	roce;
 	struct mlx5_fc_stats	fc_stats;
 	struct mlx5_rl_table	rl_table;
@@ -654,6 +649,7 @@ struct mlx5_vxlan;
 
 struct mlx5_core_dev {
 	struct device *device;
+	enum mlx5_coredev_type coredev_type;
 	struct pci_dev	       *pdev;
 	/* sync pci state */
 	struct mutex		pci_status_mutex;
@@ -1047,6 +1043,8 @@ int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
 int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
 int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
 
 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
 
@@ -1087,9 +1085,9 @@ enum {
 	MLX5_PCI_DEV_IS_VF		= 1 << 0,
 };
 
-static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+static inline bool mlx5_core_is_pf(struct mlx5_core_dev *dev)
 {
-	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+	return dev->coredev_type == MLX5_COREDEV_PF;
 }
 
 static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
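mlx5_eq_notifier_register()/unregister() above expose per-event-type EQE subscription outside mlx5_core. A hedged sketch, assuming the struct mlx5_nb wrapper and MLX5_NB_INIT helper from include/linux/mlx5/eq.h; the handler and names are illustrative:

/* Sketch: subscribe to CQ-error EQEs with the newly exported helpers. */
static int example_cq_err_notifier(struct notifier_block *nb,
				   unsigned long type, void *data)
{
	struct mlx5_eqe *eqe = data;	/* raw EQE for this event type */

	pr_debug("got EQE type %d\n", eqe->type);
	return NOTIFY_OK;
}

static struct mlx5_nb example_nb;

static void example_subscribe(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&example_nb, example_cq_err_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &example_nb);
}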
@@ -15,7 +15,7 @@ struct mlx5_core_dev;
 struct mlx5_eq_param {
 	u8             irq_index;
 	int            nent;
-	u64            mask;
+	u64            mask[4];
 };
 
 struct mlx5_eq *
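The EQ event mask grows from 64 to 256 bits here, matching MLX5_EVENT_TYPE_MAX = 0x100 above and the widened event_bitmask[4][0x40] in the create_eq layout further down. Setting a bit now indexes into the array; a small hedged sketch with an illustrative helper name:

/* Sketch: set one event type in the 256-bit EQ mask. */
static void example_mask_event(struct mlx5_eq_param *param, u8 event_type)
{
	param->mask[event_type / 64] |= 1ull << (event_type % 64);
}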
@@ -12,9 +12,9 @@
 #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
 
 enum {
-	SRIOV_NONE,
-	SRIOV_LEGACY,
-	SRIOV_OFFLOADS
+	MLX5_ESWITCH_NONE,
+	MLX5_ESWITCH_LEGACY,
+	MLX5_ESWITCH_OFFLOADS
 };
 
 enum {
@@ -46,6 +46,8 @@ struct mlx5_eswitch_rep {
 	u16		       vport;
 	u8		       hw_id[ETH_ALEN];
 	u16		       vlan;
+	/* Only IB rep is using vport_index */
+	u16		       vport_index;
 	u32		       vlan_refcount;
 };
 
@@ -67,11 +69,28 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
 #ifdef CONFIG_MLX5_ESWITCH
 enum devlink_eswitch_encap_mode
 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
+
+bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
+u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+					      u16 vport_num);
 #else /* CONFIG_MLX5_ESWITCH */
 static inline enum devlink_eswitch_encap_mode
 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
 {
 	return DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
+
+static inline bool
+mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
+{
+	return false;
+};
+
+static inline u32
+mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+					  int vport_num)
+{
+	return 0;
+};
 #endif /* CONFIG_MLX5_ESWITCH */
 #endif
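The metadata helpers above pair with the metadata_reg_c_* match fields added to fte_match_set_misc2 below: in offloads mode the source vport can be encoded into reg_c_0 and matched on, instead of matching the vport field directly. A hedged sketch of building such a match; it assumes the caller sets MLX5_MATCH_MISC_PARAMETERS_2 in match_criteria_enable, and the all-ones criteria mask is illustrative:

/* Sketch: match a flow on the vport metadata carried in reg_c_0. */
static void example_match_vport_metadata(struct mlx5_flow_spec *spec,
					 struct mlx5_eswitch *esw,
					 u16 vport_num)
{
	void *misc2;

	misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			     misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

	misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			     misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0xffffffff);
}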
@@ -88,10 +88,21 @@ struct mlx5_flow_group;
 struct mlx5_flow_namespace;
 struct mlx5_flow_handle;
 
+enum {
+	FLOW_CONTEXT_HAS_TAG = BIT(0),
+};
+
+struct mlx5_flow_context {
+	u32 flags;
+	u32 flow_tag;
+	u32 flow_source;
+};
+
 struct mlx5_flow_spec {
 	u8   match_criteria_enable;
 	u32  match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
 	u32  match_value[MLX5_ST_SZ_DW(fte_match_param)];
+	struct mlx5_flow_context flow_context;
 };
 
 enum {
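flow_tag thus moves out of mlx5_flow_act (the next hunks drop FLOW_ACT_HAS_TAG and the .flow_tag initializer) and into a per-spec mlx5_flow_context, which also carries the new flow_source hint. A hedged before/after sketch; the flow_source value reuses the ifc enum added later in this diff:

/* Sketch: tagging a flow after flow_tag moved into mlx5_flow_spec. */
static void example_tag_flow(struct mlx5_flow_spec *spec, u32 tag)
{
	/* Before: flow_act.flow_tag = tag (with FLOW_ACT_HAS_TAG). */
	spec->flow_context.flow_tag = tag;
	spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;

	/* Optionally hint where the packet enters from. */
	spec->flow_context.flow_source =
		MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}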
@@ -173,13 +184,11 @@ struct mlx5_fs_vlan {
 #define MLX5_FS_VLAN_DEPTH	2
 
 enum {
-	FLOW_ACT_HAS_TAG   = BIT(0),
-	FLOW_ACT_NO_APPEND = BIT(1),
+	FLOW_ACT_NO_APPEND = BIT(0),
 };
 
 struct mlx5_flow_act {
 	u32 action;
-	u32 flow_tag;
 	u32 reformat_id;
 	u32 modify_id;
 	uintptr_t esp_id;
@@ -190,7 +199,6 @@ struct mlx5_flow_act {
 
 #define MLX5_DECLARE_FLOW_ACT(name) \
 	struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
-				      .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, \
 				      .reformat_id = 0, \
 				      .modify_id = 0, \
 				      .flags = 0, }
@@ -91,6 +91,20 @@ enum {
 
 enum {
 	MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
+	MLX5_OBJ_TYPE_MKEY = 0xff01,
+	MLX5_OBJ_TYPE_QP = 0xff02,
+	MLX5_OBJ_TYPE_PSV = 0xff03,
+	MLX5_OBJ_TYPE_RMP = 0xff04,
+	MLX5_OBJ_TYPE_XRC_SRQ = 0xff05,
+	MLX5_OBJ_TYPE_RQ = 0xff06,
+	MLX5_OBJ_TYPE_SQ = 0xff07,
+	MLX5_OBJ_TYPE_TIR = 0xff08,
+	MLX5_OBJ_TYPE_TIS = 0xff09,
+	MLX5_OBJ_TYPE_DCT = 0xff0a,
+	MLX5_OBJ_TYPE_XRQ = 0xff0b,
+	MLX5_OBJ_TYPE_RQT = 0xff0e,
+	MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f,
+	MLX5_OBJ_TYPE_CQ = 0xff10,
 };
 
 enum {
@@ -106,6 +120,9 @@ enum {
 	MLX5_CMD_OP_QUERY_ISSI                    = 0x10a,
 	MLX5_CMD_OP_SET_ISSI                      = 0x10b,
 	MLX5_CMD_OP_SET_DRIVER_VERSION            = 0x10d,
+	MLX5_CMD_OP_QUERY_SF_PARTITION            = 0x111,
+	MLX5_CMD_OP_ALLOC_SF                      = 0x113,
+	MLX5_CMD_OP_DEALLOC_SF                    = 0x114,
 	MLX5_CMD_OP_CREATE_MKEY                   = 0x200,
 	MLX5_CMD_OP_QUERY_MKEY                    = 0x201,
 	MLX5_CMD_OP_DESTROY_MKEY                  = 0x202,
@@ -528,7 +545,21 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
 
 	struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
 
-	u8         reserved_at_80[0x100];
+	u8         metadata_reg_c_7[0x20];
+
+	u8         metadata_reg_c_6[0x20];
+
+	u8         metadata_reg_c_5[0x20];
+
+	u8         metadata_reg_c_4[0x20];
+
+	u8         metadata_reg_c_3[0x20];
+
+	u8         metadata_reg_c_2[0x20];
+
+	u8         metadata_reg_c_1[0x20];
+
+	u8         metadata_reg_c_0[0x20];
 
 	u8         metadata_reg_a[0x20];
 
@@ -636,8 +667,22 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
 	u8         reserved_at_e00[0x7200];
 };
 
+enum {
+	MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
+	MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
+	MLX5_FDB_TO_VPORT_REG_C_2 = 0x04,
+	MLX5_FDB_TO_VPORT_REG_C_3 = 0x08,
+	MLX5_FDB_TO_VPORT_REG_C_4 = 0x10,
+	MLX5_FDB_TO_VPORT_REG_C_5 = 0x20,
+	MLX5_FDB_TO_VPORT_REG_C_6 = 0x40,
+	MLX5_FDB_TO_VPORT_REG_C_7 = 0x80,
+};
+
 struct mlx5_ifc_flow_table_eswitch_cap_bits {
-	u8      reserved_at_0[0x1a];
+	u8      fdb_to_vport_reg_c_id[0x8];
+	u8      reserved_at_8[0xf];
+	u8      flow_source[0x1];
+	u8      reserved_at_18[0x2];
 	u8      multi_fdb_encap[0x1];
 	u8      reserved_at_1b[0x1];
 	u8      fdb_multi_path_to_table[0x1];
@@ -665,7 +710,9 @@ struct mlx5_ifc_e_switch_cap_bits {
 	u8         vport_svlan_insert[0x1];
 	u8         vport_cvlan_insert_if_not_exist[0x1];
 	u8         vport_cvlan_insert_overwrite[0x1];
-	u8         reserved_at_5[0x14];
+	u8         reserved_at_5[0x3];
+	u8         esw_uplink_ingress_acl[0x1];
+	u8         reserved_at_9[0x10];
 	u8         esw_functions_changed[0x1];
 	u8         reserved_at_1a[0x1];
 	u8         ecpf_vport_exists[0x1];
@@ -683,7 +730,11 @@ struct mlx5_ifc_e_switch_cap_bits {
 	u8         reserved_2b[0x6];
 	u8         max_encap_header_size[0xa];
 
-	u8         reserved_40[0x7c0];
+	u8         reserved_at_40[0xb];
+	u8         log_max_esw_sf[0x5];
+	u8         esw_sf_base_id[0x10];
+
+	u8         reserved_at_60[0x7a0];
 
 };
 
@@ -823,6 +874,12 @@ struct mlx5_ifc_device_mem_cap_bits {
 	u8         reserved_at_180[0x680];
 };
 
+struct mlx5_ifc_device_event_cap_bits {
+	u8         user_affiliated_events[4][0x40];
+
+	u8         user_unaffiliated_events[4][0x40];
+};
+
 enum {
 	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE     = 0x0,
 	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES    = 0x2,
@@ -980,7 +1037,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 
 	u8         log_max_srq_sz[0x8];
 	u8         log_max_qp_sz[0x8];
-	u8         reserved_at_90[0x8];
+	u8         event_cap[0x1];
+	u8         reserved_at_91[0x7];
 	u8         prio_tag_required[0x1];
 	u8         reserved_at_99[0x2];
 	u8         log_max_qp[0x5];
@@ -1300,13 +1358,24 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_640[0x10];
 	u8         num_q_monitor_counters[0x10];
 
-	u8         reserved_at_660[0x40];
+	u8         reserved_at_660[0x20];
+
+	u8         sf[0x1];
+	u8         sf_set_partition[0x1];
+	u8         reserved_at_682[0x1];
+	u8         log_max_sf[0x5];
+	u8         reserved_at_688[0x8];
+	u8         log_min_sf_size[0x8];
+	u8         max_num_sf_partitions[0x8];
 
 	u8         uctx_cap[0x20];
 
 	u8         reserved_at_6c0[0x4];
 	u8         flex_parser_id_geneve_tlv_option_0[0x4];
-	u8         reserved_at_6c8[0x138];
+	u8         reserved_at_6c8[0x28];
+	u8         sf_base_id[0x10];
+
+	u8         reserved_at_700[0x100];
 };
 
 enum mlx5_flow_destination_type {
@@ -2555,6 +2624,12 @@ enum {
 	MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2   = 0x800,
 };
 
+enum {
+	MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT   = 0x0,
+	MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK      = 0x1,
+	MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2,
+};
+
 struct mlx5_ifc_vlan_bits {
 	u8         ethtype[0x10];
 	u8         prio[0x3];
@@ -2574,7 +2649,9 @@ struct mlx5_ifc_flow_context_bits {
 	u8         action[0x10];
 
 	u8         extended_destination[0x1];
-	u8         reserved_at_80[0x7];
+	u8         reserved_at_81[0x1];
+	u8         flow_source[0x2];
+	u8         reserved_at_84[0x4];
 	u8         destination_list_size[0x18];
 
 	u8         reserved_at_a0[0x8];
@@ -3099,12 +3176,14 @@ struct mlx5_ifc_hca_vport_context_bits {
 };
 
 struct mlx5_ifc_esw_vport_context_bits {
-	u8         reserved_at_0[0x3];
+	u8         fdb_to_vport_reg_c[0x1];
+	u8         reserved_at_1[0x2];
 	u8         vport_svlan_strip[0x1];
 	u8         vport_cvlan_strip[0x1];
 	u8         vport_svlan_insert[0x1];
 	u8         vport_cvlan_insert[0x2];
-	u8         reserved_at_8[0x18];
+	u8         fdb_to_vport_reg_c_id[0x8];
+	u8         reserved_at_10[0x10];
 
 	u8         reserved_at_20[0x20];
 
@@ -4985,7 +5064,8 @@ struct mlx5_ifc_modify_esw_vport_context_out_bits {
 };
 
 struct mlx5_ifc_esw_vport_context_fields_select_bits {
-	u8         reserved_at_0[0x1c];
+	u8         reserved_at_0[0x1b];
+	u8         fdb_to_vport_reg_c_id[0x1];
 	u8         vport_cvlan_insert[0x1];
 	u8         vport_svlan_insert[0x1];
 	u8         vport_cvlan_strip[0x1];
@@ -5182,6 +5262,7 @@ enum {
 	MLX5_ACTION_IN_FIELD_OUT_DIPV4         = 0x16,
 	MLX5_ACTION_IN_FIELD_OUT_FIRST_VID     = 0x17,
 	MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47,
+	MLX5_ACTION_IN_FIELD_METADATA_REG_C_0  = 0x51,
 };
 
 struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -7362,9 +7443,9 @@ struct mlx5_ifc_create_eq_in_bits {
 
 	u8         reserved_at_280[0x40];
 
-	u8         event_bitmask[0x40];
+	u8         event_bitmask[4][0x40];
 
-	u8         reserved_at_300[0x580];
+	u8         reserved_at_3c0[0x4c0];
 
 	u8         pas[0][0x40];
 };
@@ -8482,7 +8563,7 @@ struct mlx5_ifc_mcam_access_reg_bits {
 	u8         mcda[0x1];
 	u8         mcc[0x1];
 	u8         mcqi[0x1];
-	u8         reserved_at_1f[0x1];
+	u8         mcqs[0x1];
 
 	u8         regs_95_to_87[0x9];
 	u8         mpegc[0x1];
@@ -8974,6 +9055,24 @@ struct mlx5_ifc_mtppse_reg_bits {
 	u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_mcqs_reg_bits {
+	u8         last_index_flag[0x1];
+	u8         reserved_at_1[0x7];
+	u8         fw_device[0x8];
+	u8         component_index[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         identifier[0x10];
+
+	u8         reserved_at_40[0x17];
+	u8         component_status[0x5];
+	u8         component_update_state[0x4];
+
+	u8         last_update_state_changer_type[0x4];
+	u8         last_update_state_changer_host_id[0x4];
+	u8         reserved_at_68[0x18];
+};
+
 struct mlx5_ifc_mcqi_cap_bits {
 	u8         supported_info_bitmask[0x20];
 
@@ -8994,6 +9093,43 @@ struct mlx5_ifc_mcqi_cap_bits {
 	u8         reserved_at_86[0x1a];
 };
 
+struct mlx5_ifc_mcqi_version_bits {
+	u8         reserved_at_0[0x2];
+	u8         build_time_valid[0x1];
+	u8         user_defined_time_valid[0x1];
+	u8         reserved_at_4[0x14];
+	u8         version_string_length[0x8];
+
+	u8         version[0x20];
+
+	u8         build_time[0x40];
+
+	u8         user_defined_time[0x40];
+
+	u8         build_tool_version[0x20];
+
+	u8         reserved_at_e0[0x20];
+
+	u8         version_string[92][0x8];
+};
+
+struct mlx5_ifc_mcqi_activation_method_bits {
+	u8         pending_server_ac_power_cycle[0x1];
+	u8         pending_server_dc_power_cycle[0x1];
+	u8         pending_server_reboot[0x1];
+	u8         pending_fw_reset[0x1];
+	u8         auto_activate[0x1];
+	u8         all_hosts_sync[0x1];
+	u8         device_hw_reset[0x1];
+	u8         reserved_at_7[0x19];
+};
+
+union mlx5_ifc_mcqi_reg_data_bits {
+	struct mlx5_ifc_mcqi_cap_bits               mcqi_caps;
+	struct mlx5_ifc_mcqi_version_bits           mcqi_version;
+	struct mlx5_ifc_mcqi_activation_method_bits mcqi_activation_mathod;
+};
+
 struct mlx5_ifc_mcqi_reg_bits {
 	u8         read_pending_component[0x1];
 	u8         reserved_at_1[0xf];
@@ -9011,7 +9147,7 @@ struct mlx5_ifc_mcqi_reg_bits {
 	u8         reserved_at_a0[0x10];
 	u8         data_size[0x10];
 
-	u8         data[0][0x20];
+	union mlx5_ifc_mcqi_reg_data_bits data[0];
 };
 
 struct mlx5_ifc_mcc_reg_bits {
@@ -9708,7 +9844,8 @@ struct mlx5_ifc_mtrc_ctrl_bits {
 
 struct mlx5_ifc_host_params_context_bits {
 	u8         host_number[0x8];
-	u8         reserved_at_8[0x8];
+	u8         reserved_at_8[0x7];
+	u8         host_pf_disabled[0x1];
 	u8         host_num_of_vfs[0x10];
 
 	u8         host_total_vfs[0x10];
@@ -9744,6 +9881,88 @@ struct mlx5_ifc_query_esw_functions_out_bits {
 	struct mlx5_ifc_host_params_context_bits host_params_context;
 
 	u8         reserved_at_280[0x180];
+	u8         host_sf_enable[0][0x40];
+};
+
+struct mlx5_ifc_sf_partition_bits {
+	u8         reserved_at_0[0x10];
+	u8         log_num_sf[0x8];
+	u8         log_sf_bar_size[0x8];
+};
+
+struct mlx5_ifc_query_sf_partitions_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x18];
+	u8         num_sf_partitions[0x8];
+
+	u8         reserved_at_60[0x20];
+
+	struct mlx5_ifc_sf_partition_bits sf_partition[0];
+};
+
+struct mlx5_ifc_query_sf_partitions_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_sf_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_sf_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         function_id[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_sf_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_alloc_sf_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         function_id[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_affiliated_event_header_bits {
+	u8         reserved_at_0[0x10];
+	u8         obj_type[0x10];
+
+	u8         obj_id[0x20];
 };
 
 #endif /* MLX5_IFC_H */
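The alloc_sf/dealloc_sf layouts above follow the standard mlx5_ifc command pattern. A hedged sketch of issuing ALLOC_SF through them; the wrapper name is illustrative and the in-kernel caller is not part of this diff:

/* Sketch: allocate a sub-function using the new ifc layouts. */
static int example_alloc_sf(struct mlx5_core_dev *dev, u16 function_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_sf_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_sf_in)] = {};

	MLX5_SET(alloc_sf_in, in, opcode, MLX5_CMD_OP_ALLOC_SF);
	MLX5_SET(alloc_sf_in, in, function_id, function_id);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}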
@@ -552,11 +552,6 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u
 	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
 }
 
-static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
-{
-	return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
-}
-
 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
 			 struct mlx5_core_dct *qp,
 			 u32 *in, int inlen,
@@ -58,6 +58,7 @@ enum {
 	MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
 };
 
+/* Vport number for each function must keep unchanged */
 enum {
 	MLX5_VPORT_PF			= 0x0,
 	MLX5_VPORT_FIRST_VF		= 0x1,
@@ -69,7 +70,8 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
 				  u16 vport, u8 other_vport, u8 state);
 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
-				     u16 vport, u8 *addr);
+				     u16 vport, bool other, u8 *addr);
+int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
 int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
 				    u16 vport, u8 *min_inline);
 void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);