mlx5-fixes-2021-02-11
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmAl7OgACgkQSD+KveBX
+j72XQgAkwXPdkVm36mUP4vnjZhxCIOG/8KHp50iVJAFKRBukEGgSV3yGt8Srjaj
fCyqevg/ncOq61mmo6KE2v16/oI5Mh7fzJ+Q0+MXdYp5VVgtyUIil2dnfgPWnYfm
Ahsc4TaRfU3YUBnMz8MhgAhmRih24+dyW+YtOj3pzwxNRjTMxseLI9S1tXu9/a7P
BLWBka7EZroI+mArNtGXqlN8bPt+IigrysDixvEEQvNoUxSbEwjoPjwHLrukWjwB
FE530QXjvzsJH8gFeH0QnG432w/ZYcDlBqmlr4qRwR7k89/ClH0GjA2fqDXejX5u
xI/bx8dH5YgPKjpNPdsfhupe902dag==
=Z+Z+
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2021-02-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-fixes-2021-02-11

Saeed Mahameed says:

====================
mlx5 fixes 2021-02-11

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.

For -stable v5.4:
 ('net/mlx5e: E-switch, Fix rate calculation for overflow')

For -stable v5.10:
 ('net/mlx5: Disallow RoCE on multi port slave device')
 ('net/mlx5: Disable devlink reload for multi port slave device')
 ('net/mlx5e: Don't change interrupt moderation params when DIM is enabled')
 ('net/mlx5e: Replace synchronize_rcu with synchronize_net')
 ('net/mlx5e: Enable XDP for Connect-X IPsec capable devices')
 ('net/mlx5e: kTLS, Use refcounts to free kTLS RX priv context')
 ('net/mlx5e: Check tunnel offload is required before setting SWP')
 ('net/mlx5: Fix health error state handling')
 ('net/mlx5: Disable devlink reload for lag devices')
 ('net/mlx5e: CT: manage the lifetime of the ct entry object')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -128,6 +128,11 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 
+	if (mlx5_lag_is_active(dev)) {
+		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+		return -EOPNOTSUPP;
+	}
+
 	switch (action) {
 	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
 		mlx5_unload_one(dev, false);
@@ -273,6 +278,10 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
 		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
 		return -EOPNOTSUPP;
 	}
+	if (mlx5_core_is_mp_slave(dev) || mlx5_lag_is_active(dev)) {
+		NL_SET_ERR_MSG_MOD(extack, "Multi port slave/Lag device can't configure RoCE");
+		return -EOPNOTSUPP;
+	}
 
 	return 0;
 }
@@ -12,6 +12,7 @@
 #include <net/flow_offload.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <linux/workqueue.h>
+#include <linux/refcount.h>
 #include <linux/xarray.h>
 
 #include "lib/fs_chains.h"
@@ -51,11 +52,11 @@ struct mlx5_tc_ct_priv {
 	struct mlx5_flow_table *ct_nat;
 	struct mlx5_flow_table *post_ct;
 	struct mutex control_lock; /* guards parallel adds/dels */
-	struct mutex shared_counter_lock;
 	struct mapping_ctx *zone_mapping;
 	struct mapping_ctx *labels_mapping;
 	enum mlx5_flow_namespace_type ns_type;
 	struct mlx5_fs_chains *chains;
+	spinlock_t ht_lock; /* protects ft entries */
 };
 
 struct mlx5_ct_flow {
@@ -124,6 +125,10 @@ struct mlx5_ct_counter {
 	bool is_shared;
 };
 
+enum {
+	MLX5_CT_ENTRY_FLAG_VALID,
+};
+
 struct mlx5_ct_entry {
 	struct rhash_head node;
 	struct rhash_head tuple_node;
@@ -134,6 +139,12 @@ struct mlx5_ct_entry {
 	struct mlx5_ct_tuple tuple;
 	struct mlx5_ct_tuple tuple_nat;
 	struct mlx5_ct_zone_rule zone_rules[2];
+
+	struct mlx5_tc_ct_priv *ct_priv;
+	struct work_struct work;
+
+	refcount_t refcnt;
+	unsigned long flags;
 };
 
 static const struct rhashtable_params cts_ht_params = {
@@ -740,6 +751,87 @@ err_attr:
 	return err;
 }
 
+static bool
+mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
+{
+	return test_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
+}
+
+static struct mlx5_ct_entry *
+mlx5_tc_ct_entry_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_tuple *tuple)
+{
+	struct mlx5_ct_entry *entry;
+
+	entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, tuple,
+				       tuples_ht_params);
+	if (entry && mlx5_tc_ct_entry_valid(entry) &&
+	    refcount_inc_not_zero(&entry->refcnt)) {
+		return entry;
+	} else if (!entry) {
+		entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
+					       tuple, tuples_nat_ht_params);
+		if (entry && mlx5_tc_ct_entry_valid(entry) &&
+		    refcount_inc_not_zero(&entry->refcnt))
+			return entry;
+	}
+
+	return entry ? ERR_PTR(-EINVAL) : NULL;
+}
+
+static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
+{
+	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
+
+	rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+			       &entry->tuple_nat_node,
+			       tuples_nat_ht_params);
+	rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+			       tuples_ht_params);
+}
+
+static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
+{
+	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
+
+	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+
+	spin_lock_bh(&ct_priv->ht_lock);
+	mlx5_tc_ct_entry_remove_from_tuples(entry);
+	spin_unlock_bh(&ct_priv->ht_lock);
+
+	mlx5_tc_ct_counter_put(ct_priv, entry);
+	kfree(entry);
+}
+
+static void
+mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
+{
+	if (!refcount_dec_and_test(&entry->refcnt))
+		return;
+
+	mlx5_tc_ct_entry_del(entry);
+}
+
+static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
+{
+	struct mlx5_ct_entry *entry = container_of(work, struct mlx5_ct_entry, work);
+
+	mlx5_tc_ct_entry_del(entry);
+}
+
+static void
+__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
+{
+	struct mlx5e_priv *priv;
+
+	if (!refcount_dec_and_test(&entry->refcnt))
+		return;
+
+	priv = netdev_priv(entry->ct_priv->netdev);
+	INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
+	queue_work(priv->wq, &entry->work);
+}
+
 static struct mlx5_ct_counter *
 mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
 {
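The get/put pair added above follows the usual kernel pattern for refcounted objects looked up under a lock: a lookup only succeeds if the entry is marked valid and its refcount can be raised from a nonzero value (refcount_inc_not_zero()), so a concurrent final put can never free an entry out from under a reader. A minimal sketch of a caller, mirroring what mlx5e_tc_ct_restore_flow() does later in this series (local names are hypothetical):

	struct mlx5_ct_entry *entry;

	spin_lock(&ct_priv->ht_lock);
	entry = mlx5_tc_ct_entry_get(ct_priv, &tuple); /* NULL, valid entry, or ERR_PTR(-EINVAL) */
	spin_unlock(&ct_priv->ht_lock);

	if (IS_ERR_OR_NULL(entry))
		return false;

	/* safe to dereference: we hold a reference now */
	tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);

	__mlx5_tc_ct_entry_put(entry); /* callable from atomic context: defers freeing to a workqueue */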
@@ -792,16 +884,26 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
 	}
 
 	/* Use the same counter as the reverse direction */
-	mutex_lock(&ct_priv->shared_counter_lock);
-	rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
-					   tuples_ht_params);
-	if (rev_entry) {
-		if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
-			mutex_unlock(&ct_priv->shared_counter_lock);
-			return rev_entry->counter;
-		}
+	spin_lock_bh(&ct_priv->ht_lock);
+	rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
+
+	if (IS_ERR(rev_entry)) {
+		spin_unlock_bh(&ct_priv->ht_lock);
+		goto create_counter;
+	}
+
+	if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
+		ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
+		shared_counter = rev_entry->counter;
+		spin_unlock_bh(&ct_priv->ht_lock);
+
+		mlx5_tc_ct_entry_put(rev_entry);
+		return shared_counter;
 	}
-	mutex_unlock(&ct_priv->shared_counter_lock);
+
+	spin_unlock_bh(&ct_priv->ht_lock);
+
+create_counter:
 
 	shared_counter = mlx5_tc_ct_counter_create(ct_priv);
 	if (IS_ERR(shared_counter)) {
@@ -866,10 +968,14 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 	if (!meta_action)
 		return -EOPNOTSUPP;
 
-	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
-				       cts_ht_params);
-	if (entry)
-		return 0;
+	spin_lock_bh(&ct_priv->ht_lock);
+	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+	if (entry && refcount_inc_not_zero(&entry->refcnt)) {
+		spin_unlock_bh(&ct_priv->ht_lock);
+		mlx5_tc_ct_entry_put(entry);
+		return -EEXIST;
+	}
+	spin_unlock_bh(&ct_priv->ht_lock);
 
 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
@@ -878,6 +984,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 	entry->tuple.zone = ft->zone;
 	entry->cookie = flow->cookie;
 	entry->restore_cookie = meta_action->ct_metadata.cookie;
+	refcount_set(&entry->refcnt, 2);
+	entry->ct_priv = ct_priv;
 
 	err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule);
 	if (err)
@@ -888,35 +996,40 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 	if (err)
 		goto err_set;
 
-	err = rhashtable_insert_fast(&ct_priv->ct_tuples_ht,
+	spin_lock_bh(&ct_priv->ht_lock);
+
+	err = rhashtable_lookup_insert_fast(&ft->ct_entries_ht, &entry->node,
+					    cts_ht_params);
+	if (err)
+		goto err_entries;
+
+	err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
 				     &entry->tuple_node,
 				     tuples_ht_params);
 	if (err)
 		goto err_tuple;
 
 	if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
-		err = rhashtable_insert_fast(&ct_priv->ct_tuples_nat_ht,
+		err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
 					     &entry->tuple_nat_node,
 					     tuples_nat_ht_params);
 		if (err)
 			goto err_tuple_nat;
 	}
+	spin_unlock_bh(&ct_priv->ht_lock);
 
 	err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry,
 					 ft->zone_restore_id);
 	if (err)
 		goto err_rules;
 
-	err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node,
-				     cts_ht_params);
-	if (err)
-		goto err_insert;
+	set_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
+	mlx5_tc_ct_entry_put(entry); /* this function reference */
 
 	return 0;
 
-err_insert:
-	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
 err_rules:
+	spin_lock_bh(&ct_priv->ht_lock);
 	if (mlx5_tc_ct_entry_has_nat(entry))
 		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
 				       &entry->tuple_nat_node, tuples_nat_ht_params);
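Switching from rhashtable_insert_fast() to rhashtable_lookup_insert_fast() is what closes the insert race: the latter atomically fails with -EEXIST when an object with the same key is already present, so the old lookup-then-insert window disappears. A minimal illustration of the idiom (table and object names are hypothetical):

	/* insert-if-absent; no separate lookup needed */
	err = rhashtable_lookup_insert_fast(&ht, &obj->node, ht_params);
	if (err == -EEXIST) {
		/* another CPU inserted an object with the same key first */
	} else if (err) {
		/* allocation or rehash failure */
	}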
@@ -925,47 +1038,43 @@ err_tuple_nat:
 			       &entry->tuple_node,
 			       tuples_ht_params);
 err_tuple:
+	rhashtable_remove_fast(&ft->ct_entries_ht,
+			       &entry->node,
+			       cts_ht_params);
+err_entries:
+	spin_unlock_bh(&ct_priv->ht_lock);
 err_set:
 	kfree(entry);
-	netdev_warn(ct_priv->netdev,
-		    "Failed to offload ct entry, err: %d\n", err);
+	if (err != -EEXIST)
+		netdev_warn(ct_priv->netdev, "Failed to offload ct entry, err: %d\n", err);
 	return err;
 }
 
-static void
-mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
-			struct mlx5_ct_entry *entry)
-{
-	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
-	mutex_lock(&ct_priv->shared_counter_lock);
-	if (mlx5_tc_ct_entry_has_nat(entry))
-		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
-				       &entry->tuple_nat_node,
-				       tuples_nat_ht_params);
-	rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
-			       tuples_ht_params);
-	mutex_unlock(&ct_priv->shared_counter_lock);
-	mlx5_tc_ct_counter_put(ct_priv, entry);
-
-}
-
 static int
 mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
 				  struct flow_cls_offload *flow)
 {
+	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
 	unsigned long cookie = flow->cookie;
 	struct mlx5_ct_entry *entry;
 
-	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
-				       cts_ht_params);
-	if (!entry)
+	spin_lock_bh(&ct_priv->ht_lock);
+	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+	if (!entry) {
+		spin_unlock_bh(&ct_priv->ht_lock);
 		return -ENOENT;
+	}
+
+	if (!mlx5_tc_ct_entry_valid(entry)) {
+		spin_unlock_bh(&ct_priv->ht_lock);
+		return -EINVAL;
+	}
 
-	mlx5_tc_ct_del_ft_entry(ft->ct_priv, entry);
-	WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
-				       &entry->node,
-				       cts_ht_params));
-	kfree(entry);
+	rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
+	mlx5_tc_ct_entry_remove_from_tuples(entry);
+	spin_unlock_bh(&ct_priv->ht_lock);
+
+	mlx5_tc_ct_entry_put(entry);
 
 	return 0;
 }
@@ -974,19 +1083,30 @@ static int
 mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
 				    struct flow_cls_offload *f)
 {
+	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
 	unsigned long cookie = f->cookie;
 	struct mlx5_ct_entry *entry;
 	u64 lastuse, packets, bytes;
 
-	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
-				       cts_ht_params);
-	if (!entry)
+	spin_lock_bh(&ct_priv->ht_lock);
+	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+	if (!entry) {
+		spin_unlock_bh(&ct_priv->ht_lock);
 		return -ENOENT;
+	}
+
+	if (!mlx5_tc_ct_entry_valid(entry) || !refcount_inc_not_zero(&entry->refcnt)) {
+		spin_unlock_bh(&ct_priv->ht_lock);
+		return -EINVAL;
+	}
+
+	spin_unlock_bh(&ct_priv->ht_lock);
 
 	mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
 			  FLOW_ACTION_HW_STATS_DELAYED);
 
+	mlx5_tc_ct_entry_put(entry);
 	return 0;
 }
@@ -1478,11 +1598,9 @@ err_mapping:
 static void
 mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
 {
-	struct mlx5_tc_ct_priv *ct_priv = arg;
 	struct mlx5_ct_entry *entry = ptr;
 
-	mlx5_tc_ct_del_ft_entry(ct_priv, entry);
-	kfree(entry);
+	mlx5_tc_ct_entry_put(entry);
 }
 
 static void
@@ -1960,6 +2078,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 		goto err_mapping_labels;
 	}
 
+	spin_lock_init(&ct_priv->ht_lock);
 	ct_priv->ns_type = ns_type;
 	ct_priv->chains = chains;
 	ct_priv->netdev = priv->netdev;
@@ -1994,7 +2113,6 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 
 	idr_init(&ct_priv->fte_ids);
 	mutex_init(&ct_priv->control_lock);
-	mutex_init(&ct_priv->shared_counter_lock);
 	rhashtable_init(&ct_priv->zone_ht, &zone_params);
 	rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
 	rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
@@ -2037,7 +2155,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
 	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
 	rhashtable_destroy(&ct_priv->zone_ht);
 	mutex_destroy(&ct_priv->control_lock);
-	mutex_destroy(&ct_priv->shared_counter_lock);
 	idr_destroy(&ct_priv->fte_ids);
 	kfree(ct_priv);
 }
@@ -2059,14 +2176,22 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
 	if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
 		return false;
 
-	entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple,
-				       tuples_ht_params);
-	if (!entry)
-		entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
-					       &tuple, tuples_nat_ht_params);
-	if (!entry)
-		return false;
+	spin_lock(&ct_priv->ht_lock);
+
+	entry = mlx5_tc_ct_entry_get(ct_priv, &tuple);
+	if (!entry) {
+		spin_unlock(&ct_priv->ht_lock);
+		return false;
+	}
+
+	if (IS_ERR(entry)) {
+		spin_unlock(&ct_priv->ht_lock);
+		return false;
+	}
+	spin_unlock(&ct_priv->ht_lock);
 
 	tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+	__mlx5_tc_ct_entry_put(entry);
 
 	return true;
 }
@@ -83,7 +83,7 @@ static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
 
 	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
 	/* Let other device's napi(s) and XSK wakeups see our new state. */
-	synchronize_rcu();
+	synchronize_net();
 }
 
 static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
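The synchronize_rcu() -> synchronize_net() conversions in this series are a latency optimization for teardown paths that run under the RTNL lock. As of this kernel generation, synchronize_net() is a thin wrapper in net/core/dev.c that picks the expedited RCU grace period whenever RTNL is held:

	void synchronize_net(void)
	{
		might_sleep();
		if (rtnl_is_locked())
			synchronize_rcu_expedited();
		else
			synchronize_rcu();
	}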
@@ -111,7 +111,7 @@ err_free_cparam:
 void mlx5e_close_xsk(struct mlx5e_channel *c)
 {
 	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
-	synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
+	synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */
 
 	mlx5e_close_rq(&c->xskrq);
 	mlx5e_close_cq(&c->xskrq.cq);
@@ -173,7 +173,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
 #endif
 
 #if IS_ENABLED(CONFIG_GENEVE)
-	if (skb->encapsulation)
+	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
 		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
 #endif
 
@@ -57,6 +57,20 @@ struct mlx5e_ktls_offload_context_rx {
 	struct mlx5e_ktls_rx_resync_ctx resync;
 };
 
+static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
+{
+	if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
+		return false;
+
+	kfree(priv_rx);
+	return true;
+}
+
+static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
+{
+	refcount_inc(&priv_rx->resync.refcnt);
+}
+
 static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
 {
 	int err, inlen;
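With the kTLS RX context itself refcounted through these helpers, every outstanding GET_PSV operation holds a reference, and whichever path drops the last one (a completion, a canceled work item, or tls_dev_del itself) frees priv_rx. That is what lets the series delete the wait_for_resync() busy-wait further down: teardown no longer has to poll for in-flight resync operations to drain.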
@@ -326,7 +340,7 @@ static void resync_handle_work(struct work_struct *work)
 	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);
 
 	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
-		refcount_dec(&resync->refcnt);
+		mlx5e_ktls_priv_rx_put(priv_rx);
 		return;
 	}
 
@@ -334,7 +348,7 @@ static void resync_handle_work(struct work_struct *work)
 	sq = &c->async_icosq;
 
 	if (resync_post_get_progress_params(sq, priv_rx))
-		refcount_dec(&resync->refcnt);
+		mlx5e_ktls_priv_rx_put(priv_rx);
 }
 
 static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -377,7 +391,11 @@ unlock:
 	return err;
 }
 
-/* Function is called with elevated refcount, it decreases it. */
+/* Function can be called with the refcount being either elevated or not.
 * It decreases the refcount and may free the kTLS priv context.
 * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was
 * already in flight.
 */
 void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
 					  struct mlx5e_icosq *sq)
 {
@@ -410,7 +428,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
 	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
 	priv_rx->stats->tls_resync_req_end++;
 out:
-	refcount_dec(&resync->refcnt);
+	mlx5e_ktls_priv_rx_put(priv_rx);
 	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
 	kfree(buf);
 }
@@ -431,9 +449,9 @@ static bool resync_queue_get_psv(struct sock *sk)
 		return false;
 
 	resync = &priv_rx->resync;
-	refcount_inc(&resync->refcnt);
+	mlx5e_ktls_priv_rx_get(priv_rx);
 	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
-		refcount_dec(&resync->refcnt);
+		mlx5e_ktls_priv_rx_put(priv_rx);
 
 	return true;
 }
@@ -625,31 +643,6 @@ err_create_key:
 	return err;
 }
 
-/* Elevated refcount on the resync object means there are
- * outstanding operations (uncompleted GET_PSV WQEs) that
- * will read the resync / priv_rx objects once completed.
- * Wait for them to avoid use-after-free.
- */
-static void wait_for_resync(struct net_device *netdev,
-			    struct mlx5e_ktls_rx_resync_ctx *resync)
-{
-#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */
-	unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT);
-	unsigned int refcnt;
-
-	do {
-		refcnt = refcount_read(&resync->refcnt);
-		if (refcnt == 1)
-			return;
-
-		msleep(20);
-	} while (time_before(jiffies, exp_time));
-
-	netdev_warn(netdev,
-		    "Failed waiting for kTLS RX resync refcnt to be released (%u).\n",
-		    refcnt);
-}
-
 void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
 {
 	struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -663,7 +656,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
 	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
 	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
 	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
-	synchronize_rcu(); /* Sync with NAPI */
+	synchronize_net(); /* Sync with NAPI */
 	if (!cancel_work_sync(&priv_rx->rule.work))
 		/* completion is needed, as the priv_rx in the add flow
 		 * is maintained on the wqe info (wi), not on the socket.
@@ -671,8 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
 	wait_for_completion(&priv_rx->add_ctx);
 	resync = &priv_rx->resync;
 	if (cancel_work_sync(&resync->work))
-		refcount_dec(&resync->refcnt);
-	wait_for_resync(netdev, resync);
+		mlx5e_ktls_priv_rx_put(priv_rx);
 
 	priv_rx->stats->tls_del++;
 	if (priv_rx->rule.rule)
@@ -680,5 +672,9 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
 
 	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
 	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
-	kfree(priv_rx);
+	/* priv_rx should normally be freed here, but if there is an outstanding
+	 * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
+	 * processed.
+	 */
+	mlx5e_ktls_priv_rx_put(priv_rx);
 }
@@ -525,7 +525,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 #define MLX5E_MAX_COAL_FRAMES		MLX5_MAX_CQ_COUNT
 
 static void
-mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int tc;
@@ -540,6 +540,17 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
 						coal->tx_coalesce_usecs,
 						coal->tx_max_coalesced_frames);
 		}
+	}
+}
+
+static void
+mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int i;
+
+	for (i = 0; i < priv->channels.num; ++i) {
+		struct mlx5e_channel *c = priv->channels.c[i];
 
 		mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
 					       coal->rx_coalesce_usecs,
@@ -586,21 +597,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 	tx_moder->pkts = coal->tx_max_coalesced_frames;
 	new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
 
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		priv->channels.params = new_channels.params;
-		goto out;
-	}
-	/* we are opened */
-
 	reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
 	reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
 
-	if (!reset_rx && !reset_tx) {
-		mlx5e_set_priv_channels_coalesce(priv, coal);
-		priv->channels.params = new_channels.params;
-		goto out;
-	}
-
 	if (reset_rx) {
 		u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
 					  MLX5E_PFLAG_RX_CQE_BASED_MODER);
@@ -614,6 +613,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 		mlx5e_reset_tx_moderation(&new_channels.params, mode);
 	}
 
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		priv->channels.params = new_channels.params;
+		goto out;
+	}
+
+	if (!reset_rx && !reset_tx) {
+		if (!coal->use_adaptive_rx_coalesce)
+			mlx5e_set_priv_channels_rx_coalesce(priv, coal);
+		if (!coal->use_adaptive_tx_coalesce)
+			mlx5e_set_priv_channels_tx_coalesce(priv, coal);
+		priv->channels.params = new_channels.params;
+		goto out;
+	}
+
 	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 
 out:
@@ -65,6 +65,7 @@
 #include "en/devlink.h"
 #include "lib/mlx5.h"
 #include "en/ptp.h"
+#include "fpga/ipsec.h"
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
@@ -106,7 +107,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
 		return false;
 
-	if (MLX5_IPSEC_DEV(mdev))
+	if (mlx5_fpga_is_ipsec_device(mdev))
 		return false;
 
 	if (params->xdp_prog) {
@@ -914,7 +915,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 {
 	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-	synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+	synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
 }
 
 void mlx5e_close_rq(struct mlx5e_rq *rq)
@@ -1348,7 +1349,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 	struct mlx5_wq_cyc *wq = &sq->wq;
 
 	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-	synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
+	synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
 
 	mlx5e_tx_disable_queue(sq->txq);
 
@@ -1423,7 +1424,7 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
 {
 	clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
-	synchronize_rcu(); /* Sync with NAPI. */
+	synchronize_net(); /* Sync with NAPI. */
 }
 
 void mlx5e_close_icosq(struct mlx5e_icosq *sq)
@@ -1502,7 +1503,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
 	struct mlx5e_channel *c = sq->channel;
 
 	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-	synchronize_rcu(); /* Sync with NAPI. */
+	synchronize_net(); /* Sync with NAPI. */
 
 	mlx5e_destroy_sq(c->mdev, sq->sqn);
 	mlx5e_free_xdpsq_descs(sq);
@@ -1826,12 +1827,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 
 	mlx5e_build_create_cq_param(&ccp, c);
 
-	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
 			    &c->async_icosq.cq);
 	if (err)
 		return err;
 
-	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
 			    &c->icosq.cq);
 	if (err)
 		goto err_close_async_icosq_cq;
@@ -2069,7 +2070,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 	int i;
 
 #ifdef CONFIG_MLX5_EN_IPSEC
-	if (MLX5_IPSEC_DEV(mdev))
+	if (mlx5_fpga_is_ipsec_device(mdev))
 		byte_count += MLX5E_METADATA_ETHER_LEN;
 #endif
 
@@ -4455,8 +4456,9 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 		return -EINVAL;
 	}
 
-	if (MLX5_IPSEC_DEV(priv->mdev)) {
-		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+	if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
+		netdev_warn(netdev,
+			    "XDP is not available on Innova cards with IPsec support\n");
 		return -EINVAL;
 	}
 
@@ -1795,8 +1795,8 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
 
 		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
 #ifdef CONFIG_MLX5_EN_IPSEC
-		if (MLX5_IPSEC_DEV(mdev)) {
-			netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
+		if (mlx5_fpga_is_ipsec_device(mdev)) {
+			netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
 			return -EINVAL;
 		}
 #endif
@@ -5040,7 +5040,7 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
 	 */
 	if (rate) {
 		rate = (rate * BITS_PER_BYTE) + 500000;
-		rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
+		rate_mbps = max_t(u64, do_div(rate, 1000000), 1);
 	}
 
 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
@@ -124,7 +124,7 @@ struct mlx5_fpga_ipsec {
 	struct ida halloc;
 };
 
-static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
+bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
 {
 	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
 		return false;
@@ -43,6 +43,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
 const struct mlx5_flow_cmds *
 mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
 void mlx5_fpga_ipsec_build_fs_cmds(void);
+bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
 #else
 static inline
 const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
@@ -55,6 +56,7 @@ mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
 }
 
 static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
+static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
 
 #endif /* CONFIG_MLX5_FPGA_IPSEC */
 #endif /* __MLX5_FPGA_IPSEC_H__ */
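Un-staticizing mlx5_fpga_is_ipsec_device() uses the standard Kconfig-stub header pattern: declare the real function under the config guard and provide a static inline returning false otherwise, so callers like mlx5e_xdp_allowed() generally need no #ifdef of their own. Generic shape of the pattern (names here are hypothetical):

	#ifdef CONFIG_FOO
	bool foo_is_supported(struct mlx5_core_dev *mdev);
	#else
	static inline bool foo_is_supported(struct mlx5_core_dev *mdev) { return false; }
	#endif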
@@ -190,6 +190,16 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
 	return true;
 }
 
+static void enter_error_state(struct mlx5_core_dev *dev, bool force)
+{
+	if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
+		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+		mlx5_cmd_flush(dev);
+	}
+
+	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
+}
+
 void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 {
 	bool err_detected = false;
@@ -208,12 +218,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 		goto unlock;
 	}
 
-	if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
-		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
-		mlx5_cmd_flush(dev);
-	}
-
-	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
+	enter_error_state(dev, force);
 unlock:
 	mutex_unlock(&dev->intf_state_mutex);
 }
@@ -613,7 +618,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
 	priv = container_of(health, struct mlx5_priv, health);
 	dev = container_of(priv, struct mlx5_core_dev, priv);
 
-	mlx5_enter_error_state(dev, false);
+	enter_error_state(dev, false);
 	if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
 		if (mlx5_health_try_recover(dev))
 			mlx5_core_err(dev, "health recovery failed\n");
@@ -707,8 +712,9 @@ static void poll_health(struct timer_list *t)
 		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
 		dev->priv.health.fatal_error = fatal_error;
 		print_health_info(dev);
+		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
 		mlx5_trigger_health_work(dev);
-		goto out;
+		return;
 	}
 
 	count = ioread32be(health->health_counter);
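The ordering appears to be the point of the health fix: poll_health() now marks the device MLX5_DEVICE_STATE_INTERNAL_ERROR synchronously, before the health work is scheduled, so commands issued in the window before enter_error_state() runs already see the error state and fail fast rather than timing out against dead firmware.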
@@ -1396,6 +1396,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
 
 	pci_save_state(pdev);
+	if (!mlx5_core_is_mp_slave(dev))
 		devlink_reload_enable(devlink);
 
 	return 0;