Merge branch 'mlxsw-spectrum_acl-Don-t-take-rtnl-mutex-for-region-rehash'

Ido Schimmel says:

====================
mlxsw: spectrum_acl: Don't take rtnl mutex for region rehash

Jiri says:

During region rehash, a new region is created with a more optimized set
of masks (ERPs). When transitioning to the new region, all the rules
from the old region are copied one-by-one to the new region. This
transition can be time-consuming and is currently done under the RTNL lock.

In order to remove RTNL lock dependency during region rehash, introduce
multiple smaller locks guarding dedicated structures or parts of them.
That is the vast majority of this patchset. Only patch #1 is a simple
cleanup; patches 12-15 improve or introduce new selftests.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2019-02-24 20:25:30 -08:00
commit 834f9b057d
6 changed files with 593 additions and 188 deletions
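
The pattern throughout the diffs below is to replace the single coarse RTNL
lock with a mutex embedded in each structure whose consistency it guards. A
minimal userspace sketch of that pattern using pthreads follows; the names
(vregion, vregion_rehash) mirror the driver's, but the code is illustrative,
not the driver's.

#include <pthread.h>

struct vregion {
	pthread_mutex_t lock;	/* protects region pointers and the vchunk list */
	int nr_rules;
};

static void vregion_rehash(struct vregion *vregion)
{
	/* before: rtnl_lock() stalled unrelated work for the whole copy */
	pthread_mutex_lock(&vregion->lock);	/* after: only this vregion waits */
	/* ... copy rules one-by-one from the old region to the new one ... */
	pthread_mutex_unlock(&vregion->lock);
}

int main(void)
{
	struct vregion vregion = { .lock = PTHREAD_MUTEX_INITIALIZER };

	vregion_rehash(&vregion);
	return 0;
}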

drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c

@ -5,11 +5,13 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
struct mlxsw_sp_acl_bf {
struct mutex lock; /* Protects Bloom Filter updates. */
unsigned int bank_size;
refcount_t refcnt[0];
};
@ -172,26 +174,36 @@ mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
u16 bf_index;
int err;
mutex_lock(&bf->lock);
bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
bf_index);
if (refcount_inc_not_zero(&bf->refcnt[rule_index]))
return 0;
if (refcount_inc_not_zero(&bf->refcnt[rule_index])) {
err = 0;
goto unlock;
}
peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
if (!peabfe_pl)
return -ENOMEM;
if (!peabfe_pl) {
err = -ENOMEM;
goto unlock;
}
mlxsw_reg_peabfe_pack(peabfe_pl);
mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 1, erp_bank, bf_index);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
kfree(peabfe_pl);
if (err)
return err;
goto unlock;
refcount_set(&bf->refcnt[rule_index], 1);
return 0;
err = 0;
unlock:
mutex_unlock(&bf->lock);
return err;
}
void
@ -205,6 +217,8 @@ mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
char *peabfe_pl;
u16 bf_index;
mutex_lock(&bf->lock);
bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
bf_index);
@ -212,13 +226,16 @@ mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
if (refcount_dec_and_test(&bf->refcnt[rule_index])) {
peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
if (!peabfe_pl)
return;
goto unlock;
mlxsw_reg_peabfe_pack(peabfe_pl);
mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 0, erp_bank, bf_index);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
kfree(peabfe_pl);
}
unlock:
mutex_unlock(&bf->lock);
}
struct mlxsw_sp_acl_bf *
@ -240,10 +257,13 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
return ERR_PTR(-ENOMEM);
bf->bank_size = bf_bank_size;
mutex_init(&bf->lock);
return bf;
}
void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf)
{
mutex_destroy(&bf->lock);
kfree(bf);
}
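
The add/del paths above follow a common pattern: a fast path that bumps an
existing refcount, a slow path that programs the hardware for the first user,
and a single unlock label that every exit funnels through. Below is a compact
userspace model of that control flow, with hypothetical names (bf_entry_add,
bf_refcnt) standing in for the driver's.

#include <pthread.h>
#include <stdio.h>

#define BF_SLOTS 4

static pthread_mutex_t bf_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int bf_refcnt[BF_SLOTS];	/* models bf->refcnt[] */

/* Models mlxsw_sp_acl_bf_entry_add(): every exit funnels through the
 * "unlock" label so the mutex is released on all paths. */
static int bf_entry_add(unsigned int idx)
{
	int err = 0;

	pthread_mutex_lock(&bf_lock);
	if (bf_refcnt[idx]) {	/* the refcount_inc_not_zero() fast path */
		bf_refcnt[idx]++;
		goto unlock;
	}
	/* first user: program the hardware here; may set err on failure */
	bf_refcnt[idx] = 1;
unlock:
	pthread_mutex_unlock(&bf_lock);
	return err;
}

static void bf_entry_del(unsigned int idx)
{
	pthread_mutex_lock(&bf_lock);
	if (bf_refcnt[idx] && --bf_refcnt[idx] == 0) {
		/* last user: clear the hardware entry */
	}
	pthread_mutex_unlock(&bf_lock);
}

int main(void)
{
	bf_entry_add(1);
	bf_entry_add(1);
	bf_entry_del(1);
	printf("refcnt[1] = %u\n", bf_refcnt[1]);	/* prints: refcnt[1] = 1 */
	return 0;
}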

drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c

@ -7,6 +7,7 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/objagg.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
@ -63,6 +64,7 @@ struct mlxsw_sp_acl_erp_table {
unsigned int num_ctcam_erps;
unsigned int num_deltas;
struct objagg *objagg;
struct mutex objagg_lock; /* guards objagg manipulation */
};
struct mlxsw_sp_acl_erp_table_ops {
@ -1001,17 +1003,15 @@ struct mlxsw_sp_acl_erp_mask *
mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
const char *mask, bool ctcam)
{
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key key;
struct objagg_obj *objagg_obj;
/* eRPs are allocated from a shared resource, but currently all
* allocations are done under RTNL.
*/
ASSERT_RTNL();
memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
key.ctcam = ctcam;
objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key);
mutex_lock(&erp_table->objagg_lock);
objagg_obj = objagg_obj_get(erp_table->objagg, &key);
mutex_unlock(&erp_table->objagg_lock);
if (IS_ERR(objagg_obj))
return ERR_CAST(objagg_obj);
return (struct mlxsw_sp_acl_erp_mask *) objagg_obj;
@ -1021,8 +1021,11 @@ void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_erp_mask *erp_mask)
{
struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
mutex_lock(&erp_table->objagg_lock);
objagg_obj_put(erp_table->objagg, objagg_obj);
mutex_unlock(&erp_table->objagg_lock);
}
int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
@ -1034,7 +1037,6 @@ int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
unsigned int erp_bank;
ASSERT_RTNL();
if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
return 0;
@ -1334,6 +1336,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion,
erp_table->ops = &erp_no_mask_ops;
INIT_LIST_HEAD(&erp_table->atcam_erps_list);
erp_table->aregion = aregion;
mutex_init(&erp_table->objagg_lock);
return erp_table;
@ -1346,6 +1349,7 @@ static void
mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
{
WARN_ON(!list_empty(&erp_table->atcam_erps_list));
mutex_destroy(&erp_table->objagg_lock);
objagg_destroy(erp_table->objagg);
kfree(erp_table);
}
@ -1376,14 +1380,16 @@ mlxsw_sp_acl_erp_hints_check(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct objagg_hints *hints, bool *p_rehash_needed)
{
struct objagg *objagg = aregion->erp_table->objagg;
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
const struct objagg_stats *ostats;
const struct objagg_stats *hstats;
int err;
*p_rehash_needed = false;
ostats = objagg_stats_get(objagg);
mutex_lock(&erp_table->objagg_lock);
ostats = objagg_stats_get(erp_table->objagg);
mutex_unlock(&erp_table->objagg_lock);
if (IS_ERR(ostats)) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get ERP stats\n");
return PTR_ERR(ostats);
@ -1411,13 +1417,16 @@ err_hints_stats_get:
void *
mlxsw_sp_acl_erp_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
struct objagg_hints *hints;
bool rehash_needed;
int err;
hints = objagg_hints_get(aregion->erp_table->objagg,
mutex_lock(&erp_table->objagg_lock);
hints = objagg_hints_get(erp_table->objagg,
OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
mutex_unlock(&erp_table->objagg_lock);
if (IS_ERR(hints)) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to create ERP hints\n");
return ERR_CAST(hints);
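
Note how the critical sections above stay short: objagg_stats_get() and
objagg_hints_get() run under objagg_lock, while the potentially slow
processing of the returned snapshot happens with the lock already dropped.
A minimal sketch of that snapshot-then-process idiom (generic names, not
the objagg API):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct stats {
	int counters[8];
};

static pthread_mutex_t objagg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct stats live_stats;

/* take a consistent copy under the lock; analyze it afterwards */
static struct stats *stats_get(void)
{
	struct stats *snap = malloc(sizeof(*snap));

	if (!snap)
		return NULL;
	pthread_mutex_lock(&objagg_lock);
	memcpy(snap, &live_stats, sizeof(*snap));
	pthread_mutex_unlock(&objagg_lock);
	return snap;
}

int main(void)
{
	struct stats *snap = stats_get();

	if (!snap)
		return 1;
	/* ... derive hints / decide whether a rehash is worthwhile ... */
	free(snap);
	return 0;
}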

drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c

@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>
#include "reg.h"
@ -37,6 +38,7 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
size_t alloc_size;
int err;
mutex_init(&tcam->lock);
tcam->vregion_rehash_intrvl =
MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
INIT_LIST_HEAD(&tcam->vregion_list);
@ -84,6 +86,7 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
mutex_destroy(&tcam->lock);
ops->fini(mlxsw_sp, tcam->priv);
kfree(tcam->used_groups);
kfree(tcam->used_regions);
@ -161,28 +164,38 @@ struct mlxsw_sp_acl_tcam_pattern {
struct mlxsw_sp_acl_tcam_group {
struct mlxsw_sp_acl_tcam *tcam;
u16 id;
struct list_head vregion_list;
struct mutex lock; /* guards region list updates */
struct list_head region_list;
unsigned int region_count;
};
struct mlxsw_sp_acl_tcam_vgroup {
struct mlxsw_sp_acl_tcam_group group;
struct list_head vregion_list;
struct rhashtable vchunk_ht;
struct mlxsw_sp_acl_tcam_group_ops *ops;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
bool tmplt_elusage_set;
struct mlxsw_afk_element_usage tmplt_elusage;
bool vregion_rehash_enabled;
};
struct mlxsw_sp_acl_tcam_vregion {
struct mutex lock; /* Protects consistency of region, region2 pointers
* and vchunk_list.
*/
struct mlxsw_sp_acl_tcam_region *region;
struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
struct list_head list; /* Member of a TCAM group */
struct list_head tlist; /* Member of a TCAM */
struct list_head vchunk_list; /* List of vchunks under this vregion */
struct mlxsw_sp_acl_tcam_group *group;
struct mlxsw_afk_key_info *key_info;
struct mlxsw_sp_acl_tcam *tcam;
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
struct delayed_work rehash_dw;
struct mlxsw_sp *mlxsw_sp;
bool failed_rollback; /* Indicates failed rollback during migration */
unsigned int ref_count;
};
struct mlxsw_sp_acl_tcam_vchunk;
@ -201,7 +214,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
struct rhash_head ht_node; /* Member of a chunk HT */
struct list_head ventry_list;
unsigned int priority; /* Priority within the vregion and group */
struct mlxsw_sp_acl_tcam_group *group;
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
struct mlxsw_sp_acl_tcam_vregion *vregion;
unsigned int ref_count;
};
@ -230,46 +243,78 @@ static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group)
{
struct mlxsw_sp_acl_tcam_vregion *vregion;
struct mlxsw_sp_acl_tcam_region *region;
char pagt_pl[MLXSW_REG_PAGT_LEN];
int acl_index = 0;
mlxsw_reg_pagt_pack(pagt_pl, group->id);
list_for_each_entry(vregion, &group->vregion_list, list) {
if (vregion->region2)
mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
vregion->region2->id, true);
list_for_each_entry(region, &group->region_list, list) {
bool multi = false;
/* Check if the next entry in the list has the same vregion. */
if (region->list.next != &group->region_list &&
list_next_entry(region, list)->vregion == region->vregion)
multi = true;
mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
vregion->region->id, false);
region->id, multi);
}
mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_sp_acl_tcam_group *group,
const struct mlxsw_sp_acl_tcam_pattern *patterns,
unsigned int patterns_count,
struct mlxsw_afk_element_usage *tmplt_elusage)
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_sp_acl_tcam_group *group)
{
int err;
group->tcam = tcam;
group->patterns = patterns;
group->patterns_count = patterns_count;
if (tmplt_elusage) {
group->tmplt_elusage_set = true;
memcpy(&group->tmplt_elusage, tmplt_elusage,
sizeof(group->tmplt_elusage));
}
INIT_LIST_HEAD(&group->vregion_list);
mutex_init(&group->lock);
INIT_LIST_HEAD(&group->region_list);
err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
if (err)
return err;
err = rhashtable_init(&group->vchunk_ht,
return 0;
}
static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
struct mlxsw_sp_acl_tcam *tcam = group->tcam;
mutex_destroy(&group->lock);
mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
WARN_ON(!list_empty(&group->region_list));
}
static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
const struct mlxsw_sp_acl_tcam_pattern *patterns,
unsigned int patterns_count,
struct mlxsw_afk_element_usage *tmplt_elusage,
bool vregion_rehash_enabled)
{
int err;
vgroup->patterns = patterns;
vgroup->patterns_count = patterns_count;
vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
if (tmplt_elusage) {
vgroup->tmplt_elusage_set = true;
memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
sizeof(vgroup->tmplt_elusage));
}
INIT_LIST_HEAD(&vgroup->vregion_list);
err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
if (err)
return err;
err = rhashtable_init(&vgroup->vchunk_ht,
&mlxsw_sp_acl_tcam_vchunk_ht_params);
if (err)
goto err_rhashtable_init;
@ -277,18 +322,16 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
return 0;
err_rhashtable_init:
mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
mlxsw_sp_acl_tcam_group_del(&vgroup->group);
return err;
}
static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group)
static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
struct mlxsw_sp_acl_tcam *tcam = group->tcam;
rhashtable_destroy(&group->vchunk_ht);
mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
WARN_ON(!list_empty(&group->vregion_list));
rhashtable_destroy(&vgroup->vchunk_ht);
mlxsw_sp_acl_tcam_group_del(&vgroup->group);
WARN_ON(!list_empty(&vgroup->vregion_list));
}
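
The split above layers a "virtual" vgroup on top of the plain hardware group:
the vgroup owns the vregion list, the vchunk hashtable, and the patterns,
while the embedded group keeps only what the hardware-facing calls need. This
is why later hunks pass &ruleset->vgroup.group to bind/unbind and group_id.
A trimmed-down model of the embedding (illustrative types, not the driver's):

struct tcam_group {
	unsigned short id;	/* hardware-facing state only */
};

struct tcam_vgroup {
	struct tcam_group group;
	/* virtual-only state: vregion list, vchunk hashtable, patterns */
};

static unsigned short group_id(const struct tcam_group *group)
{
	return group->id;
}

int main(void)
{
	struct tcam_vgroup vgroup = { .group = { .id = 7 } };

	/* callers that only need the hardware object reach through .group */
	return group_id(&vgroup.group) == 7 ? 0 : 1;
}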
static int
@ -354,52 +397,87 @@ mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_region *region,
unsigned int priority,
struct mlxsw_sp_acl_tcam_region *next_region)
{
struct mlxsw_sp_acl_tcam_group *group = region->vregion->group;
struct mlxsw_sp_acl_tcam_region *region2;
struct list_head *pos;
int err;
if (group->region_count == group->tcam->max_group_size)
return -ENOBUFS;
mutex_lock(&group->lock);
if (group->region_count == group->tcam->max_group_size) {
err = -ENOBUFS;
goto err_region_count_check;
}
if (next_region) {
/* If the next region is defined, place the new one
* before it. The next one is a sibling.
*/
pos = &next_region->list;
} else {
/* Position the region inside the list according to priority */
list_for_each(pos, &group->region_list) {
region2 = list_entry(pos, typeof(*region2), list);
if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
priority)
break;
}
}
list_add_tail(&region->list, pos);
region->group = group;
err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
if (err)
return err;
goto err_group_update;
group->region_count++;
mutex_unlock(&group->lock);
return 0;
err_group_update:
list_del(&region->list);
err_region_count_check:
mutex_unlock(&group->lock);
return err;
}
static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
struct mlxsw_sp_acl_tcam_group *group = region->vregion->group;
struct mlxsw_sp_acl_tcam_group *group = region->group;
mutex_lock(&group->lock);
list_del(&region->list);
group->region_count--;
mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
mutex_unlock(&group->lock);
}
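
The attach path above inserts a region into the group list by priority: walk
the list, stop at the first entry with a strictly higher priority, and insert
before it, so equal-priority regions keep their insertion order. A small
self-contained version of that ordered insert (a plain singly linked list
stands in for the kernel's list_head):

#include <stdio.h>
#include <stdlib.h>

struct region {
	int prio;
	struct region *next;
};

/* Insert before the first region with a strictly higher priority, so
 * regions of equal priority keep their insertion order (this mirrors
 * the "> priority" comparison in the attach path). */
static void region_insert(struct region **head, struct region *new_region)
{
	struct region **pos = head;

	while (*pos && (*pos)->prio <= new_region->prio)
		pos = &(*pos)->next;
	new_region->next = *pos;
	*pos = new_region;
}

int main(void)
{
	int prios[] = { 30, 10, 20, 10 };
	struct region *head = NULL;

	for (int i = 0; i < 4; i++) {
		struct region *region = malloc(sizeof(*region));

		region->prio = prios[i];
		region_insert(&head, region);
	}
	for (struct region *region = head; region; region = region->next)
		printf("%d ", region->prio);	/* prints: 10 10 20 30 */
	printf("\n");
	return 0;
}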
static int
mlxsw_sp_acl_tcam_group_vregion_attach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_vregion *vregion)
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
struct mlxsw_sp_acl_tcam_vregion *vregion,
unsigned int priority)
{
struct mlxsw_sp_acl_tcam_vregion *vregion2;
struct list_head *pos;
int err;
/* Position the vregion inside the list according to priority */
list_for_each(pos, &group->vregion_list) {
list_for_each(pos, &vgroup->vregion_list) {
vregion2 = list_entry(pos, typeof(*vregion2), list);
if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) >
mlxsw_sp_acl_tcam_vregion_prio(vregion))
if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
break;
}
list_add_tail(&vregion->list, pos);
vregion->group = group;
err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, vregion->region);
err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
vregion->region,
priority, NULL);
if (err)
goto err_region_attach;
@ -411,8 +489,8 @@ err_region_attach:
}
static void
mlxsw_sp_acl_tcam_group_vregion_detach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
{
list_del(&vregion->list);
if (vregion->region2)
@ -422,22 +500,22 @@ mlxsw_sp_acl_tcam_group_vregion_detach(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_group_vregion_find(struct mlxsw_sp_acl_tcam_group *group,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage,
bool *p_need_split)
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage,
bool *p_need_split)
{
struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
struct list_head *pos;
bool issubset;
list_for_each(pos, &group->vregion_list) {
list_for_each(pos, &vgroup->vregion_list) {
vregion = list_entry(pos, typeof(*vregion), list);
/* First, check if the requested priority does not rather belong
* under some of the next vregions.
*/
if (pos->next != &group->vregion_list) { /* not last */
if (pos->next != &vgroup->vregion_list) { /* not last */
vregion2 = list_entry(pos->next, typeof(*vregion2),
list);
if (priority >=
@ -478,9 +556,9 @@ mlxsw_sp_acl_tcam_group_vregion_find(struct mlxsw_sp_acl_tcam_group *group,
}
static void
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_afk_element_usage *elusage,
struct mlxsw_afk_element_usage *out)
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
struct mlxsw_afk_element_usage *elusage,
struct mlxsw_afk_element_usage *out)
{
const struct mlxsw_sp_acl_tcam_pattern *pattern;
int i;
@ -488,14 +566,14 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
/* In case the template is set, we don't have to look up the pattern
* and just use the template.
*/
if (group->tmplt_elusage_set) {
memcpy(out, &group->tmplt_elusage, sizeof(*out));
if (vgroup->tmplt_elusage_set) {
memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
return;
}
for (i = 0; i < group->patterns_count; i++) {
pattern = &group->patterns[i];
for (i = 0; i < vgroup->patterns_count; i++) {
pattern = &vgroup->patterns[i];
mlxsw_afk_element_usage_fill(out, pattern->elements,
pattern->elements_count);
if (mlxsw_afk_element_usage_subset(elusage, out))
@ -628,7 +706,7 @@ mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
ops->region_fini(mlxsw_sp, region->priv);
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_id_put(region->vregion->group->tcam,
mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
region->id);
kfree(region);
}
@ -654,22 +732,19 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
container_of(work, struct mlxsw_sp_acl_tcam_vregion,
rehash_dw.work);
/* TODO: Take rtnl lock here as the rest of the code counts on it
* now. Later, this should be replaced by per-vregion lock.
*/
rtnl_lock();
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion);
rtnl_unlock();
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
struct mlxsw_sp_acl_tcam_vregion *vregion;
int err;
@ -677,8 +752,11 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
if (!vregion)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&vregion->vchunk_list);
mutex_init(&vregion->lock);
vregion->tcam = tcam;
vregion->mlxsw_sp = mlxsw_sp;
vregion->vgroup = vgroup;
vregion->ref_count = 1;
vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(vregion->key_info)) {
@ -693,17 +771,25 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
goto err_region_create;
}
list_add_tail(&vregion->tlist, &tcam->vregion_list);
err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
priority);
if (err)
goto err_vgroup_vregion_attach;
if (ops->region_rehash_hints_get) {
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
/* Create the delayed work for vregion periodic rehash */
INIT_DELAYED_WORK(&vregion->rehash_dw,
mlxsw_sp_acl_tcam_vregion_rehash_work);
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
mutex_lock(&tcam->lock);
list_add_tail(&vregion->tlist, &tcam->vregion_list);
mutex_unlock(&tcam->lock);
}
return vregion;
err_vgroup_vregion_attach:
mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
@ -716,14 +802,21 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (ops->region_rehash_hints_get)
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
cancel_delayed_work_sync(&vregion->rehash_dw);
list_del(&vregion->tlist);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
mlxsw_afk_key_info_put(vregion->key_info);
mutex_destroy(&vregion->lock);
kfree(vregion);
}
@ -751,81 +844,58 @@ int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!ops->region_rehash_hints_get))
return -EOPNOTSUPP;
tcam->vregion_rehash_intrvl = val;
rtnl_lock();
mutex_lock(&tcam->lock);
list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
if (val)
mlxsw_core_schedule_dw(&vregion->rehash_dw, 0);
else
cancel_delayed_work_sync(&vregion->rehash_dw);
}
rtnl_unlock();
mutex_unlock(&tcam->lock);
return 0;
}
static int
mlxsw_sp_acl_tcam_vchunk_assoc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage,
struct mlxsw_sp_acl_tcam_vchunk *vchunk)
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
struct mlxsw_afk_element_usage vregion_elusage;
struct mlxsw_sp_acl_tcam_vregion *vregion;
bool vregion_created = false;
bool need_split;
int err;
vregion = mlxsw_sp_acl_tcam_group_vregion_find(group, priority, elusage,
&need_split);
if (vregion && need_split) {
/* According to priority, the vchunk should belong to an
* existing vregion. However, this vchunk needs elements
* that vregion does not contain. We need to split the existing
* vregion into two and create a new vregion for this vchunk
* in between. This is not supported now.
*/
return -EOPNOTSUPP;
}
if (!vregion) {
struct mlxsw_afk_element_usage vregion_elusage;
mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
&vregion_elusage);
vregion = mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp,
group->tcam,
&vregion_elusage);
if (IS_ERR(vregion))
return PTR_ERR(vregion);
vregion_created = true;
vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
elusage, &need_split);
if (vregion) {
if (need_split) {
/* According to priority, new vchunk should belong to
* an existing vregion. However, this vchunk needs
* elements that vregion does not contain. We need
* to split the existing vregion into two and create
* a new vregion for the new vchunk in between.
* This is not supported now.
*/
return ERR_PTR(-EOPNOTSUPP);
}
vregion->ref_count++;
return vregion;
}
vchunk->vregion = vregion;
list_add_tail(&vchunk->list, &vregion->vchunk_list);
mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
&vregion_elusage);
if (!vregion_created)
return 0;
err = mlxsw_sp_acl_tcam_group_vregion_attach(mlxsw_sp, group, vregion);
if (err)
goto err_group_vregion_attach;
return 0;
err_group_vregion_attach:
mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
return err;
return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
&vregion_elusage);
}
static void
mlxsw_sp_acl_tcam_vchunk_deassoc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk)
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
{
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
list_del(&vchunk->list);
if (list_empty(&vregion->vchunk_list)) {
mlxsw_sp_acl_tcam_group_vregion_detach(mlxsw_sp, vregion);
mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
if (--vregion->ref_count)
return;
mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
static struct mlxsw_sp_acl_tcam_chunk *
@ -858,10 +928,11 @@ mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
struct mlxsw_sp_acl_tcam_vregion *vregion;
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
@ -873,34 +944,43 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&vchunk->ventry_list);
vchunk->priority = priority;
vchunk->group = group;
vchunk->vgroup = vgroup;
vchunk->ref_count = 1;
err = mlxsw_sp_acl_tcam_vchunk_assoc(mlxsw_sp, group, priority,
elusage, vchunk);
if (err)
goto err_vchunk_assoc;
vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
priority, elusage);
if (IS_ERR(vregion)) {
err = PTR_ERR(vregion);
goto err_vregion_get;
}
err = rhashtable_insert_fast(&group->vchunk_ht, &vchunk->ht_node,
vchunk->vregion = vregion;
err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
mlxsw_sp_acl_tcam_vchunk_ht_params);
if (err)
goto err_rhashtable_insert;
mutex_lock(&vregion->lock);
vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
vchunk->vregion->region);
if (IS_ERR(vchunk->chunk)) {
mutex_unlock(&vregion->lock);
err = PTR_ERR(vchunk->chunk);
goto err_chunk_create;
}
list_add_tail(&vchunk->list, &vregion->vchunk_list);
mutex_unlock(&vregion->lock);
return vchunk;
err_chunk_create:
rhashtable_remove_fast(&group->vchunk_ht, &vchunk->ht_node,
rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
mlxsw_sp_acl_tcam_vchunk_deassoc(mlxsw_sp, vchunk);
err_vchunk_assoc:
mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
kfree(vchunk);
return ERR_PTR(err);
}
@ -909,26 +989,30 @@ static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
struct mlxsw_sp_acl_tcam_group *group = vchunk->group;
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;
mutex_lock(&vregion->lock);
list_del(&vchunk->list);
if (vchunk->chunk2)
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
rhashtable_remove_fast(&group->vchunk_ht, &vchunk->ht_node,
mutex_unlock(&vregion->lock);
rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
mlxsw_sp_acl_tcam_vchunk_ht_params);
mlxsw_sp_acl_tcam_vchunk_deassoc(mlxsw_sp, vchunk);
mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
kfree(vchunk);
}
static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
vchunk = rhashtable_lookup_fast(&group->vchunk_ht, &priority,
vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
mlxsw_sp_acl_tcam_vchunk_ht_params);
if (vchunk) {
if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
@ -937,7 +1021,7 @@ mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
vchunk->ref_count++;
return vchunk;
}
return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, group,
return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
priority, elusage);
}
@ -1011,28 +1095,34 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
struct mlxsw_sp_acl_tcam_ventry *ventry,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp_acl_tcam_vregion *vregion;
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, group, rulei->priority,
vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
&rulei->values.elusage);
if (IS_ERR(vchunk))
return PTR_ERR(vchunk);
ventry->vchunk = vchunk;
ventry->rulei = rulei;
vregion = vchunk->vregion;
mutex_lock(&vregion->lock);
ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
vchunk->chunk);
if (IS_ERR(ventry->entry)) {
mutex_unlock(&vregion->lock);
err = PTR_ERR(ventry->entry);
goto err_entry_create;
}
list_add_tail(&ventry->list, &vchunk->ventry_list);
mutex_unlock(&vregion->lock);
return 0;
@ -1045,9 +1135,12 @@ static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry)
{
struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
mutex_lock(&vregion->lock);
list_del(&ventry->list);
mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
mutex_unlock(&vregion->lock);
mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}
@ -1173,6 +1266,7 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion,
void *hints_priv)
{
unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
struct mlxsw_sp_acl_tcam_region *region2, *unused_region;
int err;
@ -1180,14 +1274,21 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
region2 = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
vregion, hints_priv);
if (IS_ERR(region2))
return PTR_ERR(region2);
if (IS_ERR(region2)) {
err = PTR_ERR(region2);
goto out;
}
vregion->region2 = region2;
err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, region2);
err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
vregion->region->group,
region2, priority,
vregion->region);
if (err)
goto err_group_region_attach;
mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion);
if (!vregion->failed_rollback) {
if (!err) {
@ -1202,15 +1303,22 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
*/
unused_region = vregion->region2;
}
mutex_unlock(&vregion->lock);
vregion->region2 = NULL;
mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
} else {
mutex_unlock(&vregion->lock);
}
return err;
goto out;
err_group_region_attach:
vregion->region2 = NULL;
mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region2);
out:
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
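
The migrate path builds the new region, attaches it directly behind the old
one, copies the rules across under the vregion lock, and finally destroys
whichever region ended up unused: the old one on success, the half-populated
new one on failure (the driver additionally handles rollback of partially
moved rules). A toy userspace model of that swap-or-discard flow:

#include <stdio.h>
#include <string.h>

#define NR_RULES 4

/* Copy rules one by one into the new region; a negative fail_at means
 * the copy succeeds. The driver's version also migrates back rules it
 * already moved when a copy fails mid-way. */
static int region_migrate(int *old_region, int *new_region, int fail_at)
{
	for (int i = 0; i < NR_RULES; i++) {
		if (i == fail_at)
			return -1;
		new_region[i] = old_region[i];
		old_region[i] = 0;
	}
	return 0;
}

int main(void)
{
	int old_region[NR_RULES] = { 1, 2, 3, 4 };
	int new_region[NR_RULES] = { 0 };
	int err = region_migrate(old_region, new_region, -1);
	int *unused_region = err ? new_region : old_region;

	memset(unused_region, 0, sizeof(int) * NR_RULES);	/* "destroy" it */
	printf("migrate %s\n", err ? "failed" : "ok");
	return 0;
}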
@ -1298,7 +1406,7 @@ static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
struct mlxsw_sp_acl_tcam_flower_ruleset {
struct mlxsw_sp_acl_tcam_group group;
struct mlxsw_sp_acl_tcam_vgroup vgroup;
};
struct mlxsw_sp_acl_tcam_flower_rule {
@ -1313,10 +1421,10 @@ mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
tmplt_elusage);
return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
tmplt_elusage, true);
}
static void
@ -1325,7 +1433,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}
static int
@ -1336,7 +1444,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
mlxsw_sp_port, ingress);
}
@ -1348,7 +1456,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
mlxsw_sp_port, ingress);
}
@ -1357,7 +1465,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}
static int
@ -1368,7 +1476,7 @@ mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->group,
return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
&rule->ventry, rulei);
}
@ -1414,7 +1522,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
struct mlxsw_sp_acl_tcam_mr_ruleset {
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
struct mlxsw_sp_acl_tcam_group group;
struct mlxsw_sp_acl_tcam_vgroup vgroup;
};
struct mlxsw_sp_acl_tcam_mr_rule {
@ -1430,10 +1538,10 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
int err;
err = mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
tmplt_elusage);
err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
tmplt_elusage, false);
if (err)
return err;
@ -1444,7 +1552,7 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
* is initialized.
*/
ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
&ruleset->group, 1,
&ruleset->vgroup, 1,
tmplt_elusage);
if (IS_ERR(ruleset->vchunk)) {
err = PTR_ERR(ruleset->vchunk);
@ -1454,7 +1562,7 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
return 0;
err_chunk_get:
mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
return err;
}
@ -1464,7 +1572,7 @@ mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}
static int
@ -1489,7 +1597,7 @@ mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}
static int
@ -1500,7 +1608,7 @@ mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->group,
return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
&rule->ventry, rulei);
}

drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h

@ -17,6 +17,7 @@ struct mlxsw_sp_acl_tcam {
unsigned long *used_groups; /* bit array */
unsigned int max_groups;
unsigned int max_group_size;
struct mutex lock; /* guards vregion list */
struct list_head vregion_list;
u32 vregion_rehash_intrvl; /* ms */
unsigned long priv[0];
@ -78,6 +79,8 @@ struct mlxsw_sp_acl_tcam_vregion;
struct mlxsw_sp_acl_tcam_region {
struct mlxsw_sp_acl_tcam_vregion *vregion;
struct mlxsw_sp_acl_tcam_group *group;
struct list_head list; /* Member of a TCAM group */
enum mlxsw_reg_ptar_key_type key_type;
u16 id; /* ACL ID and region ID - they are same */
char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];

include/trace/events/mlxsw.h

@ -73,6 +73,26 @@ TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_migrate,
__entry->mlxsw_sp, __entry->vregion)
);
TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_migrate_end,
TP_PROTO(const struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_vregion *vregion),
TP_ARGS(mlxsw_sp, vregion),
TP_STRUCT__entry(
__field(const void *, mlxsw_sp)
__field(const void *, vregion)
),
TP_fast_assign(
__entry->mlxsw_sp = mlxsw_sp;
__entry->vregion = vregion;
),
TP_printk("mlxsw_sp %p, vregion %p",
__entry->mlxsw_sp, __entry->vregion)
);
TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_rehash_dis,
TP_PROTO(const struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_vregion *vregion),

tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh

@ -540,11 +540,15 @@ delta_simple_rehash_test()
check_err $? "Rehash trace was not hit"
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
check_err $? "Migrate trace was not hit"
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
check_err $? "Migrate end trace was not hit"
tp_record_all mlxsw:* 3
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
check_err $? "Rehash trace was not hit"
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
check_fail $? "Migrate trace was hit when no migration should happen"
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
check_fail $? "Migrate end trace was hit when no migration should happen"
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
@ -565,6 +569,247 @@ delta_simple_rehash_test()
log_test "delta simple rehash test ($tcflags)"
}
delta_simple_ipv6_rehash_test()
{
RET=0
if [[ "$tcflags" != "skip_sw" ]]; then
return 0;
fi
devlink dev param set $DEVLINK_DEV \
name acl_region_rehash_interval cmode runtime value 0
check_err $? "Failed to set ACL region rehash interval"
tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
check_fail $? "Rehash trace was hit even when rehash should be disabled"
devlink dev param set $DEVLINK_DEV \
name acl_region_rehash_interval cmode runtime value 3000
check_err $? "Failed to set ACL region rehash interval"
sleep 1
tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 101 flower \
$tcflags dst_ip 2001:db8:1::0/121 action drop
tc filter add dev $h2 ingress protocol ipv6 pref 2 handle 102 flower \
$tcflags dst_ip 2001:db8:2::2 action drop
tc filter add dev $h2 ingress protocol ipv6 pref 3 handle 103 flower \
$tcflags dst_ip 2001:db8:3::0/120 action drop
$MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
-A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched a wrong filter"
tc_check_packets "dev $h2 ingress" 103 1
check_fail $? "Matched a wrong filter"
tc_check_packets "dev $h2 ingress" 102 1
check_err $? "Did not match on correct filter"
tp_record_all mlxsw:* 3
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
check_err $? "Rehash trace was not hit"
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
check_err $? "Migrate trace was not hit"
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
check_err $? "Migrate end trace was not hit"
tp_record_all mlxsw:* 3
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
check_err $? "Rehash trace was not hit"
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
check_fail $? "Migrate trace was hit when no migration should happen"
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
check_fail $? "Migrate end trace was hit when no migration should happen"
$MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
-A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched a wrong filter after rehash"
tc_check_packets "dev $h2 ingress" 103 1
check_fail $? "Matched a wrong filter after rehash"
tc_check_packets "dev $h2 ingress" 102 2
check_err $? "Did not match on correct filter after rehash"
tc filter del dev $h2 ingress protocol ipv6 pref 3 handle 103 flower
tc filter del dev $h2 ingress protocol ipv6 pref 2 handle 102 flower
tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 101 flower
log_test "delta simple IPv6 rehash test ($tcflags)"
}
TEST_RULE_BASE=256
declare -a test_rules_inserted
test_rule_add()
{
local iface=$1
local tcflags=$2
local index=$3
if ! [ ${test_rules_inserted[$index]} ] ; then
test_rules_inserted[$index]=false
fi
if ${test_rules_inserted[$index]} ; then
return
fi
local number=$(( $index + $TEST_RULE_BASE ))
printf -v hexnumber '%x' $number
batch="${batch}filter add dev $iface ingress protocol ipv6 pref 1 \
handle $number flower $tcflags \
src_ip 2001:db8:1::$hexnumber action drop\n"
test_rules_inserted[$index]=true
}
test_rule_del()
{
local iface=$1
local index=$2
if ! [ ${test_rules_inserted[$index]} ] ; then
test_rules_inserted[$index]=false
fi
if ! ${test_rules_inserted[$index]} ; then
return
fi
local number=$(( $index + $TEST_RULE_BASE ))
printf -v hexnumber '%x' $number
batch="${batch}filter del dev $iface ingress protocol ipv6 pref 1 \
handle $number flower\n"
test_rules_inserted[$index]=false
}
test_rule_add_or_remove()
{
local iface=$1
local tcflags=$2
local index=$3
if ! [ ${test_rules_inserted[$index]} ] ; then
test_rules_inserted[$index]=false
fi
if ${test_rules_inserted[$index]} ; then
test_rule_del $iface $index
else
test_rule_add $iface $tcflags $index
fi
}
test_rule_add_or_remove_random_batch()
{
local iface=$1
local tcflags=$2
local total_count=$3
local skip=0
local count=0
local MAXSKIP=20
local MAXCOUNT=20
for ((i=1;i<=total_count;i++)); do
if (( $skip == 0 )) && (($count == 0)); then
((skip=$RANDOM % $MAXSKIP + 1))
((count=$RANDOM % $MAXCOUNT + 1))
fi
if (( $skip != 0 )); then
((skip-=1))
else
((count-=1))
test_rule_add_or_remove $iface $tcflags $i
fi
done
}
delta_massive_ipv6_rehash_test()
{
RET=0
if [[ "$tcflags" != "skip_sw" ]]; then
return 0;
fi
devlink dev param set $DEVLINK_DEV \
name acl_region_rehash_interval cmode runtime value 0
check_err $? "Failed to set ACL region rehash interval"
tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
check_fail $? "Rehash trace was hit even when rehash should be disabled"
RANDOM=4432897
declare batch=""
test_rule_add_or_remove_random_batch $h2 $tcflags 5000
echo -n -e $batch | tc -b -
declare batch=""
test_rule_add_or_remove_random_batch $h2 $tcflags 5000
devlink dev param set $DEVLINK_DEV \
name acl_region_rehash_interval cmode runtime value 3000
check_err $? "Failed to set ACL region rehash interval"
sleep 1
tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 101 flower \
$tcflags dst_ip 2001:db8:1::0/121 action drop
tc filter add dev $h2 ingress protocol ipv6 pref 2 handle 102 flower \
$tcflags dst_ip 2001:db8:2::2 action drop
tc filter add dev $h2 ingress protocol ipv6 pref 3 handle 103 flower \
$tcflags dst_ip 2001:db8:3::0/120 action drop
$MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
-A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched a wrong filter"
tc_check_packets "dev $h2 ingress" 103 1
check_fail $? "Matched a wrong filter"
tc_check_packets "dev $h2 ingress" 102 1
check_err $? "Did not match on correct filter"
echo -n -e $batch | tc -b -
devlink dev param set $DEVLINK_DEV \
name acl_region_rehash_interval cmode runtime value 0
check_err $? "Failed to set ACL region rehash interval"
$MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
-A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched a wrong filter after rehash"
tc_check_packets "dev $h2 ingress" 103 1
check_fail $? "Matched a wrong filter after rehash"
tc_check_packets "dev $h2 ingress" 102 2
check_err $? "Did not match on correct filter after rehash"
tc filter del dev $h2 ingress protocol ipv6 pref 3 handle 103 flower
tc filter del dev $h2 ingress protocol ipv6 pref 2 handle 102 flower
tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 101 flower
declare batch=""
for i in {1..5000}; do
		test_rule_del $h2 $i
done
echo -e $batch | tc -b -
log_test "delta massive IPv6 rehash test ($tcflags)"
}
bloom_simple_test()
{
# Bloom filter requires that the eRP table is used. This test