net/mlx5: Replace fs_node mutex with reader/writer semaphore
Currently, the steering object is protected by a mutex. Replace that mutex with a reader/writer semaphore. In this patch only the write semaphore is taken; downstream patches will convert part of the write locks to read locks.

Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit c7784b1c8a
parent 19f100fef4
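The conversion pattern is small but repeated throughout the flow steering core: the per-node mutex becomes an rw_semaphore, mutex_init()/mutex_lock()/mutex_lock_nested()/mutex_unlock() become init_rwsem()/down_write()/down_write_nested()/up_write(), and the lockdep nesting enum is renamed from FS_MUTEX_* to FS_LOCK_*. The following is a minimal kernel-style sketch of that pattern only; the demo_* names are illustrative and are not the driver's own symbols.

#include <linux/rwsem.h>
#include <linux/atomic.h>

/* Lockdep subclasses for nested node locking, mirroring the renamed enum. */
enum demo_lock_class {
	DEMO_LOCK_GRANDPARENT,
	DEMO_LOCK_PARENT,
	DEMO_LOCK_CHILD
};

struct demo_node {
	struct rw_semaphore	lock;		/* was: struct mutex lock */
	atomic_t		refcount;
};

static void demo_node_init(struct demo_node *node)
{
	atomic_set(&node->refcount, 1);
	init_rwsem(&node->lock);		/* was: mutex_init(&node->lock) */
}

/* was: mutex_lock_nested(&node->lock, class) */
static void demo_nested_lock(struct demo_node *node, enum demo_lock_class class)
{
	down_write_nested(&node->lock, class);
	atomic_inc(&node->refcount);
}

/* was: mutex_unlock(&node->lock) */
static void demo_unlock(struct demo_node *node)
{
	atomic_dec(&node->refcount);
	up_write(&node->lock);
}

Because only the write side is taken here, behaviour is unchanged by this patch; the benefit arrives once lookup paths can take the semaphore for reading and run concurrently.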
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -145,10 +145,10 @@ static struct init_tree_node {
 	}
 };
 
-enum fs_i_mutex_lock_class {
-	FS_MUTEX_GRANDPARENT,
-	FS_MUTEX_PARENT,
-	FS_MUTEX_CHILD
+enum fs_i_lock_class {
+	FS_LOCK_GRANDPARENT,
+	FS_LOCK_PARENT,
+	FS_LOCK_CHILD
 };
 
 static const struct rhashtable_params rhash_fte = {
@@ -184,7 +184,7 @@ static void tree_init_node(struct fs_node *node,
 	atomic_set(&node->refcount, 1);
 	INIT_LIST_HEAD(&node->list);
 	INIT_LIST_HEAD(&node->children);
-	mutex_init(&node->lock);
+	init_rwsem(&node->lock);
 	node->remove_func = remove_func;
 	node->active = false;
 }
@@ -208,10 +208,10 @@ static void tree_get_node(struct fs_node *node)
 }
 
 static void nested_lock_ref_node(struct fs_node *node,
-				 enum fs_i_mutex_lock_class class)
+				 enum fs_i_lock_class class)
 {
 	if (node) {
-		mutex_lock_nested(&node->lock, class);
+		down_write_nested(&node->lock, class);
 		atomic_inc(&node->refcount);
 	}
 }
@@ -219,7 +219,7 @@ static void nested_lock_ref_node(struct fs_node *node,
 static void lock_ref_node(struct fs_node *node)
 {
 	if (node) {
-		mutex_lock(&node->lock);
+		down_write(&node->lock);
 		atomic_inc(&node->refcount);
 	}
 }
@@ -228,7 +228,7 @@ static void unlock_ref_node(struct fs_node *node)
 {
 	if (node) {
 		atomic_dec(&node->refcount);
-		mutex_unlock(&node->lock);
+		up_write(&node->lock);
 	}
 }
 
@@ -1376,7 +1376,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
 		int old_action;
 		int ret;
 
-		nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
+		nested_lock_ref_node(&fte->node, FS_LOCK_CHILD);
 		ret = check_conflicting_ftes(fte, flow_act);
 		if (ret) {
 			handle = ERR_PTR(ret);
@@ -1400,7 +1400,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
 	fte = alloc_insert_fte(fg, match_value, flow_act);
 	if (IS_ERR(fte))
 		return (void *)fte;
-	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
+	nested_lock_ref_node(&fte->node, FS_LOCK_CHILD);
 	handle = add_rule_fte(fte, fg, dest, dest_num, false);
 	if (IS_ERR(handle)) {
 		unlock_ref_node(&fte->node);
@@ -1548,7 +1548,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		struct fs_fte *fte;
 
 		g = iter->g;
-		nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
+		nested_lock_ref_node(&g->node, FS_LOCK_PARENT);
 		fte = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
 					     rhash_fte);
 		if (fte) {
@@ -1566,7 +1566,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 	list_for_each_entry(iter, &match_head.list, list) {
 		g = iter->g;
 
-		nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
+		nested_lock_ref_node(&g->node, FS_LOCK_PARENT);
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, NULL);
 		if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) {
@@ -1605,7 +1605,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 			return ERR_PTR(-EINVAL);
 	}
 
-	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
+	nested_lock_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
 	rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num);
 	if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOENT)
 		goto unlock;
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -80,7 +80,7 @@ struct fs_node {
 	struct fs_node		*parent;
 	struct fs_node		*root;
 	/* lock the node for writing and traversing */
-	struct mutex		lock;
+	struct rw_semaphore	lock;
 	atomic_t		refcount;
 	bool			active;
 	void			(*remove_func)(struct fs_node *);
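As the commit message notes, later patches are expected to convert some of these write locks to read locks. Under that assumption, a read-side traversal of the illustrative demo_node above could look like the following sketch; these helpers are hypothetical and not part of this series.

/* Hypothetical read-side helpers for the planned follow-up conversion. */
static void demo_read_lock(struct demo_node *node)
{
	down_read(&node->lock);		/* multiple readers may traverse concurrently */
	atomic_inc(&node->refcount);
}

static void demo_read_unlock(struct demo_node *node)
{
	atomic_dec(&node->refcount);
	up_read(&node->lock);
}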