From 8007880a2ca97c34e7ccd1fcf12daf854b792544 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Sat, 14 Dec 2019 10:51:17 +0200 Subject: [PATCH 01/13] net/mlx5: limit the function in local scope The function mlx5_buf_alloc_node is only used by the function in the local scope. So it is appropriate to limit this function in the local scope. Signed-off-by: Zhu Yanjun Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 4 ++-- include/linux/mlx5/driver.h | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 549f962cd86e..42198e64a7f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -71,8 +71,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, return cpu_handle; } -int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, - struct mlx5_frag_buf *buf, int node) +static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, + struct mlx5_frag_buf *buf, int node) { dma_addr_t t; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27200dea0297..59cff380f41a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -928,8 +928,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); -int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, - struct mlx5_frag_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); From 42ae1a5c76691928ed217c7e40269db27f5225e9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 4 Jan 2020 22:51:44 +0100 Subject: [PATCH 02/13] mlx5: work around high stack usage with gcc In some configurations, gcc tries too hard to optimize this code: drivers/net/ethernet/mellanox/mlx5/core/en_stats.c: In function 'mlx5e_grp_sw_update_stats': drivers/net/ethernet/mellanox/mlx5/core/en_stats.c:302:1: error: the frame size of 1336 bytes is larger than 1024 bytes [-Werror=frame-larger-than=] As was stated in the bug report, the reason is that gcc runs into a corner case in the register allocator that is rather hard to fix in a good way. As there is an easy way to work around it, just add a comment and the barrier that stops gcc from trying to overoptimize the function. 
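For readers unfamiliar with the trick: barrier() is the kernel's compiler barrier (on GCC it expands to an empty asm statement with a "memory" clobber), so placing it at the end of the loop body keeps GCC from carrying all the per-field accumulators across loop iterations and blowing up the stack frame. A minimal, self-contained sketch of the shape of the workaround; the demo_ structs and field names are invented, only the barrier() definition and the loop shape match the real code:

#include <stddef.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct demo_sq_stats { unsigned long packets, bytes, csum_none, stopped; };
struct demo_sw_stats { unsigned long tx_packets, tx_bytes, tx_csum_none, tx_stopped; };

void demo_update_sw_stats(struct demo_sw_stats *s,
                          const struct demo_sq_stats *sq, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++) {
                s->tx_packets   += sq[i].packets;
                s->tx_bytes     += sq[i].bytes;
                s->tx_csum_none += sq[i].csum_none;
                s->tx_stopped   += sq[i].stopped;
                /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
                barrier();
        }
}

The real mlx5e_grp_sw_update_stats() accumulates many more fields per software queue, which is what pushes the frame past the 1024-byte limit when GCC misbehaves.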
Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 Cc: Adhemerval Zanella Signed-off-by: Arnd Bergmann Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 9f09253f9f46..a05158472ed1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -297,6 +297,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; #endif s->tx_cqes += sq_stats->cqes; + + /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ + barrier(); } } } From 15fc92ec3ad4cfb34218e6b6c38c1355938fe49e Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Tue, 10 Dec 2019 22:49:42 +0800 Subject: [PATCH 03/13] net/mlx5e: Support accept action on nic table In one case, we may forward packets from one vport to others, but only one packets flow will be accepted, which destination ip was assign to VF. +-----+ +-----+ +-----+ | VFn | | VF1 | | VF0 | accept +--+--+ +--+--+ hairpin +--^--+ | | <--------------- | | | | +--+-----------v-+ +--+-------------+ | eswitch PF1 | | eswitch PF0 | +----------------+ +----------------+ tc filter add dev $PF0 protocol all parent ffff: prio 1 handle 1 \ flower skip_sw action mirred egress redirect dev $VF0_REP tc filter add dev $VF0 protocol ip parent ffff: prio 1 handle 1 \ flower skip_sw dst_ip $VF0_IP action pass tc filter add dev $VF0 protocol all parent ffff: prio 2 handle 2 \ flower skip_sw action mirred egress redirect dev $VF1 Signed-off-by: Tonghao Zhang Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9b32a9c0f497..e8f2d0e4913d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2842,6 +2842,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, flow_action_for_each(i, act, flow_action) { switch (act->id) { + case FLOW_ACTION_ACCEPT: + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + break; case FLOW_ACTION_DROP: action |= MLX5_FLOW_CONTEXT_ACTION_DROP; if (MLX5_CAP_FLOWTABLE(priv->mdev, From 57c7fce14b1ad512a42abe33cb721a2ea3520d4b Mon Sep 17 00:00:00 2001 From: Fan Li Date: Mon, 16 Dec 2019 14:46:15 +0200 Subject: [PATCH 04/13] net/mlx5: Increase the max number of channels to 128 Currently the max number of channels is limited to 64, which is half of the indirection table size to allow some flexibility. But on servers with more than 64 cores, users may want to utilize more queues. This patch increases the advertised max number of channels to 128 by changing the ratio between channels and indirection table slots to 1:1. At the same time, the driver still enable no more than 64 channels at loading. Users can change it by ethtool afterwards. 
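To make the new ratio concrete, the sizing after this patch can be written out as a short standalone snippet; the macros mirror the en.h hunk above, and the demo_ helper only illustrates the min_t() line in mlx5e_build_nic_params(), it is not driver code:

#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE     (1u << MLX5E_LOG_INDIR_RQT_SIZE)  /* 128 slots */
#define MLX5E_MAX_NUM_CHANNELS   MLX5E_INDIR_RQT_SIZE              /* was half of that (64) */

/* Default at probe: at most 64 channels, fewer if the device offers less. */
unsigned int demo_default_num_channels(unsigned int max_nch)
{
        unsigned int def = MLX5E_MAX_NUM_CHANNELS / 2;

        return def < max_nch ? def : max_nch;
}

Raising the count afterwards is the usual ethtool flow, e.g. "ethtool -l <dev>" to see the new 128 maximum and "ethtool -L <dev> combined <n>" to use it.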
Signed-off-by: Fan Li Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 +++++++----- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4 ++-- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 9c8427698238..fc80b59db9a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -135,7 +135,7 @@ struct page_pool; #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) #define MLX5E_MIN_NUM_CHANNELS 0x1 -#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) +#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_TX_XSK_POLL_BUDGET 64 @@ -1175,11 +1175,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv); void mlx5e_detach_netdev(struct mlx5e_priv *priv); void mlx5e_destroy_netdev(struct mlx5e_priv *priv); void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); -void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, +void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, struct mlx5e_rss_params *rss_params, struct mlx5e_params *params, - u16 max_channels, u16 mtu); + u16 mtu); void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 319b39f25592..78737fd42616 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4739,17 +4739,19 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, tirc_default_config[tt].rx_hash_fields; } -void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, +void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, struct mlx5e_rss_params *rss_params, struct mlx5e_params *params, - u16 max_channels, u16 mtu) + u16 mtu) { + struct mlx5_core_dev *mdev = priv->mdev; u8 rx_cq_period_mode; params->sw_mtu = mtu; params->hard_mtu = MLX5E_ETH_HARD_MTU; - params->num_channels = max_channels; + params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, + priv->max_nch); params->num_tc = 1; /* SQ */ @@ -4986,8 +4988,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) return err; - mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params, - priv->max_nch, netdev->mtu); + mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params, + netdev->mtu); mlx5e_timestamp_init(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 3ed8ab2d703d..7c87f523e370 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -87,8 +87,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, mlx5e_set_netdev_mtu_boundaries(priv); netdev->mtu = netdev->max_mtu; - mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params, - priv->max_nch, netdev->mtu); + mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params, + netdev->mtu); mlx5i_build_nic_params(mdev, &priv->channels.params); 
mlx5e_timestamp_init(priv); From 7396ae3d1cfe05be42e5f6b1883c9ed594cc42ba Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 5 Dec 2019 16:06:20 -0600 Subject: [PATCH 05/13] net/mlx5: Reduce No CQ found log level from warn to debug In below sequence, a EQE entry arrives for a CQ which is on the path of being destroyed. cpu-0 cpu-1 ------ ----- mlx5_core_destroy_cq() mlx5_eq_comp_int() mlx5_eq_del_cq() [..] radix_tree_delete() [..] [..] mlx5_eq_cq_get() /* Didn't find CQ is * a valid case. */ /* destroy CQ in hw */ mlx5_cmd_exec() This is still a valid scenario and correct delete CQ sequence, as mirror of the CQ create sequence. Hence, suppress the non harmful debug message from warn to debug level. Keep the debug log message rate limited because user application can trigger it repeatedly. Signed-off-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 580c71cb9dfa..2c716abc0f27 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -156,7 +156,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, cq->comp(cq, eqe); mlx5_cq_put(cq); } else { - mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); + dev_dbg_ratelimited(eq->dev->device, + "Completion event for bogus CQ 0x%x\n", cqn); } ++eq->cons_index; From 3ed879965cc4ea13fe0908468b653c4ff2cb1309 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 6 Dec 2019 15:13:41 -0600 Subject: [PATCH 06/13] net/mlx5: Use async EQ setup cleanup helpers for multiple EQs Use helper routines to setup and teardown multiple EQs and reuse the code in setup, cleanup and error unwinding flows. 
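The refactor follows the usual pattern of pairing a setup helper, which undoes its own partial work on failure, with a cleanup helper, so the caller's error ladder only has to unwind whole units. A stripped-down, compilable illustration of that shape; the demo_ names stand in for the EQ create/enable/disable/destroy calls, and the cmd-events/polling switch is left out:

#include <stdio.h>

struct demo_eq { int enabled; };

int demo_setup_eq(struct demo_eq *eq, const char *name)
{
        /* stands in for create_async_eq() + mlx5_eq_enable(); a failed
         * enable would destroy the EQ before returning the error
         */
        eq->enabled = 1;
        printf("%s EQ ready\n", name);
        return 0;
}

void demo_cleanup_eq(struct demo_eq *eq, const char *name)
{
        /* stands in for mlx5_eq_disable() + destroy_async_eq() */
        eq->enabled = 0;
        printf("%s EQ torn down\n", name);
}

int demo_create_async_eqs(struct demo_eq *cmd, struct demo_eq *async,
                          struct demo_eq *pages)
{
        int err;

        err = demo_setup_eq(cmd, "cmd");
        if (err)
                return err;
        err = demo_setup_eq(async, "async");
        if (err)
                goto err_cmd;
        err = demo_setup_eq(pages, "pages");
        if (err)
                goto err_async;
        return 0;

err_async:
        demo_cleanup_eq(async, "async");
err_cmd:
        demo_cleanup_eq(cmd, "cmd");
        return err;
}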
Signed-off-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 114 ++++++++----------- 1 file changed, 49 insertions(+), 65 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 2c716abc0f27..cccea3a8eddd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -564,6 +564,39 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4]) gather_user_async_events(dev, mask); } +static int +setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq, + struct mlx5_eq_param *param, const char *name) +{ + int err; + + eq->irq_nb.notifier_call = mlx5_eq_async_int; + + err = create_async_eq(dev, &eq->core, param); + if (err) { + mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err); + return err; + } + err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); + if (err) { + mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err); + destroy_async_eq(dev, &eq->core); + } + return err; +} + +static void cleanup_async_eq(struct mlx5_core_dev *dev, + struct mlx5_eq_async *eq, const char *name) +{ + int err; + + mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); + err = destroy_async_eq(dev, &eq->core); + if (err) + mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n", + name, err); +} + static int create_async_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = dev->priv.eq_table; @@ -573,77 +606,45 @@ static int create_async_eqs(struct mlx5_core_dev *dev) MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR); mlx5_eq_notifier_register(dev, &table->cq_err_nb); - table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int; param = (struct mlx5_eq_param) { .irq_index = 0, .nent = MLX5_NUM_CMD_EQE, + .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, }; - - param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD; - err = create_async_eq(dev, &table->cmd_eq.core, ¶m); - if (err) { - mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); - goto err0; - } - err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); - if (err) { - mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err); + err = setup_async_eq(dev, &table->cmd_eq, ¶m, "cmd"); + if (err) goto err1; - } + mlx5_cmd_use_events(dev); - table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int; param = (struct mlx5_eq_param) { .irq_index = 0, .nent = MLX5_NUM_ASYNC_EQE, }; gather_async_events_mask(dev, param.mask); - err = create_async_eq(dev, &table->async_eq.core, ¶m); - if (err) { - mlx5_core_warn(dev, "failed to create async EQ %d\n", err); + err = setup_async_eq(dev, &table->async_eq, ¶m, "async"); + if (err) goto err2; - } - err = mlx5_eq_enable(dev, &table->async_eq.core, - &table->async_eq.irq_nb); - if (err) { - mlx5_core_warn(dev, "failed to enable async EQ %d\n", err); - goto err3; - } - table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int; param = (struct mlx5_eq_param) { .irq_index = 0, .nent = /* TODO: sriov max_vf + */ 1, + .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST, }; - param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST; - err = create_async_eq(dev, &table->pages_eq.core, ¶m); - if (err) { - mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); - goto err4; - } - err = mlx5_eq_enable(dev, &table->pages_eq.core, - &table->pages_eq.irq_nb); - if (err) { - mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err); - goto err5; - } + err = setup_async_eq(dev, &table->pages_eq, ¶m, "pages"); + if (err) + goto 
err3; - return err; + return 0; -err5: - destroy_async_eq(dev, &table->pages_eq.core); -err4: - mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb); err3: - destroy_async_eq(dev, &table->async_eq.core); + cleanup_async_eq(dev, &table->async_eq, "async"); err2: mlx5_cmd_use_polling(dev); - mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); + cleanup_async_eq(dev, &table->cmd_eq, "cmd"); err1: - destroy_async_eq(dev, &table->cmd_eq.core); -err0: mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); return err; } @@ -651,28 +652,11 @@ err0: static void destroy_async_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = dev->priv.eq_table; - int err; - - mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb); - err = destroy_async_eq(dev, &table->pages_eq.core); - if (err) - mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n", - err); - - mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb); - err = destroy_async_eq(dev, &table->async_eq.core); - if (err) - mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n", - err); + cleanup_async_eq(dev, &table->pages_eq, "pages"); + cleanup_async_eq(dev, &table->async_eq, "async"); mlx5_cmd_use_polling(dev); - - mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); - err = destroy_async_eq(dev, &table->cmd_eq.core); - if (err) - mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n", - err); - + cleanup_async_eq(dev, &table->cmd_eq, "cmd"); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); } From cc78dbd7686acf0a9729a187fdfcb8d2d3a80671 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Tue, 24 Dec 2019 22:52:03 +0200 Subject: [PATCH 07/13] net/mlx5: DR, Use attributes struct for FW flow table creation Instead of using multiple variables use a simple struct. The number of passed argument was too high after adding the encap decap support bits arguments used for multiple destination reformat. 
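The underlying idiom is a parameter object: once a function takes half a dozen positional arguments, several of them booleans, a zero-initialized attribute struct with named fields is easier to read and can grow (decap_en, reformat_en) without touching every caller. A generic, compilable sketch of the before/after; all demo_ names are invented:

#include <stdbool.h>
#include <stdint.h>

/*
 * Before: demo_create_table(type, icm_rx, icm_tx, level, sw_owner, term_tbl),
 * where every new capability means yet another positional argument.
 * After: a zero-initialized attribute struct with named fields.
 */
struct demo_create_table_attr {
        uint32_t table_type;
        uint64_t icm_addr_rx;
        uint64_t icm_addr_tx;
        uint8_t  level;
        bool     sw_owner;
        bool     term_tbl;
        bool     decap_en;      /* new bits slot in without touching old callers */
        bool     reformat_en;
};

int demo_create_table(const struct demo_create_table_attr *attr)
{
        (void)attr;             /* real code builds the FW command from attr */
        return 0;
}

int demo_caller(void)
{
        struct demo_create_table_attr attr = {
                .level    = 1,
                .term_tbl = true,
        };

        return demo_create_table(&attr);
}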
Signed-off-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_cmd.c | 39 ++++++++++--------- .../mellanox/mlx5/core/steering/dr_fw.c | 12 ++++-- .../mellanox/mlx5/core/steering/dr_table.c | 16 ++++---- .../mellanox/mlx5/core/steering/dr_types.h | 18 ++++++--- 4 files changed, 50 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 41662c4e2664..ec35b297dcab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -320,12 +320,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev, } int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev, - u32 table_type, - u64 icm_addr_rx, - u64 icm_addr_tx, - u8 level, - bool sw_owner, - bool term_tbl, + struct mlx5dr_cmd_create_flow_table_attr *attr, u64 *fdb_rx_icm_addr, u32 *table_id) { @@ -335,37 +330,43 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev, int err; MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); - MLX5_SET(create_flow_table_in, in, table_type, table_type); + MLX5_SET(create_flow_table_in, in, table_type, attr->table_type); ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); - MLX5_SET(flow_table_context, ft_mdev, termination_table, term_tbl); - MLX5_SET(flow_table_context, ft_mdev, sw_owner, sw_owner); - MLX5_SET(flow_table_context, ft_mdev, level, level); + MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl); + MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner); + MLX5_SET(flow_table_context, ft_mdev, level, attr->level); - if (sw_owner) { + if (attr->sw_owner) { /* icm_addr_0 used for FDB RX / NIC TX / NIC_RX * icm_addr_1 used for FDB TX */ - if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) { + if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) { MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_0, icm_addr_rx); - } else if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) { + sw_owner_icm_root_0, attr->icm_addr_rx); + } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) { MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_0, icm_addr_tx); - } else if (table_type == MLX5_FLOW_TABLE_TYPE_FDB) { + sw_owner_icm_root_0, attr->icm_addr_tx); + } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) { MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_0, icm_addr_rx); + sw_owner_icm_root_0, attr->icm_addr_rx); MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_1, icm_addr_tx); + sw_owner_icm_root_1, attr->icm_addr_tx); } } + MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en, + attr->decap_en); + MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en, + attr->reformat_en); + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (err) return err; *table_id = MLX5_GET(create_flow_table_out, out, table_id); - if (!sw_owner && table_type == MLX5_FLOW_TABLE_TYPE_FDB) + if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB && + fdb_rx_icm_addr) *fdb_rx_icm_addr = (u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) | (u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c index 60ef6e6171e3..9d2180cb095f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c @@ -7,6 +7,7 @@ struct mlx5dr_fw_recalc_cs_ft * mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num) { + struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; u32 table_id, group_id, modify_hdr_id; u64 rx_icm_addr, modify_ttl_action; @@ -16,9 +17,14 @@ mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num) if (!recalc_cs_ft) return NULL; - ret = mlx5dr_cmd_create_flow_table(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, - 0, 0, dmn->info.caps.max_ft_level - 1, - false, true, &rx_icm_addr, &table_id); + ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB; + ft_attr.level = dmn->info.caps.max_ft_level - 1; + ft_attr.term_tbl = true; + + ret = mlx5dr_cmd_create_flow_table(dmn->mdev, + &ft_attr, + &rx_icm_addr, + &table_id); if (ret) { mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret); goto free_ttl_tbl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index e178d8d3dbc9..7a4e6a43bdea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -211,6 +211,7 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl) static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl) { + struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; u64 icm_addr_rx = 0; u64 icm_addr_tx = 0; int ret; @@ -221,13 +222,14 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl) if (tbl->tx.s_anchor) icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr; - ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, - tbl->table_type, - icm_addr_rx, - icm_addr_tx, - tbl->dmn->info.caps.max_ft_level - 1, - true, false, NULL, - &tbl->table_id); + ft_attr.table_type = tbl->table_type; + ft_attr.icm_addr_rx = icm_addr_rx; + ft_attr.icm_addr_tx = icm_addr_tx; + ft_attr.level = tbl->dmn->info.caps.max_ft_level - 1; + ft_attr.sw_owner = true; + + ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr, + NULL, &tbl->table_id); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 290fe61c33d0..bc82b76cf04e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -867,6 +867,17 @@ struct mlx5dr_cmd_query_flow_table_details { u64 sw_owner_icm_root_0; }; +struct mlx5dr_cmd_create_flow_table_attr { + u32 table_type; + u64 icm_addr_rx; + u64 icm_addr_tx; + u8 level; + bool sw_owner; + bool term_tbl; + bool decap_en; + bool reformat_en; +}; + /* internal API functions */ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, struct mlx5dr_cmd_caps *caps); @@ -904,12 +915,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev, u32 table_id, u32 group_id); int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev, - u32 table_type, - u64 icm_addr_rx, - u64 icm_addr_tx, - u8 level, - bool sw_owner, - bool term_tbl, + struct mlx5dr_cmd_create_flow_table_attr *attr, u64 *fdb_rx_icm_addr, u32 *table_id); int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev, From 6de03d2dcb0ec53e5862ae5844563bbbccde74bb Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 2 Jan 2020 15:54:35 +0200 Subject: [PATCH 08/13] net/mlx5: DR, Create FTE entry in the FW from SW-steering Implement the FW command to setup a FTE (Flow 
Table Entry) into the FW managed flow tables. Signed-off-by: Erez Shitrit Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_cmd.c | 205 ++++++++++++++++++ .../mellanox/mlx5/core/steering/dr_types.h | 37 ++++ 2 files changed, 242 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index ec35b297dcab..461b39376daf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -479,3 +479,208 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num, return 0; } + +static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev, + struct mlx5dr_cmd_fte_info *fte, + bool *extended_dest) +{ + int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink); + int num_fwd_destinations = 0; + int num_encap = 0; + int i; + + *extended_dest = false; + if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + return 0; + for (i = 0; i < fte->dests_size; i++) { + if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT && + fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) + num_encap++; + num_fwd_destinations++; + } + + if (num_fwd_destinations > 1 && num_encap > 0) + *extended_dest = true; + + if (*extended_dest && !fw_log_max_fdb_encap_uplink) { + mlx5_core_warn(dev, "FW does not support extended destination"); + return -EOPNOTSUPP; + } + if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) { + mlx5_core_warn(dev, "FW does not support more than %d encaps", + 1 << fw_log_max_fdb_encap_uplink); + return -EOPNOTSUPP; + } + + return 0; +} + +int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, + int opmod, int modify_mask, + struct mlx5dr_cmd_ft_info *ft, + u32 group_id, + struct mlx5dr_cmd_fte_info *fte) +{ + u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {}; + void *in_flow_context, *vlan; + bool extended_dest = false; + void *in_match_value; + unsigned int inlen; + int dst_cnt_size; + void *in_dests; + u32 *in; + int err; + int i; + + if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest)) + return -EOPNOTSUPP; + + if (!extended_dest) + dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct); + else + dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format); + + inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); + MLX5_SET(set_fte_in, in, op_mod, opmod); + MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask); + MLX5_SET(set_fte_in, in, table_type, ft->type); + MLX5_SET(set_fte_in, in, table_id, ft->id); + MLX5_SET(set_fte_in, in, flow_index, fte->index); + if (ft->vport) { + MLX5_SET(set_fte_in, in, vport_number, ft->vport); + MLX5_SET(set_fte_in, in, other_vport, 1); + } + + in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); + MLX5_SET(flow_context, in_flow_context, group_id, group_id); + + MLX5_SET(flow_context, in_flow_context, flow_tag, + fte->flow_context.flow_tag); + MLX5_SET(flow_context, in_flow_context, flow_source, + fte->flow_context.flow_source); + + MLX5_SET(flow_context, in_flow_context, extended_destination, + extended_dest); + if (extended_dest) { + u32 action; + + action = fte->action.action & + ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + MLX5_SET(flow_context, in_flow_context, action, action); + } else { + 
MLX5_SET(flow_context, in_flow_context, action, + fte->action.action); + if (fte->action.pkt_reformat) + MLX5_SET(flow_context, in_flow_context, packet_reformat_id, + fte->action.pkt_reformat->id); + } + if (fte->action.modify_hdr) + MLX5_SET(flow_context, in_flow_context, modify_header_id, + fte->action.modify_hdr->id); + + vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); + + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype); + MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid); + MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio); + + vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2); + + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype); + MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid); + MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio); + + in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, + match_value); + memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM); + + in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + int list_size = 0; + + for (i = 0; i < fte->dests_size; i++) { + unsigned int id, type = fte->dest_arr[i].type; + + if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + switch (type) { + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: + id = fte->dest_arr[i].ft_num; + type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + break; + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: + id = fte->dest_arr[i].ft_id; + break; + case MLX5_FLOW_DESTINATION_TYPE_VPORT: + id = fte->dest_arr[i].vport.num; + MLX5_SET(dest_format_struct, in_dests, + destination_eswitch_owner_vhca_id_valid, + !!(fte->dest_arr[i].vport.flags & + MLX5_FLOW_DEST_VPORT_VHCA_ID)); + MLX5_SET(dest_format_struct, in_dests, + destination_eswitch_owner_vhca_id, + fte->dest_arr[i].vport.vhca_id); + if (extended_dest && (fte->dest_arr[i].vport.flags & + MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) { + MLX5_SET(dest_format_struct, in_dests, + packet_reformat, + !!(fte->dest_arr[i].vport.flags & + MLX5_FLOW_DEST_VPORT_REFORMAT_ID)); + MLX5_SET(extended_dest_format, in_dests, + packet_reformat_id, + fte->dest_arr[i].vport.reformat_id); + } + break; + default: + id = fte->dest_arr[i].tir_num; + } + + MLX5_SET(dest_format_struct, in_dests, destination_type, + type); + MLX5_SET(dest_format_struct, in_dests, destination_id, id); + in_dests += dst_cnt_size; + list_size++; + } + + MLX5_SET(flow_context, in_flow_context, destination_list_size, + list_size); + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev, + log_max_flow_counter, + ft->type)); + int list_size = 0; + + for (i = 0; i < fte->dests_size; i++) { + if (fte->dest_arr[i].type != + MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + MLX5_SET(flow_counter_list, in_dests, flow_counter_id, + fte->dest_arr[i].counter_id); + in_dests += dst_cnt_size; + list_size++; + } + if (list_size > max_list_size) { + err = -EINVAL; + goto err_out; + } + + MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, + list_size); + } + + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); +err_out: + kvfree(in); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index bc82b76cf04e..09ac9aadad1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -1057,6 +1057,43 @@ 
int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn, int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, struct mlx5dr_action *action); +struct mlx5dr_cmd_ft_info { + u32 id; + u16 vport; + enum fs_flow_table_type type; +}; + +struct mlx5dr_cmd_flow_destination_hw_info { + enum mlx5_flow_destination_type type; + union { + u32 tir_num; + u32 ft_num; + u32 ft_id; + u32 counter_id; + struct { + u16 num; + u16 vhca_id; + u32 reformat_id; + u8 flags; + } vport; + }; +}; + +struct mlx5dr_cmd_fte_info { + u32 dests_size; + u32 index; + struct mlx5_flow_context flow_context; + u32 *val; + struct mlx5_flow_act action; + struct mlx5dr_cmd_flow_destination_hw_info *dest_arr; +}; + +int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, + int opmod, int modify_mask, + struct mlx5dr_cmd_ft_info *ft, + u32 group_id, + struct mlx5dr_cmd_fte_info *fte); + struct mlx5dr_fw_recalc_cs_ft { u64 rx_icm_addr; u32 table_id; From 34583beea4b741589793ce3ad5436b0a7c6d0cdb Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Tue, 24 Dec 2019 22:54:15 +0200 Subject: [PATCH 09/13] net/mlx5: DR, Create multi-destination table for SW-steering use Currently SW steering doesn't have the means to access HW iterators to support multi-destination (FTEs) flow table entries. In order to support multi-destination FTEs for port-mirroring, SW steering will create a dedicated multi-destination FW managed flow table and FTEs via direct FW commands that we introduced in the previous patch. Signed-off-by: Erez Shitrit Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_fw.c | 67 +++++++++++++++++++ .../mellanox/mlx5/core/steering/dr_types.h | 8 +++ 2 files changed, 75 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c index 9d2180cb095f..1fbcd012bb85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c @@ -97,3 +97,70 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn, kfree(recalc_cs_ft); } + +int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, + struct mlx5dr_cmd_flow_destination_hw_info *dest, + int num_dest, + bool reformat_req, + u32 *tbl_id, + u32 *group_id) +{ + struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; + struct mlx5dr_cmd_fte_info fte_info = {}; + u32 val[MLX5_ST_SZ_DW_MATCH_PARAM] = {}; + struct mlx5dr_cmd_ft_info ft_info = {}; + int ret; + + ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB; + ft_attr.level = dmn->info.caps.max_ft_level - 2; + ft_attr.reformat_en = reformat_req; + ft_attr.decap_en = reformat_req; + + ret = mlx5dr_cmd_create_flow_table(dmn->mdev, &ft_attr, NULL, tbl_id); + if (ret) { + mlx5dr_err(dmn, "Failed creating multi dest FW flow table %d\n", ret); + return ret; + } + + ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev, + MLX5_FLOW_TABLE_TYPE_FDB, + *tbl_id, group_id); + if (ret) { + mlx5dr_err(dmn, "Failed creating multi dest FW flow group %d\n", ret); + goto free_flow_table; + } + + ft_info.id = *tbl_id; + ft_info.type = FS_FT_FDB; + fte_info.action.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + fte_info.dests_size = num_dest; + fte_info.val = val; + fte_info.dest_arr = dest; + + ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info); + if (ret) { + mlx5dr_err(dmn, "Failed setting fte into table %d\n", ret); + goto free_flow_group; + } + + return 0; + +free_flow_group: + mlx5dr_cmd_destroy_flow_group(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, + *tbl_id, 
*group_id); +free_flow_table: + mlx5dr_cmd_destroy_flow_table(dmn->mdev, *tbl_id, + MLX5_FLOW_TABLE_TYPE_FDB); + return ret; +} + +void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, + u32 tbl_id, u32 group_id) +{ + mlx5dr_cmd_del_flow_table_entry(dmn->mdev, FS_FT_FDB, tbl_id); + mlx5dr_cmd_destroy_flow_group(dmn->mdev, + MLX5_FLOW_TABLE_TYPE_FDB, + tbl_id, group_id); + mlx5dr_cmd_destroy_flow_table(dmn->mdev, tbl_id, + MLX5_FLOW_TABLE_TYPE_FDB); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 09ac9aadad1a..c37226a14311 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -1108,4 +1108,12 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn, int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, u32 vport_num, u64 *rx_icm_addr); +int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, + struct mlx5dr_cmd_flow_destination_hw_info *dest, + int num_dest, + bool reformat_req, + u32 *tbl_id, + u32 *group_id); +void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id, + u32 group_id); #endif /* _DR_TYPES_H_ */ From 988fd6b32d07400bd66259603ac9e08c33c02a05 Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 2 Jan 2020 16:55:45 +0200 Subject: [PATCH 10/13] net/mlx5: DR, Pass table flags at creation to lower layer We need to have the flow-table flags when creation sw-steering tables, this parameter exists in the layer between fs_core to sw_steering, this patch gives it to the creation function. Signed-off-by: Erez Shitrit Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 7 ++++++- .../net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 4 ++-- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index 7a4e6a43bdea..14ce2d7dbb66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -211,6 +211,8 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl) static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl) { + bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT); + bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; u64 icm_addr_rx = 0; u64 icm_addr_tx = 0; @@ -227,6 +229,8 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl) ft_attr.icm_addr_tx = icm_addr_tx; ft_attr.level = tbl->dmn->info.caps.max_ft_level - 1; ft_attr.sw_owner = true; + ft_attr.decap_en = en_decap; + ft_attr.reformat_en = en_encap; ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr, NULL, &tbl->table_id); @@ -234,7 +238,7 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl) return ret; } -struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level) +struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags) { struct mlx5dr_table *tbl; int ret; @@ -247,6 +251,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level) tbl->dmn = dmn; tbl->level = level; + tbl->flags = flags; 
refcount_set(&tbl->refcount, 1); ret = dr_table_init(tbl); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index c37226a14311..de6bfa655326 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -679,6 +679,7 @@ struct mlx5dr_table { u32 level; u32 table_type; u32 table_id; + u32 flags; struct list_head matcher_list; struct mlx5dr_action *miss_action; refcount_t refcount; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 3d587d0bdbbe..8ed0f087b1e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -74,7 +74,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns, next_ft); tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, - ft->level); + ft->level, ft->flags); if (!tbl) { mlx5_core_err(ns->dev, "Failed creating dr flow_table\n"); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index adda9cbfba45..fb3ac697df1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -46,7 +46,7 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, struct mlx5dr_domain *peer_dmn); struct mlx5dr_table * -mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level); +mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags); int mlx5dr_table_destroy(struct mlx5dr_table *table); @@ -131,7 +131,7 @@ mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, struct mlx5dr_domain *peer_dmn) { } static inline struct mlx5dr_table * -mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level) { return NULL; } +mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags) { return NULL; } static inline int mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; } From aec292ee6f290920fc77907dfe1b94baa600484d Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Sun, 15 Dec 2019 21:15:15 +0200 Subject: [PATCH 11/13] net/mlx5: DR, Align dest FT action creation to API Function prefix was changed to be similar to other action APIs. In order to support other FW tables the mlx5_flow_table struct was replaced with table id and type. 
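Beyond the rename, note the lifetime change: the action now records only the FW table id and type plus the domain it belongs to, and it takes a reference on that domain at creation which is dropped in mlx5dr_action_destroy(), so the domain is not torn down while such an action still exists. The pairing in miniature, with invented demo_ types and a plain int instead of refcount_t:

struct demo_domain { int refcount; };

struct demo_action {
        struct demo_domain *dmn;
        unsigned int        table_id;   /* id + type instead of a table pointer */
        unsigned int        table_type;
};

void demo_action_init(struct demo_action *a, struct demo_domain *dmn,
                      unsigned int id, unsigned int type)
{
        a->dmn        = dmn;
        a->table_id   = id;
        a->table_type = type;
        dmn->refcount++;                /* refcount_inc(&dmn->refcount) in the patch */
}

void demo_action_fini(struct demo_action *a)
{
        a->dmn->refcount--;             /* refcount_dec() on destroy */
}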
Signed-off-by: Alex Vesker Reviewed-by: Erez Shitrit Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_action.c | 21 ++++++++++++------- .../mellanox/mlx5/core/steering/dr_types.h | 5 +++-- .../mellanox/mlx5/core/steering/fs_dr.c | 6 +++--- .../mellanox/mlx5/core/steering/mlx5dr.h | 8 +++---- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 004c56c2fc0c..3e2318761c8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -690,9 +690,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, /* get the relevant addresses */ if (!action->dest_tbl.fw_tbl.rx_icm_addr) { - ret = mlx5dr_cmd_query_flow_table(action->dest_tbl.fw_tbl.mdev, - action->dest_tbl.fw_tbl.ft->type, - action->dest_tbl.fw_tbl.ft->id, + ret = mlx5dr_cmd_query_flow_table(dmn->mdev, + action->dest_tbl.fw_tbl.type, + action->dest_tbl.fw_tbl.id, &output); if (!ret) { action->dest_tbl.fw_tbl.tx_icm_addr = @@ -982,8 +982,8 @@ dec_ref: } struct mlx5dr_action * -mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft, - struct mlx5_core_dev *mdev) +mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn, + struct mlx5_flow_table *ft) { struct mlx5dr_action *action; @@ -992,8 +992,11 @@ mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft, return NULL; action->dest_tbl.is_fw_tbl = 1; - action->dest_tbl.fw_tbl.ft = ft; - action->dest_tbl.fw_tbl.mdev = mdev; + action->dest_tbl.fw_tbl.type = ft->type; + action->dest_tbl.fw_tbl.id = ft->id; + action->dest_tbl.fw_tbl.dmn = dmn; + + refcount_inc(&dmn->refcount); return action; } @@ -1559,7 +1562,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) switch (action->action_type) { case DR_ACTION_TYP_FT: - if (!action->dest_tbl.is_fw_tbl) + if (action->dest_tbl.is_fw_tbl) + refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount); + else refcount_dec(&action->dest_tbl.tbl->refcount); break; case DR_ACTION_TYP_TNL_L2_TO_L2: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index de6bfa655326..27f1d931bf9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -743,10 +743,11 @@ struct mlx5dr_action { union { struct mlx5dr_table *tbl; struct { - struct mlx5_flow_table *ft; + struct mlx5dr_domain *dmn; + u32 id; + enum fs_flow_table_type type; u64 rx_icm_addr; u64 tx_icm_addr; - struct mlx5_core_dev *mdev; } fw_tbl; }; } dest_tbl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 8ed0f087b1e0..e51262ec77bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -184,13 +184,13 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain, dest_attr->vport.vhca_id); } -static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev, +static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain, struct mlx5_flow_rule *dst) { struct mlx5_flow_table *dest_ft = dst->dest_attr.ft; if (mlx5_dr_is_fw_table(dest_ft->flags)) - return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev); + return 
mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft); return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table); } @@ -373,7 +373,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: - tmp_action = create_ft_action(dev, dst); + tmp_action = create_ft_action(domain, dst); if (!tmp_action) { err = -ENOMEM; goto free_actions; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index fb3ac697df1b..932362d89c66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -75,8 +75,8 @@ struct mlx5dr_action * mlx5dr_action_create_dest_table(struct mlx5dr_table *table); struct mlx5dr_action * -mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft, - struct mlx5_core_dev *mdev); +mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain, + struct mlx5_flow_table *ft); struct mlx5dr_action * mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, @@ -165,8 +165,8 @@ static inline struct mlx5dr_action * mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; } static inline struct mlx5dr_action * -mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft, - struct mlx5_core_dev *mdev) { return NULL; } +mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain, + struct mlx5_flow_table *ft) { return NULL; } static inline struct mlx5dr_action * mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, From b8853c969f406bfb682740b7646ee8b4623ee955 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Sun, 15 Dec 2019 21:27:54 +0200 Subject: [PATCH 12/13] net/mlx5: DR, Add support for multiple destination table action A multiple destination table action allows HW packet duplication to multiple destinations, this is useful for multicast or mirroring traffic for debug. Duplicating is done using a FW flow table with multiple destinations. The new action creation function, mlx5dr_action_create_mult_dest_tbl will allow creating a single table to iterate over several dr actions. 
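Assuming a caller that has already created its domain and the individual destination and reformat actions, the new API is meant to be used roughly as in the fragment below; it only uses identifiers introduced by this series (struct mlx5dr_action_dest, mlx5dr_action_create_mult_dest_tbl, mlx5dr_action_destroy) and is an illustration, not code from the tree. Here dmn, vport0/vport1 and encap0/encap1 are assumed to exist already:

        /* Mirror a flow to two vports, encapsulating differently for each. */
        struct mlx5dr_action_dest dests[2] = {
                { .dest = vport0, .reformat = encap0 },
                { .dest = vport1, .reformat = encap1 },
        };
        struct mlx5dr_action *multi;

        multi = mlx5dr_action_create_mult_dest_tbl(dmn, dests, 2);
        if (!multi)
                return -EOPNOTSUPP;

        /* "multi" is then passed to mlx5dr_rule_create() as the single
         * forwarding action; when the rule goes away:
         */
        mlx5dr_action_destroy(multi);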
Signed-off-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_action.c | 114 ++++++++++++++++++ .../mellanox/mlx5/core/steering/dr_types.h | 3 + .../mellanox/mlx5/core/steering/mlx5dr.h | 15 +++ 3 files changed, 132 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 3e2318761c8c..9359eed10889 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -981,6 +981,104 @@ dec_ref: return NULL; } +struct mlx5dr_action * +mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, + struct mlx5dr_action_dest *dests, + u32 num_of_dests) +{ + struct mlx5dr_cmd_flow_destination_hw_info *hw_dests; + struct mlx5dr_action **ref_actions; + struct mlx5dr_action *action; + bool reformat_req = false; + u32 num_of_ref = 0; + int ret; + int i; + + if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) { + mlx5dr_err(dmn, "Multiple destination support is for FDB only\n"); + return NULL; + } + + hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL); + if (!hw_dests) + return NULL; + + ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL); + if (!ref_actions) + goto free_hw_dests; + + for (i = 0; i < num_of_dests; i++) { + struct mlx5dr_action *reformat_action = dests[i].reformat; + struct mlx5dr_action *dest_action = dests[i].dest; + + ref_actions[num_of_ref++] = dest_action; + + switch (dest_action->action_type) { + case DR_ACTION_TYP_VPORT: + hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; + hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + hw_dests[i].vport.num = dest_action->vport.caps->num; + hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi; + if (reformat_action) { + reformat_req = true; + hw_dests[i].vport.reformat_id = + reformat_action->reformat.reformat_id; + ref_actions[num_of_ref++] = reformat_action; + hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; + } + break; + + case DR_ACTION_TYP_FT: + hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + if (dest_action->dest_tbl.is_fw_tbl) + hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id; + else + hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id; + break; + + default: + mlx5dr_dbg(dmn, "Invalid multiple destinations action\n"); + goto free_ref_actions; + } + } + + action = dr_action_create_generic(DR_ACTION_TYP_FT); + if (!action) + goto free_ref_actions; + + ret = mlx5dr_fw_create_md_tbl(dmn, + hw_dests, + num_of_dests, + reformat_req, + &action->dest_tbl.fw_tbl.id, + &action->dest_tbl.fw_tbl.group_id); + if (ret) + goto free_action; + + refcount_inc(&dmn->refcount); + + for (i = 0; i < num_of_ref; i++) + refcount_inc(&ref_actions[i]->refcount); + + action->dest_tbl.is_fw_tbl = true; + action->dest_tbl.fw_tbl.dmn = dmn; + action->dest_tbl.fw_tbl.type = FS_FT_FDB; + action->dest_tbl.fw_tbl.ref_actions = ref_actions; + action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref; + + kfree(hw_dests); + + return action; + +free_action: + kfree(action); +free_ref_actions: + kfree(ref_actions); +free_hw_dests: + kfree(hw_dests); + return NULL; +} + struct mlx5dr_action * mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn, struct mlx5_flow_table *ft) @@ -1566,6 +1664,22 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount); else refcount_dec(&action->dest_tbl.tbl->refcount); + + if 
(action->dest_tbl.is_fw_tbl && + action->dest_tbl.fw_tbl.num_of_ref_actions) { + struct mlx5dr_action **ref_actions; + int i; + + ref_actions = action->dest_tbl.fw_tbl.ref_actions; + for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++) + refcount_dec(&ref_actions[i]->refcount); + + kfree(ref_actions); + + mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn, + action->dest_tbl.fw_tbl.id, + action->dest_tbl.fw_tbl.group_id); + } break; case DR_ACTION_TYP_TNL_L2_TO_L2: refcount_dec(&action->reformat.dmn->refcount); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 27f1d931bf9f..0fc52d634e10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -745,9 +745,12 @@ struct mlx5dr_action { struct { struct mlx5dr_domain *dmn; u32 id; + u32 group_id; enum fs_flow_table_type type; u64 rx_icm_addr; u64 tx_icm_addr; + struct mlx5dr_action **ref_actions; + u32 num_of_ref_actions; } fw_tbl; }; } dest_tbl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 932362d89c66..e1edc9c247b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -33,6 +33,11 @@ struct mlx5dr_match_parameters { u64 *match_buf; /* Device spec format */ }; +struct mlx5dr_action_dest { + struct mlx5dr_action *dest; + struct mlx5dr_action *reformat; +}; + #ifdef CONFIG_MLX5_SW_STEERING struct mlx5dr_domain * @@ -83,6 +88,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, u32 vport, u8 vhca_id_valid, u16 vhca_id); +struct mlx5dr_action * +mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, + struct mlx5dr_action_dest *dests, + u32 num_of_dests); + struct mlx5dr_action *mlx5dr_action_create_drop(void); struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value); @@ -173,6 +183,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, u32 vport, u8 vhca_id_valid, u16 vhca_id) { return NULL; } +static inline struct mlx5dr_action * +mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, + struct mlx5dr_action_dest *dests, + u32 num_of_dests) { return NULL; } + static inline struct mlx5dr_action * mlx5dr_action_create_drop(void) { return NULL; } From 7ee3f6d2486e25d96d2309da4d53fe10a58e2b63 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Sun, 15 Dec 2019 21:50:32 +0200 Subject: [PATCH 13/13] net/mlx5: DR, Create multiple destination action from dr_create_fte Until now it was possible to pass a packet to a single destination such as vport or flow table. With the new support if multiple vports or multiple tables are provided as destinations, fs_dr will create a multiple destination table action, this action should replace other destination actions provided to mlx5dr_create_rule. Each vport destination can be provided with a reformat actions which will be done before forwarding the packet to the vport. 
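The terminating-action handling in mlx5_cmd_dr_create_fte() then reduces to: gather every (destination, optional per-vport reformat) pair, and at the end either append the single pair directly or fold all pairs into one multi-destination table action. A compact model of that branch with invented demo_ types (the real code additionally clears the FTE's packet-reformat bit when a vport destination carries its own reformat, as shown in the hunk below):

struct demo_pair { void *dest, *reformat; };

/* Returns how many entries were appended to "actions", or -1 on failure. */
int demo_emit_term_actions(void **actions, struct demo_pair *pairs, int n,
                           void *(*create_multi)(struct demo_pair *, int))
{
        int num = 0;

        if (n == 1) {
                if (pairs[0].reformat)
                        actions[num++] = pairs[0].reformat;
                actions[num++] = pairs[0].dest;
        } else if (n > 1) {
                void *multi = create_multi(pairs, n);

                if (!multi)
                        return -1;      /* fs_dr maps this to -EOPNOTSUPP */
                actions[num++] = multi;
        }
        return num;
}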
Signed-off-by: Alex Vesker Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/fs_dr.c | 86 +++++++++++++++---- 1 file changed, 71 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index e51262ec77bb..b43275cde8bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -206,6 +206,12 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr)); } +static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst) +{ + return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT && + dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; +} + #define MLX5_FLOW_CONTEXT_ACTION_MAX 20 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, @@ -213,7 +219,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, struct fs_fte *fte) { struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain; - struct mlx5dr_action *term_action = NULL; + struct mlx5dr_action_dest *term_actions; struct mlx5dr_match_parameters params; struct mlx5_core_dev *dev = ns->dev; struct mlx5dr_action **fs_dr_actions; @@ -223,6 +229,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, struct mlx5dr_rule *rule; struct mlx5_flow_rule *dst; int fs_dr_num_actions = 0; + int num_term_actions = 0; int num_actions = 0; size_t match_sz; int err = 0; @@ -233,18 +240,38 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions), GFP_KERNEL); - if (!actions) - return -ENOMEM; + if (!actions) { + err = -ENOMEM; + goto out_err; + } fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*fs_dr_actions), GFP_KERNEL); if (!fs_dr_actions) { - kfree(actions); - return -ENOMEM; + err = -ENOMEM; + goto free_actions_alloc; + } + + term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, + sizeof(*term_actions), GFP_KERNEL); + if (!term_actions) { + err = -ENOMEM; + goto free_fs_dr_actions_alloc; } match_sz = sizeof(fte->val); + /* Drop reformat action bit if destination vport set with reformat */ + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + list_for_each_entry(dst, &fte->node.children, node.list) { + if (!contain_vport_reformat_action(dst)) + continue; + + fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + break; + } + } + /* The order of the actions are must to be keep, only the following * order is supported by SW steering: * TX: push vlan -> modify header -> encap @@ -335,7 +362,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, goto free_actions; } fs_dr_actions[fs_dr_num_actions++] = tmp_action; - term_action = tmp_action; + term_actions[num_term_actions++].dest = tmp_action; } if (fte->flow_context.flow_tag) { @@ -354,7 +381,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, enum mlx5_flow_destination_type type = dst->dest_attr.type; u32 id; - if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) { err = -ENOSPC; goto free_actions; } @@ -379,7 +407,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, goto free_actions; } fs_dr_actions[fs_dr_num_actions++] = tmp_action; - 
term_action = tmp_action; + term_actions[num_term_actions++].dest = tmp_action; break; case MLX5_FLOW_DESTINATION_TYPE_VPORT: tmp_action = create_vport_action(domain, dst); @@ -388,7 +416,14 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, goto free_actions; } fs_dr_actions[fs_dr_num_actions++] = tmp_action; - term_action = tmp_action; + term_actions[num_term_actions].dest = tmp_action; + + if (dst->dest_attr.vport.flags & + MLX5_FLOW_DEST_VPORT_REFORMAT_ID) + term_actions[num_term_actions].reformat = + dst->dest_attr.vport.pkt_reformat->action.dr_action; + + num_term_actions++; break; default: err = -EOPNOTSUPP; @@ -399,9 +434,22 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, params.match_sz = match_sz; params.match_buf = (u64 *)fte->val; + if (num_term_actions == 1) { + if (term_actions->reformat) + actions[num_actions++] = term_actions->reformat; - if (term_action) - actions[num_actions++] = term_action; + actions[num_actions++] = term_actions->dest; + } else if (num_term_actions > 1) { + tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, + term_actions, + num_term_actions); + if (!tmp_action) { + err = -EOPNOTSUPP; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher, ¶ms, @@ -412,7 +460,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, goto free_actions; } + kfree(term_actions); kfree(actions); + fte->fs_dr_rule.dr_rule = rule; fte->fs_dr_rule.num_actions = fs_dr_num_actions; fte->fs_dr_rule.dr_actions = fs_dr_actions; @@ -420,13 +470,18 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, return 0; free_actions: - for (i = 0; i < fs_dr_num_actions; i++) + /* Free in reverse order to handle action dependencies */ + for (i = fs_dr_num_actions - 1; i >= 0; i--) if (!IS_ERR_OR_NULL(fs_dr_actions[i])) mlx5dr_action_destroy(fs_dr_actions[i]); - mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err); - kfree(actions); + kfree(term_actions); +free_fs_dr_actions_alloc: kfree(fs_dr_actions); +free_actions_alloc: + kfree(actions); +out_err: + mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err); return err; } @@ -533,7 +588,8 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, if (err) return err; - for (i = 0; i < rule->num_actions; i++) + /* Free in reverse order to handle action dependencies */ + for (i = rule->num_actions - 1; i >= 0; i--) if (!IS_ERR_OR_NULL(rule->dr_actions[i])) mlx5dr_action_destroy(rule->dr_actions[i]);
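
One detail worth spelling out: both free loops above now walk the action array backwards because a later action can still hold references on earlier ones (the multi-destination table action from the previous patch references each vport and reformat action it wraps), so dependants must be destroyed before their dependencies. The shape of it, with an invented demo_act type:

struct demo_act { int in_use; };

void demo_act_destroy(struct demo_act *a)
{
        a->in_use = 0;          /* release whatever the action holds */
}

void demo_destroy_actions(struct demo_act **acts, int n)
{
        int i;

        /* last created, first destroyed */
        for (i = n - 1; i >= 0; i--)
                if (acts[i])
                        demo_act_destroy(acts[i]);
}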